diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml
index 6544db61..fc821194 100644
--- a/.github/workflows/codeql-analysis.yml
+++ b/.github/workflows/codeql-analysis.yml
@@ -29,12 +29,12 @@ jobs:
     # Initializes the CodeQL tools for scanning.
     -
       name: Initialize CodeQL
-      uses: github/codeql-action/init@v2
+      uses: github/codeql-action/init@v3
       with:
         languages: 'python'
     -
       name: Autobuild
-      uses: github/codeql-action/autobuild@v2
+      uses: github/codeql-action/autobuild@v3
     -
       name: Perform CodeQL Analysis
-      uses: github/codeql-action/analyze@v2
+      uses: github/codeql-action/analyze@v3
diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml
index d9de09d2..095d7358 100644
--- a/.github/workflows/stale.yml
+++ b/.github/workflows/stale.yml
@@ -17,20 +17,23 @@ jobs:
       issues: write
 
     steps:
-      - uses: actions/stale@v8.0.0
+      - uses: actions/stale@v9.0.0
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
           days-before-stale: 30
           days-before-close: 5
           stale-issue-message: 'This issue is stale because it has been open 30 days with no activity. Please comment or update this issue or it will be closed in 5 days.'
-          stale-issue-label: $stale_label
+          stale-issue-label: '${{ env.stale_label }}'
           exempt-issue-labels: 'Internal, Fixed in next release, Bug: Confirmed, Documentation Needed'
           exempt-all-issue-assignees: true
           operations-per-run: 300
           close-issue-reason: 'not_planned'
 
-  remove_stale: # trigger "stale" removal immediately when stale issues are commented on
-    if: github.event_name == 'issue_comment'
+  remove_stale:
+    # trigger "stale" removal immediately when stale issues are commented on
+    # we need to explicitly check that the trigger does not run on a comment on a PR, see
+    # https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#issue_comment-on-issues-only-or-pull-requests-only
+    if: ${{ !github.event.issue.pull_request && github.event_name != 'schedule' }}
     permissions:
       contents: read #  for actions/checkout
       issues: write #  to edit issues label
@@ -39,7 +42,7 @@ jobs:
       - name: Checkout
         uses: actions/checkout@v4.1.1
       - name: Remove 'stale' label
-        run: gh issue edit ${{ github.event.issue.number }} --remove-label $stale_label
+        run: gh issue edit ${{ github.event.issue.number }} --remove-label ${{ env.stale_label }}
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
 
diff --git a/.github/workflows/stale_pr.yml b/.github/workflows/stale_pr.yml
index 2db2a25d..96650818 100644
--- a/.github/workflows/stale_pr.yml
+++ b/.github/workflows/stale_pr.yml
@@ -17,7 +17,7 @@ jobs:
       pull-requests: write
 
     steps:
-      - uses: actions/stale@v8.0.0
+      - uses: actions/stale@v9.0.0
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
           # Do not automatically mark PR/issue as stale
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 8166d253..2a2b50dc 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -64,9 +64,8 @@ jobs:
             ubuntu_23,
             centos_8,
             centos_9,
-            fedora_36,
-            fedora_37,
-            fedora_38,
+            fedora_38,
+            fedora_39,
           ]
     env:
       DISTRO: ${{matrix.distro}}
@@ -75,7 +74,7 @@ jobs:
         uses: actions/checkout@v4.1.1
 
       - name: Set up Python 3.10
-        uses: actions/setup-python@v4.7.1
+        uses: actions/setup-python@v5.0.0
         with:
           python-version: "3.10"
 
diff --git a/advanced/Scripts/api.sh b/advanced/Scripts/api.sh
index 449f146f..18a48ce7 100755
--- a/advanced/Scripts/api.sh
+++ b/advanced/Scripts/api.sh
@@ -21,20 +21,60 @@
 TestAPIAvailability() {
 
     # as we are running locally, we can get the port value from FTL directly
-    PORT="$(pihole-FTL --config webserver.port)"
-    PORT="${PORT%%,*}"
+    local chaos_api_list availabilityResonse
 
-    availabilityResonse=$(curl -s -o /dev/null -w "%{http_code}" "http://localhost:${PORT}/api/auth")
+    # Query the API URLs from FTL using CHAOS TXT local.api.ftl
+    # The result is a space-separated enumeration of full URLs
+    # e.g., "http://localhost:80/api/" "https://localhost:443/api/"
+    chaos_api_list="$(dig +short chaos txt local.api.ftl @127.0.0.1)"
 
-    # test if http status code was 200 (OK) or 401 (authentication required)
-    if [ ! "${availabilityResonse}" = 200 ] && [ ! "${availabilityResonse}" = 401 ]; then
-        echo "API not available at: http://localhost:${PORT}/api"
+    # If the query was not successful, the variable is empty
+    if [ -z "${chaos_api_list}" ]; then
+        echo "API not available. Please check connectivity"
+        exit 1
+    fi
+
+    # Iterate over space-separated list of URLs
+    while [ -n "${chaos_api_list}" ]; do
+        # Get the first URL
+        API_URL="${chaos_api_list%% *}"
+        # Strip leading and trailing quotes
+        API_URL="${API_URL%\"}"
+        API_URL="${API_URL#\"}"
+
+        # Test if the API is available at this URL
+        availabilityResonse=$(curl -skS -o /dev/null -w "%{http_code}" "${API_URL}auth")
+
+        # Test if http status code was 200 (OK) or 401 (authentication required)
+        if [ ! "${availabilityResonse}" = 200 ] && [ ! "${availabilityResonse}" = 401 ]; then
+            # API is not available at this port/protocol combination
+            API_PORT=""
+        else
+            # API is available at this URL
+            break
+        fi
+
+        # Remove the first URL from the list
+        local last_api_list
+        last_api_list="${chaos_api_list}"
+        chaos_api_list="${chaos_api_list#* }"
+
+        # If the list did not change, we are at the last element
+        if [ "${last_api_list}" = "${chaos_api_list}" ]; then
+            # Remove the last element
+            chaos_api_list=""
+        fi
+    done
+
+    # if API_PORT is empty, no working API port was found
+    if [ -n "${API_PORT}" ]; then
+        echo "API not available at: ${API_URL}"
         echo "Exiting."
         exit 1
     fi
 }
 
-Authenthication() {
+Authentication() {
     # Try to authenticate
     LoginAPI
 
@@ -54,28 +94,27 @@ Authenthication() {
 }
 
 LoginAPI() {
-	sessionResponse="$(curl --silent -X POST "http://localhost:${PORT}/api/auth" --user-agent "Pi-hole cli " --data "{\"password\":\"${password}\"}" )"
+  sessionResponse="$(curl -skS -X POST "${API_URL}auth" --user-agent "Pi-hole cli " --data "{\"password\":\"${password}\"}" )"
 
   if [ -z "${sessionResponse}" ]; then
     echo "No response from FTL server. Please check connectivity"
     exit 1
   fi
-	# obtain validity and session ID from session response
-	validSession=$(echo "${sessionResponse}"| jq .session.valid 2>/dev/null)
-	SID=$(echo "${sessionResponse}"| jq --raw-output .session.sid 2>/dev/null)
+  # obtain validity and session ID from session response
+  validSession=$(echo "${sessionResponse}"| jq .session.valid 2>/dev/null)
+  SID=$(echo "${sessionResponse}"| jq --raw-output .session.sid 2>/dev/null)
 }
 
 DeleteSession() {
-    # if a valid Session exists (no password required or successful authenthication) and
-    # SID is not null (successful authenthication only), delete the session
+    # if a valid session exists (no password required or successful authentication) and
+    # SID is not null (successful authentication only), delete the session
     if [ "${validSession}" = true ] && [ ! "${SID}" = null ]; then
         # Try to delete the session. Omit the output, but get the http status code
-        deleteResponse=$(curl -s -o /dev/null -w "%{http_code}" -X DELETE "http://localhost:${PORT}/api/auth"  -H "Accept: application/json" -H "sid: ${SID}")
+        deleteResponse=$(curl -skS -o /dev/null -w "%{http_code}" -X DELETE "${API_URL}auth"  -H "Accept: application/json" -H "sid: ${SID}")
 
         case "${deleteResponse}" in
-            "200") printf "%b" "A session that was not created cannot be deleted (e.g., empty API password).\n";;
+            "204") printf "%b" "Session successfully deleted.\n";;
             "401") printf "%b" "Logout attempt without a valid session. Unauthorized!\n";;
-            "410") printf "%b" "Session successfully deleted.\n";;
          esac;
     fi
 
@@ -84,7 +123,7 @@ DeleteSession() {
 GetFTLData() {
   local data response status
   # get the data from querying the API as well as the http status code
-  response=$(curl -s -w "%{http_code}" -X GET "http://localhost:${PORT}/api$1" -H "Accept: application/json" -H "sid: ${SID}" )
+  response=$(curl -skS -w "%{http_code}" -X GET "${API_URL}$1" -H "Accept: application/json" -H "sid: ${SID}" )
 
   # status are the last 3 characters
   status=$(printf %s "${response#"${response%???}"}")
@@ -93,7 +132,7 @@ GetFTLData() {
 
   if [ "${status}" = 200 ]; then
     # response OK
-    echo "${data}"
+    printf %s "${data}"
   elif [ "${status}" = 000 ]; then
     # connection lost
     echo "000"
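
For reference, the CHAOS TXT record that the reworked TestAPIAvailability() parses can be reproduced by hand with the same query the script runs; the answer is a space-separated list of quoted URLs whose ports and protocols depend on the local webserver.port setting, so the values in the script's comment ("http://localhost:80/api/" "https://localhost:443/api/") are only illustrative:

    dig +short chaos txt local.api.ftl @127.0.0.1
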
diff --git a/advanced/Scripts/database_migration/gravity-db.sh b/advanced/Scripts/database_migration/gravity-db.sh
index e36d9b1e..e99f1df2 100755
--- a/advanced/Scripts/database_migration/gravity-db.sh
+++ b/advanced/Scripts/database_migration/gravity-db.sh
@@ -18,14 +18,19 @@ upgrade_gravityDB(){
 	piholeDir="${2}"
 	auditFile="${piholeDir}/auditlog.list"
 
+	# Exit early if the database does not exist (e.g. in CI tests)
+	if [[ ! -f "${database}" ]]; then
+		return
+	fi
+
 	# Get database version
-	version="$(pihole-FTL sqlite3 "${database}" "SELECT \"value\" FROM \"info\" WHERE \"property\" = 'version';")"
+	version="$(pihole-FTL sqlite3 -ni "${database}" "SELECT \"value\" FROM \"info\" WHERE \"property\" = 'version';")"
 
 	if [[ "$version" == "1" ]]; then
 		# This migration script upgrades the gravity.db file by
 		# adding the domain_audit table
 		echo -e "  ${INFO} Upgrading gravity database from version 1 to 2"
-		pihole-FTL sqlite3 "${database}" < "${scriptPath}/1_to_2.sql"
+		pihole-FTL sqlite3 -ni "${database}" < "${scriptPath}/1_to_2.sql"
 		version=2
 
 		# Store audit domains in database table
@@ -40,28 +45,28 @@ upgrade_gravityDB(){
 		# renaming the regex table to regex_blacklist, and
 		# creating a new regex_whitelist table + corresponding linking table and views
 		echo -e "  ${INFO} Upgrading gravity database from version 2 to 3"
-		pihole-FTL sqlite3 "${database}" < "${scriptPath}/2_to_3.sql"
+		pihole-FTL sqlite3 -ni "${database}" < "${scriptPath}/2_to_3.sql"
 		version=3
 	fi
 	if [[ "$version" == "3" ]]; then
 		# This migration script unifies the formally separated domain
 		# lists into a single table with a UNIQUE domain constraint
 		echo -e "  ${INFO} Upgrading gravity database from version 3 to 4"
-		pihole-FTL sqlite3 "${database}" < "${scriptPath}/3_to_4.sql"
+		pihole-FTL sqlite3 -ni "${database}" < "${scriptPath}/3_to_4.sql"
 		version=4
 	fi
 	if [[ "$version" == "4" ]]; then
 		# This migration script upgrades the gravity and list views
 		# implementing necessary changes for per-client blocking
 		echo -e "  ${INFO} Upgrading gravity database from version 4 to 5"
-		pihole-FTL sqlite3 "${database}" < "${scriptPath}/4_to_5.sql"
+		pihole-FTL sqlite3 -ni "${database}" < "${scriptPath}/4_to_5.sql"
 		version=5
 	fi
 	if [[ "$version" == "5" ]]; then
 		# This migration script upgrades the adlist view
 		# to return an ID used in gravity.sh
 		echo -e "  ${INFO} Upgrading gravity database from version 5 to 6"
-		pihole-FTL sqlite3 "${database}" < "${scriptPath}/5_to_6.sql"
+		pihole-FTL sqlite3 -ni "${database}" < "${scriptPath}/5_to_6.sql"
 		version=6
 	fi
 	if [[ "$version" == "6" ]]; then
@@ -69,7 +74,7 @@ upgrade_gravityDB(){
 		# which is automatically associated to all clients not
 		# having their own group assignments
 		echo -e "  ${INFO} Upgrading gravity database from version 6 to 7"
-		pihole-FTL sqlite3 "${database}" < "${scriptPath}/6_to_7.sql"
+		pihole-FTL sqlite3 -ni "${database}" < "${scriptPath}/6_to_7.sql"
 		version=7
 	fi
 	if [[ "$version" == "7" ]]; then
@@ -77,21 +82,21 @@ upgrade_gravityDB(){
 		# to ensure uniqueness on the group name
 		# We also add date_added and date_modified columns
 		echo -e "  ${INFO} Upgrading gravity database from version 7 to 8"
-		pihole-FTL sqlite3 "${database}" < "${scriptPath}/7_to_8.sql"
+		pihole-FTL sqlite3 -ni "${database}" < "${scriptPath}/7_to_8.sql"
 		version=8
 	fi
 	if [[ "$version" == "8" ]]; then
 		# This migration fixes some issues that were introduced
 		# in the previous migration script.
 		echo -e "  ${INFO} Upgrading gravity database from version 8 to 9"
-		pihole-FTL sqlite3 "${database}" < "${scriptPath}/8_to_9.sql"
+		pihole-FTL sqlite3 -ni "${database}" < "${scriptPath}/8_to_9.sql"
 		version=9
 	fi
 	if [[ "$version" == "9" ]]; then
 		# This migration drops unused tables and creates triggers to remove
 		# obsolete groups assignments when the linked items are deleted
 		echo -e "  ${INFO} Upgrading gravity database from version 9 to 10"
-		pihole-FTL sqlite3 "${database}" < "${scriptPath}/9_to_10.sql"
+		pihole-FTL sqlite3 -ni "${database}" < "${scriptPath}/9_to_10.sql"
 		version=10
 	fi
 	if [[ "$version" == "10" ]]; then
@@ -101,44 +106,57 @@ upgrade_gravityDB(){
 		# to keep the copying process generic (needs the same columns in both the
 		# source and the destination databases).
 		echo -e "  ${INFO} Upgrading gravity database from version 10 to 11"
-		pihole-FTL sqlite3 "${database}" < "${scriptPath}/10_to_11.sql"
+		pihole-FTL sqlite3 -ni "${database}" < "${scriptPath}/10_to_11.sql"
 		version=11
 	fi
 	if [[ "$version" == "11" ]]; then
 		# Rename group 0 from "Unassociated" to "Default"
 		echo -e "  ${INFO} Upgrading gravity database from version 11 to 12"
-		pihole-FTL sqlite3 "${database}" < "${scriptPath}/11_to_12.sql"
+		pihole-FTL sqlite3 -ni "${database}" < "${scriptPath}/11_to_12.sql"
 		version=12
 	fi
 	if [[ "$version" == "12" ]]; then
 		# Add column date_updated to adlist table
 		echo -e "  ${INFO} Upgrading gravity database from version 12 to 13"
-		pihole-FTL sqlite3 "${database}" < "${scriptPath}/12_to_13.sql"
+		pihole-FTL sqlite3 -ni "${database}" < "${scriptPath}/12_to_13.sql"
 		version=13
 	fi
 	if [[ "$version" == "13" ]]; then
 		# Add columns number and status to adlist table
 		echo -e "  ${INFO} Upgrading gravity database from version 13 to 14"
-		pihole-FTL sqlite3 "${database}" < "${scriptPath}/13_to_14.sql"
+		pihole-FTL sqlite3 -ni "${database}" < "${scriptPath}/13_to_14.sql"
 		version=14
 	fi
 	if [[ "$version" == "14" ]]; then
 		# Changes the vw_adlist created in 5_to_6
 		echo -e "  ${INFO} Upgrading gravity database from version 14 to 15"
-		pihole-FTL sqlite3 "${database}" < "${scriptPath}/14_to_15.sql"
+		pihole-FTL sqlite3 -ni "${database}" < "${scriptPath}/14_to_15.sql"
 		version=15
 	fi
 	if [[ "$version" == "15" ]]; then
 		# Add column abp_entries to adlist table
 		echo -e "  ${INFO} Upgrading gravity database from version 15 to 16"
-		pihole-FTL sqlite3 "${database}" < "${scriptPath}/15_to_16.sql"
+		pihole-FTL sqlite3 -ni "${database}" < "${scriptPath}/15_to_16.sql"
 		version=16
 	fi
 	if [[ "$version" == "16" ]]; then
 		# Add antigravity table
 		# Add column type to adlist table (to support adlist types)
 		echo -e "  ${INFO} Upgrading gravity database from version 16 to 17"
-		pihole-FTL sqlite3 "${database}" < "${scriptPath}/16_to_17.sql"
+		pihole-FTL sqlite3 -ni "${database}" < "${scriptPath}/16_to_17.sql"
 		version=17
 	fi
+	if [[ "$version" == "17" ]]; then
+		# Add adlist.id to vw_gravity and vw_antigravity
+		echo -e "  ${INFO} Upgrading gravity database from version 17 to 18"
+		pihole-FTL sqlite3 -ni "${database}" < "${scriptPath}/17_to_18.sql"
+		version=18
+	fi
+	if [[ "$version" == "18" ]]; then
+		# Modify DELETE triggers to delete BEFORE instead of AFTER to prevent
+		# foreign key constraint violations
+		echo -e "  ${INFO} Upgrading gravity database from version 18 to 19"
+		pihole-FTL sqlite3 -ni "${database}" < "${scriptPath}/18_to_19.sql"
+		version=19
+	fi
 }
diff --git a/advanced/Scripts/database_migration/gravity/17_to_18.sql b/advanced/Scripts/database_migration/gravity/17_to_18.sql
new file mode 100644
index 00000000..00171a9a
--- /dev/null
+++ b/advanced/Scripts/database_migration/gravity/17_to_18.sql
@@ -0,0 +1,25 @@
+.timeout 30000
+
+PRAGMA FOREIGN_KEYS=OFF;
+
+BEGIN TRANSACTION;
+
+DROP VIEW vw_gravity;
+CREATE VIEW vw_gravity AS SELECT domain, adlist.id AS adlist_id, adlist_by_group.group_id AS group_id
+    FROM gravity
+    LEFT JOIN adlist_by_group ON adlist_by_group.adlist_id = gravity.adlist_id
+    LEFT JOIN adlist ON adlist.id = gravity.adlist_id
+    LEFT JOIN "group" ON "group".id = adlist_by_group.group_id
+    WHERE adlist.enabled = 1 AND (adlist_by_group.group_id IS NULL OR "group".enabled = 1);
+
+DROP VIEW vw_antigravity;
+CREATE VIEW vw_antigravity AS SELECT domain, adlist.id AS adlist_id, adlist_by_group.group_id AS group_id
+    FROM antigravity
+    LEFT JOIN adlist_by_group ON adlist_by_group.adlist_id = antigravity.adlist_id
+    LEFT JOIN adlist ON adlist.id = antigravity.adlist_id
+    LEFT JOIN "group" ON "group".id = adlist_by_group.group_id
+    WHERE adlist.enabled = 1 AND (adlist_by_group.group_id IS NULL OR "group".enabled = 1) AND adlist.type = 1;
+
+UPDATE info SET value = 18 WHERE property = 'version';
+
+COMMIT;
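
A minimal sketch of querying the adlist_id column that the rebuilt views now expose, assuming the default gravity database location /etc/pihole/gravity.db:

    pihole-FTL sqlite3 -ni /etc/pihole/gravity.db \
        "SELECT domain, adlist_id FROM vw_gravity LIMIT 5;"
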
diff --git a/advanced/Scripts/database_migration/gravity/18_to_19.sql b/advanced/Scripts/database_migration/gravity/18_to_19.sql
new file mode 100644
index 00000000..c85a4d57
--- /dev/null
+++ b/advanced/Scripts/database_migration/gravity/18_to_19.sql
@@ -0,0 +1,27 @@
+.timeout 30000
+
+PRAGMA FOREIGN_KEYS=OFF;
+
+BEGIN TRANSACTION;
+
+DROP TRIGGER tr_domainlist_delete;
+CREATE TRIGGER tr_domainlist_delete BEFORE DELETE ON domainlist
+    BEGIN
+      DELETE FROM domainlist_by_group WHERE domainlist_id = OLD.id;
+    END;
+
+DROP TRIGGER tr_adlist_delete;
+CREATE TRIGGER tr_adlist_delete BEFORE DELETE ON adlist
+    BEGIN
+      DELETE FROM adlist_by_group WHERE adlist_id = OLD.id;
+    END;
+
+DROP TRIGGER tr_client_delete;
+CREATE TRIGGER tr_client_delete BEFORE DELETE ON client
+    BEGIN
+      DELETE FROM client_by_group WHERE client_id = OLD.id;
+    END;
+
+UPDATE info SET value = 19 WHERE property = 'version';
+
+COMMIT;
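
The BEFORE timing matters because the *_by_group tables reference the row being deleted: with foreign keys enforced, deleting the parent row trips the constraint before an AFTER trigger gets a chance to clean up the link rows, whereas a BEFORE trigger removes them first. A quick way to confirm a migrated database is clean, assuming the default path /etc/pihole/gravity.db, is the same check the debug script performs:

    pihole-FTL sqlite3 -ni /etc/pihole/gravity.db "PRAGMA foreign_key_check"
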
diff --git a/advanced/Scripts/list.sh b/advanced/Scripts/list.sh
index b76a7ef7..76558e58 100755
--- a/advanced/Scripts/list.sh
+++ b/advanced/Scripts/list.sh
@@ -150,18 +150,18 @@ AddDomain() {
     domain="$1"
 
     # Is the domain in the list we want to add it to?
-    num="$(pihole-FTL sqlite3 "${gravityDBfile}" "SELECT COUNT(*) FROM domainlist WHERE domain = '${domain}';")"
+    num="$(pihole-FTL sqlite3 -ni "${gravityDBfile}" "SELECT COUNT(*) FROM domainlist WHERE domain = '${domain}';")"
     requestedListname="$(GetListnameFromTypeId "${typeId}")"
 
     if [[ "${num}" -ne 0 ]]; then
-        existingTypeId="$(pihole-FTL sqlite3 "${gravityDBfile}" "SELECT type FROM domainlist WHERE domain = '${domain}';")"
+        existingTypeId="$(pihole-FTL sqlite3 -ni "${gravityDBfile}" "SELECT type FROM domainlist WHERE domain = '${domain}';")"
         if [[ "${existingTypeId}" == "${typeId}" ]]; then
             if [[ "${verbose}" == true ]]; then
                 echo -e "  ${INFO} ${1} already exists in ${requestedListname}, no need to add!"
             fi
         else
             existingListname="$(GetListnameFromTypeId "${existingTypeId}")"
-            pihole-FTL sqlite3 "${gravityDBfile}" "UPDATE domainlist SET type = ${typeId} WHERE domain='${domain}';"
+            pihole-FTL sqlite3 -ni "${gravityDBfile}" "UPDATE domainlist SET type = ${typeId} WHERE domain='${domain}';"
             if [[ "${verbose}" == true ]]; then
                 echo -e "  ${INFO} ${1} already exists in ${existingListname}, it has been moved to ${requestedListname}!"
             fi
@@ -177,10 +177,10 @@ AddDomain() {
     # Insert only the domain here. The enabled and date_added fields will be filled
     # with their default values (enabled = true, date_added = current timestamp)
     if [[ -z "${comment}" ]]; then
-        pihole-FTL sqlite3 "${gravityDBfile}" "INSERT INTO domainlist (domain,type) VALUES ('${domain}',${typeId});"
+        pihole-FTL sqlite3 -ni "${gravityDBfile}" "INSERT INTO domainlist (domain,type) VALUES ('${domain}',${typeId});"
     else
         # also add comment when variable has been set through the "--comment" option
-        pihole-FTL sqlite3 "${gravityDBfile}" "INSERT INTO domainlist (domain,type,comment) VALUES ('${domain}',${typeId},'${comment}');"
+        pihole-FTL sqlite3 -ni "${gravityDBfile}" "INSERT INTO domainlist (domain,type,comment) VALUES ('${domain}',${typeId},'${comment}');"
     fi
 }
 
@@ -189,7 +189,7 @@ RemoveDomain() {
     domain="$1"
 
     # Is the domain in the list we want to remove it from?
-    num="$(pihole-FTL sqlite3 "${gravityDBfile}" "SELECT COUNT(*) FROM domainlist WHERE domain = '${domain}' AND type = ${typeId};")"
+    num="$(pihole-FTL sqlite3 -ni "${gravityDBfile}" "SELECT COUNT(*) FROM domainlist WHERE domain = '${domain}' AND type = ${typeId};")"
 
     requestedListname="$(GetListnameFromTypeId "${typeId}")"
 
@@ -206,14 +206,14 @@ RemoveDomain() {
     fi
     reload=true
     # Remove it from the current list
-    pihole-FTL sqlite3 "${gravityDBfile}" "DELETE FROM domainlist WHERE domain = '${domain}' AND type = ${typeId};"
+    pihole-FTL sqlite3 -ni "${gravityDBfile}" "DELETE FROM domainlist WHERE domain = '${domain}' AND type = ${typeId};"
 }
 
 Displaylist() {
     local count num_pipes domain enabled status nicedate requestedListname
 
     requestedListname="$(GetListnameFromTypeId "${typeId}")"
-    data="$(pihole-FTL sqlite3 "${gravityDBfile}" "SELECT domain,enabled,date_modified FROM domainlist WHERE type = ${typeId};" 2> /dev/null)"
+    data="$(pihole-FTL sqlite3 -ni "${gravityDBfile}" "SELECT domain,enabled,date_modified FROM domainlist WHERE type = ${typeId};" 2> /dev/null)"
 
     if [[ -z $data ]]; then
         echo -e "Not showing empty list"
@@ -251,10 +251,10 @@ Displaylist() {
 }
 
 NukeList() {
-    count=$(pihole-FTL sqlite3 "${gravityDBfile}" "SELECT COUNT(1) FROM domainlist WHERE type = ${typeId};")
+    count=$(pihole-FTL sqlite3 -ni "${gravityDBfile}" "SELECT COUNT(1) FROM domainlist WHERE type = ${typeId};")
     listname="$(GetListnameFromTypeId "${typeId}")"
     if [ "$count" -gt 0 ];then
-        pihole-FTL sqlite3 "${gravityDBfile}" "DELETE FROM domainlist WHERE type = ${typeId};"
+        pihole-FTL sqlite3 -ni "${gravityDBfile}" "DELETE FROM domainlist WHERE type = ${typeId};"
         echo "  ${TICK} Removed ${count} domain(s) from the ${listname}"
     else
         echo "  ${INFO} ${listname} already empty. Nothing to do!"
diff --git a/advanced/Scripts/piholeARPTable.sh b/advanced/Scripts/piholeARPTable.sh
index 5daa025d..b92dd124 100755
--- a/advanced/Scripts/piholeARPTable.sh
+++ b/advanced/Scripts/piholeARPTable.sh
@@ -39,7 +39,7 @@ flushARP(){
     # Truncate network_addresses table in pihole-FTL.db
     # This needs to be done before we can truncate the network table due to
     # foreign key constraints
-    if ! output=$(pihole-FTL sqlite3 "${DBFILE}" "DELETE FROM network_addresses" 2>&1); then
+    if ! output=$(pihole-FTL sqlite3 -ni "${DBFILE}" "DELETE FROM network_addresses" 2>&1); then
         echo -e "${OVER}  ${CROSS} Failed to truncate network_addresses table"
         echo "  Database location: ${DBFILE}"
         echo "  Output: ${output}"
@@ -47,7 +47,7 @@ flushARP(){
     fi
 
     # Truncate network table in pihole-FTL.db
-    if ! output=$(pihole-FTL sqlite3 "${DBFILE}" "DELETE FROM network" 2>&1); then
+    if ! output=$(pihole-FTL sqlite3 -ni "${DBFILE}" "DELETE FROM network" 2>&1); then
         echo -e "${OVER}  ${CROSS} Failed to truncate network table"
         echo "  Database location: ${DBFILE}"
         echo "  Output: ${output}"
diff --git a/advanced/Scripts/piholeCheckout.sh b/advanced/Scripts/piholeCheckout.sh
index 39d39b1c..41fd8606 100755
--- a/advanced/Scripts/piholeCheckout.sh
+++ b/advanced/Scripts/piholeCheckout.sh
@@ -164,7 +164,9 @@ checkout() {
         path="${2}/${binary}"
         oldbranch="$(pihole-FTL -b)"
 
-        if check_download_exists "$path"; then
+        check_download_exists "$path"
+        local ret=$?
+        if [ $ret -eq 0 ]; then
             echo "  ${TICK} Branch ${2} exists"
             echo "${2}" > /etc/pihole/ftlbranch
             chmod 644 /etc/pihole/ftlbranch
@@ -175,11 +177,19 @@ checkout() {
             # Update local and remote versions via updatechecker
             /opt/pihole/updatecheck.sh
         else
-            echo "  ${CROSS} Requested branch \"${2}\" is not available"
-            ftlbranches=( $(git ls-remote https://github.com/pi-hole/ftl | grep 'heads' | sed 's/refs\/heads\///;s/ //g' | awk '{print $2}') )
-            echo -e "  ${INFO} Available branches for FTL are:"
-            for e in "${ftlbranches[@]}"; do echo "      - $e"; done
-            exit 1
+            if [[ $ret -eq 1 ]]; then
+                echo "  ${CROSS} Requested branch \"${2}\" is not available"
+                ftlbranches=( $(git ls-remote https://github.com/pi-hole/ftl | grep 'heads' | sed 's/refs\/heads\///;s/ //g' | awk '{print $2}') )
+                echo -e "  ${INFO} Available branches for FTL are:"
+                for e in "${ftlbranches[@]}"; do echo "      - $e"; done
+                exit 1
+            elif [[ $ret -eq 2 ]]; then
+                printf "  %b Unable to download from ftl.pi-hole.net. Please check your Internet connection and try again later.\\n" "${CROSS}"
+                exit 1
+            else
+                printf "  %b Unknown error. Please contact Pi-hole Support\\n" "${CROSS}"
+                exit 1
+            fi
         fi
 
     else
diff --git a/advanced/Scripts/piholeDebug.sh b/advanced/Scripts/piholeDebug.sh
index 931e95a4..f5a57278 100755
--- a/advanced/Scripts/piholeDebug.sh
+++ b/advanced/Scripts/piholeDebug.sh
@@ -74,7 +74,6 @@ PIHOLE_CRON_FILE="${CRON_D_DIRECTORY}/pihole"
 
 PIHOLE_INSTALL_LOG_FILE="${PIHOLE_DIRECTORY}/install.log"
 PIHOLE_RAW_BLOCKLIST_FILES="${PIHOLE_DIRECTORY}/list.*"
-PIHOLE_LOCAL_HOSTS_FILE="${PIHOLE_DIRECTORY}/local.list"
 PIHOLE_LOGROTATE_FILE="${PIHOLE_DIRECTORY}/logrotate"
 PIHOLE_FTL_CONF_FILE="${PIHOLE_DIRECTORY}/pihole.toml"
 PIHOLE_VERSIONS_FILE="${PIHOLE_DIRECTORY}/versions"
@@ -547,17 +546,24 @@ ping_gateway() {
     ping_ipv4_or_ipv6 "${protocol}"
     # Check if we are using IPv4 or IPv6
     # Find the default gateways using IPv4 or IPv6
-    local gateway
+    local gateway gateway_addr gateway_iface
 
     log_write "${INFO} Default IPv${protocol} gateway(s):"
 
     while IFS= read -r gateway; do
-        log_write "     ${gateway}"
-    done < <(ip -"${protocol}" route | grep default | cut -d ' ' -f 3)
+        log_write "     $(cut -d ' ' -f 3 <<< "${gateway}")%$(cut -d ' ' -f 5 <<< "${gateway}")"
+    done < <(ip -"${protocol}" route | grep default)
 
-    gateway=$(ip -"${protocol}" route | grep default | cut -d ' ' -f 3 | head -n 1)
+    gateway_addr=$(ip -"${protocol}" route | grep default | cut -d ' ' -f 3 | head -n 1)
+    gateway_iface=$(ip -"${protocol}" route | grep default | cut -d ' ' -f 5 | head -n 1)
     # If there was at least one gateway
-    if [ -n "${gateway}" ]; then
+    if [ -n "${gateway_addr}" ]; then
+        # Append the interface to the gateway address if it is a link-local address
+        if [[ "${gateway_addr}" =~ ^fe80 ]]; then
+            gateway="${gateway_addr}%${gateway_iface}"
+        else
+            gateway="${gateway_addr}"
+        fi
         # Let the user know we will ping the gateway for a response
         log_write "   * Pinging first gateway ${gateway}..."
         # Try to quietly ping the gateway 3 times, with a timeout of 3 seconds, using numeric output only,
@@ -718,7 +724,7 @@ dig_at() {
     # This helps emulate queries to different domains that a user might query
     # It will also give extra assurance that Pi-hole is correctly resolving and blocking domains
     local random_url
-    random_url=$(pihole-FTL sqlite3 "${PIHOLE_GRAVITY_DB_FILE}" "SELECT domain FROM vw_gravity WHERE domain not like '||%^' ORDER BY RANDOM() LIMIT 1")
+    random_url=$(pihole-FTL sqlite3 -ni "${PIHOLE_GRAVITY_DB_FILE}" "SELECT domain FROM vw_gravity WHERE domain not like '||%^' ORDER BY RANDOM() LIMIT 1")
     # Fallback if no non-ABP style domains were found
     if [ -z "${random_url}" ]; then
         random_url="flurry.com"
@@ -757,24 +763,29 @@ dig_at() {
         #          Removes CIDR and everything thereafter (e.g., scope properties)
         addresses="$(ip address show dev "${iface}" | sed "/${sed_selector} /!d;s/^.*${sed_selector} //g;s/\/.*$//g;")"
         if [ -n "${addresses}" ]; then
-          while IFS= read -r local_address ; do
+            while IFS= read -r local_address ; do
+                # If ${local_address} is an IPv6 link-local address, append the interface name to it
+                if [[ "${local_address}" =~ ^fe80 ]]; then
+                    local_address="${local_address}%${iface}"
+                fi
+
               # Check if Pi-hole can use itself to block a domain
-              if local_dig="$(dig +tries=1 +time=2 -"${protocol}" "${random_url}" @"${local_address}" "${record_type}")"; then
-                  # If it can, show success
-                  if [[ "${local_dig}" == *"status: NOERROR"* ]]; then
-                    local_dig="NOERROR"
-                  elif [[ "${local_dig}" == *"status: NXDOMAIN"* ]]; then
-                    local_dig="NXDOMAIN"
-                  else
-                    # Extract the first entry in the answer section from dig's output,
-                    # replacing any multiple spaces and tabs with a single space
-                    local_dig="$(echo "${local_dig}" | grep -A1 "ANSWER SECTION" | grep -v "ANSWER SECTION" | tr -s " \t" " ")"
-                  fi
-                  log_write "${TICK} ${random_url} ${COL_GREEN}is ${local_dig}${COL_NC} on ${COL_CYAN}${iface}${COL_NC} (${COL_CYAN}${local_address}${COL_NC})"
-              else
-                  # Otherwise, show a failure
-                  log_write "${CROSS} ${COL_RED}Failed to resolve${COL_NC} ${random_url} on ${COL_RED}${iface}${COL_NC} (${COL_RED}${local_address}${COL_NC})"
-              fi
+                if local_dig="$(dig +tries=1 +time=2 -"${protocol}" "${random_url}" @"${local_address}" "${record_type}")"; then
+                    # If it can, show success
+                    if [[ "${local_dig}" == *"status: NOERROR"* ]]; then
+                        local_dig="NOERROR"
+                    elif [[ "${local_dig}" == *"status: NXDOMAIN"* ]]; then
+                        local_dig="NXDOMAIN"
+                    else
+                        # Extract the first entry in the answer section from dig's output,
+                        # replacing any multiple spaces and tabs with a single space
+                        local_dig="$(echo "${local_dig}" | grep -A1 "ANSWER SECTION" | grep -v "ANSWER SECTION" | tr -s " \t" " ")"
+                    fi
+                    log_write "${TICK} ${random_url} ${COL_GREEN}is ${local_dig}${COL_NC} on ${COL_CYAN}${iface}${COL_NC} (${COL_CYAN}${local_address}${COL_NC})"
+                else
+                    # Otherwise, show a failure
+                    log_write "${CROSS} ${COL_RED}Failed to resolve${COL_NC} ${random_url} on ${COL_RED}${iface}${COL_NC} (${COL_RED}${local_address}${COL_NC})"
+                fi
           done <<< "${addresses}"
         else
           log_write "${TICK} No IPv${protocol} address available on ${COL_CYAN}${iface}${COL_NC}"
@@ -1064,7 +1075,7 @@ show_db_entries() {
     IFS=$'\r\n'
     local entries=()
     mapfile -t entries < <(\
-        pihole-FTL sqlite3 "${PIHOLE_GRAVITY_DB_FILE}" \
+        pihole-FTL sqlite3 -ni "${PIHOLE_GRAVITY_DB_FILE}" \
             -cmd ".headers on" \
             -cmd ".mode column" \
             -cmd ".width ${widths}" \
@@ -1089,7 +1100,7 @@ show_FTL_db_entries() {
     IFS=$'\r\n'
     local entries=()
     mapfile -t entries < <(\
-        pihole-FTL sqlite3 "${PIHOLE_FTL_DB_FILE}" \
+        pihole-FTL sqlite3 -ni "${PIHOLE_FTL_DB_FILE}" \
             -cmd ".headers on" \
             -cmd ".mode column" \
             -cmd ".width ${widths}" \
@@ -1155,7 +1166,7 @@ analyze_gravity_list() {
     fi
 
     show_db_entries "Info table" "SELECT property,value FROM info" "20 40"
-    gravity_updated_raw="$(pihole-FTL sqlite3 "${PIHOLE_GRAVITY_DB_FILE}" "SELECT value FROM info where property = 'updated'")"
+    gravity_updated_raw="$(pihole-FTL sqlite3 -ni "${PIHOLE_GRAVITY_DB_FILE}" "SELECT value FROM info where property = 'updated'")"
     gravity_updated="$(date -d @"${gravity_updated_raw}")"
     log_write "   Last gravity run finished at: ${COL_CYAN}${gravity_updated}${COL_NC}"
     log_write ""
@@ -1163,7 +1174,7 @@ analyze_gravity_list() {
     OLD_IFS="$IFS"
     IFS=$'\r\n'
     local gravity_sample=()
-    mapfile -t gravity_sample < <(pihole-FTL sqlite3 "${PIHOLE_GRAVITY_DB_FILE}" "SELECT domain FROM vw_gravity LIMIT 10")
+    mapfile -t gravity_sample < <(pihole-FTL sqlite3 -ni "${PIHOLE_GRAVITY_DB_FILE}" "SELECT domain FROM vw_gravity LIMIT 10")
     log_write "   ${COL_CYAN}----- First 10 Gravity Domains -----${COL_NC}"
 
     for line in "${gravity_sample[@]}"; do
@@ -1195,7 +1206,7 @@ database_integrity_check(){
 
       log_write "${INFO} Checking foreign key constraints of ${database} ... (this can take several minutes)"
       unset result
-      result="$(pihole-FTL sqlite3 "${database}" -cmd ".headers on" -cmd ".mode column" "PRAGMA foreign_key_check" 2>&1 & spinner)"
+      result="$(pihole-FTL sqlite3 -ni "${database}" -cmd ".headers on" -cmd ".mode column" "PRAGMA foreign_key_check" 2>&1 & spinner)"
       if [[ -z ${result} ]]; then
         log_write "${TICK} No foreign key errors in ${database}"
       else
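
The interface suffix is needed because IPv6 link-local addresses (fe80::/10) are only meaningful together with a zone index; without "%interface", neither ping nor dig can tell which interface to use. An illustrative example of the resulting form (address and interface name are placeholders):

    ping -6 -c 3 'fe80::1%eth0'
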
diff --git a/advanced/Scripts/piholeLogFlush.sh b/advanced/Scripts/piholeLogFlush.sh
index 14542e4b..4d97fec5 100755
--- a/advanced/Scripts/piholeLogFlush.sh
+++ b/advanced/Scripts/piholeLogFlush.sh
@@ -63,7 +63,7 @@ else
         fi
     fi
     # Delete most recent 24 hours from FTL's database, leave even older data intact (don't wipe out all history)
-    deleted=$(pihole-FTL sqlite3 "${DBFILE}" "DELETE FROM query_storage WHERE timestamp >= strftime('%s','now')-86400; select changes() from query_storage limit 1")
+    deleted=$(pihole-FTL sqlite3 -ni "${DBFILE}" "DELETE FROM query_storage WHERE timestamp >= strftime('%s','now')-86400; select changes() from query_storage limit 1")
 
     # Restart pihole-FTL to force reloading history
     sudo pihole restartdns
diff --git a/advanced/Scripts/query.sh b/advanced/Scripts/query.sh
index 2279df85..493c75ea 100755
--- a/advanced/Scripts/query.sh
+++ b/advanced/Scripts/query.sh
@@ -27,7 +27,7 @@ colfile="/opt/pihole/COL_TABLE"
 # Source api functions
 . "${PI_HOLE_INSTALL_DIR}/api.sh"
 
-Help(){
+Help() {
     echo "Usage: pihole -q [option] <domain>
 Example: 'pihole -q --partial domain.com'
 Query the adlists for a specified domain
@@ -36,29 +36,28 @@ Options:
   --partial            Search the adlists for partially matching domains
   --all                Return all query matches within the adlists
   -h, --help           Show this help dialog"
-  exit 0
+    exit 0
 }
 
-
-GenerateOutput(){
+GenerateOutput() {
     local data gravity_data lists_data num_gravity num_lists search_type_str
-    local gravity_data_csv lists_data_csv line current_domain
+    local gravity_data_csv lists_data_csv line current_domain url type color
     data="${1}"
 
     # construct a new json for the list results where each object contains the domain and the related type
-    lists_data=$(echo "${data}" | jq '.search.domains | [.[] | {domain: .domain, type: .type}]')
+    lists_data=$(printf %s "${data}" | jq '.search.domains | [.[] | {domain: .domain, type: .type}]')
 
     # construct a new json for the gravity results where each object contains the adlist URL and the related domains
-    gravity_data=$(echo "${data}" | jq '.search.gravity  | group_by(.address) | map({ address: (.[0].address), domains: [.[] | .domain] })')
+    gravity_data=$(printf %s "${data}" | jq '.search.gravity  | group_by(.address,.type) | map({ address: (.[0].address), type: (.[0].type), domains: [.[] | .domain] })')
 
     # number of objects in each json
-    num_gravity=$(echo "${gravity_data}" | jq length )
-    num_lists=$(echo "${lists_data}" | jq length )
+    num_gravity=$(printf %s "${gravity_data}" | jq length)
+    num_lists=$(printf %s "${lists_data}" | jq length)
 
     if [ "${partial}" = true ]; then
-      search_type_str="partially"
+        search_type_str="partially"
     else
-      search_type_str="exactly"
+        search_type_str="exactly"
     fi
 
     # Results from allow/deny list
@@ -66,7 +65,7 @@ GenerateOutput(){
     if [ "${num_lists}" -gt 0 ]; then
         # Convert the data to a csv, each line is a "domain,type" string
         # not using jq's @csv here as it quotes each value individually
-        lists_data_csv=$(echo "${lists_data}" | jq --raw-output '.[] | [.domain, .type] | join(",")' )
+        lists_data_csv=$(printf %s "${lists_data}" | jq --raw-output '.[] | [.domain, .type] | join(",")')
 
         # Generate output for each csv line, separating line in a domain and type substring at the ','
         echo "${lists_data_csv}" | while read -r line; do
@@ -79,18 +78,30 @@ GenerateOutput(){
     if [ "${num_gravity}" -gt 0 ]; then
         # Convert the data to a csv, each line is a "URL,domain,domain,...." string
         # not using jq's @csv here as it quotes each value individually
-        gravity_data_csv=$(echo "${gravity_data}" | jq --raw-output '.[] | [.address, .domains[]] | join(",")' )
+        gravity_data_csv=$(printf %s "${gravity_data}" | jq --raw-output '.[] | [.address, .type, .domains[]] | join(",")')
 
         # Generate line-by-line output for each csv line
         echo "${gravity_data_csv}" | while read -r line; do
+            # Get first part of the line, the URL
+            url=${line%%,*}
+
+            # cut off URL, leaving "type,domain,domain,...."
+            line=${line#*,}
+            type=${line%%,*}
+            # type == "block" -> red, type == "allow" -> green
+            if [ "${type}" = "block" ]; then
+                color="${COL_RED}"
+            else
+                color="${COL_GREEN}"
+            fi
 
             # print adlist URL
-            printf "%s\n\n" "  - ${COL_BLUE}${line%%,*}${COL_NC}"
+            printf "%s (%s)\n\n" "  - ${COL_BLUE}${url}${COL_NC}" "${color}${type}${COL_NC}"
 
-            # cut off URL, leaving "domain,domain,...."
+            # cut off type, leaving "domain,domain,...."
             line=${line#*,}
             # print each domain and remove it from the string until nothing is left
-            while  [ ${#line} -gt 0 ]; do
+            while [ ${#line} -gt 0 ]; do
                 current_domain=${line%%,*}
                 printf '    - %s\n' "${COL_GREEN}${current_domain}${COL_NC}"
                 # we need to remove the current_domain and the comma in two steps because
@@ -103,17 +114,17 @@ GenerateOutput(){
     fi
 }
 
-Main(){
+Main() {
     local data
 
     if [ -z "${domain}" ]; then
-        echo "No domain specified"; exit 1
+        echo "No domain specified"
+        exit 1
     fi
     # domains are lowercased and converted to punycode by FTL since
     # https://github.com/pi-hole/FTL/pull/1715
     # no need to do it here
 
-
     # Test if the authentication endpoint is available
     TestAPIAvailability
 
@@ -121,14 +132,14 @@ Main(){
     # or b) for the /search endpoint (webserver.api.searchAPIauth) no authentication is required.
     # Therefore, we try to query directly without authentication but do authenticat if 401 is returned
 
-    data=$(GetFTLData "/search/${domain}?N=${max_results}&partial=${partial}")
+    data=$(GetFTLData "search/${domain}?N=${max_results}&partial=${partial}")
 
     if [ "${data}" = 401 ]; then
         # Unauthenticated, so authenticate with the FTL server required
-        Authenthication
+        Authentication
 
         # send query again
-        data=$(GetFTLData "/search/${domain}?N=${max_results}&partial=${partial}")
+        data=$(GetFTLData "search/${domain}?N=${max_results}&partial=${partial}")
     fi
 
     GenerateOutput "${data}"
@@ -137,13 +148,13 @@ Main(){
 
 # Process all options (if present)
 while [ "$#" -gt 0 ]; do
-  case "$1" in
-    "-h" | "--help"     ) Help;;
-    "--partial"         ) partial="true";;
-    "--all"             ) max_results=10000;; # hard-coded FTL limit
-    *                   ) domain=$1;;
-  esac
-  shift
+    case "$1" in
+    "-h" | "--help") Help ;;
+    "--partial") partial="true" ;;
+    "--all") max_results=10000 ;; # hard-coded FTL limit
+    *) domain=$1 ;;
+    esac
+    shift
 done
 
 Main "${domain}"
diff --git a/advanced/Scripts/update.sh b/advanced/Scripts/update.sh
index 9dae66df..8a35ef2e 100755
--- a/advanced/Scripts/update.sh
+++ b/advanced/Scripts/update.sh
@@ -144,7 +144,7 @@ main() {
     local binary
     binary="pihole-FTL${funcOutput##*pihole-FTL}" #binary name will be the last line of the output of get_binary_name (it always begins with pihole-FTL)
 
-    if FTLcheckUpdate "${binary}" > /dev/null; then
+    if FTLcheckUpdate "${binary}"; then
         FTL_update=true
         echo -e "  ${INFO} FTL:\\t\\t${COL_YELLOW}update available${COL_NC}"
     else
@@ -155,8 +155,13 @@ main() {
             2)
                 echo -e "  ${INFO} FTL:\\t\\t${COL_LIGHT_RED}Branch is not available.${COL_NC}\\n\\t\\t\\tUse ${COL_LIGHT_GREEN}pihole checkout ftl [branchname]${COL_NC} to switch to a valid branch."
                 ;;
+            3)
+                echo -e "  ${INFO} FTL:\\t\\t${COL_LIGHT_RED}Something has gone wrong, cannot reach download server${COL_NC}"
+                exit 1
+                ;;
             *)
                 echo -e "  ${INFO} FTL:\\t\\t${COL_LIGHT_RED}Something has gone wrong, contact support${COL_NC}"
+                exit 1
         esac
         FTL_update=false
     fi
diff --git a/advanced/Scripts/updatecheck.sh b/advanced/Scripts/updatecheck.sh
index 66f1a7ab..eda6c403 100755
--- a/advanced/Scripts/updatecheck.sh
+++ b/advanced/Scripts/updatecheck.sh
@@ -10,27 +10,31 @@
 
 function get_local_branch() {
     # Return active branch
-    cd "${1}" 2> /dev/null || return 1
+    cd "${1}" 2>/dev/null || return 1
     git rev-parse --abbrev-ref HEAD || return 1
 }
 
 function get_local_version() {
     # Return active version
-    cd "${1}" 2> /dev/null || return 1
-    git describe --tags --always 2> /dev/null || return 1
+    cd "${1}" 2>/dev/null || return 1
+    git describe --tags --always 2>/dev/null || return 1
 }
 
 function get_local_hash() {
-    cd "${1}" 2> /dev/null || return 1
+    cd "${1}" 2>/dev/null || return 1
     git rev-parse --short=8 HEAD || return 1
 }
 
 function get_remote_version() {
-    curl -s "https://api.github.com/repos/pi-hole/${1}/releases/latest" 2> /dev/null | jq --raw-output .tag_name || return 1
+    # if ${2} is "master" we need to use the "latest" endpoint, otherwise we simply return null
+    if [[ "${2}" == "master" ]]; then
+        curl -s "https://api.github.com/repos/pi-hole/${1}/releases/latest" 2>/dev/null | jq --raw-output .tag_name || return 1
+    else
+        echo "null"
+    fi
 }
 
-
-function get_remote_hash(){
+function get_remote_hash() {
     git ls-remote "https://github.com/pi-hole/${1}" --tags "${2}" | awk '{print substr($0, 1,8);}' || return 1
 }
 
@@ -52,16 +56,15 @@ chmod 644 "${VERSION_FILE}"
 DOCKER_TAG=$(cat /pihole.docker.tag 2>/dev/null)
 regex='^([0-9]+\.){1,2}(\*|[0-9]+)(-.*)?$|(^nightly$)|(^dev.*$)'
 if [[ ! "${DOCKER_TAG}" =~ $regex ]]; then
-  # DOCKER_TAG does not match the pattern (see https://regex101.com/r/RsENuz/1), so unset it.
-  unset DOCKER_TAG
+    # DOCKER_TAG does not match the pattern (see https://regex101.com/r/RsENuz/1), so unset it.
+    unset DOCKER_TAG
 fi
 
 # used in cronjob
 if [[ "$1" == "reboot" ]]; then
-        sleep 30
+    sleep 30
 fi
 
-
 # get Core versions
 
 CORE_VERSION="$(get_local_version /etc/.pihole)"
@@ -73,13 +76,12 @@ addOrEditKeyValPair "${VERSION_FILE}" "CORE_BRANCH" "${CORE_BRANCH}"
 CORE_HASH="$(get_local_hash /etc/.pihole)"
 addOrEditKeyValPair "${VERSION_FILE}" "CORE_HASH" "${CORE_HASH}"
 
-GITHUB_CORE_VERSION="$(get_remote_version pi-hole)"
+GITHUB_CORE_VERSION="$(get_remote_version pi-hole "${CORE_BRANCH}")"
 addOrEditKeyValPair "${VERSION_FILE}" "GITHUB_CORE_VERSION" "${GITHUB_CORE_VERSION}"
 
 GITHUB_CORE_HASH="$(get_remote_hash pi-hole "${CORE_BRANCH}")"
 addOrEditKeyValPair "${VERSION_FILE}" "GITHUB_CORE_HASH" "${GITHUB_CORE_HASH}"
 
-
 # get Web versions
 
 WEB_VERSION="$(get_local_version /var/www/html/admin)"
@@ -91,7 +93,7 @@ addOrEditKeyValPair "${VERSION_FILE}" "WEB_BRANCH" "${WEB_BRANCH}"
 WEB_HASH="$(get_local_hash /var/www/html/admin)"
 addOrEditKeyValPair "${VERSION_FILE}" "WEB_HASH" "${WEB_HASH}"
 
-GITHUB_WEB_VERSION="$(get_remote_version web)"
+GITHUB_WEB_VERSION="$(get_remote_version web "${WEB_BRANCH}")"
 addOrEditKeyValPair "${VERSION_FILE}" "GITHUB_WEB_VERSION" "${GITHUB_WEB_VERSION}"
 
 GITHUB_WEB_HASH="$(get_remote_hash web "${WEB_BRANCH}")"
@@ -108,13 +110,12 @@ addOrEditKeyValPair "${VERSION_FILE}" "FTL_BRANCH" "${FTL_BRANCH}"
 FTL_HASH="$(pihole-FTL --hash)"
 addOrEditKeyValPair "${VERSION_FILE}" "FTL_HASH" "${FTL_HASH}"
 
-GITHUB_FTL_VERSION="$(get_remote_version FTL)"
+GITHUB_FTL_VERSION="$(get_remote_version FTL "${FTL_BRANCH}")"
 addOrEditKeyValPair "${VERSION_FILE}" "GITHUB_FTL_VERSION" "${GITHUB_FTL_VERSION}"
 
 GITHUB_FTL_HASH="$(get_remote_hash FTL "${FTL_BRANCH}")"
 addOrEditKeyValPair "${VERSION_FILE}" "GITHUB_FTL_HASH" "${GITHUB_FTL_HASH}"
 
-
 # get Docker versions
 
 if [[ "${DOCKER_TAG}" ]]; then
diff --git a/advanced/Scripts/version.sh b/advanced/Scripts/version.sh
index e3b4a6ae..2b0883af 100755
--- a/advanced/Scripts/version.sh
+++ b/advanced/Scripts/version.sh
@@ -8,6 +8,10 @@
 # This file is copyright under the latest version of the EUPL.
 # Please see LICENSE file for your rights under this license.
 
+# Ignore warning about `local` being undefined in POSIX
+# shellcheck disable=SC3043
+# https://github.com/koalaman/shellcheck/wiki/SC3043#exceptions
+
 # Source the versions file poupulated by updatechecker.sh
 cachedVersions="/etc/pihole/versions"
 
@@ -21,118 +25,34 @@ else
     . "$cachedVersions"
 fi
 
-getLocalVersion() {
-    case ${1} in
-        "Pi-hole"   )  echo "${CORE_VERSION:=N/A}";;
-        "web"  )  echo "${WEB_VERSION:=N/A}";;
-        "FTL"       )  echo "${FTL_VERSION:=N/A}";;
-    esac
-}
+main() {
+    local details
+    details=false
 
-getLocalHash() {
-    case ${1} in
-        "Pi-hole"   )  echo "${CORE_HASH:=N/A}";;
-        "web"  )  echo "${WEB_HASH:=N/A}";;
-        "FTL"       )  echo "${FTL_HASH:=N/A}";;
-    esac
-}
-
-getRemoteHash(){
-    case ${1} in
-        "Pi-hole"   )  echo "${GITHUB_CORE_HASH:=N/A}";;
-        "web"  )  echo "${GITHUB_WEB_HASH:=N/A}";;
-        "FTL"       )  echo "${GITHUB_FTL_HASH:=N/A}";;
-    esac
-}
-
-getRemoteVersion(){
-    case ${1} in
-        "Pi-hole"   )  echo "${GITHUB_CORE_VERSION:=N/A}";;
-        "web"  )  echo "${GITHUB_WEB_VERSION:=N/A}";;
-        "FTL"       )  echo "${GITHUB_FTL_VERSION:=N/A}";;
-    esac
-}
-
-getLocalBranch(){
-    case ${1} in
-        "Pi-hole"   )  echo "${CORE_BRANCH:=N/A}";;
-        "web"  )  echo "${WEB_BRANCH:=N/A}";;
-        "FTL"       )  echo "${FTL_BRANCH:=N/A}";;
-    esac
-}
-
-versionOutput() {
-
-    [ "$2" = "-c" ] || [ "$2" = "--current" ] || [ -z "$2" ] && current=$(getLocalVersion "${1}") && branch=$(getLocalBranch "${1}")
-    [ "$2" = "-l" ] || [ "$2" = "--latest" ] || [ -z "$2" ] && latest=$(getRemoteVersion "${1}")
-    if [ "$2" = "--hash" ]; then
-        [ "$3" = "-c" ] || [ "$3" = "--current" ] || [ -z "$3" ] && curHash=$(getLocalHash "${1}") && branch=$(getLocalBranch "${1}")
-        [ "$3" = "-l" ] || [ "$3" = "--latest" ] || [ -z "$3" ] && latHash=$(getRemoteHash "${1}") && branch=$(getLocalBranch "${1}")
+    # Automatically show detailed information if
+    # at least one of the components is not on master branch
+    if [ ! "${CORE_BRANCH}" = "master" ] || [ ! "${WEB_BRANCH}" = "master" ] || [ ! "${FTL_BRANCH}" = "master" ]; then
+        details=true
     fi
 
-    # We do not want to show the branch name when we are on master,
-    # blank out the variable in this case
-    if [ "$branch" = "master" ]; then
-        branch=""
+    if [ "${details}" = true ]; then
+        echo "Core"
+        echo "    Version is ${CORE_VERSION:=N/A} (Latest: ${GITHUB_CORE_VERSION:=N/A})"
+        echo "    Branch is ${CORE_BRANCH:=N/A}"
+        echo "    Hash is ${CORE_HASH:=N/A} (Latest: ${GITHUB_CORE_HASH:=N/A})"
+        echo "Web"
+        echo "    Version is ${WEB_VERSION:=N/A} (Latest: ${GITHUB_WEB_VERSION:=N/A})"
+        echo "    Branch is ${WEB_BRANCH:=N/A}"
+        echo "    Hash is ${WEB_HASH:=N/A} (Latest: ${GITHUB_WEB_HASH:=N/A})"
+        echo "FTL"
+        echo "    Version is ${FTL_VERSION:=N/A} (Latest: ${GITHUB_FTL_VERSION:=N/A})"
+        echo "    Branch is ${FTL_BRANCH:=N/A}"
+        echo "    Hash is ${FTL_HASH:=N/A} (Latest: ${GITHUB_FTL_HASH:=N/A})"
     else
-        branch="$branch "
+        echo "Core version is ${CORE_VERSION:=N/A} (Latest: ${GITHUB_CORE_VERSION:=N/A})"
+        echo "Web version is ${WEB_VERSION:=N/A} (Latest: ${GITHUB_WEB_VERSION:=N/A})"
+        echo "FTL version is ${FTL_VERSION:=N/A} (Latest: ${GITHUB_FTL_VERSION:=N/A})"
     fi
-
-    if [ -n "$current" ] && [ -n "$latest" ]; then
-        output="${1} version is $branch$current (Latest: $latest)"
-    elif [ -n "$current" ] && [ -z "$latest" ]; then
-        output="Current ${1} version is $branch$current"
-    elif [ -z "$current" ] && [ -n "$latest" ]; then
-        output="Latest ${1} version is $latest"
-    elif [ -n "$curHash" ] && [ -n "$latHash" ]; then
-        output="Local ${1} hash is $curHash (Remote: $latHash)"
-    elif [ -n "$curHash" ] && [ -z "$latHash" ]; then
-        output="Current local ${1} hash is $curHash"
-    elif [ -z "$curHash" ] && [ -n "$latHash" ]; then
-        output="Latest remote ${1} hash is $latHash"
-    elif [ -z "$curHash" ] && [ -z "$latHash" ]; then
-        output="Hashes for ${1} not available"
-    else
-        errorOutput
-        return 1
-    fi
-
-    [ -n "$output" ] && echo "  $output"
 }
 
-errorOutput() {
-    echo "  Invalid Option! Try 'pihole -v --help' for more information."
-    exit 1
-}
-
-defaultOutput() {
-    versionOutput "Pi-hole" "$@"
-    versionOutput "web" "$@"
-    versionOutput "FTL" "$@"
-}
-
-helpFunc() {
-    echo "Usage: pihole -v [repo | option] [option]
-Example: 'pihole -v -p -l'
-Show Pi-hole, Admin Console & FTL versions
-
-Repositories:
-  -p, --pihole         Only retrieve info regarding Pi-hole repository
-  -a, --admin          Only retrieve info regarding web repository
-  -f, --ftl            Only retrieve info regarding FTL repository
-
-Options:
-  -c, --current        Return the current version
-  -l, --latest         Return the latest version
-  --hash               Return the GitHub hash from your local repositories
-  -h, --help           Show this help dialog"
-  exit 0
-}
-
-case "${1}" in
-    "-p" | "--pihole"    ) shift; versionOutput "Pi-hole" "$@";;
-    "-a" | "--admin"     ) shift; versionOutput "web" "$@";;
-    "-f" | "--ftl"       ) shift; versionOutput "FTL" "$@";;
-    "-h" | "--help"      ) helpFunc;;
-    *                    ) defaultOutput "$@";;
-esac
+main
diff --git a/advanced/Templates/gravity.db.sql b/advanced/Templates/gravity.db.sql
index 46f26ba7..42060443 100644
--- a/advanced/Templates/gravity.db.sql
+++ b/advanced/Templates/gravity.db.sql
@@ -27,7 +27,7 @@ CREATE TABLE domainlist
 CREATE TABLE adlist
 (
 	id INTEGER PRIMARY KEY AUTOINCREMENT,
-	address TEXT UNIQUE NOT NULL,
+	address TEXT NOT NULL,
 	enabled BOOLEAN NOT NULL DEFAULT 1,
 	date_added INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
 	date_modified INTEGER NOT NULL DEFAULT (cast(strftime('%s', 'now') as int)),
@@ -37,7 +37,8 @@ CREATE TABLE adlist
 	invalid_domains INTEGER NOT NULL DEFAULT 0,
 	status INTEGER NOT NULL DEFAULT 0,
 	abp_entries INTEGER NOT NULL DEFAULT 0,
-	type INTEGER NOT NULL DEFAULT 0
+	type INTEGER NOT NULL DEFAULT 0,
+	UNIQUE(address, type)
 );
 
 CREATE TABLE adlist_by_group
@@ -65,7 +66,7 @@ CREATE TABLE info
 	value TEXT NOT NULL
 );
 
-INSERT INTO "info" VALUES('version','17');
+INSERT INTO "info" VALUES('version','18');
 
 CREATE TABLE domain_audit
 (
@@ -144,14 +145,14 @@ CREATE VIEW vw_regex_blacklist AS SELECT domain, domainlist.id AS id, domainlist
     AND domainlist.type = 3
     ORDER BY domainlist.id;
 
-CREATE VIEW vw_gravity AS SELECT domain, adlist_by_group.group_id AS group_id
+CREATE VIEW vw_gravity AS SELECT domain, adlist.id AS adlist_id, adlist_by_group.group_id AS group_id
     FROM gravity
     LEFT JOIN adlist_by_group ON adlist_by_group.adlist_id = gravity.adlist_id
     LEFT JOIN adlist ON adlist.id = gravity.adlist_id
     LEFT JOIN "group" ON "group".id = adlist_by_group.group_id
     WHERE adlist.enabled = 1 AND (adlist_by_group.group_id IS NULL OR "group".enabled = 1);
 
-CREATE VIEW vw_antigravity AS SELECT domain, adlist_by_group.group_id AS group_id
+CREATE VIEW vw_antigravity AS SELECT domain, adlist.id AS adlist_id, adlist_by_group.group_id AS group_id
     FROM antigravity
     LEFT JOIN adlist_by_group ON adlist_by_group.adlist_id = antigravity.adlist_id
     LEFT JOIN adlist ON adlist.id = antigravity.adlist_id
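
A short sketch of what the relaxed adlist constraint permits; the list URL and database path are placeholders, and the type values follow the DEFAULT 0 above and the adlist.type = 1 filter used by vw_antigravity:

    pihole-FTL sqlite3 -ni /etc/pihole/gravity.db \
        "INSERT INTO adlist (address, type) VALUES ('https://example.com/list.txt', 0);
         INSERT INTO adlist (address, type) VALUES ('https://example.com/list.txt', 1);"

The same address can now be subscribed once as a regular blocklist and once as an antigravity list, while a duplicate within the same type is still rejected by UNIQUE(address, type).
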
diff --git a/advanced/Templates/pihole.sudo b/advanced/Templates/pihole.sudo
deleted file mode 100644
index 708309be..00000000
--- a/advanced/Templates/pihole.sudo
+++ /dev/null
@@ -1,9 +0,0 @@
-# Pi-hole: A black hole for Internet advertisements
-# (c) 2017 Pi-hole, LLC (https://pi-hole.net)
-# Network-wide ad blocking via your own hardware.
-#
-# Allows the WebUI to use Pi-hole commands
-#
-# This file is copyright under the latest version of the EUPL.
-# Please see LICENSE file for your rights under this license.
-#
diff --git a/advanced/bash-completion/pihole b/advanced/bash-completion/pihole
index 305a3f5b..89e02d2f 100644
--- a/advanced/bash-completion/pihole
+++ b/advanced/bash-completion/pihole
@@ -1,5 +1,5 @@
 _pihole() {
-	local cur prev opts opts_admin opts_checkout opts_debug opts_interface  opts_logging opts_privacy opts_query opts_update opts_version
+	local cur prev opts opts_checkout opts_debug  opts_logging opts_query opts_update opts_version
 	COMPREPLY=()
 	cur="${COMP_WORDS[COMP_CWORD]}"
 	prev="${COMP_WORDS[COMP_CWORD-1]}"
@@ -7,17 +7,13 @@ _pihole() {
 
 	case "${prev}" in
 		"pihole")
-			opts="admin blacklist checkout debug disable enable flush help logging query reconfigure regex restartdns status tail uninstall updateGravity updatePihole version wildcard whitelist arpflush"
+			opts="blacklist checkout debug disable enable flush help logging query reconfigure regex restartdns status tail uninstall updateGravity updatePihole version wildcard whitelist arpflush"
 			COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
 		;;
 		"whitelist"|"blacklist"|"wildcard"|"regex")
 			opts_lists="\--delmode \--noreload \--quiet \--list \--nuke"
 			COMPREPLY=( $(compgen -W "${opts_lists}" -- ${cur}) )
 		;;
-		"admin")
-			opts_admin="celsius fahrenheit interface kelvin password privacylevel"
-			COMPREPLY=( $(compgen -W "${opts_admin}" -- ${cur}) )
-		;;
 		"checkout")
 			opts_checkout="core ftl web master dev"
 			COMPREPLY=( $(compgen -W "${opts_checkout}" -- ${cur}) )
@@ -31,33 +27,13 @@ _pihole() {
 			COMPREPLY=( $(compgen -W "${opts_logging}" -- ${cur}) )
 		;;
 		"query")
-			opts_query="-adlist -all -exact"
+			opts_query="--partial --all"
 			COMPREPLY=( $(compgen -W "${opts_query}" -- ${cur}) )
 		;;
 		"updatePihole"|"-up")
 			opts_update="--check-only"
 			COMPREPLY=( $(compgen -W "${opts_update}" -- ${cur}) )
 		;;
-		"version")
-			opts_version="\--admin \--current \--ftl \--hash \--latest \--pihole"
-			COMPREPLY=( $(compgen -W "${opts_version}" -- ${cur}) )
-		;;
-		"interface")
-			if ( [[ "$prev2" == "admin" ]] || [[ "$prev2" == "-a" ]] ); then
-				opts_interface="$(cat /proc/net/dev | cut -d: -s -f1)"
-				COMPREPLY=( $(compgen -W "${opts_interface}" -- ${cur}) )
-			else
-				return 1
-			fi
-		;;
-		"privacylevel")
-			if ( [[ "$prev2" == "admin" ]] || [[ "$prev2" == "-a" ]] ); then
-				opts_privacy="0 1 2 3"
-				COMPREPLY=( $(compgen -W "${opts_privacy}" -- ${cur}) )
-			else
-				return 1
-			fi
-		;;
 		"core"|"admin"|"ftl")
 			if [[ "$prev2" == "checkout" ]]; then
 				opts_checkout="master dev"
diff --git a/automated install/basic-install.sh b/automated install/basic-install.sh
index 72f00992..24abb7e7 100755
--- a/automated install/basic-install.sh	
+++ b/automated install/basic-install.sh	
@@ -39,9 +39,9 @@ export PATH+=':/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin'
 : "${DIALOG_CANCEL:=1}"
 : "${DIALOG_ESC:=255}"
 
-
 # List of supported DNS servers
-DNS_SERVERS=$(cat << EOM
+DNS_SERVERS=$(
+    cat <<EOM
 Google (ECS, DNSSEC);8.8.8.8;8.8.4.4;2001:4860:4860:0:0:0:0:8888;2001:4860:4860:0:0:0:0:8844
 OpenDNS (ECS, DNSSEC);208.67.222.222;208.67.220.220;2620:119:35::35;2620:119:53::53
 Level3;4.2.2.1;4.2.2.2;;
@@ -62,7 +62,6 @@ coltable="/opt/pihole/COL_TABLE"
 # Root of the web server
 webroot="/var/www/html"
 
-
 # We clone (or update) two git repositories during the install. This helps to make sure that we always have the latest versions of the relevant files.
 # web is used to set up the Web admin interface.
 # Pi-hole contains various setup scripts and files which are critical to the installation.
@@ -107,8 +106,8 @@ runUnattended=false
 # Check arguments for the undocumented flags
 for var in "$@"; do
     case "$var" in
-        "--reconfigure" ) reconfigure=true;;
-        "--unattended" ) runUnattended=true;;
+    "--reconfigure") reconfigure=true ;;
+    "--unattended") runUnattended=true ;;
     esac
 done
 
@@ -176,7 +175,10 @@ os_check() {
         detected_version=$(grep VERSION_ID /etc/os-release | cut -d '=' -f2 | tr -d '"')
 
         # Test via IPv4
-        cmdResult="$(dig -4 +short -t txt "${remote_os_domain}" @ns1.pi-hole.net 2>&1; echo $?)"
+        cmdResult="$(
+            dig -4 +short -t txt "${remote_os_domain}" @ns1.pi-hole.net 2>&1
+            echo $?
+        )"
         # Gets the return code of the previous command (last line)
         digReturnCode="${cmdResult##*$'\n'}"
 
@@ -197,7 +199,10 @@ os_check() {
         if [ "$valid_response" = false ]; then
             unset valid_response
 
-            cmdResult="$(dig -6 +short -t txt "${remote_os_domain}" @ns1.pi-hole.net 2>&1; echo $?)"
+            cmdResult="$(
+                dig -6 +short -t txt "${remote_os_domain}" @ns1.pi-hole.net 2>&1
+                echo $?
+            )"
             # Gets the return code of the previous command (last line)
             digReturnCode="${cmdResult##*$'\n'}"
 
@@ -217,8 +222,7 @@ os_check() {
 
         if [ "$valid_response" = true ]; then
             IFS=" " read -r -a supportedOS < <(echo "${response}" | tr -d '"')
-            for distro_and_versions in "${supportedOS[@]}"
-            do
+            for distro_and_versions in "${supportedOS[@]}"; do
                 distro_part="${distro_and_versions%%=*}"
                 versions_part="${distro_and_versions##*=}"
 
@@ -226,8 +230,7 @@ os_check() {
                 if [[ "${detected_os^^}" =~ ${distro_part^^} ]]; then
                     valid_os=true
                     IFS="," read -r -a supportedVer <<<"${versions_part}"
-                    for version in "${supportedVer[@]}"
-                    do
+                    for version in "${supportedVer[@]}"; do
                         if [[ "${detected_version}" =~ $version ]]; then
                             valid_version=true
                             break
@@ -292,17 +295,16 @@ test_dpkg_lock() {
     printf "  %b Waiting for package manager to finish (up to 30 seconds)\\n" "${INFO}"
     # fuser is a program to show which processes use the named files, sockets, or filesystems
     # So while the lock is held,
-    while fuser /var/lib/dpkg/lock >/dev/null 2>&1
-    do
+    while fuser /var/lib/dpkg/lock >/dev/null 2>&1; do
         # we wait half a second,
         sleep 0.5
         # increase the iterator,
-        ((i=i+1))
+        ((i = i + 1))
         # exit if waiting for more then 30 seconds
         if [[ $i -gt 60 ]]; then
             printf "  %b %bError: Could not verify package manager finished and released lock. %b\\n" "${CROSS}" "${COL_LIGHT_RED}" "${COL_NC}"
             printf "       Attempt to install packages manually and retry.\\n"
-            exit 1;
+            exit 1
         fi
     done
     # and then report success once dpkg is unlocked.
@@ -315,7 +317,7 @@ package_manager_detect() {
     # the distro-specific ones below.
 
     # First check to see if apt-get is installed.
-    if is_command apt-get ; then
+    if is_command apt-get; then
         # Set some global variables here
         # We don't set them earlier since the installed package manager might be rpm, so these values would be different
         PKG_MANAGER="apt-get"
@@ -332,12 +334,12 @@ package_manager_detect() {
         # Packages required to run this install script
         INSTALLER_DEPS=(git iproute2 dialog ca-certificates)
         # Packages required to run Pi-hole
-        PIHOLE_DEPS=(cron curl iputils-ping psmisc sudo unzip libcap2-bin dns-root-data libcap2 netcat-openbsd procps jq lshw)
+        PIHOLE_DEPS=(cron curl iputils-ping psmisc sudo unzip libcap2-bin dns-root-data libcap2 netcat-openbsd procps jq lshw bash-completion)
 
     # If apt-get is not found, check for rpm.
-    elif is_command rpm ; then
+    elif is_command rpm; then
         # Then check if dnf or yum is the package manager
-        if is_command dnf ; then
+        if is_command dnf; then
             PKG_MANAGER="dnf"
         else
             PKG_MANAGER="yum"
@@ -349,7 +351,7 @@ package_manager_detect() {
         PKG_COUNT="${PKG_MANAGER} check-update | grep -E '(.i686|.x86|.noarch|.arm|.src|.riscv64)' | wc -l || true"
         OS_CHECK_DEPS=(grep bind-utils)
         INSTALLER_DEPS=(git dialog iproute newt procps-ng chkconfig ca-certificates binutils)
-        PIHOLE_DEPS=(cronie curl findutils sudo unzip psmisc libcap nmap-ncat jq lshw)
+        PIHOLE_DEPS=(cronie curl findutils sudo unzip psmisc libcap nmap-ncat jq lshw bash-completion)
 
     # If neither apt-get or yum/dnf package managers were found
     else
@@ -370,17 +372,17 @@ is_repo() {
     # If the first argument passed to this function is a directory,
     if [[ -d "${directory}" ]]; then
         # move into the directory
-        pushd "${directory}" &> /dev/null || return 1
+        pushd "${directory}" &>/dev/null || return 1
         # Use git to check if the directory is a repo
         # git -C is not used here to support git versions older than 1.8.4
-        git status --short &> /dev/null || rc=$?
+        git status --short &>/dev/null || rc=$?
     # If the command was not successful,
     else
         # Set a non-zero return code if directory does not exist
         rc=1
     fi
     # Move back into the directory the user started in
-    popd &> /dev/null || return 1
+    popd &>/dev/null || return 1
     # Return the code; if one is not set, return 0
     return "${rc:-0}"
 }
@@ -403,9 +405,9 @@ make_repo() {
         return 1
     fi
     # Clone the repo and return the return code from this command
-    git clone -q --depth 20 "${remoteRepo}" "${directory}" &> /dev/null || return $?
+    git clone -q --depth 20 "${remoteRepo}" "${directory}" &>/dev/null || return $?
     # Move into the directory that was passed as an argument
-    pushd "${directory}" &> /dev/null || return 1
+    pushd "${directory}" &>/dev/null || return 1
     # Check current branch. If it is master, then reset to the latest available tag.
     # In case extra commits have been added after tagging/release (i.e in case of metadata updates/README.MD tweaks)
     curBranch=$(git rev-parse --abbrev-ref HEAD)
@@ -418,7 +420,7 @@ make_repo() {
     # Data in the repositories is public anyway so we can make it readable by everyone (+r to keep executable permission if already set by git)
     chmod -R a+rX "${directory}"
     # Move back into the original directory
-    popd &> /dev/null || return 1
+    popd &>/dev/null || return 1
     return 0
 }
 
@@ -436,14 +438,14 @@ update_repo() {
     # we only need to make one change here
     local str="Update repo in ${1}"
     # Move into the directory that was passed as an argument
-    pushd "${directory}" &> /dev/null || return 1
+    pushd "${directory}" &>/dev/null || return 1
     # Let the user know what's happening
     printf "  %b %s..." "${INFO}" "${str}"
     # Stash any local commits as they conflict with our working code
-    git stash --all --quiet &> /dev/null || true # Okay for stash failure
-    git clean --quiet --force -d || true # Okay for already clean directory
+    git stash --all --quiet &>/dev/null || true # Okay for stash failure
+    git clean --quiet --force -d || true        # Okay for already clean directory
     # Pull the latest commits
-    git pull --no-rebase --quiet &> /dev/null || return $?
+    git pull --no-rebase --quiet &>/dev/null || return $?
     # Check current branch. If it is master, then reset to the latest available tag.
     # In case extra commits have been added after tagging/release (i.e in case of metadata updates/README.MD tweaks)
     curBranch=$(git rev-parse --abbrev-ref HEAD)
@@ -455,7 +457,7 @@ update_repo() {
     # Data in the repositories is public anyway so we can make it readable by everyone (+r to keep executable permission if already set by git)
     chmod -R a+rX "${directory}"
     # Move back into the original directory
-    popd &> /dev/null || return 1
+    popd &>/dev/null || return 1
     return 0
 }
 
@@ -475,13 +477,19 @@ getGitFiles() {
         # Show that we're checking it
         printf "%b  %b %s\\n" "${OVER}" "${TICK}" "${str}"
         # Update the repo, returning an error message on failure
-        update_repo "${directory}" || { printf "\\n  %b: Could not update local repository. Contact support.%b\\n" "${COL_LIGHT_RED}" "${COL_NC}"; exit 1; }
+        update_repo "${directory}" || {
+            printf "\\n  %b: Could not update local repository. Contact support.%b\\n" "${COL_LIGHT_RED}" "${COL_NC}"
+            exit 1
+        }
     # If it's not a .git repo,
     else
         # Show an error
         printf "%b  %b %s\\n" "${OVER}" "${CROSS}" "${str}"
         # Attempt to make the repository, showing an error on failure
-        make_repo "${directory}" "${remoteRepo}" || { printf "\\n  %bError: Could not update local repository. Contact support.%b\\n" "${COL_LIGHT_RED}" "${COL_NC}"; exit 1; }
+        make_repo "${directory}" "${remoteRepo}" || {
+            printf "\\n  %bError: Could not update local repository. Contact support.%b\\n" "${COL_LIGHT_RED}" "${COL_NC}"
+            exit 1
+        }
     fi
     echo ""
     # Success via one of the two branches, as the commands would exit if they failed.
@@ -493,19 +501,19 @@ resetRepo() {
     # Use named variables for arguments
     local directory="${1}"
     # Move into the directory
-    pushd "${directory}" &> /dev/null || return 1
+    pushd "${directory}" &>/dev/null || return 1
     # Store the message in a variable
     str="Resetting repository within ${1}..."
     # Show the message
     printf "  %b %s..." "${INFO}" "${str}"
     # Use git to remove the local changes
-    git reset --hard &> /dev/null || return $?
+    git reset --hard &>/dev/null || return $?
     # Data in the repositories is public anyway so we can make it readable by everyone (+r to keep executable permission if already set by git)
     chmod -R a+rX "${directory}"
     # And show the status
     printf "%b  %b %s\\n" "${OVER}" "${TICK}" "${str}"
     # Return to where we came from
-    popd &> /dev/null || return 1
+    popd &>/dev/null || return 1
     # Function succeeded, as "git reset" would have triggered a return earlier if it failed
     return 0
 }
@@ -527,12 +535,12 @@ find_IPv4_information() {
     # the variable with just the first field.
     printf -v IPv4bare "$(printf ${route#*src })"
 
-    if ! valid_ip "${IPv4bare}" ; then
+    if ! valid_ip "${IPv4bare}"; then
         IPv4bare="127.0.0.1"
     fi
 
     # Append the CIDR notation to the IP address, if valid_ip fails this should return 127.0.0.1/8
-    IPV4_ADDRESS=$(ip -oneline -family inet address show | grep "${IPv4bare}/" |  awk '{print $4}' | awk 'END {print}')
+    IPV4_ADDRESS=$(ip -oneline -family inet address show | grep "${IPv4bare}/" | awk '{print $4}' | awk 'END {print}')
 }
 
 # Get available interfaces that are UP
@@ -546,32 +554,32 @@ welcomeDialogs() {
     # Display the welcome dialog using an appropriately sized window via the calculation conducted earlier in the script
     dialog --no-shadow --clear --keep-tite \
         --backtitle "Welcome" \
-            --title "Pi-hole Automated Installer" \
-            --msgbox "\\n\\nThis installer will transform your device into a network-wide ad blocker!" \
-            "${r}" "${c}" \
-            --and-widget --clear \
+        --title "Pi-hole Automated Installer" \
+        --msgbox "\\n\\nThis installer will transform your device into a network-wide ad blocker!" \
+        "${r}" "${c}" \
+        --and-widget --clear \
         --backtitle "Support Pi-hole" \
-            --title "Open Source Software" \
-            --msgbox "\\n\\nThe Pi-hole is free, but powered by your donations:  https://pi-hole.net/donate/" \
-            "${r}" "${c}" \
-            --and-widget --clear \
+        --title "Open Source Software" \
+        --msgbox "\\n\\nThe Pi-hole is free, but powered by your donations:  https://pi-hole.net/donate/" \
+        "${r}" "${c}" \
+        --and-widget --clear \
         --colors \
-            --backtitle "Initiating network interface" \
-            --title "Static IP Needed" \
-            --no-button "Exit" --yes-button "Continue" \
-            --defaultno \
-            --yesno "\\n\\nThe Pi-hole is a SERVER so it needs a STATIC IP ADDRESS to function properly.\\n\\n\
+        --backtitle "Initiating network interface" \
+        --title "Static IP Needed" \
+        --no-button "Exit" --yes-button "Continue" \
+        --defaultno \
+        --yesno "\\n\\nThe Pi-hole is a SERVER so it needs a STATIC IP ADDRESS to function properly.\\n\\n\
 \\Zb\\Z1IMPORTANT:\\Zn If you have not already done so, you must ensure that this device has a static IP.\\n\\n\
 Depending on your operating system, there are many ways to achieve this, through DHCP reservation, or by manually assigning one.\\n\\n\
-Please continue when the static addressing has been configured."\
-            "${r}" "${c}" && result=0 || result="$?"
+Please continue when the static addressing has been configured." \
+        "${r}" "${c}" && result=0 || result="$?"
 
-         case "${result}" in
-             "${DIALOG_CANCEL}" | "${DIALOG_ESC}")
-                printf "  %b Installer exited at static IP message.\\n" "${INFO}"
-                exit 1
-                ;;
-         esac
+    case "${result}" in
+    "${DIALOG_CANCEL}" | "${DIALOG_ESC}")
+        printf "  %b Installer exited at static IP message.\\n" "${INFO}"
+        exit 1
+        ;;
+    esac
 }
 
 # A function that lets the user pick an interface to use with Pi-hole
@@ -609,11 +617,11 @@ chooseInterface() {
 
         result=$?
         case ${result} in
-            "${DIALOG_CANCEL}"|"${DIALOG_ESC}")
-                # Show an error message and exit
-                printf "  %b %s\\n" "${CROSS}" "No interface selected, exiting installer"
-                exit 1
-                ;;
+        "${DIALOG_CANCEL}" | "${DIALOG_ESC}")
+            # Show an error message and exit
+            printf "  %b %s\\n" "${CROSS}" "No interface selected, exiting installer"
+            exit 1
+            ;;
         esac
 
         printf "  %b Using interface: %s\\n" "${INFO}" "${PIHOLE_INTERFACE}"
@@ -627,21 +635,21 @@ testIPv6() {
     # first will contain fda2 (ULA)
     printf -v first "%s" "${1%%:*}"
     # value1 will contain 253 which is the decimal value corresponding to 0xFD
-    value1=$(( (0x$first)/256 ))
+    value1=$(((0x$first) / 256))
     # value2 will contain 162 which is the decimal value corresponding to 0xA2
-    value2=$(( (0x$first)%256 ))
+    value2=$(((0x$first) % 256))
     # the ULA test is testing for fc00::/7 according to RFC 4193
-    if (( (value1&254)==252 )); then
+    if (((value1 & 254) == 252)); then
         # echoing result to calling function as return value
         echo "ULA"
     fi
     # the GUA test is testing for 2000::/3 according to RFC 4291
-    if (( (value1&112)==32 )); then
+    if (((value1 & 112) == 32)); then
         # echoing result to calling function as return value
         echo "GUA"
     fi
     # the LL test is testing for fe80::/10 according to RFC 4193
-    if (( (value1)==254 )) && (( (value2&192)==128 )); then
+    if (((value1) == 254)) && (((value2 & 192) == 128)); then
         # echoing result to calling function as return value
         echo "Link-local"
     fi
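Worked example for the ULA branch above (illustration only, not part of the patch): an address starting with "fda2" gives 0xfda2 = 64930, so value1 = 64930 / 256 = 253 and value2 = 64930 % 256 = 162; since 253 & 254 = 252, the fc00::/7 test matches and "ULA" is echoed:

    value1=$(((0xfda2) / 256)); value2=$(((0xfda2) % 256))
    (((value1 & 254) == 252)) && echo "ULA"   # prints ULA
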
@@ -699,9 +707,9 @@ valid_ip() {
 
     # Regex matching one IPv4 component, i.e. an integer from 0 to 255.
     # See https://tools.ietf.org/html/rfc1340
-    local ipv4elem="(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]?|0)";
+    local ipv4elem="(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]?|0)"
     # Regex matching an optional port (starting with '#') range of 1-65536
-    local portelem="(#(6553[0-5]|655[0-2][0-9]|65[0-4][0-9]{2}|6[0-4][0-9]{3}|[1-5][0-9]{4}|[1-9][0-9]{0,3}|0))?";
+    local portelem="(#(6553[0-5]|655[0-2][0-9]|65[0-4][0-9]{2}|6[0-4][0-9]{3}|[1-5][0-9]{4}|[1-9][0-9]{0,3}|0))?"
     # Build a full IPv4 regex from the above subexpressions
     local regex="^${ipv4elem}\\.${ipv4elem}\\.${ipv4elem}\\.${ipv4elem}${portelem}$"
 
@@ -721,7 +729,7 @@ valid_ip6() {
     # Regex matching an IPv6 CIDR, i.e. 1 to 128
     local v6cidr="(\\/([1-9]|[1-9][0-9]|1[0-1][0-9]|12[0-8])){0,1}"
     # Regex matching an optional port (starting with '#') range of 1-65536
-    local portelem="(#(6553[0-5]|655[0-2][0-9]|65[0-4][0-9]{2}|6[0-4][0-9]{3}|[1-5][0-9]{4}|[1-9][0-9]{0,3}|0))?";
+    local portelem="(#(6553[0-5]|655[0-2][0-9]|65[0-4][0-9]{2}|6[0-4][0-9]{3}|[1-5][0-9]{4}|[1-9][0-9]{0,3}|0))?"
     # Build a full IPv6 regex from the above subexpressions
     local regex="^(((${ipv6elem}))*((:${ipv6elem}))*::((${ipv6elem}))*((:${ipv6elem}))*|((${ipv6elem}))((:${ipv6elem})){7})${v6cidr}${portelem}$"
 
@@ -745,36 +753,34 @@ setDNS() {
     # and set the new one to newline
     IFS=$'\n'
     # Put the DNS Servers into an array
-    for DNSServer in ${DNS_SERVERS}
-    do
-        DNSName="$(cut -d';' -f1 <<< "${DNSServer}")"
+    for DNSServer in ${DNS_SERVERS}; do
+        DNSName="$(cut -d';' -f1 <<<"${DNSServer}")"
         DNSChooseOptions[DNSServerCount]="${DNSName}"
-        (( DNSServerCount=DNSServerCount+1 ))
+        ((DNSServerCount = DNSServerCount + 1))
         DNSChooseOptions[DNSServerCount]=""
-        (( DNSServerCount=DNSServerCount+1 ))
+        ((DNSServerCount = DNSServerCount + 1))
     done
     DNSChooseOptions[DNSServerCount]="Custom"
-    (( DNSServerCount=DNSServerCount+1 ))
+    ((DNSServerCount = DNSServerCount + 1))
     DNSChooseOptions[DNSServerCount]=""
     # Restore the IFS to what it was
     IFS=${OIFS}
     # In a dialog, show the options
     DNSchoices=$(dialog --no-shadow --keep-tite --output-fd 1 \
-                    --cancel-label "Exit" \
-                    --menu "Select Upstream DNS Provider. To use your own, select Custom." "${r}" "${c}" 7 \
+        --cancel-label "Exit" \
+        --menu "Select Upstream DNS Provider. To use your own, select Custom." "${r}" "${c}" 7 \
         "${DNSChooseOptions[@]}")
 
-        result=$?
-        case ${result} in
-            "${DIALOG_CANCEL}" | "${DIALOG_ESC}")
-            printf "  %b Cancel was selected, exiting installer%b\\n" "${COL_LIGHT_RED}" "${COL_NC}"
-            exit 1
-            ;;
-        esac
+    result=$?
+    case ${result} in
+    "${DIALOG_CANCEL}" | "${DIALOG_ESC}")
+        printf "  %b Cancel was selected, exiting installer%b\\n" "${COL_LIGHT_RED}" "${COL_NC}"
+        exit 1
+        ;;
+    esac
 
     # Depending on the user's choice, set the GLOBAL variables to the IP of the respective provider
-    if [[ "${DNSchoices}" == "Custom" ]]
-    then
+    if [[ "${DNSchoices}" == "Custom" ]]; then
         # Loop until we have a valid DNS setting
         until [[ "${DNSSettingsCorrect}" = True ]]; do
             # Signal value, to be used if the user inputs an invalid IP address
@@ -787,7 +793,7 @@ setDNS() {
                     # Otherwise, prepopulate the dialogue with the appropriate DNS value(s)
                     prePopulate=", ${PIHOLE_DNS_2}"
                 fi
-            elif  [[ "${PIHOLE_DNS_1}" ]] && [[ ! "${PIHOLE_DNS_2}" ]]; then
+            elif [[ "${PIHOLE_DNS_1}" ]] && [[ ! "${PIHOLE_DNS_2}" ]]; then
                 prePopulate="${PIHOLE_DNS_1}"
             elif [[ "${PIHOLE_DNS_1}" ]] && [[ "${PIHOLE_DNS_2}" ]]; then
                 prePopulate="${PIHOLE_DNS_1}, ${PIHOLE_DNS_2}"
@@ -795,23 +801,23 @@ setDNS() {
 
             # Prompt the user to enter custom upstream servers
             piholeDNS=$(dialog --no-shadow --keep-tite --output-fd 1 \
-                            --cancel-label "Exit" \
-                            --backtitle "Specify Upstream DNS Provider(s)" \
-                            --inputbox "Enter your desired upstream DNS provider(s), separated by a comma.\
+                --cancel-label "Exit" \
+                --backtitle "Specify Upstream DNS Provider(s)" \
+                --inputbox "Enter your desired upstream DNS provider(s), separated by a comma.\
 If you want to specify a port other than 53, separate it with a hash.\
-\\n\\nFor example '8.8.8.8, 8.8.4.4' or '127.0.0.1#5335'"\
-                                "${r}" "${c}" "${prePopulate}")
+\\n\\nFor example '8.8.8.8, 8.8.4.4' or '127.0.0.1#5335'" \
+                "${r}" "${c}" "${prePopulate}")
 
             result=$?
             case ${result} in
-                "${DIALOG_CANCEL}" | "${DIALOG_ESC}")
+            "${DIALOG_CANCEL}" | "${DIALOG_ESC}")
                 printf "  %b Cancel was selected, exiting installer%b\\n" "${COL_LIGHT_RED}" "${COL_NC}"
                 exit 1
                 ;;
             esac
 
             # Clean user input and replace whitespace with comma.
-            piholeDNS=$(sed 's/[, \t]\+/,/g' <<< "${piholeDNS}")
+            piholeDNS=$(sed 's/[, \t]\+/,/g' <<<"${piholeDNS}")
 
             # Separate the user input into the two DNS values (separated by a comma)
             printf -v PIHOLE_DNS_1 "%s" "${piholeDNS%%,*}"
@@ -852,16 +858,16 @@ If you want to specify a port other than 53, separate it with a hash.\
                     "${r}" "${c}" && result=0 || result=$?
 
                 case ${result} in
-                    "${DIALOG_OK}")
-                        DNSSettingsCorrect=True
-                        ;;
-                    "${DIALOG_CANCEL}")
-                        DNSSettingsCorrect=False
-                        ;;
-                    "${DIALOG_ESC}")
-                        printf "  %b Escape pressed, exiting installer at DNS Settings%b\\n" "${COL_LIGHT_RED}" "${COL_NC}"
-                        exit 1
-                        ;;
+                "${DIALOG_OK}")
+                    DNSSettingsCorrect=True
+                    ;;
+                "${DIALOG_CANCEL}")
+                    DNSSettingsCorrect=False
+                    ;;
+                "${DIALOG_ESC}")
+                    printf "  %b Escape pressed, exiting installer at DNS Settings%b\\n" "${COL_LIGHT_RED}" "${COL_NC}"
+                    exit 1
+                    ;;
                 esac
             fi
         done
@@ -870,13 +876,11 @@ If you want to specify a port other than 53, separate it with a hash.\
         OIFS=$IFS
         # and set the new one to newline
         IFS=$'\n'
-        for DNSServer in ${DNS_SERVERS}
-        do
-            DNSName="$(cut -d';' -f1 <<< "${DNSServer}")"
-            if [[ "${DNSchoices}" == "${DNSName}" ]]
-            then
-                PIHOLE_DNS_1="$(cut -d';' -f2 <<< "${DNSServer}")"
-                PIHOLE_DNS_2="$(cut -d';' -f3 <<< "${DNSServer}")"
+        for DNSServer in ${DNS_SERVERS}; do
+            DNSName="$(cut -d';' -f1 <<<"${DNSServer}")"
+            if [[ "${DNSchoices}" == "${DNSName}" ]]; then
+                PIHOLE_DNS_1="$(cut -d';' -f2 <<<"${DNSServer}")"
+                PIHOLE_DNS_2="$(cut -d';' -f3 <<<"${DNSServer}")"
                 break
             fi
         done
@@ -900,21 +904,21 @@ setLogging() {
         "${r}" "${c}" && result=0 || result=$?
 
     case ${result} in
-        "${DIALOG_OK}")
-            # If they chose yes,
-            printf "  %b Query Logging on.\\n" "${INFO}"
-            QUERY_LOGGING=true
-            ;;
-        "${DIALOG_CANCEL}")
-            # If they chose no,
-            printf "  %b Query Logging off.\\n" "${INFO}"
-            QUERY_LOGGING=false
-            ;;
-        "${DIALOG_ESC}")
-            # User pressed <ESC>
-            printf "  %b Escape pressed, exiting installer at Query Logging choice.%b\\n" "${COL_LIGHT_RED}" "${COL_NC}"
-            exit 1
-            ;;
+    "${DIALOG_OK}")
+        # If they chose yes,
+        printf "  %b Query Logging on.\\n" "${INFO}"
+        QUERY_LOGGING=true
+        ;;
+    "${DIALOG_CANCEL}")
+        # If they chose no,
+        printf "  %b Query Logging off.\\n" "${INFO}"
+        QUERY_LOGGING=false
+        ;;
+    "${DIALOG_ESC}")
+        # User pressed <ESC>
+        printf "  %b Escape pressed, exiting installer at Query Logging choice.%b\\n" "${COL_LIGHT_RED}" "${COL_NC}"
+        exit 1
+        ;;
     esac
 }
 
@@ -931,16 +935,16 @@ setPrivacyLevel() {
         "2" "Hide domains and clients" off \
         "3" "Anonymous mode" off)
 
-        result=$?
-        case ${result} in
-            "${DIALOG_OK}")
-                printf "  %b Using privacy level: %s\\n" "${INFO}" "${PRIVACY_LEVEL}"
-                ;;
-            "${DIALOG_CANCEL}" | "${DIALOG_ESC}")
-                printf "  %b Cancelled privacy level selection.%b\\n" "${COL_LIGHT_RED}" "${COL_NC}"
-                exit 1
-                ;;
-        esac
+    result=$?
+    case ${result} in
+    "${DIALOG_OK}")
+        printf "  %b Using privacy level: %s\\n" "${INFO}" "${PRIVACY_LEVEL}"
+        ;;
+    "${DIALOG_CANCEL}" | "${DIALOG_ESC}")
+        printf "  %b Cancelled privacy level selection.%b\\n" "${COL_LIGHT_RED}" "${COL_NC}"
+        exit 1
+        ;;
+    esac
 }
 
 # A function to display a list of example blocklists for users to select
@@ -960,20 +964,20 @@ chooseBlocklists() {
         "${r}" "${c}" && result=0 || result=$?
 
     case ${result} in
-        "${DIALOG_OK}")
-            # If they chose yes,
-            printf "  %b Installing StevenBlack's Unified Hosts List\\n" "${INFO}"
-            echo "https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts" >> "${adlistFile}"
-            ;;
-        "${DIALOG_CANCEL}")
-            # If they chose no,
-            printf "  %b Not installing StevenBlack's Unified Hosts List\\n" "${INFO}"
-            ;;
-        "${DIALOG_ESC}")
-            # User pressed <ESC>
-            printf "  %b Escape pressed, exiting installer at blocklist choice.%b\\n" "${COL_LIGHT_RED}" "${COL_NC}"
-            exit 1
-            ;;
+    "${DIALOG_OK}")
+        # If they chose yes,
+        printf "  %b Installing StevenBlack's Unified Hosts List\\n" "${INFO}"
+        echo "https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts" >>"${adlistFile}"
+        ;;
+    "${DIALOG_CANCEL}")
+        # If they chose no,
+        printf "  %b Not installing StevenBlack's Unified Hosts List\\n" "${INFO}"
+        ;;
+    "${DIALOG_ESC}")
+        # User pressed <ESC>
+        printf "  %b Escape pressed, exiting installer at blocklist choice.%b\\n" "${COL_LIGHT_RED}" "${COL_NC}"
+        exit 1
+        ;;
     esac
     # Create an empty adList file with appropriate permissions.
     if [ ! -f "${adlistFile}" ]; then
@@ -989,9 +993,9 @@ installDefaultBlocklists() {
     # In unattended setup, could be useful to use userdefined blocklist.
     # If this file exists, we avoid overriding it.
     if [[ -f "${adlistFile}" ]]; then
-        return;
+        return
     fi
-        echo "https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts" >> "${adlistFile}"
+    echo "https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts" >>"${adlistFile}"
 }
 
 remove_old_dnsmasq_ftl_configs() {
@@ -1033,7 +1037,6 @@ remove_old_pihole_lighttpd_configs() {
     local confavailable="/etc/lighttpd/conf-available/15-pihole-admin.conf"
     local confenabled="/etc/lighttpd/conf-enabled/15-pihole-admin.conf"
 
-
     if [[ -f "${lighttpdConfig}" ]]; then
         sed -i '/include "\/etc\/lighttpd\/conf.d\/pihole-admin.conf"/d' "${lighttpdConfig}"
     fi
@@ -1042,8 +1045,8 @@ remove_old_pihole_lighttpd_configs() {
         rm "${condfd}"
     fi
 
-    if is_command lighty-disable-mod ; then
-        lighty-disable-mod pihole-admin > /dev/null || true
+    if is_command lighty-disable-mod; then
+        lighty-disable-mod pihole-admin >/dev/null || true
     fi
 
     if [[ -f "${confavailable}" ]]; then
@@ -1063,7 +1066,7 @@ clean_existing() {
     # Pop the first argument, and shift all addresses down by one (i.e. ${2} becomes ${1})
     shift
     # Then, we can access all arguments ($@) without including the directory to clean
-    local old_files=( "$@" )
+    local old_files=("$@")
 
     # Remove each script in the old_files array
     for script in "${old_files[@]}"; do
@@ -1101,7 +1104,7 @@ installScripts() {
 
     else
         # Otherwise, show an error and exit
-        printf "%b  %b %s\\n" "${OVER}"  "${CROSS}" "${str}"
+        printf "%b  %b %s\\n" "${OVER}" "${CROSS}" "${str}"
         printf "\\t\\t%bError: Local repo %s not found, exiting installer%b\\n" "${COL_LIGHT_RED}" "${PI_HOLE_LOCAL_REPO}" "${COL_NC}"
         return 1
     fi
@@ -1117,7 +1120,7 @@ installConfigs() {
     # Install list of DNS servers
     # Format: Name;Primary IPv4;Secondary IPv4;Primary IPv6;Secondary IPv6
     # Some values may be empty (for example: DNS servers without IPv6 support)
-    echo "${DNS_SERVERS}" > "${PI_HOLE_CONFIG_DIR}/dns-servers.conf"
+    echo "${DNS_SERVERS}" >"${PI_HOLE_CONFIG_DIR}/dns-servers.conf"
     chmod 644 "${PI_HOLE_CONFIG_DIR}/dns-servers.conf"
     chown pihole:pihole "${PI_HOLE_CONFIG_DIR}/dns-servers.conf"
 
@@ -1154,7 +1157,7 @@ install_manpage() {
     # Default location for man files for /usr/local/bin is /usr/local/share/man
     # on lightweight systems may not be present, so check before copying.
     printf "  %b Testing man page installation" "${INFO}"
-    if ! is_command mandb ; then
+    if ! is_command mandb; then
         # if mandb is not present, no manpage support
         printf "%b  %b man not installed\\n" "${OVER}" "${INFO}"
         return
@@ -1199,10 +1202,10 @@ stop_service() {
     # Can softfail, as process may not be installed when this is called
     local str="Stopping ${1} service"
     printf "  %b %s..." "${INFO}" "${str}"
-    if is_command systemctl ; then
-        systemctl stop "${1}" &> /dev/null || true
+    if is_command systemctl; then
+        systemctl stop "${1}" &>/dev/null || true
     else
-        service "${1}" stop &> /dev/null || true
+        service "${1}" stop &>/dev/null || true
     fi
     printf "%b  %b %s...\\n" "${OVER}" "${TICK}" "${str}"
 }
@@ -1213,12 +1216,12 @@ restart_service() {
     local str="Restarting ${1} service"
     printf "  %b %s..." "${INFO}" "${str}"
     # If systemctl exists,
-    if is_command systemctl ; then
+    if is_command systemctl; then
         # use that to restart the service
-        systemctl restart "${1}" &> /dev/null
+        systemctl restart "${1}" &>/dev/null
     else
         # Otherwise, fall back to the service command
-        service "${1}" restart &> /dev/null
+        service "${1}" restart &>/dev/null
     fi
     printf "%b  %b %s...\\n" "${OVER}" "${TICK}" "${str}"
 }
@@ -1229,12 +1232,12 @@ enable_service() {
     local str="Enabling ${1} service to start on reboot"
     printf "  %b %s..." "${INFO}" "${str}"
     # If systemctl exists,
-    if is_command systemctl ; then
+    if is_command systemctl; then
         # use that to enable the service
-        systemctl enable "${1}" &> /dev/null
+        systemctl enable "${1}" &>/dev/null
     else
         #  Otherwise, use update-rc.d to accomplish this
-        update-rc.d "${1}" defaults &> /dev/null
+        update-rc.d "${1}" defaults &>/dev/null
     fi
     printf "%b  %b %s...\\n" "${OVER}" "${TICK}" "${str}"
 }
@@ -1245,24 +1248,24 @@ disable_service() {
     local str="Disabling ${1} service"
     printf "  %b %s..." "${INFO}" "${str}"
     # If systemctl exists,
-    if is_command systemctl ; then
+    if is_command systemctl; then
         # use that to disable the service
-        systemctl disable "${1}" &> /dev/null
+        systemctl disable "${1}" &>/dev/null
     else
         # Otherwise, use update-rc.d to accomplish this
-        update-rc.d "${1}" disable &> /dev/null
+        update-rc.d "${1}" disable &>/dev/null
     fi
     printf "%b  %b %s...\\n" "${OVER}" "${TICK}" "${str}"
 }
 
 check_service_active() {
     # If systemctl exists,
-    if is_command systemctl ; then
+    if is_command systemctl; then
         # use that to check the status of the service
-        systemctl is-enabled "${1}" &> /dev/null
+        systemctl is-enabled "${1}" &>/dev/null
     else
         # Otherwise, fall back to service command
-        service "${1}" status &> /dev/null
+        service "${1}" status &>/dev/null
     fi
 }
 
@@ -1273,7 +1276,7 @@ disable_resolved_stublistener() {
     if check_service_active "systemd-resolved"; then
         # Check if DNSStubListener is enabled
         printf "  %b %b Testing if systemd-resolved DNSStub-Listener is active" "${OVER}" "${INFO}"
-        if ( grep -E '#?DNSStubListener=yes' /etc/systemd/resolved.conf &> /dev/null ); then
+        if (grep -E '#?DNSStubListener=yes' /etc/systemd/resolved.conf &>/dev/null); then
             # Disable the DNSStubListener to unbind it from port 53
             # Note that this breaks dns functionality on host until ftl are up and running
             printf "%b  %b Disabling systemd-resolved DNSStubListener" "${OVER}" "${TICK}"
@@ -1298,14 +1301,14 @@ update_package_cache() {
     local str="Update local cache of available packages"
     printf "  %b %s..." "${INFO}" "${str}"
     # Create a command from the package cache variable
-    if eval "${UPDATE_PKG_CACHE}" &> /dev/null; then
+    if eval "${UPDATE_PKG_CACHE}" &>/dev/null; then
         printf "%b  %b %s\\n" "${OVER}" "${TICK}" "${str}"
     else
         # Otherwise, show an error and exit
 
         # In case we used apt-get and apt is also available, we use this as recommendation as we have seen it
         # gives more user-friendly (interactive) advice
-        if [[ ${PKG_MANAGER} == "apt-get" ]] && is_command apt ; then
+        if [[ ${PKG_MANAGER} == "apt-get" ]] && is_command apt; then
             UPDATE_PKG_CACHE="apt update"
         fi
         printf "%b  %b %s\\n" "${OVER}" "${CROSS}" "${str}"
@@ -1347,11 +1350,11 @@ install_dependent_packages() {
     # amount of download traffic.
     # NOTE: We may be able to use this installArray in the future to create a list of package that were
     # installed by us, and remove only the installed packages, and not the entire list.
-    if is_command apt-get ; then
+    if is_command apt-get; then
         # For each package, check if it's already installed (and if so, don't add it to the installArray)
         for i in "$@"; do
             printf "  %b Checking for %s..." "${INFO}" "${i}"
-            if dpkg-query -W -f='${Status}' "${i}" 2>/dev/null | grep "ok installed" &> /dev/null; then
+            if dpkg-query -W -f='${Status}' "${i}" 2>/dev/null | grep "ok installed" &>/dev/null; then
                 printf "%b  %b Checking for %s\\n" "${OVER}" "${TICK}" "${i}"
             else
                 printf "%b  %b Checking for %s (will be installed)\\n" "${OVER}" "${INFO}" "${i}"
@@ -1364,9 +1367,9 @@ install_dependent_packages() {
             # Running apt-get install with minimal output can cause some issues with
             # requiring user input (e.g password for phpmyadmin see #218)
             printf "  %b Processing %s install(s) for: %s, please wait...\\n" "${INFO}" "${PKG_MANAGER}" "${installArray[*]}"
-            printf '%*s\n' "${c}" '' | tr " " -;
+            printf '%*s\n' "${c}" '' | tr " " -
             "${PKG_INSTALL[@]}" "${installArray[@]}"
-            printf '%*s\n' "${c}" '' | tr " " -;
+            printf '%*s\n' "${c}" '' | tr " " -
             return
         fi
         printf "\\n"
@@ -1377,7 +1380,7 @@ install_dependent_packages() {
     for i in "$@"; do
         # For each package, check if it's already installed (and if so, don't add it to the installArray)
         printf "  %b Checking for %s..." "${INFO}" "${i}"
-        if "${PKG_MANAGER}" -q list installed "${i}" &> /dev/null; then
+        if "${PKG_MANAGER}" -q list installed "${i}" &>/dev/null; then
             printf "%b  %b Checking for %s\\n" "${OVER}" "${TICK}" "${i}"
         else
             printf "%b  %b Checking for %s (will be installed)\\n" "${OVER}" "${INFO}" "${i}"
@@ -1387,9 +1390,9 @@ install_dependent_packages() {
     # If there's anything to install, install everything in the list.
     if [[ "${#installArray[@]}" -gt 0 ]]; then
         printf "  %b Processing %s install(s) for: %s, please wait...\\n" "${INFO}" "${PKG_MANAGER}" "${installArray[*]}"
-        printf '%*s\n' "${c}" '' | tr " " -;
+        printf '%*s\n' "${c}" '' | tr " " -
         "${PKG_INSTALL[@]}" "${installArray[@]}"
-        printf '%*s\n' "${c}" '' | tr " " -;
+        printf '%*s\n' "${c}" '' | tr " " -
         return
     fi
     printf "\\n"
@@ -1423,9 +1426,9 @@ create_pihole_user() {
     local str="Checking for user 'pihole'"
     printf "  %b %s..." "${INFO}" "${str}"
     # If the pihole user exists,
-    if id -u pihole &> /dev/null; then
+    if id -u pihole &>/dev/null; then
         # and if the pihole group exists,
-        if getent group pihole > /dev/null 2>&1; then
+        if getent group pihole >/dev/null 2>&1; then
             # succeed
             printf "%b  %b %s\\n" "${OVER}" "${TICK}" "${str}"
         else
@@ -1452,7 +1455,7 @@ create_pihole_user() {
         printf "%b  %b %s" "${OVER}" "${CROSS}" "${str}"
         local str="Checking for group 'pihole'"
         printf "  %b %s..." "${INFO}" "${str}"
-        if getent group pihole > /dev/null 2>&1; then
+        if getent group pihole >/dev/null 2>&1; then
             # group pihole exists
             printf "%b  %b %s\\n" "${OVER}" "${TICK}" "${str}"
             # then create and add her to the pihole group
@@ -1495,7 +1498,7 @@ installLogrotate() {
     if [[ -f ${target} ]]; then
 
         # Account for changed logfile paths from /var/log -> /var/log/pihole/ made in core v5.11.
-        if  grep -q "/var/log/pihole.log" ${target}  ||  grep -q "/var/log/pihole-FTL.log" ${target}; then
+        if grep -q "/var/log/pihole.log" ${target} || grep -q "/var/log/pihole-FTL.log" ${target}; then
             sed -i 's/\/var\/log\/pihole.log/\/var\/log\/pihole\/pihole.log/g' ${target}
             sed -i 's/\/var\/log\/pihole-FTL.log/\/var\/log\/pihole\/FTL.log/g' ${target}
 
@@ -1572,27 +1575,27 @@ checkSelinux() {
         # Check the default SELinux mode
         DEFAULT_SELINUX=$(awk -F= '/^SELINUX=/ {print $2}' /etc/selinux/config)
         case "${DEFAULT_SELINUX,,}" in
-            enforcing)
-                printf "  %b %bDefault SELinux: %s%b\\n" "${CROSS}" "${COL_RED}" "${DEFAULT_SELINUX,,}" "${COL_NC}"
-                SELINUX_ENFORCING=1
-                ;;
-            *)  # 'permissive' and 'disabled'
-                printf "  %b %bDefault SELinux: %s%b\\n" "${TICK}" "${COL_GREEN}" "${DEFAULT_SELINUX,,}" "${COL_NC}"
-                ;;
+        enforcing)
+            printf "  %b %bDefault SELinux: %s%b\\n" "${CROSS}" "${COL_RED}" "${DEFAULT_SELINUX,,}" "${COL_NC}"
+            SELINUX_ENFORCING=1
+            ;;
+        *) # 'permissive' and 'disabled'
+            printf "  %b %bDefault SELinux: %s%b\\n" "${TICK}" "${COL_GREEN}" "${DEFAULT_SELINUX,,}" "${COL_NC}"
+            ;;
         esac
         # Check the current state of SELinux
         CURRENT_SELINUX=$(getenforce)
         case "${CURRENT_SELINUX,,}" in
-            enforcing)
-                printf "  %b %bCurrent SELinux: %s%b\\n" "${CROSS}" "${COL_RED}" "${CURRENT_SELINUX,,}" "${COL_NC}"
-                SELINUX_ENFORCING=1
-                ;;
-            *)  # 'permissive' and 'disabled'
-                printf "  %b %bCurrent SELinux: %s%b\\n" "${TICK}" "${COL_GREEN}" "${CURRENT_SELINUX,,}" "${COL_NC}"
-                ;;
+        enforcing)
+            printf "  %b %bCurrent SELinux: %s%b\\n" "${CROSS}" "${COL_RED}" "${CURRENT_SELINUX,,}" "${COL_NC}"
+            SELINUX_ENFORCING=1
+            ;;
+        *) # 'permissive' and 'disabled'
+            printf "  %b %bCurrent SELinux: %s%b\\n" "${TICK}" "${COL_GREEN}" "${CURRENT_SELINUX,,}" "${COL_NC}"
+            ;;
         esac
     else
-        echo -e "  ${INFO} ${COL_GREEN}SELinux not detected${COL_NC}";
+        echo -e "  ${INFO} ${COL_GREEN}SELinux not detected${COL_NC}"
     fi
     # Exit the installer if any SELinux checks toggled the flag
     if [[ "${SELINUX_ENFORCING}" -eq 1 ]] && [[ -z "${PIHOLE_SELINUX}" ]]; then
@@ -1601,8 +1604,8 @@ checkSelinux() {
         printf "      This check can be skipped by setting the environment variable %bPIHOLE_SELINUX%b to %btrue%b\\n" "${COL_LIGHT_RED}" "${COL_NC}" "${COL_LIGHT_RED}" "${COL_NC}"
         printf "      e.g: export PIHOLE_SELINUX=true\\n"
         printf "      By setting this variable to true you acknowledge there may be issues with Pi-hole during or after the install\\n"
-        printf "\\n  %bSELinux Enforcing detected, exiting installer%b\\n" "${COL_LIGHT_RED}" "${COL_NC}";
-        exit 1;
+        printf "\\n  %bSELinux Enforcing detected, exiting installer%b\\n" "${COL_LIGHT_RED}" "${COL_NC}"
+        exit 1
     elif [[ "${SELINUX_ENFORCING}" -eq 1 ]] && [[ -n "${PIHOLE_SELINUX}" ]]; then
         printf "  %b %bSELinux Enforcing detected%b. PIHOLE_SELINUX env variable set - installer will continue\\n" "${INFO}" "${COL_LIGHT_RED}" "${COL_NC}"
     fi
@@ -1612,10 +1615,10 @@ checkSelinux() {
 displayFinalMessage() {
     # TODO: COME BACK TO THIS, WHAT IS GOING ON?
     # If the number of arguments is > 0,
-    if [[ "${#1}" -gt 0 ]] ; then
+    if [[ "${#1}" -gt 0 ]]; then
         # set the password to the first argument.
         pwstring="$1"
-    elif [[ $(pihole-FTL --config webserver.api.pwhash) == '""' ]] ; then
+    elif [[ $(pihole-FTL --config webserver.api.pwhash) == '""' ]]; then
         # Else if the password exists from previous setup, we'll load it later
         pwstring="unchanged"
     else
@@ -1626,7 +1629,6 @@ displayFinalMessage() {
     # Store a message in a variable and display it
     additional="View the web interface at http://pi.hole/admin:${WEBPORT} or http://${IPV4_ADDRESS%/*}:${WEBPORT}/admin\\n\\nYour Admin Webpage login password is ${pwstring}"
 
-
     # Final completion message to user
     dialog --no-shadow --keep-tite \
         --title "Installation Complete!" \
@@ -1655,44 +1657,51 @@ update_dialogs() {
 
     # Display the information to the user
     UpdateCmd=$(dialog --no-shadow --keep-tite --output-fd 1 \
-                --cancel-label Exit \
-                --title "Existing Install Detected!" \
-                --menu "\\n\\nWe have detected an existing install.\
+        --cancel-label Exit \
+        --title "Existing Install Detected!" \
+        --menu "\\n\\nWe have detected an existing install.\
 \\n\\nPlease choose from the following options:\
-\\n($strAdd)"\
-                    "${r}" "${c}" 2 \
-    "${opt1a}"  "${opt1b}" \
-    "${opt2a}"  "${opt2b}") || result=$?
+\\n($strAdd)" \
+        "${r}" "${c}" 2 \
+        "${opt1a}" "${opt1b}" \
+        "${opt2a}" "${opt2b}") || result=$?
 
     case ${result} in
-        "${DIALOG_CANCEL}" | "${DIALOG_ESC}")
-            printf "  %b Cancel was selected, exiting installer%b\\n" "${COL_LIGHT_RED}" "${COL_NC}"
-            exit 1
-            ;;
+    "${DIALOG_CANCEL}" | "${DIALOG_ESC}")
+        printf "  %b Cancel was selected, exiting installer%b\\n" "${COL_LIGHT_RED}" "${COL_NC}"
+        exit 1
+        ;;
     esac
 
     # Set the variable based on if the user chooses
     case ${UpdateCmd} in
-        # repair, or
-        "${opt1a}")
-            printf "  %b %s option selected\\n" "${INFO}" "${opt1a}"
-            useUpdateVars=true
-            ;;
-        # reconfigure,
-        "${opt2a}")
-            printf "  %b %s option selected\\n" "${INFO}" "${opt2a}"
-            useUpdateVars=false
-            ;;
+    # repair, or
+    "${opt1a}")
+        printf "  %b %s option selected\\n" "${INFO}" "${opt1a}"
+        useUpdateVars=true
+        ;;
+    # reconfigure,
+    "${opt2a}")
+        printf "  %b %s option selected\\n" "${INFO}" "${opt2a}"
+        useUpdateVars=false
+        ;;
     esac
 }
 
 check_download_exists() {
+    # Check if the download exists and we can reach the server
     status=$(curl --head --silent "https://ftl.pi-hole.net/${1}" | head -n 1)
-    if grep -q "404" <<< "$status"; then
-        return 1
-    else
+
+    # Check the status code
+    if grep -q "200" <<<"$status"; then
         return 0
+    elif grep -q "404" <<<"$status"; then
+        return 1
     fi
+
+    # Other error or no status code at all, e.g., no Internet, server not
+    # available/reachable, ...
+    return 2
 }
 
 fully_fetch_repo() {
@@ -1717,7 +1726,7 @@ get_available_branches() {
 
     cd "${directory}" || return 1
     # Get reachable remote branches, but store STDERR as STDOUT variable
-    output=$( { git ls-remote --heads --quiet | cut -d'/' -f3- -; } 2>&1 )
+    output=$({ git ls-remote --heads --quiet | cut -d'/' -f3- -; } 2>&1)
     # echo status for calling function to capture
     echo "$output"
     return
@@ -1733,7 +1742,7 @@ fetch_checkout_pull_branch() {
     # Set the reference for the requested branch, fetch, check it put and pull it
     cd "${directory}" || return 1
     git remote set-branches origin "${branch}" || return 1
-    git stash --all --quiet &> /dev/null || true
+    git stash --all --quiet &>/dev/null || true
     git clean --quiet --force -d || true
     git fetch --quiet || return 1
     checkout_pull_branch "${directory}" "${branch}" || return 1
@@ -1770,27 +1779,31 @@ clone_or_update_repos() {
     if [[ "${reconfigure}" == true ]]; then
         printf "  %b Performing reconfiguration, skipping download of local repos\\n" "${INFO}"
         # Reset the Core repo
-        resetRepo ${PI_HOLE_LOCAL_REPO} || \
-        { printf "  %b Unable to reset %s, exiting installer%b\\n" "${COL_LIGHT_RED}" "${PI_HOLE_LOCAL_REPO}" "${COL_NC}"; \
-        exit 1; \
-        }
+        resetRepo ${PI_HOLE_LOCAL_REPO} ||
+            {
+                printf "  %b Unable to reset %s, exiting installer%b\\n" "${COL_LIGHT_RED}" "${PI_HOLE_LOCAL_REPO}" "${COL_NC}"
+                exit 1
+            }
         # Reset the Web repo
-        resetRepo ${webInterfaceDir} || \
-        { printf "  %b Unable to reset %s, exiting installer%b\\n" "${COL_LIGHT_RED}" "${webInterfaceDir}" "${COL_NC}"; \
-        exit 1; \
-        }
+        resetRepo ${webInterfaceDir} ||
+            {
+                printf "  %b Unable to reset %s, exiting installer%b\\n" "${COL_LIGHT_RED}" "${webInterfaceDir}" "${COL_NC}"
+                exit 1
+            }
     # Otherwise, a repair is happening
     else
         # so get git files for Core
-        getGitFiles ${PI_HOLE_LOCAL_REPO} ${piholeGitUrl} || \
-        { printf "  %b Unable to clone %s into %s, unable to continue%b\\n" "${COL_LIGHT_RED}" "${piholeGitUrl}" "${PI_HOLE_LOCAL_REPO}" "${COL_NC}"; \
-        exit 1; \
-        }
+        getGitFiles ${PI_HOLE_LOCAL_REPO} ${piholeGitUrl} ||
+            {
+                printf "  %b Unable to clone %s into %s, unable to continue%b\\n" "${COL_LIGHT_RED}" "${piholeGitUrl}" "${PI_HOLE_LOCAL_REPO}" "${COL_NC}"
+                exit 1
+            }
         # get the Web git files
-        getGitFiles ${webInterfaceDir} ${webInterfaceGitUrl} || \
-        { printf "  %b Unable to clone %s into ${webInterfaceDir}, exiting installer%b\\n" "${COL_LIGHT_RED}" "${webInterfaceGitUrl}" "${COL_NC}"; \
-        exit 1; \
-        }
+        getGitFiles ${webInterfaceDir} ${webInterfaceGitUrl} ||
+            {
+                printf "  %b Unable to clone %s into ${webInterfaceDir}, exiting installer%b\\n" "${COL_LIGHT_RED}" "${webInterfaceGitUrl}" "${COL_NC}"
+                exit 1
+            }
     fi
 }
 
@@ -1803,13 +1816,16 @@ FTLinstall() {
     printf "  %b %s..." "${INFO}" "${str}"
 
     # Move into the temp ftl directory
-    pushd "$(mktemp -d)" > /dev/null || { printf "Unable to make temporary directory for FTL binary download\\n"; return 1; }
+    pushd "$(mktemp -d)" >/dev/null || {
+        printf "Unable to make temporary directory for FTL binary download\\n"
+        return 1
+    }
     local tempdir
     tempdir="$(pwd)"
     local ftlBranch
     local url
 
-    if [[ -f "/etc/pihole/ftlbranch" ]];then
+    if [[ -f "/etc/pihole/ftlbranch" ]]; then
         ftlBranch=$(</etc/pihole/ftlbranch)
     else
         ftlBranch="master"
@@ -1819,7 +1835,7 @@ FTLinstall() {
     binary="${1}"
 
     # Determine which version of FTL to download
-    if [[ "${ftlBranch}" == "master" ]];then
+    if [[ "${ftlBranch}" == "master" ]]; then
         url="https://github.com/pi-hole/ftl/releases/latest/download"
     else
         url="https://ftl.pi-hole.net/${ftlBranch}"
@@ -1837,13 +1853,16 @@ FTLinstall() {
             curl -sSL "https://ftl.pi-hole.net/macvendor.db" -o "${PI_HOLE_CONFIG_DIR}/macvendor.db" || true
 
             # Stop pihole-FTL service if available
-            stop_service pihole-FTL &> /dev/null
+            stop_service pihole-FTL &>/dev/null
 
             # Install the new version with the correct permissions
             install -T -m 0755 "${binary}" /usr/bin/pihole-FTL
 
             # Move back into the original directory the user was in
-            popd > /dev/null || { printf "Unable to return to original directory after FTL binary download.\\n"; return 1; }
+            popd >/dev/null || {
+                printf "Unable to return to original directory after FTL binary download.\\n"
+                return 1
+            }
 
             # Installed the FTL service
             printf "%b  %b %s\\n" "${OVER}" "${TICK}" "${str}"
@@ -1854,7 +1873,10 @@ FTLinstall() {
             return 0
         else
             # Otherwise, the hash download failed, so print and exit.
-            popd > /dev/null || { printf "Unable to return to original directory after FTL binary download.\\n"; return 1; }
+            popd >/dev/null || {
+                printf "Unable to return to original directory after FTL binary download.\\n"
+                return 1
+            }
             printf "%b  %b %s\\n" "${OVER}" "${CROSS}" "${str}"
             printf "  %b Error: Download of %s/%s failed (checksum error)%b\\n" "${COL_LIGHT_RED}" "${url}" "${binary}" "${COL_NC}"
 
@@ -1864,7 +1886,10 @@ FTLinstall() {
         fi
     else
         # Otherwise, the download failed, so print and exit.
-        popd > /dev/null || { printf "Unable to return to original directory after FTL binary download.\\n"; return 1; }
+        popd >/dev/null || {
+            printf "Unable to return to original directory after FTL binary download.\\n"
+            return 1
+        }
         printf "%b  %b %s\\n" "${OVER}" "${CROSS}" "${str}"
         # The URL could not be found
         printf "  %b Error: URL %s/%s not found%b\\n" "${COL_LIGHT_RED}" "${url}" "${binary}" "${COL_NC}"
@@ -1876,9 +1901,9 @@ FTLinstall() {
 }
 
 remove_dir() {
-  # Delete dir
-  rm -r "${1}" > /dev/null 2>&1 || \
-    echo -e "  ${CROSS} Unable to remove ${1}"
+    # Delete dir
+    rm -r "${1}" >/dev/null 2>&1 ||
+        echo -e "  ${CROSS} Unable to remove ${1}"
 }
 
 get_binary_name() {
@@ -1923,7 +1948,7 @@ get_binary_name() {
     elif [[ "${machine}" == "x86_64" ]]; then
         # This gives the processor of packages dpkg installs (for example, "i386")
         local dpkgarch
-        dpkgarch=$(dpkg --print-processor 2> /dev/null || dpkg --print-architecture 2> /dev/null)
+        dpkgarch=$(dpkg --print-processor 2>/dev/null || dpkg --print-architecture 2>/dev/null)
 
         # Special case: This is a 32 bit OS, installed on a 64 bit machine
         # -> change machine processor to download the 32 bit executable
@@ -1957,16 +1982,14 @@ get_binary_name() {
 }
 
 FTLcheckUpdate() {
-    #In the next section we check to see if FTL is already installed (in case of pihole -r).
-    #If the installed version matches the latest version, then check the installed sha1sum of the binary vs the remote sha1sum. If they do not match, then download
-    printf "  %b Checking for existing FTL binary...\\n" "${INFO}"
-
+    # In the next section we check to see if FTL is already installed (in case of pihole -r).
+    # If the installed version matches the latest version, then check the installed sha1sum of the binary vs the remote sha1sum. If they do not match, then download
     local ftlLoc
     ftlLoc=$(command -v pihole-FTL 2>/dev/null)
 
     local ftlBranch
 
-    if [[ -f "/etc/pihole/ftlbranch" ]];then
+    if [[ -f "/etc/pihole/ftlbranch" ]]; then
         ftlBranch=$(</etc/pihole/ftlbranch)
     else
         ftlBranch="master"
@@ -1979,14 +2002,24 @@ FTLcheckUpdate() {
     local localSha1
 
     if [[ ! "${ftlBranch}" == "master" ]]; then
-        #Check whether or not the binary for this FTL branch actually exists. If not, then there is no update!
+        # Check whether or not the binary for this FTL branch actually exists. If not, then there is no update!
         local path
         path="${ftlBranch}/${binary}"
         # shellcheck disable=SC1090
-        if ! check_download_exists "$path"; then
-            printf "  %b Branch \"%s\" is not available.\\n" "${INFO}" "${ftlBranch}"
-            printf "  %b Use %bpihole checkout ftl [branchname]%b to switch to a valid branch.\\n" "${INFO}" "${COL_LIGHT_GREEN}" "${COL_NC}"
-            return 2
+        check_download_exists "$path"
+        local ret=$?
+        if [ $ret -ne 0 ]; then
+            if [[ $ret -eq 1 ]]; then
+                printf "  %b Branch \"%s\" is not available.\\n" "${INFO}" "${ftlBranch}"
+                printf "  %b Use %bpihole checkout ftl [branchname]%b to switch to a valid branch.\\n" "${INFO}" "${COL_LIGHT_GREEN}" "${COL_NC}"
+                return 2
+            elif [[ $ret -eq 2 ]]; then
+                printf "  %b Unable to download from ftl.pi-hole.net. Please check your Internet connection and try again later.\\n" "${CROSS}"
+                return 3
+            else
+                printf "  %b Unknown error. Please contact Pi-hole Support\\n" "${CROSS}"
+                return 4
+            fi
         fi
 
         if [[ ${ftlLoc} ]]; then
@@ -2011,12 +2044,14 @@ FTLcheckUpdate() {
             FTLversion=$(/usr/bin/pihole-FTL tag)
             local FTLlatesttag
 
+            # Get the latest version from the GitHub API
             if ! FTLlatesttag=$(curl -sI https://github.com/pi-hole/FTL/releases/latest | grep --color=never -i Location: | awk -F / '{print $NF}' | tr -d '[:cntrl:]'); then
                 # There was an issue while retrieving the latest version
                 printf "  %b Failed to retrieve latest FTL release metadata" "${CROSS}"
                 return 3
             fi
 
+            # Check if the installed version matches the latest version
             if [[ "${FTLversion}" != "${FTLlatesttag}" ]]; then
                 return 0
             else
@@ -2065,7 +2100,7 @@ make_temporary_log() {
 copy_to_install_log() {
     # Copy the contents of file descriptor 3 into the install log
     # Since we use color codes such as '\e[1;33m', they should be removed
-    sed 's/\[[0-9;]\{1,5\}m//g' < /proc/$$/fd/3 > "${installLogLoc}"
+    sed 's/\[[0-9;]\{1,5\}m//g' </proc/$$/fd/3 >"${installLogLoc}"
     chmod 644 "${installLogLoc}"
     chown pihole:pihole "${installLogLoc}"
 }
@@ -2093,8 +2128,8 @@ main() {
         printf "  %b Sudo utility check" "${INFO}"
 
         # If the sudo command exists, try rerunning as admin
-        if is_command sudo ; then
-            printf "%b  %b Sudo utility check\\n" "${OVER}"  "${TICK}"
+        if is_command sudo; then
+            printf "%b  %b Sudo utility check\\n" "${OVER}" "${TICK}"
 
             # when run via curl piping
             if [[ "$0" == "bash" ]]; then
@@ -2213,18 +2248,23 @@ main() {
     # Add password to web UI if there is none
     pw=""
     # If no password is set,
-    if [[ $(pihole-FTL --config webserver.api.pwhash) == '""' ]] ; then
+    if [[ $(pihole-FTL --config webserver.api.pwhash) == '""' ]]; then
         # generate a random password
-        pw=$(tr -dc _A-Z-a-z-0-9 < /dev/urandom | head -c 8)
+        pw=$(tr -dc _A-Z-a-z-0-9 </dev/urandom | head -c 8)
         pihole -a -p "${pw}"
     fi
 
     # Check for and disable systemd-resolved-DNSStubListener before reloading resolved
     # DNSStubListener needs to remain in place for installer to download needed files,
     # so this change needs to be made after installation is complete,
-    # but before starting or restarting the ftl service
+    # but before starting or restarting the ftl service
     disable_resolved_stublistener
 
+    # Check if the gravity database needs to be upgraded. If so, only upgrade it
+    # instead of rebuilding gravity altogether, as a full rebuild can be a very
+    # long-running task that would needlessly block the update process.
+    /opt/pihole/gravity.sh --upgrade
+
     printf "  %b Restarting services...\\n" "${INFO}"
     # Start services
 
@@ -2240,7 +2280,7 @@ main() {
     # can be removed with Pi-hole v6.0
     # To be sure FTL is not running when we move the files we explicitly stop it here
 
-    stop_service pihole-FTL &> /dev/null
+    stop_service pihole-FTL &>/dev/null
 
     if [ ! -d /var/log/pihole/ ]; then
         mkdir -m 0755 /var/log/pihole/
@@ -2254,7 +2294,7 @@ main() {
         # /var/log/pihole-FTL.log.3.gz -> /var/log/pihole/FTL.log.3.gz
         # /var/log/pihole-FTL.log.4.gz -> /var/log/pihole/FTL.log.4.gz
         # /var/log/pihole-FTL.log.5.gz -> /var/log/pihole/FTL.log.5.gz
-        for f in /var/log/pihole-FTL.log*; do mv "$f" "$( sed "s/pihole-/pihole\//" <<< "$f")"; done
+        for f in /var/log/pihole-FTL.log*; do mv "$f" "$(sed "s/pihole-/pihole\//" <<<"$f")"; done
     fi
 
     # Remaining log files
@@ -2275,7 +2315,7 @@ main() {
     fi
 
     # If there is a password
-    if (( ${#pw} > 0 )) ; then
+    if ((${#pw} > 0)); then
         # display the password
         printf "  %b Web Interface password: %b%s%b\\n" "${INFO}" "${COL_LIGHT_GREEN}" "${pw}" "${COL_NC}"
         printf "  %b This can be changed using 'pihole -a -p'\\n\\n" "${INFO}"
@@ -2306,6 +2346,6 @@ main() {
 }
 
 # allow to source this script without running it
-if [[ "${SKIP_INSTALL}" != true ]] ; then
+if [[ "${SKIP_INSTALL}" != true ]]; then
     main "$@"
 fi
diff --git a/gravity.sh b/gravity.sh
index 20ad6215..d49af29d 100755
--- a/gravity.sh
+++ b/gravity.sh
@@ -36,21 +36,16 @@ blacklistFile="${piholeDir}/blacklist.txt"
 regexFile="${piholeDir}/regex.list"
 adListFile="${piholeDir}/adlists.list"
 
-localList="${piholeDir}/local.list"
-VPNList="/etc/openvpn/ipp.txt"
-
 piholeGitDir="/etc/.pihole"
 GRAVITYDB=$(getFTLConfigValue files.gravity)
+GRAVITY_TMPDIR=$(getFTLConfigValue files.gravity_tmp)
 gravityDBschema="${piholeGitDir}/advanced/Templates/gravity.db.sql"
 gravityDBcopy="${piholeGitDir}/advanced/Templates/gravity_copy.sql"
 
 domainsExtension="domains"
 curl_connect_timeout=10
 
-
-# Set up tmp dir variable in case it's not configured
-: "${GRAVITY_TMPDIR:=/tmp}"
-
+# Check gravity temp directory
 if [ ! -d "${GRAVITY_TMPDIR}" ] || [ ! -w "${GRAVITY_TMPDIR}" ]; then
   echo -e "  ${COL_LIGHT_RED}Gravity temporary directory does not exist or is not a writeable directory, falling back to /tmp. ${COL_NC}"
   GRAVITY_TMPDIR="/tmp"
@@ -66,7 +61,7 @@ gravityOLDfile="${gravityDIR}/gravity_old.db"
 
 # Generate new SQLite3 file from schema template
 generate_gravity_database() {
-  if ! pihole-FTL sqlite3 "${gravityDBfile}" < "${gravityDBschema}"; then
+  if ! pihole-FTL sqlite3 -ni "${gravityDBfile}" <"${gravityDBschema}"; then
     echo -e "   ${CROSS} Unable to create ${gravityDBfile}"
     return 1
   fi
@@ -81,7 +76,7 @@ gravity_build_tree() {
   echo -ne "  ${INFO} ${str}..."
 
   # The index is intentionally not UNIQUE as poor quality adlists may contain domains more than once
-  output=$( { pihole-FTL sqlite3 "${gravityTEMPfile}" "CREATE INDEX idx_gravity ON gravity (domain, adlist_id);"; } 2>&1 )
+  output=$({ pihole-FTL sqlite3 -ni "${gravityTEMPfile}" "CREATE INDEX idx_gravity ON gravity (domain, adlist_id);"; } 2>&1)
   status="$?"
 
   if [[ "${status}" -ne 0 ]]; then
@@ -120,7 +115,7 @@ gravity_swap_databases() {
 
 # Update timestamp when the gravity table was last updated successfully
 update_gravity_timestamp() {
-  output=$( { printf ".timeout 30000\\nINSERT OR REPLACE INTO info (property,value) values ('updated',cast(strftime('%%s', 'now') as int));" | pihole-FTL sqlite3 "${gravityTEMPfile}"; } 2>&1 )
+  output=$({ printf ".timeout 30000\\nINSERT OR REPLACE INTO info (property,value) values ('updated',cast(strftime('%%s', 'now') as int));" | pihole-FTL sqlite3 -ni "${gravityTEMPfile}"; } 2>&1)
   status="$?"
 
   if [[ "${status}" -ne 0 ]]; then
@@ -165,7 +160,7 @@ database_table_from_file() {
 
   # Get MAX(id) from domainlist when INSERTing into this table
   if [[ "${table}" == "domainlist" ]]; then
-    rowid="$(pihole-FTL sqlite3 "${gravityDBfile}" "SELECT MAX(id) FROM domainlist;")"
+    rowid="$(pihole-FTL sqlite3 -ni "${gravityDBfile}" "SELECT MAX(id) FROM domainlist;")"
     if [[ -z "$rowid" ]]; then
       rowid=0
     fi
@@ -174,19 +169,18 @@ database_table_from_file() {
 
   # Loop over all domains in ${src} file
   # Read file line by line
-  grep -v '^ *#' < "${src}" | while IFS= read -r domain
-  do
+  grep -v '^ *#' <"${src}" | while IFS= read -r domain; do
     # Only add non-empty lines
     if [[ -n "${domain}" ]]; then
       if [[ "${table}" == "domain_audit" ]]; then
         # domain_audit table format (no enable or modified fields)
-        echo "${rowid},\"${domain}\",${timestamp}" >> "${tmpFile}"
+        echo "${rowid},\"${domain}\",${timestamp}" >>"${tmpFile}"
       elif [[ "${table}" == "adlist" ]]; then
         # Adlist table format
-        echo "${rowid},\"${domain}\",1,${timestamp},${timestamp},\"Migrated from ${src}\",,0,0,0,0,0" >> "${tmpFile}"
+        echo "${rowid},\"${domain}\",1,${timestamp},${timestamp},\"Migrated from ${src}\",,0,0,0,0,0" >>"${tmpFile}"
       else
         # White-, black-, and regexlist table format
-        echo "${rowid},${list_type},\"${domain}\",1,${timestamp},${timestamp},\"Migrated from ${src}\"" >> "${tmpFile}"
+        echo "${rowid},${list_type},\"${domain}\",1,${timestamp},${timestamp},\"Migrated from ${src}\"" >>"${tmpFile}"
       fi
       rowid+=1
     fi
@@ -195,7 +189,7 @@ database_table_from_file() {
   # Store domains in database table specified by ${table}
   # Use printf as .mode and .import need to be on separate lines
   # see https://unix.stackexchange.com/a/445615/83260
-  output=$( { printf ".timeout 30000\\n.mode csv\\n.import \"%s\" %s\\n" "${tmpFile}" "${table}" | pihole-FTL sqlite3 "${gravityDBfile}"; } 2>&1 )
+  output=$({ printf ".timeout 30000\\n.mode csv\\n.import \"%s\" %s\\n" "${tmpFile}" "${table}" | pihole-FTL sqlite3 -ni "${gravityDBfile}"; } 2>&1)
   status="$?"
 
   if [[ "${status}" -ne 0 ]]; then
@@ -205,17 +199,17 @@ database_table_from_file() {
 
   # Move source file to backup directory, create directory if not existing
   mkdir -p "${backup_path}"
-  mv "${src}" "${backup_file}" 2> /dev/null || \
+  mv "${src}" "${backup_file}" 2>/dev/null ||
     echo -e "  ${CROSS} Unable to backup ${src} to ${backup_path}"
 
   # Delete tmpFile
-  rm "${tmpFile}" > /dev/null 2>&1 || \
+  rm "${tmpFile}" >/dev/null 2>&1 ||
     echo -e "  ${CROSS} Unable to remove ${tmpFile}"
 }
 
 # Check if a column with name ${2} exists in gravity table with name ${1}
 gravity_column_exists() {
-  output=$( { printf ".timeout 30000\\nSELECT EXISTS(SELECT * FROM pragma_table_info('%s') WHERE name='%s');\\n" "${1}" "${2}" | pihole-FTL sqlite3 "${gravityTEMPfile}"; } 2>&1 )
+  output=$({ printf ".timeout 30000\\nSELECT EXISTS(SELECT * FROM pragma_table_info('%s') WHERE name='%s');\\n" "${1}" "${2}" | pihole-FTL sqlite3 -ni "${gravityTEMPfile}"; } 2>&1)
   if [[ "${output}" == "1" ]]; then
     return 0 # Bash 0 is success
   fi
@@ -227,10 +221,10 @@ gravity_column_exists() {
 database_adlist_number() {
   # Only try to set number of domains when this field exists in the gravity database
   if ! gravity_column_exists "adlist" "number"; then
-    return;
+    return
   fi
 
-  output=$( { printf ".timeout 30000\\nUPDATE adlist SET number = %i, invalid_domains = %i WHERE id = %i;\\n" "${2}" "${3}" "${1}" | pihole-FTL sqlite3 "${gravityTEMPfile}"; } 2>&1 )
+  output=$({ printf ".timeout 30000\\nUPDATE adlist SET number = %i, invalid_domains = %i WHERE id = %i;\\n" "${2}" "${3}" "${1}" | pihole-FTL sqlite3 -ni "${gravityTEMPfile}"; } 2>&1)
   status="$?"
 
   if [[ "${status}" -ne 0 ]]; then
@@ -243,10 +237,10 @@ database_adlist_number() {
 database_adlist_status() {
   # Only try to set the status when this field exists in the gravity database
   if ! gravity_column_exists "adlist" "status"; then
-    return;
+    return
   fi
 
-  output=$( { printf ".timeout 30000\\nUPDATE adlist SET status = %i WHERE id = %i;\\n" "${2}" "${1}" | pihole-FTL sqlite3 "${gravityTEMPfile}"; } 2>&1 )
+  output=$({ printf ".timeout 30000\\nUPDATE adlist SET status = %i WHERE id = %i;\\n" "${2}" "${1}" | pihole-FTL sqlite3 -ni "${gravityTEMPfile}"; } 2>&1)
   status="$?"
 
   if [[ "${status}" -ne 0 ]]; then
@@ -300,15 +294,10 @@ migrate_to_database() {
 
 # Determine if DNS resolution is available before proceeding
 gravity_CheckDNSResolutionAvailable() {
-  local lookupDomain="pi.hole"
-
-  # Determine if $localList does not exist, and ensure it is not empty
-  if [[ ! -e "${localList}" ]] || [[ -s "${localList}" ]]; then
-    lookupDomain="raw.githubusercontent.com"
-  fi
+  local lookupDomain="raw.githubusercontent.com"
 
   # Determine if $lookupDomain is resolvable
-  if timeout 4 getent hosts "${lookupDomain}" &> /dev/null; then
+  if timeout 4 getent hosts "${lookupDomain}" &>/dev/null; then
     # Print confirmation of resolvability if it had previously failed
     if [[ -n "${secs:-}" ]]; then
       echo -e "${OVER}  ${TICK} DNS resolution is now available\\n"
@@ -322,7 +311,7 @@ gravity_CheckDNSResolutionAvailable() {
   # If the /etc/resolv.conf contains resolvers other than 127.0.0.1 then the local dnsmasq will not be queried and pi.hole is NXDOMAIN.
   # This means that even though name resolution is working, the getent hosts check fails and the holddown timer keeps ticking and eventually fails
   # So we check the output of the last command and if it failed, attempt to use dig +short as a fallback
-  if timeout 4 dig +short "${lookupDomain}" &> /dev/null; then
+  if timeout 4 dig +short "${lookupDomain}" &>/dev/null; then
     if [[ -n "${secs:-}" ]]; then
       echo -e "${OVER}  ${TICK} DNS resolution is now available\\n"
     fi
@@ -333,7 +322,7 @@ gravity_CheckDNSResolutionAvailable() {
   fi
 
   # Determine error output message
-  if pgrep pihole-FTL &> /dev/null; then
+  if pgrep pihole-FTL &>/dev/null; then
     echo -e "  ${CROSS} DNS resolution is currently unavailable"
   else
     echo -e "  ${CROSS} DNS service is not running"
@@ -343,7 +332,7 @@ gravity_CheckDNSResolutionAvailable() {
   # Ensure DNS server is given time to be resolvable
   secs="120"
   echo -ne "  ${INFO} Time until retry: ${secs}"
-  until timeout 1 getent hosts "${lookupDomain}" &> /dev/null; do
+  until timeout 1 getent hosts "${lookupDomain}" &>/dev/null; do
     [[ "${secs:-}" -eq 0 ]] && break
     echo -ne "${OVER}  ${INFO} Time until retry: ${secs}"
     : $((secs--))
@@ -364,19 +353,19 @@ gravity_DownloadBlocklists() {
 
   # Retrieve source URLs from gravity database
   # We source only enabled adlists, SQLite3 stores boolean values as 0 (false) or 1 (true)
-  mapfile -t sources <<< "$(pihole-FTL sqlite3 "${gravityDBfile}" "SELECT address FROM vw_adlist;" 2> /dev/null)"
-  mapfile -t sourceIDs <<< "$(pihole-FTL sqlite3 "${gravityDBfile}" "SELECT id FROM vw_adlist;" 2> /dev/null)"
-  mapfile -t sourceTypes <<< "$(pihole-FTL sqlite3 "${gravityDBfile}" "SELECT type FROM vw_adlist;" 2> /dev/null)"
+  mapfile -t sources <<<"$(pihole-FTL sqlite3 -ni "${gravityDBfile}" "SELECT address FROM vw_adlist;" 2>/dev/null)"
+  mapfile -t sourceIDs <<<"$(pihole-FTL sqlite3 -ni "${gravityDBfile}" "SELECT id FROM vw_adlist;" 2>/dev/null)"
+  mapfile -t sourceTypes <<<"$(pihole-FTL sqlite3 -ni "${gravityDBfile}" "SELECT type FROM vw_adlist;" 2>/dev/null)"
 
   # Parse source domains from $sources
-  mapfile -t sourceDomains <<< "$(
+  mapfile -t sourceDomains <<<"$(
     # Logic: Split by folder/port
     awk -F '[/:]' '{
       # Remove URL protocol & optional username:password@
       gsub(/(.*:\/\/|.*:.*@)/, "", $0)
       if(length($1)>0){print $1}
       else {print "local"}
-    }' <<< "$(printf '%s\n' "${sources[@]}")" 2> /dev/null
+    }' <<<"$(printf '%s\n' "${sources[@]}")" 2>/dev/null
   )"
 
   local str="Pulling blocklist source list into range"
@@ -394,8 +383,8 @@ gravity_DownloadBlocklists() {
   # Prepare new gravity database
   str="Preparing new gravity database"
   echo -ne "  ${INFO} ${str}..."
-  rm "${gravityTEMPfile}" > /dev/null 2>&1
-  output=$( { pihole-FTL sqlite3 "${gravityTEMPfile}" < "${gravityDBschema}"; } 2>&1 )
+  rm "${gravityTEMPfile}" >/dev/null 2>&1
+  output=$({ pihole-FTL sqlite3 -ni "${gravityTEMPfile}" <"${gravityDBschema}"; } 2>&1)
   status="$?"
 
   if [[ "${status}" -ne 0 ]]; then
@@ -415,7 +404,7 @@ gravity_DownloadBlocklists() {
     copyGravity="${copyGravity//"${gravityDBfile_default}"/"${gravityDBfile}"}"
   fi
 
-  output=$( { pihole-FTL sqlite3 "${gravityTEMPfile}" <<< "${copyGravity}"; } 2>&1 )
+  output=$({ pihole-FTL sqlite3 -ni "${gravityTEMPfile}" <<<"${copyGravity}"; } 2>&1)
   status="$?"
 
   if [[ "${status}" -ne 0 ]]; then
@@ -460,7 +449,7 @@ gravity_DownloadBlocklists() {
 
     # this will remove first @ that is after schema and before domain
     # \1 is optional schema, \2 is userinfo
-    check_url="$( sed -re 's#([^:/]*://)?([^/]+)@#\1\2#' <<< "$url" )"
+    check_url="$(sed -re 's#([^:/]*://)?([^/]+)@#\1\2#' <<<"$url")"
 
     if [[ "${check_url}" =~ ${regex} ]]; then
       echo -e "  ${CROSS} Invalid Target"
@@ -480,7 +469,7 @@ compareLists() {
   if [[ -s "${target}.sha1" ]]; then
     if ! sha1sum --check --status --strict "${target}.sha1"; then
       # The list changed upstream, we need to update the checksum
-      sha1sum "${target}" > "${target}.sha1"
+      sha1sum "${target}" >"${target}.sha1"
       echo "  ${INFO} List has been updated"
       database_adlist_status "${adlistID}" "1"
     else
@@ -489,7 +478,7 @@ compareLists() {
     fi
   else
     # No checksum available, create one for comparing on the next run
-    sha1sum "${target}" > "${target}.sha1"
+    sha1sum "${target}" >"${target}.sha1"
     # We assume here it was changed upstream
     database_adlist_status "${adlistID}" "1"
   fi
@@ -517,6 +506,31 @@ gravity_DownloadBlocklistFromUrl() {
   str="Status:"
   echo -ne "  ${INFO} ${str} Pending..."
   blocked=false
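+  # Determine whether the adlist domain itself is already blocked by Pi-hole,
+  # based on the configured blocking mode: the IP modes answer with one of this
+  # host's addresses, NXDOMAIN/NODATA return no record and NULL (the default)
+  # returns 0.0.0.0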
+  case $(getFTLConfigValue dns.blocking.mode) in
+  "IP-NODATA-AAAA" | "IP")
+    # Get IP address of this domain
+    ip="$(dig "${domain}" +short)"
+    # Check if this IP matches any IP of the system
+    if [[ -n "${ip}" && $(grep -Ec "inet(|6) ${ip}" <<<"$(ip a)") -gt 0 ]]; then
+      blocked=true
+    fi
+    ;;
+  "NXDOMAIN")
+    if [[ $(dig "${domain}" | grep "NXDOMAIN" -c) -ge 1 ]]; then
+      blocked=true
+    fi
+    ;;
+  "NODATA")
+    if [[ $(dig "${domain}" | grep "NOERROR" -c) -ge 1 ]] && [[ -z $(dig +short "${domain}") ]]; then
+      blocked=true
+    fi
+    ;;
+  "NULL" | *)
+    if [[ $(dig "${domain}" +short | grep "0.0.0.0" -c) -ge 1 ]]; then
+      blocked=true
+    fi
+    ;;
+  esac
 
   # Check if this domain is blocked by Pi-hole but only if the domain is not a
   # local file or empty
@@ -561,14 +575,15 @@ gravity_DownloadBlocklistFromUrl() {
         local ip_addr port
         printf -v ip_addr "%s" "${upstream%#*}"
         if [[ ${upstream} != *"#"* ]]; then
-        port=53
+            port=53
         else
-        printf -v port "%s" "${upstream#*#}"
+            printf -v port "%s" "${upstream#*#}"
         fi
         ip=$(dig "@${ip_addr}" -p "${port}" +short "${domain}" | tail -1)
         if [[ $(echo "${url}" | awk -F '://' '{print $1}') = "https" ]]; then
-        port=443;
-        else port=80
+            port=443
+        else
+            port=80
         fi
         echo -e "${OVER}  ${CROSS} ${str} ${domain} is blocked by one of your lists. Using DNS server ${upstream} instead";
         echo -ne "  ${INFO} ${str} Pending..."
@@ -577,33 +592,42 @@ gravity_DownloadBlocklistFromUrl() {
   fi
 
   # shellcheck disable=SC2086
-  httpCode=$(curl --connect-timeout ${curl_connect_timeout} -s -L ${compression} ${cmd_ext} ${heisenbergCompensator} -w "%{http_code}" "${url}" -o "${listCurlBuffer}" 2> /dev/null)
+  httpCode=$(curl --connect-timeout ${curl_connect_timeout} -s -L ${compression} ${cmd_ext} ${heisenbergCompensator} -w "%{http_code}" "${url}" -o "${listCurlBuffer}" 2>/dev/null)
 
   case $url in
-    # Did we "download" a local file?
-    "file"*)
-      if [[ -s "${listCurlBuffer}" ]]; then
-        echo -e "${OVER}  ${TICK} ${str} Retrieval successful"; success=true
-      else
-        echo -e "${OVER}  ${CROSS} ${str} Not found / empty list"
-      fi;;
-    # Did we "download" a remote file?
-    *)
-      # Determine "Status:" output based on HTTP response
-      case "${httpCode}" in
-        "200") echo -e "${OVER}  ${TICK} ${str} Retrieval successful"; success=true;;
-        "304") echo -e "${OVER}  ${TICK} ${str} No changes detected"; success=true;;
-        "000") echo -e "${OVER}  ${CROSS} ${str} Connection Refused";;
-        "403") echo -e "${OVER}  ${CROSS} ${str} Forbidden";;
-        "404") echo -e "${OVER}  ${CROSS} ${str} Not found";;
-        "408") echo -e "${OVER}  ${CROSS} ${str} Time-out";;
-        "451") echo -e "${OVER}  ${CROSS} ${str} Unavailable For Legal Reasons";;
-        "500") echo -e "${OVER}  ${CROSS} ${str} Internal Server Error";;
-        "504") echo -e "${OVER}  ${CROSS} ${str} Connection Timed Out (Gateway)";;
-        "521") echo -e "${OVER}  ${CROSS} ${str} Web Server Is Down (Cloudflare)";;
-        "522") echo -e "${OVER}  ${CROSS} ${str} Connection Timed Out (Cloudflare)";;
-        *    ) echo -e "${OVER}  ${CROSS} ${str} ${url} (${httpCode})";;
-      esac;;
+  # Did we "download" a local file?
+  "file"*)
+    if [[ -s "${listCurlBuffer}" ]]; then
+      echo -e "${OVER}  ${TICK} ${str} Retrieval successful"
+      success=true
+    else
+      echo -e "${OVER}  ${CROSS} ${str} Not found / empty list"
+    fi
+    ;;
+  # Did we "download" a remote file?
+  *)
+    # Determine "Status:" output based on HTTP response
+    case "${httpCode}" in
+    "200")
+      echo -e "${OVER}  ${TICK} ${str} Retrieval successful"
+      success=true
+      ;;
+    "304")
+      echo -e "${OVER}  ${TICK} ${str} No changes detected"
+      success=true
+      ;;
+    "000") echo -e "${OVER}  ${CROSS} ${str} Connection Refused" ;;
+    "403") echo -e "${OVER}  ${CROSS} ${str} Forbidden" ;;
+    "404") echo -e "${OVER}  ${CROSS} ${str} Not found" ;;
+    "408") echo -e "${OVER}  ${CROSS} ${str} Time-out" ;;
+    "451") echo -e "${OVER}  ${CROSS} ${str} Unavailable For Legal Reasons" ;;
+    "500") echo -e "${OVER}  ${CROSS} ${str} Internal Server Error" ;;
+    "504") echo -e "${OVER}  ${CROSS} ${str} Connection Timed Out (Gateway)" ;;
+    "521") echo -e "${OVER}  ${CROSS} ${str} Web Server Is Down (Cloudflare)" ;;
+    "522") echo -e "${OVER}  ${CROSS} ${str} Connection Timed Out (Cloudflare)" ;;
+    *) echo -e "${OVER}  ${CROSS} ${str} ${url} (${httpCode})" ;;
+    esac
+    ;;
   esac
 
   local done="false"
@@ -657,7 +681,7 @@ gravity_ParseFileIntoDomains() {
   # This helps with that and makes it easier to read
   # It also helps with debugging so each stage of the script can be researched more in depth
   # 1) Convert all characters to lowercase
-  tr '[:upper:]' '[:lower:]' < "${src}" > "${destination}"
+  tr '[:upper:]' '[:lower:]' <"${src}" >"${destination}"
 
   # 2) Remove carriage returns
   # 3) Remove lines starting with ! (ABP Comments)
@@ -667,7 +691,7 @@ gravity_ParseFileIntoDomains() {
   # 7) Remove leading tabs, spaces, etc. (Also removes leading IP addresses)
   # 8) Remove empty lines
 
-    sed -i -r \
+  sed -i -r \
     -e 's/\r$//' \
     -e 's/\s*!.*//g' \
     -e 's/\s*\[.*//g' \
@@ -684,12 +708,12 @@ gravity_Table_Count() {
   local table="${1}"
   local str="${2}"
   local num
-  num="$(pihole-FTL sqlite3 "${gravityTEMPfile}" "SELECT COUNT(*) FROM ${table};")"
+  num="$(pihole-FTL sqlite3 -ni "${gravityTEMPfile}" "SELECT COUNT(*) FROM ${table};")"
   if [[ "${table}" == "gravity" ]]; then
     local unique
-    unique="$(pihole-FTL sqlite3 "${gravityTEMPfile}" "SELECT COUNT(*) FROM (SELECT DISTINCT domain FROM ${table});")"
+    unique="$(pihole-FTL sqlite3 -ni "${gravityTEMPfile}" "SELECT COUNT(*) FROM (SELECT DISTINCT domain FROM ${table});")"
     echo -e "  ${INFO} Number of ${str}: ${num} (${COL_BOLD}${unique} unique domains${COL_NC})"
-    pihole-FTL sqlite3 "${gravityTEMPfile}" "INSERT OR REPLACE INTO info (property,value) VALUES ('gravity_count',${unique});"
+    pihole-FTL sqlite3 -ni "${gravityTEMPfile}" "INSERT OR REPLACE INTO info (property,value) VALUES ('gravity_count',${unique});"
   else
     echo -e "  ${INFO} Number of ${str}: ${num}"
   fi
@@ -706,18 +730,6 @@ gravity_ShowCount() {
   gravity_Table_Count "vw_regex_whitelist" "regex allowed filters"
 }
 
-# Create "localhost" entries into hosts format
-gravity_generateLocalList() {
-  # Empty $localList if it already exists, otherwise, create it
-  echo "### Do not modify this file, it will be overwritten by pihole -g" > "${localList}"
-  chmod 644 "${localList}"
-
-  # Add additional LAN hosts provided by OpenVPN (if available)
-  if [[ -f "${VPNList}" ]]; then
-    awk -F, '{printf $2"\t"$1".vpn\n"}' "${VPNList}" >> "${localList}"
-  fi
-}
-
 # Trap Ctrl-C
 gravity_Trap() {
   trap '{ echo -e "\\n\\n  ${INFO} ${COL_LIGHT_RED}User-abort detected${COL_NC}"; gravity_Cleanup "error"; }' INT
@@ -731,12 +743,12 @@ gravity_Cleanup() {
   echo -ne "  ${INFO} ${str}..."
 
   # Delete tmp content generated by Gravity
-  rm ${piholeDir}/pihole.*.txt 2> /dev/null
-  rm ${piholeDir}/*.tmp 2> /dev/null
+  rm ${piholeDir}/pihole.*.txt 2>/dev/null
+  rm ${piholeDir}/*.tmp 2>/dev/null
   # listCurlBuffer location
-  rm "${GRAVITY_TMPDIR}"/*.phgpb 2> /dev/null
+  rm "${GRAVITY_TMPDIR}"/*.phgpb 2>/dev/null
   # invalid_domains location
-  rm "${GRAVITY_TMPDIR}"/*.ph-non-domains 2> /dev/null
+  rm "${GRAVITY_TMPDIR}"/*.ph-non-domains 2>/dev/null
 
   # Ensure this function only runs when gravity_SetDownloadOptions() has completed
   if [[ "${gravity_Blackbody:-}" == true ]]; then
@@ -744,7 +756,7 @@ gravity_Cleanup() {
     for file in "${piholeDir}"/*."${domainsExtension}"; do
       # If list is not in active array, then remove it
       if [[ ! "${activeDomains[*]}" == *"${file}"* ]]; then
-        rm -f "${file}" 2> /dev/null || \
+        rm -f "${file}" 2>/dev/null ||
           echo -e "  ${CROSS} Failed to remove ${file##*/}"
       fi
     done
@@ -770,7 +782,7 @@ database_recovery() {
   local str="Checking integrity of existing gravity database (this can take a while)"
   local option="${1}"
   echo -ne "  ${INFO} ${str}..."
-  result="$(pihole-FTL sqlite3 "${gravityDBfile}" "PRAGMA integrity_check" 2>&1)"
+  result="$(pihole-FTL sqlite3 -ni "${gravityDBfile}" "PRAGMA integrity_check" 2>&1)"
 
   if [[ ${result} = "ok" ]]; then
     echo -e "${OVER}  ${TICK} ${str} - no errors found"
@@ -778,7 +790,7 @@ database_recovery() {
     str="Checking foreign keys of existing gravity database (this can take a while)"
     echo -ne "  ${INFO} ${str}..."
     unset result
-    result="$(pihole-FTL sqlite3 "${gravityDBfile}" "PRAGMA foreign_key_check" 2>&1)"
+    result="$(pihole-FTL sqlite3 -ni "${gravityDBfile}" "PRAGMA foreign_key_check" 2>&1)"
     if [[ -z ${result} ]]; then
       echo -e "${OVER}  ${TICK} ${str} - no errors found"
       if [[ "${option}" != "force" ]]; then
@@ -786,18 +798,18 @@ database_recovery() {
       fi
     else
       echo -e "${OVER}  ${CROSS} ${str} - errors found:"
-      while IFS= read -r line ; do echo "  - $line"; done <<< "$result"
+      while IFS= read -r line; do echo "  - $line"; done <<<"$result"
     fi
   else
     echo -e "${OVER}  ${CROSS} ${str} - errors found:"
-    while IFS= read -r line ; do echo "  - $line"; done <<< "$result"
+    while IFS= read -r line; do echo "  - $line"; done <<<"$result"
   fi
 
   str="Trying to recover existing gravity database"
   echo -ne "  ${INFO} ${str}..."
   # We have to remove any possibly existing recovery database or this will fail
-  rm -f "${gravityDBfile}.recovered" > /dev/null 2>&1
-  if result="$(pihole-FTL sqlite3 "${gravityDBfile}" ".recover" | pihole-FTL sqlite3 "${gravityDBfile}.recovered" 2>&1)"; then
+  rm -f "${gravityDBfile}.recovered" >/dev/null 2>&1
+  if result="$(pihole-FTL sqlite3 -ni "${gravityDBfile}" ".recover" | pihole-FTL sqlite3 -ni "${gravityDBfile}.recovered" 2>&1)"; then
     echo -e "${OVER}  ${TICK} ${str} - success"
     mv "${gravityDBfile}" "${gravityDBfile}.old"
     mv "${gravityDBfile}.recovered" "${gravityDBfile}"
@@ -805,7 +817,7 @@ database_recovery() {
     echo -ne " ${INFO} The old ${gravityDBfile} has been moved to ${gravityDBfile}.old"
   else
     echo -e "${OVER}  ${CROSS} ${str} - the following errors happened:"
-    while IFS= read -r line ; do echo "  - $line"; done <<< "$result"
+    while IFS= read -r line; do echo "  - $line"; done <<<"$result"
     echo -e "  ${CROSS} Recovery failed. Try \"pihole -r recreate\" instead."
     exit 1
   fi
@@ -824,9 +836,10 @@ Options:
 
 repairSelector() {
   case "$1" in
-    "recover") recover_database=true;;
-    "recreate") recreate_database=true;;
-    *) echo "Usage: pihole -g -r {recover,recreate}
+  "recover") recover_database=true ;;
+  "recreate") recreate_database=true ;;
+  *)
+    echo "Usage: pihole -g -r {recover,recreate}
 Attempt to repair gravity database
 
 Available options:
@@ -845,15 +858,17 @@ Available options:
                               and create a new file from scratch. If you still
                               have the migration backup created when migrating
                               to Pi-hole v5.0, Pi-hole will import these files."
-    exit 0;;
+    exit 0
+    ;;
   esac
 }
 
 for var in "$@"; do
   case "${var}" in
-    "-f" | "--force" ) forceDelete=true;;
-    "-r" | "--repair" ) repairSelector "$3";;
-    "-h" | "--help" ) helpFunc;;
+  "-f" | "--force" ) forceDelete=true;;
+  "-r" | "--repair" ) repairSelector "$3";;
+  "-u" | "--upgrade" ) upgrade_gravityDB "${gravityDBfile}" "${piholeDir}"; exit 0;;
+  "-h" | "--help" ) helpFunc;;
   esac
 done
 
@@ -869,9 +884,9 @@ if [[ "${recreate_database:-}" == true ]]; then
   str="Recreating gravity database from migration backup"
   echo -ne "${INFO} ${str}..."
   rm "${gravityDBfile}"
-  pushd "${piholeDir}" > /dev/null || exit
+  pushd "${piholeDir}" >/dev/null || exit
   cp migration_backup/* .
-  popd > /dev/null || exit
+  popd >/dev/null || exit
   echo -e "${OVER}  ${TICK} ${str}"
 fi
 
@@ -889,7 +904,7 @@ if [[ "${forceDelete:-}" == true ]]; then
   str="Deleting existing list cache"
   echo -ne "${INFO} ${str}..."
 
-  rm /etc/pihole/list.* 2> /dev/null || true
+  rm /etc/pihole/list.* 2>/dev/null || true
   echo -e "${OVER}  ${TICK} ${str}"
 fi
 
@@ -904,9 +919,6 @@ if ! gravity_DownloadBlocklists; then
   exit 1
 fi
 
-# Create local.list
-gravity_generateLocalList
-
 # Update gravity timestamp
 update_gravity_timestamp
 
diff --git a/pihole b/pihole
index 47da4ddd..7c84771c 100755
--- a/pihole
+++ b/pihole
@@ -140,8 +140,7 @@ uninstallFunc() {
 }
 
 versionFunc() {
-  shift
-  exec "${PI_HOLE_SCRIPT_DIR}"/version.sh "$@"
+  exec "${PI_HOLE_SCRIPT_DIR}"/version.sh
 }
 
 restartDNS() {
@@ -508,7 +507,6 @@ Options:
   -up, updatePihole   Update Pi-hole subsystems
                         Add '--check-only' to exit script before update is performed.
   -v, version         Show installed versions of Pi-hole, Web Interface & FTL
-                        Add '-h' for more info on version usage
   uninstall           Uninstall Pi-hole from your system
   status              Display the running status of Pi-hole subsystems
   enable              Enable Pi-hole subsystems
@@ -531,7 +529,7 @@ fi
 need_root=1
 case "${1}" in
   "-h" | "help" | "--help"      ) helpFunc;;
-  "-v" | "version"              ) versionFunc "$@";;
+  "-v" | "version"              ) versionFunc;;
   "-c" | "chronometer"          ) chronometerFunc "$@";;
   "-q" | "query"                ) queryFunc "$@";;
   "status"                      ) statusFunc "$2";;
diff --git a/test/_fedora_37.Dockerfile b/test/_fedora_37.Dockerfile
deleted file mode 100644
index b4f939ba..00000000
--- a/test/_fedora_37.Dockerfile
+++ /dev/null
@@ -1,18 +0,0 @@
-FROM fedora:37
-RUN dnf install -y git initscripts
-
-ENV GITDIR /etc/.pihole
-ENV SCRIPTDIR /opt/pihole
-
-RUN mkdir -p $GITDIR $SCRIPTDIR /etc/pihole
-ADD . $GITDIR
-RUN cp $GITDIR/advanced/Scripts/*.sh $GITDIR/gravity.sh $GITDIR/pihole $GITDIR/automated\ install/*.sh $SCRIPTDIR/
-ENV PATH /usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:$SCRIPTDIR
-
-RUN true && \
-    chmod +x $SCRIPTDIR/*
-
-ENV SKIP_INSTALL true
-ENV OS_CHECK_DOMAIN_NAME dev-supportedos.pi-hole.net
-
-#sed '/# Start the installer/Q' /opt/pihole/basic-install.sh > /opt/pihole/stub_basic-install.sh && \
diff --git a/test/_fedora_36.Dockerfile b/test/_fedora_39.Dockerfile
similarity index 97%
rename from test/_fedora_36.Dockerfile
rename to test/_fedora_39.Dockerfile
index 847767e7..1727a3aa 100644
--- a/test/_fedora_36.Dockerfile
+++ b/test/_fedora_39.Dockerfile
@@ -1,4 +1,4 @@
-FROM fedora:36
+FROM fedora:39
 RUN dnf install -y git initscripts
 
 ENV GITDIR /etc/.pihole
diff --git a/test/requirements.txt b/test/requirements.txt
index 74c67fd9..ffb05813 100644
--- a/test/requirements.txt
+++ b/test/requirements.txt
@@ -1,6 +1,6 @@
 pyyaml == 6.0.1
-pytest == 7.4.3
-pytest-xdist == 3.4.0
-pytest-testinfra == 10.0.0
-tox == 4.11.3
+pytest == 8.0.2
+pytest-xdist == 3.5.0
+pytest-testinfra == 10.1.0
+tox == 4.13.0
 
diff --git a/test/test_any_automated_install.py b/test/test_any_automated_install.py
index 7d9d15f6..0930f0af 100644
--- a/test/test_any_automated_install.py
+++ b/test/test_any_automated_install.py
@@ -12,6 +12,8 @@ from .conftest import (
     run_script,
 )
 
+FTL_BRANCH = "development-v6"
+
 
 def test_supported_package_manager(host):
     """
@@ -80,11 +82,7 @@ def test_installPihole_fresh_install_readableFiles(host):
     host.run("command -v dnf > /dev/null && dnf install -y man")
     host.run("command -v yum > /dev/null && yum install -y man")
     # Workaround to get FTLv6 installed until it reaches master branch
-    host.run(
-        """
-    echo "development-v6" > /etc/pihole/ftlbranch
-    """
-    )
+    host.run('echo "' + FTL_BRANCH + '" > /etc/pihole/ftlbranch')
     install = host.run(
         """
     export TERM=xterm
@@ -174,10 +172,6 @@ def test_installPihole_fresh_install_readableFiles(host):
         )
         actual_rc = host.run(check_man).rc
         assert exit_status_success == actual_rc
-    # check not readable sudoers file
-    check_sudo = test_cmd.format("r", "/etc/sudoers.d/pihole", piholeuser)
-    actual_rc = host.run(check_sudo).rc
-    assert exit_status_success != actual_rc
     # check not readable cron file
     check_sudo = test_cmd.format("x", "/etc/cron.d/", piholeuser)
     actual_rc = host.run(check_sudo).rc
@@ -235,45 +229,35 @@ def test_update_package_cache_failure_no_errors(host):
     assert "Error: Unable to update package cache." in updateCache.stdout
 
 
-def test_FTL_detect_aarch64_no_errors(host):
+@pytest.mark.parametrize(
+    "arch,detected_string,supported",
+    [
+        ("aarch64", "AArch64 (64 Bit ARM)", True),
+        ("armv6", "ARMv6", True),
+        ("armv7l", "ARMv7 (or newer)", True),
+        ("armv7", "ARMv7 (or newer)", True),
+        ("armv8a", "ARMv7 (or newer)", True),
+        ("x86_64", "x86_64", True),
+        ("riscv64", "riscv64", True),
+        ("mips", "mips", False),
+    ],
+)
+def test_FTL_detect_no_errors(host, arch, detected_string, supported):
     """
-    confirms only aarch64 package is downloaded for FTL engine
+    confirms that the correct package is downloaded for the FTL engine
     """
-    # mock uname to return aarch64 platform
-    mock_command("uname", {"-m": ("aarch64", "0")}, host)
-    detectPlatform = host.run(
-        """
-    source /opt/pihole/basic-install.sh
-    create_pihole_user
-    funcOutput=$(get_binary_name)
-    binary="pihole-FTL${funcOutput##*pihole-FTL}"
-    theRest="${funcOutput%pihole-FTL*}"
-    FTLdetect "${binary}" "${theRest}"
-    """
-    )
-    expected_stdout = info_box + " FTL Checks..."
-    assert expected_stdout in detectPlatform.stdout
-    expected_stdout = tick_box + " Detected AArch64 (64 Bit ARM) architecture"
-    assert expected_stdout in detectPlatform.stdout
-    expected_stdout = tick_box + " Downloading and Installing FTL"
-    assert expected_stdout in detectPlatform.stdout
-
-
-def test_FTL_detect_armv6_no_errors(host):
-    """
-    confirms only armv6 package is downloaded for FTL engine
-    """
-    # mock uname to return armv6 platform
-    mock_command("uname", {"-m": ("armv6", "0")}, host)
-    # mock readelf to respond with armv6l CPU architecture
+    # mock uname to return passed platform
+    mock_command("uname", {"-m": (arch, "0")}, host)
+    # mock readelf to respond with passed CPU architecture
     mock_command_2(
         "readelf",
         {
-            "-A /bin/sh": ("Tag_CPU_arch: armv6", "0"),
-            "-A /usr/bin/sh": ("Tag_CPU_arch: armv6", "0"),
+            "-A /bin/sh": ("Tag_CPU_arch: " + arch, "0"),
+            "-A /usr/bin/sh": ("Tag_CPU_arch: " + arch, "0"),
         },
         host,
     )
+    host.run('echo "' + FTL_BRANCH + '" > /etc/pihole/ftlbranch')
     detectPlatform = host.run(
         """
     source /opt/pihole/basic-install.sh
@@ -284,188 +268,30 @@ def test_FTL_detect_armv6_no_errors(host):
     FTLdetect "${binary}" "${theRest}"
     """
     )
-    expected_stdout = info_box + " FTL Checks..."
-    assert expected_stdout in detectPlatform.stdout
-    expected_stdout = tick_box + " Detected ARMv6 architecture"
-    assert expected_stdout in detectPlatform.stdout
-    expected_stdout = tick_box + " Downloading and Installing FTL"
-    assert expected_stdout in detectPlatform.stdout
-
-
-def test_FTL_detect_armv7l_no_errors(host):
-    """
-    confirms only armv7l package is downloaded for FTL engine
-    """
-    # mock uname to return armv7l platform
-    mock_command("uname", {"-m": ("armv7l", "0")}, host)
-    # mock readelf to respond with armv7l CPU architecture
-    mock_command_2(
-        "readelf",
-        {
-            "-A /bin/sh": ("Tag_CPU_arch: armv7l", "0"),
-            "-A /usr/bin/sh": ("Tag_CPU_arch: armv7l", "0"),
-        },
-        host,
-    )
-    detectPlatform = host.run(
-        """
-    source /opt/pihole/basic-install.sh
-    create_pihole_user
-    funcOutput=$(get_binary_name)
-    binary="pihole-FTL${funcOutput##*pihole-FTL}"
-    theRest="${funcOutput%pihole-FTL*}"
-    FTLdetect "${binary}" "${theRest}"
-    """
-    )
-    expected_stdout = info_box + " FTL Checks..."
-    assert expected_stdout in detectPlatform.stdout
-    expected_stdout = tick_box + (" Detected ARMv7 (or newer) architecture")
-    assert expected_stdout in detectPlatform.stdout
-    expected_stdout = tick_box + " Downloading and Installing FTL"
-    assert expected_stdout in detectPlatform.stdout
-
-
-def test_FTL_detect_armv7_no_errors(host):
-    """
-    confirms only armv7 package is downloaded for FTL engine
-    """
-    # mock uname to return armv7 platform
-    mock_command("uname", {"-m": ("armv7", "0")}, host)
-    # mock readelf to respond with armv7 CPU architecture
-    mock_command_2(
-        "readelf",
-        {
-            "-A /bin/sh": ("Tag_CPU_arch: armv7", "0"),
-            "-A /usr/bin/sh": ("Tag_CPU_arch: armv7", "0"),
-        },
-        host,
-    )
-    detectPlatform = host.run(
-        """
-    source /opt/pihole/basic-install.sh
-    create_pihole_user
-    funcOutput=$(get_binary_name)
-    binary="pihole-FTL${funcOutput##*pihole-FTL}"
-    theRest="${funcOutput%pihole-FTL*}"
-    FTLdetect "${binary}" "${theRest}"
-    """
-    )
-    expected_stdout = info_box + " FTL Checks..."
-    assert expected_stdout in detectPlatform.stdout
-    expected_stdout = tick_box + (" Detected ARMv7 (or newer) architecture")
-    assert expected_stdout in detectPlatform.stdout
-    expected_stdout = tick_box + " Downloading and Installing FTL"
-    assert expected_stdout in detectPlatform.stdout
-
-
-def test_FTL_detect_armv8a_no_errors(host):
-    """
-    confirms only armv8a package is downloaded for FTL engine
-    """
-    # mock uname to return armv8a platform
-    mock_command("uname", {"-m": ("armv8a", "0")}, host)
-    # mock readelf to respond with armv8a CPU architecture
-    mock_command_2(
-        "readelf",
-        {
-            "-A /bin/sh": ("Tag_CPU_arch: armv8a", "0"),
-            "-A /usr/bin/sh": ("Tag_CPU_arch: armv8a", "0"),
-        },
-        host,
-    )
-    detectPlatform = host.run(
-        """
-    source /opt/pihole/basic-install.sh
-    create_pihole_user
-    funcOutput=$(get_binary_name)
-    binary="pihole-FTL${funcOutput##*pihole-FTL}"
-    theRest="${funcOutput%pihole-FTL*}"
-    FTLdetect "${binary}" "${theRest}"
-    """
-    )
-    expected_stdout = info_box + " FTL Checks..."
-    assert expected_stdout in detectPlatform.stdout
-    expected_stdout = tick_box + " Detected ARMv7 (or newer) architecture (armv8a)"
-    assert expected_stdout in detectPlatform.stdout
-    expected_stdout = tick_box + " Downloading and Installing FTL"
-    assert expected_stdout in detectPlatform.stdout
-
-
-def test_FTL_detect_x86_64_no_errors(host):
-    """
-    confirms only x86_64 package is downloaded for FTL engine
-    """
-    detectPlatform = host.run(
-        """
-    source /opt/pihole/basic-install.sh
-    create_pihole_user
-    funcOutput=$(get_binary_name)
-    binary="pihole-FTL${funcOutput##*pihole-FTL}"
-    theRest="${funcOutput%pihole-FTL*}"
-    FTLdetect "${binary}" "${theRest}"
-    """
-    )
-    expected_stdout = info_box + " FTL Checks..."
-    assert expected_stdout in detectPlatform.stdout
-    expected_stdout = tick_box + " Detected x86_64 architecture"
-    assert expected_stdout in detectPlatform.stdout
-    expected_stdout = tick_box + " Downloading and Installing FTL"
-    assert expected_stdout in detectPlatform.stdout
-
-
-def test_FTL_detect_unknown_no_errors(host):
-    """confirms only generic package is downloaded for FTL engine"""
-    # mock uname to return generic platform
-    mock_command("uname", {"-m": ("mips", "0")}, host)
-    detectPlatform = host.run(
-        """
-    source /opt/pihole/basic-install.sh
-    create_pihole_user
-    funcOutput=$(get_binary_name)
-    binary="pihole-FTL${funcOutput##*pihole-FTL}"
-    theRest="${funcOutput%pihole-FTL*}"
-    FTLdetect "${binary}" "${theRest}"
-    """
-    )
-    expected_stdout = "Not able to detect architecture (unknown: mips)"
-    assert expected_stdout in detectPlatform.stdout
-
-
-def test_FTL_download_aarch64_no_errors(host):
-    """
-    confirms only aarch64 package is downloaded for FTL engine
-    """
-    # mock dialog answers and ensure installer dependencies
-    mock_command("dialog", {"*": ("", "0")}, host)
-    host.run(
-        """
-    source /opt/pihole/basic-install.sh
-    package_manager_detect
-    install_dependent_packages ${INSTALLER_DEPS[@]}
-    """
-    )
-    download_binary = host.run(
-        """
-    source /opt/pihole/basic-install.sh
-    create_pihole_user
-    FTLinstall "pihole-FTL-aarch64-linux-gnu"
-    """
-    )
-    expected_stdout = tick_box + " Downloading and Installing FTL"
-    assert expected_stdout in download_binary.stdout
-    assert "error" not in download_binary.stdout.lower()
+    if supported:
+        expected_stdout = info_box + " FTL Checks..."
+        assert expected_stdout in detectPlatform.stdout
+        expected_stdout = tick_box + " Detected " + detected_string + " architecture"
+        assert expected_stdout in detectPlatform.stdout
+        expected_stdout = tick_box + " Downloading and Installing FTL"
+        assert expected_stdout in detectPlatform.stdout
+    else:
+        expected_stdout = (
+            "Not able to detect architecture (unknown: " + detected_string + ")"
+        )
+        assert expected_stdout in detectPlatform.stdout
 
 
 def test_FTL_development_binary_installed_and_responsive_no_errors(host):
     """
     confirms FTL development binary is copied and functional in installed location
     """
+    host.run('echo "' + FTL_BRANCH + '" > /etc/pihole/ftlbranch')
     host.run(
         """
     source /opt/pihole/basic-install.sh
     create_pihole_user
     funcOutput=$(get_binary_name)
-    echo "development" > /etc/pihole/ftlbranch
     binary="pihole-FTL${funcOutput##*pihole-FTL}"
     theRest="${funcOutput%pihole-FTL*}"
     FTLdetect "${binary}" "${theRest}"
diff --git a/test/tox.fedora_36.ini b/test/tox.fedora_36.ini
deleted file mode 100644
index 1896a45f..00000000
--- a/test/tox.fedora_36.ini
+++ /dev/null
@@ -1,8 +0,0 @@
-[tox]
-envlist = py3
-
-[testenv:py3]
-allowlist_externals = docker
-deps = -rrequirements.txt
-commands = docker buildx build --load --progress plain -f _fedora_36.Dockerfile -t pytest_pihole:test_container ../
-           pytest {posargs:-vv -n auto} ./test_any_automated_install.py ./test_any_utils.py ./test_centos_fedora_common_support.py
diff --git a/test/tox.fedora_37.ini b/test/tox.fedora_39.ini
similarity index 84%
rename from test/tox.fedora_37.ini
rename to test/tox.fedora_39.ini
index 9c8752cc..5c8557c9 100644
--- a/test/tox.fedora_37.ini
+++ b/test/tox.fedora_39.ini
@@ -4,5 +4,5 @@ envlist = py3
 [testenv]
 allowlist_externals = docker
 deps = -rrequirements.txt
-commands = docker buildx build --load --progress plain -f _fedora_37.Dockerfile -t pytest_pihole:test_container ../
+commands = docker buildx build --load --progress plain -f _fedora_39.Dockerfile -t pytest_pihole:test_container ../
            pytest {posargs:-vv -n auto} ./test_any_automated_install.py ./test_any_utils.py ./test_centos_fedora_common_support.py