Compare commits


No commits in common. "master" and "v6.0" have entirely different histories.
master ... v6.0

63 changed files with 1367 additions and 1577 deletions

.github/CODEOWNERS vendored

@@ -1,5 +0,0 @@
-# see https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-code-owners#codeowners-syntax
-# These owners will be the default owners for everything in
-# the repo. Unless a later match takes precedence,
-* @pi-hole/core-maintainers


@@ -8,10 +8,8 @@ updates:
 time: "10:00"
 open-pull-requests-limit: 10
 target-branch: development
-groups:
-github-actions-dependencies:
-patterns:
-- "*"
+reviewers:
+- "pi-hole/core-maintainers"
 - package-ecosystem: pip
 directory: "/test"
 schedule:
@@ -20,7 +18,5 @@ updates:
 time: "10:00"
 open-pull-requests-limit: 10
 target-branch: development
-groups:
-python-dependencies:
-patterns:
-- "*"
+reviewers:
+- "pi-hole/core-maintainers"

.github/release.yml vendored

@@ -2,7 +2,6 @@ changelog:
 exclude:
 labels:
 - internal
-- dependencies
 authors:
 - dependabot
 - github-actions


@@ -25,16 +25,16 @@ jobs:
 steps:
 -
 name: Checkout repository
-uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 #v6.0.0
+uses: actions/checkout@v4.2.2
 # Initializes the CodeQL tools for scanning.
 -
 name: Initialize CodeQL
-uses: github/codeql-action/init@e12f0178983d466f2f6028f5cc7a6d786fd97f4b #v4.31.4
+uses: github/codeql-action/init@v3
 with:
 languages: 'python'
 -
 name: Autobuild
-uses: github/codeql-action/autobuild@e12f0178983d466f2f6028f5cc7a6d786fd97f4b #v4.31.4
+uses: github/codeql-action/autobuild@v3
 -
 name: Perform CodeQL Analysis
-uses: github/codeql-action/analyze@e12f0178983d466f2f6028f5cc7a6d786fd97f4b #v4.31.4
+uses: github/codeql-action/analyze@v3


@@ -13,7 +13,7 @@ jobs:
 runs-on: ubuntu-latest
 steps:
 - name: Check if PRs are have merge conflicts
-uses: eps1lon/actions-label-merge-conflict@1df065ebe6e3310545d4f4c4e862e43bdca146f0 #v3.0.3
+uses: eps1lon/actions-label-merge-conflict@v3.0.3
 with:
 dirtyLabel: "PR: Merge Conflict"
 repoToken: "${{ secrets.GITHUB_TOKEN }}"


@@ -17,14 +17,14 @@ jobs:
 issues: write
 steps:
-- uses: actions/stale@5f858e3efba33a5ca4407a664cc011ad407f2008 #v10.1.0
+- uses: actions/stale@v9.1.0
 with:
 repo-token: ${{ secrets.GITHUB_TOKEN }}
 days-before-stale: 30
 days-before-close: 5
 stale-issue-message: 'This issue is stale because it has been open 30 days with no activity. Please comment or update this issue or it will be closed in 5 days.'
 stale-issue-label: '${{ env.stale_label }}'
-exempt-issue-labels: 'Internal, Fixed in next release, Bug: Confirmed, Documentation Needed, never-stale'
+exempt-issue-labels: 'Internal, Fixed in next release, Bug: Confirmed, Documentation Needed'
 exempt-all-issue-assignees: true
 operations-per-run: 300
 close-issue-reason: 'not_planned'
@@ -40,7 +40,7 @@ jobs:
 runs-on: ubuntu-latest
 steps:
 - name: Checkout
-uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 #v6.0.0
+uses: actions/checkout@v4.2.2
 - name: Remove 'stale' label
 run: gh issue edit ${{ github.event.issue.number }} --remove-label ${{ env.stale_label }}
 env:


@@ -17,7 +17,7 @@ jobs:
 pull-requests: write
 steps:
-- uses: actions/stale@5f858e3efba33a5ca4407a664cc011ad407f2008 #v10.1.0
+- uses: actions/stale@v9.1.0
 with:
 repo-token: ${{ secrets.GITHUB_TOKEN }}
 # Do not automatically mark PR/issue as stale


@@ -33,7 +33,7 @@ jobs:
 name: Syncing branches
 steps:
 - name: Checkout
-uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 #v6.0.0
+uses: actions/checkout@v4.2.2
 - name: Opening pull request
 run: gh pr create -B development -H master --title 'Sync master back into development' --body 'Created by Github action' --label 'internal'
 env:


@@ -18,9 +18,7 @@ jobs:
 runs-on: ubuntu-latest
 steps:
 - name: Checkout repository
-uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 #v6.0.0
-with:
-fetch-depth: 0 # Differential ShellCheck requires full git history
+uses: actions/checkout@v4.2.2
 - name: Check scripts in repository are executable
 run: |
@@ -30,26 +28,26 @@ jobs:
 # If FAIL is 1 then we fail.
 [[ $FAIL == 1 ]] && exit 1 || echo "Scripts are executable!"
-- name: Differential ShellCheck
-uses: redhat-plumbers-in-action/differential-shellcheck@0d9e5b29625f871e6a4215380486d6f1a7cb6cdd #v5.5.5
+- name: Run shellcheck
+uses: ludeeus/action-shellcheck@master
 with:
-severity: warning
-display-engine: sarif-fmt
+check_together: 'yes'
+format: tty
+severity: error
 - name: Spell-Checking
-uses: codespell-project/actions-codespell@8f01853be192eb0f849a5c7d721450e7a467c579 #v2.2
+uses: codespell-project/actions-codespell@master
 with:
 ignore_words_file: .codespellignore
 - name: Get editorconfig-checker
-uses: editorconfig-checker/action-editorconfig-checker@4b6cd6190d435e7e084fb35e36a096e98506f7b9 #v2.1.0
+uses: editorconfig-checker/action-editorconfig-checker@main # tag v1.0.0 is really out of date
 - name: Run editorconfig-checker
 run: editorconfig-checker
 - name: Check python code formatting with black
-uses: psf/black@05f0a8ce1f71fbb36e1e032d3b518c7b945089a2 #25.11.0
+uses: psf/black@stable
 with:
 src: "./test"
 options: "--check --diff --color"
@@ -65,29 +63,23 @@ jobs:
 [
 debian_11,
 debian_12,
-debian_13,
 ubuntu_20,
 ubuntu_22,
 ubuntu_24,
 centos_9,
-centos_10,
 fedora_40,
 fedora_41,
-fedora_42,
-fedora_43,
-alpine_3_21,
-alpine_3_22,
 ]
 env:
 DISTRO: ${{matrix.distro}}
 steps:
 - name: Checkout repository
-uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 #v6.0.0
+uses: actions/checkout@v4.2.2
-- name: Set up Python
-uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c #v6.0.0
+- name: Set up Python 3.10
+uses: actions/setup-python@v5.4.0
 with:
-python-version: "3.13"
+python-version: "3.10"
 - name: Install wheel
 run: pip install wheel

.gitignore vendored

@@ -10,6 +10,3 @@ __pycache__
 .idea/
 *.iml
 .vscode/
-.venv/
-.fleet/
-.cache/


@@ -1,6 +0,0 @@
-external-sources=true # allow shellcheck to read external sources
-disable=SC3043 #disable SC3043: In POSIX sh, local is undefined.
-enable=useless-use-of-cat # disabled by default as of shellcheck 0.11.0
-enable=avoid-negated-conditions # avoid-negated-conditions is optional as of shellcheck 0.11.0
-enable=require-variable-braces
-enable=deprecate-which


@@ -3,9 +3,13 @@
 #
 <p align="center">
-<img src="https://raw.githubusercontent.com/pi-hole/graphics/refs/heads/master/Vortex/vortex_with_text.svg" alt="Pi-hole website" width="168" height="270">
-<br>
-<strong>Network-wide ad blocking via your own Linux hardware</strong>
+<picture>
+<source media="(prefers-color-scheme: dark)" srcset="https://pi-hole.github.io/graphics/Vortex/Vortex_Vertical_wordmark_darkmode.png">
+<source media="(prefers-color-scheme: light)" srcset="https://pi-hole.github.io/graphics/Vortex/Vortex_Vertical_wordmark_lightmode.png">
+<img src="https://pi-hole.github.io/graphics/Vortex/Vortex_Vertical_wordmark_lightmode.png" width="168" height="270" alt="Pi-hole website">
+</picture>
+<br>
+<strong>Network-wide ad blocking via your own Linux hardware</strong>
 </p>
 <!-- markdownlint-enable MD033 -->
@@ -128,10 +132,7 @@ Some of the statistics you can integrate include:
 - Queries cached
 - Unique clients
-Access the API using:
-- your browser: http://pi.hole/api/docs
-- `curl`: `curl --connect-timeout 2 -ks "https://pi.hole/api/stats/summary" -H "Accept: application/json"`;
-- the command line - examples: `pihole api config/webserver/port` or `pihole api stats/summary`.
+Access the API via [`telnet`](https://github.com/pi-hole/FTL), the Web (`admin/api.php`) and Command Line (`pihole -c -j`). You can find out [more details over here](https://discourse.pi-hole.net/t/pi-hole-api/1863).
 ### The Command-Line Interface
@@ -139,7 +140,7 @@ The [pihole](https://docs.pi-hole.net/core/pihole-command/) command has all the
 Some notable features include:
-- [Allowlisting, Denylisting (fka Whitelisting, Blacklisting), and Regex](https://docs.pi-hole.net/core/pihole-command/#allowlisting-denylisting-and-regex)
+- [Whitelisting, Blacklisting, and Regex](https://docs.pi-hole.net/core/pihole-command/#whitelisting-blacklisting-and-regex)
 - [Debugging utility](https://docs.pi-hole.net/core/pihole-command/#debugger)
 - [Viewing the live log file](https://docs.pi-hole.net/core/pihole-command/#tail)
 - [Updating Ad Lists](https://docs.pi-hole.net/core/pihole-command/#gravity)

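For orientation, the master column's curl example can be combined with the jq fallback used elsewhere in this diff; a minimal sketch, assuming a reachable Pi-hole at pi.hole (endpoint and flags are quoted from the README, the jq fallback mirrors api.sh):

#!/usr/bin/env sh
# Fetch the v6 API summary; -k tolerates the self-signed certificate, as in the README example
summary=$(curl --connect-timeout 2 -ks "https://pi.hole/api/stats/summary" -H "Accept: application/json")
# Pretty-print with jq when available and the payload is JSON; otherwise print it as-is
echo "${summary}" | jq . 2>/dev/null || echo "${summary}"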

@@ -1,12 +1,10 @@
-#!/usr/bin/env sh
-# shellcheck disable=SC2034 # Disable warning about unused variables
 # Determine if terminal is capable of showing colors
-# When COL_TABLE is sourced via gravity invoked by FTL, FORCE_COLOR is set to true
-if { [ -t 1 ] && [ "$(tput colors)" -ge 8 ]; } || [ "${FORCE_COLOR}" ]; then
+if ([ -t 1 ] && [ $(tput colors) -ge 8 ]) || [ "${WEBCALL}" ]; then
 # Bold and underline may not show up on all clients
 # If something MUST be emphasized, use both
 COL_BOLD=''
+COL_ULINE=''
 COL_NC=''
 COL_GRAY=''
 COL_RED=''
@@ -18,6 +16,8 @@ if { [ -t 1 ] && [ "$(tput colors)" -ge 8 ]; } || [ "${FORCE_COLOR}" ]; then
 else
 # Provide empty variables for `set -u`
 COL_BOLD=""
+COL_ULINE=""
 COL_NC=""
 COL_GRAY=""
 COL_RED=""
@@ -28,8 +28,22 @@ else
 COL_CYAN=""
 fi
+# Deprecated variables
+COL_WHITE="${COL_BOLD}"
+COL_BLACK="${COL_NC}"
+COL_LIGHT_BLUE="${COL_BLUE}"
+COL_LIGHT_GREEN="${COL_GREEN}"
+COL_LIGHT_CYAN="${COL_CYAN}"
+COL_LIGHT_RED="${COL_RED}"
+COL_URG_RED="${COL_RED}${COL_BOLD}${COL_ULINE}"
+COL_LIGHT_PURPLE="${COL_PURPLE}"
+COL_BROWN="${COL_YELLOW}"
+COL_LIGHT_GRAY="${COL_GRAY}"
+COL_DARK_GRAY="${COL_GRAY}"
 TICK="[${COL_GREEN}✓${COL_NC}]"
 CROSS="[${COL_RED}✗${COL_NC}]"
 INFO="[i]"
 QST="[?]"
+DONE="${COL_GREEN} done!${COL_NC}"
 OVER="\\r"

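The scripts below consume this table the same way on both sides of the diff; a minimal usage sketch, assuming the file is installed at the path those scripts source it from:

#!/usr/bin/env sh
# Source the color table and reuse its status markers
. /opt/pihole/COL_TABLE
echo "${TICK} terminal colors detected"
echo "${CROSS} example failure marker"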

@@ -1,4 +1,5 @@
 #!/usr/bin/env sh
+# shellcheck disable=SC3043 #https://github.com/koalaman/shellcheck/wiki/SC3043#exceptions
 # Pi-hole: A black hole for Internet advertisements
 # (c) 2017 Pi-hole, LLC (https://pi-hole.net)
@@ -19,20 +20,13 @@
 TestAPIAvailability() {
-local chaos_api_list authResponse authStatus authData apiAvailable DNSport
 # as we are running locally, we can get the port value from FTL directly
-PI_HOLE_SCRIPT_DIR="/opt/pihole"
-utilsfile="${PI_HOLE_SCRIPT_DIR}/utils.sh"
-# shellcheck source=./advanced/Scripts/utils.sh
-. "${utilsfile}"
-DNSport=$(getFTLConfigValue dns.port)
+local chaos_api_list availabilityResponse
 # Query the API URLs from FTL using CHAOS TXT local.api.ftl
 # The result is a space-separated enumeration of full URLs
 # e.g., "http://localhost:80/api/" "https://localhost:443/api/"
-chaos_api_list="$(dig +short -p "${DNSport}" chaos txt local.api.ftl @127.0.0.1)"
+chaos_api_list="$(dig +short chaos txt local.api.ftl @127.0.0.1)"
 # If the query was not successful, the variable is empty
 if [ -z "${chaos_api_list}" ]; then
@@ -40,12 +34,6 @@ TestAPIAvailability() {
 exit 1
 fi
-# If an error occurred, the variable starts with ;;
-if [ "${chaos_api_list#;;}" != "${chaos_api_list}" ]; then
-echo "Communication error. Is FTL running?"
-exit 1
-fi
 # Iterate over space-separated list of URLs
 while [ -n "${chaos_api_list}" ]; do
 # Get the first URL
@@ -54,50 +42,39 @@ TestAPIAvailability() {
 API_URL="${API_URL%\"}"
 API_URL="${API_URL#\"}"
-# Test if the API is available at this URL, include delimiter for ease in splitting payload
-authResponse=$(curl --connect-timeout 2 -skS -w ">>%{http_code}" "${API_URL}auth")
-# authStatus is the response http_code, eg. 200, 401.
-# Shell parameter expansion, remove everything up to and including the >> delim
-authStatus=${authResponse#*>>}
-# data is everything from response
-# Shell parameter expansion, remove the >> delim and everything after
-authData=${authResponse%>>*}
+# Test if the API is available at this URL
+availabilityResponse=$(curl -skS -o /dev/null -w "%{http_code}" "${API_URL}auth")
 # Test if http status code was 200 (OK) or 401 (authentication required)
-if [ "${authStatus}" = 200 ]; then
-# API is available without authentication
-apiAvailable=true
-needAuth=false
-break
-elif [ "${authStatus}" = 401 ]; then
-# API is available with authentication
-apiAvailable=true
-needAuth=true
-# Check if 2FA is required
-needTOTP=$(echo "${authData}"| jq --raw-output .session.totp 2>/dev/null)
-break
-else
+if [ ! "${availabilityResponse}" = 200 ] && [ ! "${availabilityResponse}" = 401 ]; then
 # API is not available at this port/protocol combination
-apiAvailable=false
-# Remove the first URL from the list
-local last_api_list
-last_api_list="${chaos_api_list}"
-chaos_api_list="${chaos_api_list#* }"
-# If the list did not change, we are at the last element
-if [ "${last_api_list}" = "${chaos_api_list}" ]; then
-# Remove the last element
-chaos_api_list=""
+API_PORT=""
+else
+# API is available at this URL combination
+if [ "${availabilityResponse}" = 200 ]; then
+# API is available without authentication
+needAuth=false
 fi
+break
+fi
+# Remove the first URL from the list
+local last_api_list
+last_api_list="${chaos_api_list}"
+chaos_api_list="${chaos_api_list#* }"
+# If the list did not change, we are at the last element
+if [ "${last_api_list}" = "${chaos_api_list}" ]; then
+# Remove the last element
+chaos_api_list=""
 fi
 done
-# if apiAvailable is false, no working API was found
-if [ "${apiAvailable}" = false ]; then
-echo "API not available. Please check FTL.log"
+# if API_PORT is empty, no working API port was found
+if [ -n "${API_PORT}" ]; then
+echo "API not available at: ${API_URL}"
 echo "Exiting."
 exit 1
 fi
@@ -125,58 +102,22 @@ LoginAPI() {
 echo "API Authentication: Trying to use CLI password"
 fi
-# If we can read the CLI password, we can skip 2FA even when it's required otherwise
-needTOTP=false
+# Try to authenticate using the CLI password
+Authentication "${1}"
 elif [ "${1}" = "verbose" ]; then
 echo "API Authentication: CLI password not available"
 fi
-if [ -z "${password}" ]; then
-# no password read from CLI file
-echo "Please enter your password:"
-# secretly read the password
-secretRead; printf '\n'
-fi
-if [ "${needTOTP}" = true ]; then
-# 2FA required
-echo "Please enter the correct second factor."
-echo "(Can be any number if you used the app password)"
-read -r totp
-fi
-# Try to authenticate using the supplied password (CLI file or user input) and TOTP
-Authentication "${1}"
-# Try to login again until the session is valid
-while [ ! "${validSession}" = true ] ; do
-# Print the error message if there is one
-if [ ! "${sessionError}" = "null" ] && [ "${1}" = "verbose" ]; then
-echo "Error: ${sessionError}"
-fi
-# Print the session message if there is one
-if [ ! "${sessionMessage}" = "null" ] && [ "${1}" = "verbose" ]; then
-echo "Error: ${sessionMessage}"
-fi
-if [ "${1}" = "verbose" ]; then
-# If we are not in verbose mode, no need to print the error message again
-echo "Please enter your Pi-hole password"
-else
-echo "Authentication failed. Please enter your Pi-hole password"
-fi
+# If this did not work, ask the user for the password
+while [ "${validSession}" = false ] || [ -z "${validSession}" ] ; do
+echo "Authentication failed. Please enter your Pi-hole password"
 # secretly read the password
 secretRead; printf '\n'
-if [ "${needTOTP}" = true ]; then
-echo "Please enter the correct second factor:"
-echo "(Can be any number if you used the app password)"
-read -r totp
-fi
 # Try to authenticate again
 Authentication "${1}"
 done
@@ -184,34 +125,23 @@ LoginAPI() {
 }
 Authentication() {
-sessionResponse="$(curl --connect-timeout 2 -skS -X POST "${API_URL}auth" --user-agent "Pi-hole cli" --data "{\"password\":\"${password}\", \"totp\":${totp:-null}}" )"
+sessionResponse="$(curl -skS -X POST "${API_URL}auth" --user-agent "Pi-hole cli " --data "{\"password\":\"${password}\"}" )"
 if [ -z "${sessionResponse}" ]; then
 echo "No response from FTL server. Please check connectivity"
 exit 1
 fi
-# obtain validity, session ID, sessionMessage and error message from
-# session response, apply default values if none returned
-result=$(echo "${sessionResponse}" | jq -r '
-(.session.valid // false),
-(.session.sid // null),
-(.session.message // null),
-(.error.message // null)
-' 2>/dev/null)
-validSession=$(echo "${result}" | sed -n '1p')
-SID=$(echo "${result}" | sed -n '2p')
-sessionMessage=$(echo "${result}" | sed -n '3p')
-sessionError=$(echo "${result}" | sed -n '4p')
+# obtain validity and session ID from session response
+validSession=$(echo "${sessionResponse}"| jq .session.valid 2>/dev/null)
+SID=$(echo "${sessionResponse}"| jq --raw-output .session.sid 2>/dev/null)
 if [ "${1}" = "verbose" ]; then
 if [ "${validSession}" = true ]; then
 echo "API Authentication: ${COL_GREEN}Success${COL_NC}"
 else
 echo "API Authentication: ${COL_RED}Failed${COL_NC}"
 fi
 fi
 }
 LogoutAPI() {
@@ -249,7 +179,7 @@ GetFTLData() {
 # return only the data
 if [ "${status}" = 200 ]; then
 # response OK
-printf %s "${data}"
+echo "${data}"
 else
 # connection lost
 echo "${status}"
@@ -323,23 +253,14 @@ secretRead() {
 }
 apiFunc() {
-local data response status status_col verbosity
-# Define if the output will be silent (default) or verbose
-verbosity="silent"
-if [ "$1" = "verbose" ]; then
-verbosity="verbose"
-shift
-fi
+local data response status status_col
 # Authenticate with the API
-LoginAPI "${verbosity}"
-if [ "${verbosity}" = "verbose" ]; then
-echo ""
-echo "Requesting: ${COL_PURPLE}GET ${COL_CYAN}${API_URL}${COL_YELLOW}$1${COL_NC}"
-echo ""
-fi
+LoginAPI verbose
+echo ""
+echo "Requesting: ${COL_PURPLE}GET ${COL_CYAN}${API_URL}${COL_YELLOW}$1${COL_NC}"
+echo ""
 # Get the data from the API
 response=$(GetFTLData "$1" raw)
@@ -356,21 +277,17 @@ apiFunc() {
 else
 status_col="${COL_RED}"
 fi
-# Only print the status in verbose mode or if the status is not 200
-if [ "${verbosity}" = "verbose" ] || [ "${status}" != 200 ]; then
-echo "Status: ${status_col}${status}${COL_NC}"
-fi
+echo "Status: ${status_col}${status}${COL_NC}"
 # Output the data. Format it with jq if available and data is actually JSON.
 # Otherwise just print it
-if [ "${verbosity}" = "verbose" ]; then
-echo "Data:"
-fi
-# Attempt to print the data with jq, if it is not valid JSON, or not installed
-# then print the plain text.
-echo "${data}" | jq . 2>/dev/null || echo "${data}"
+echo "Data:"
+if command -v jq >/dev/null && echo "${data}" | jq . >/dev/null 2>&1; then
+echo "${data}" | jq .
+else
+echo "${data}"
+fi
 # Delete the session
-LogoutAPI "${verbosity}"
+LogoutAPI verbose
 }

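The API discovery that both versions of TestAPIAvailability rely on can be reproduced by hand; a sketch, assuming dig and curl are installed and FTL answers on the default DNS port (the master version additionally passes -p "$(getFTLConfigValue dns.port)"):

#!/usr/bin/env sh
# Ask FTL for its API URLs via the CHAOS TXT record it serves locally
chaos_api_list="$(dig +short chaos txt local.api.ftl @127.0.0.1)"
# The answer is a space-separated list of quoted URLs, e.g. "http://localhost:80/api/"
for url in ${chaos_api_list}; do
# Strip the surrounding quotes, as api.sh does with parameter expansion
url="${url%\"}"; url="${url#\"}"
# A 200 or 401 from ${url}auth means the API is reachable at this URL
curl -skS -o /dev/null -w "${url}auth -> %{http_code}\n" "${url}auth"
done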

@@ -1,5 +1,5 @@
 #!/usr/bin/env bash
+# shellcheck disable=SC1090
 # Pi-hole: A black hole for Internet advertisements
 # (c) 2019 Pi-hole, LLC (https://pi-hole.net)
@@ -13,8 +13,9 @@
 readonly scriptPath="/etc/.pihole/advanced/Scripts/database_migration/gravity"
 upgrade_gravityDB(){
-local database version
+local database piholeDir version
 database="${1}"
+piholeDir="${2}"
 # Exit early if the database does not exist (e.g. in CI tests)
 if [[ ! -f "${database}" ]]; then
@@ -150,10 +151,4 @@ upgrade_gravityDB(){
 pihole-FTL sqlite3 -ni "${database}" < "${scriptPath}/18_to_19.sql"
 version=19
 fi
-if [[ "$version" == "19" ]]; then
-# Update views to use new allowlist/denylist names
-echo -e " ${INFO} Upgrading gravity database from version 19 to 20"
-pihole-FTL sqlite3 -ni "${database}" < "${scriptPath}/19_to_20.sql"
-version=20
-fi
 }

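Every step in upgrade_gravityDB follows the same guard pattern shown in the hunk above; a reduced sketch of a single step, assuming the default gravity database location and the sqlite3 bundled with pihole-FTL:

#!/usr/bin/env bash
database="/etc/pihole/gravity.db"
scriptPath="/etc/.pihole/advanced/Scripts/database_migration/gravity"
# Read the schema version recorded in the info table (the 19_to_20.sql below updates the same row)
version=$(pihole-FTL sqlite3 -ni "${database}" "SELECT value FROM info WHERE property = 'version';")
if [[ "$version" == "18" ]]; then
    # Apply the next numbered migration and bump the in-script version counter
    pihole-FTL sqlite3 -ni "${database}" < "${scriptPath}/18_to_19.sql"
    version=19
fi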

@@ -1,43 +0,0 @@
-.timeout 30000
-BEGIN TRANSACTION;
-DROP VIEW vw_whitelist;
-CREATE VIEW vw_allowlist AS SELECT domain, domainlist.id AS id, domainlist_by_group.group_id AS group_id
-FROM domainlist
-LEFT JOIN domainlist_by_group ON domainlist_by_group.domainlist_id = domainlist.id
-LEFT JOIN "group" ON "group".id = domainlist_by_group.group_id
-WHERE domainlist.enabled = 1 AND (domainlist_by_group.group_id IS NULL OR "group".enabled = 1)
-AND domainlist.type = 0
-ORDER BY domainlist.id;
-DROP VIEW vw_blacklist;
-CREATE VIEW vw_denylist AS SELECT domain, domainlist.id AS id, domainlist_by_group.group_id AS group_id
-FROM domainlist
-LEFT JOIN domainlist_by_group ON domainlist_by_group.domainlist_id = domainlist.id
-LEFT JOIN "group" ON "group".id = domainlist_by_group.group_id
-WHERE domainlist.enabled = 1 AND (domainlist_by_group.group_id IS NULL OR "group".enabled = 1)
-AND domainlist.type = 1
-ORDER BY domainlist.id;
-DROP VIEW vw_regex_whitelist;
-CREATE VIEW vw_regex_allowlist AS SELECT domain, domainlist.id AS id, domainlist_by_group.group_id AS group_id
-FROM domainlist
-LEFT JOIN domainlist_by_group ON domainlist_by_group.domainlist_id = domainlist.id
-LEFT JOIN "group" ON "group".id = domainlist_by_group.group_id
-WHERE domainlist.enabled = 1 AND (domainlist_by_group.group_id IS NULL OR "group".enabled = 1)
-AND domainlist.type = 2
-ORDER BY domainlist.id;
-DROP VIEW vw_regex_blacklist;
-CREATE VIEW vw_regex_denylist AS SELECT domain, domainlist.id AS id, domainlist_by_group.group_id AS group_id
-FROM domainlist
-LEFT JOIN domainlist_by_group ON domainlist_by_group.domainlist_id = domainlist.id
-LEFT JOIN "group" ON "group".id = domainlist_by_group.group_id
-WHERE domainlist.enabled = 1 AND (domainlist_by_group.group_id IS NULL OR "group".enabled = 1)
-AND domainlist.type = 3
-ORDER BY domainlist.id;
-UPDATE info SET value = 20 WHERE property = 'version';
-COMMIT;


@@ -1,4 +1,5 @@
 #!/usr/bin/env bash
+# shellcheck disable=SC1090
 # Pi-hole: A black hole for Internet advertisements
 # (c) 2017 Pi-hole, LLC (https://pi-hole.net)
@@ -9,13 +10,11 @@
 # This file is copyright under the latest version of the EUPL.
 # Please see LICENSE file for your rights under this license.
-PI_HOLE_SCRIPT_DIR="/opt/pihole"
-utilsfile="${PI_HOLE_SCRIPT_DIR}/utils.sh"
-# shellcheck source="./advanced/Scripts/utils.sh"
+readonly PI_HOLE_SCRIPT_DIR="/opt/pihole"
+readonly utilsfile="${PI_HOLE_SCRIPT_DIR}/utils.sh"
 source "${utilsfile}"
-apifile="${PI_HOLE_SCRIPT_DIR}/api.sh"
-# shellcheck source="./advanced/Scripts/api.sh"
+readonly apifile="${PI_HOLE_SCRIPT_DIR}/api.sh"
 source "${apifile}"
 # Determine database location
@@ -40,7 +39,6 @@ typeId=""
 comment=""
 colfile="/opt/pihole/COL_TABLE"
-# shellcheck source="./advanced/Scripts/COL_TABLE"
 source ${colfile}
 helpFunc() {


@@ -0,0 +1,82 @@
+#!/usr/bin/env bash
+# shellcheck disable=SC1090
+# Pi-hole: A black hole for Internet advertisements
+# (c) 2019 Pi-hole, LLC (https://pi-hole.net)
+# Network-wide ad blocking via your own hardware.
+#
+# ARP table interaction
+#
+# This file is copyright under the latest version of the EUPL.
+# Please see LICENSE file for your rights under this license.
+coltable="/opt/pihole/COL_TABLE"
+if [[ -f ${coltable} ]]; then
+source ${coltable}
+fi
+readonly PI_HOLE_SCRIPT_DIR="/opt/pihole"
+utilsfile="${PI_HOLE_SCRIPT_DIR}/utils.sh"
+source "${utilsfile}"
+# Determine database location
+DBFILE=$(getFTLConfigValue "files.database")
+if [ -z "$DBFILE" ]; then
+DBFILE="/etc/pihole/pihole-FTL.db"
+fi
+flushARP(){
+local output
+if [[ "${args[1]}" != "quiet" ]]; then
+echo -ne " ${INFO} Flushing network table ..."
+fi
+# Stop FTL to prevent database access
+if ! output=$(service pihole-FTL stop 2>&1); then
+echo -e "${OVER} ${CROSS} Failed to stop FTL"
+echo " Output: ${output}"
+return 1
+fi
+# Truncate network_addresses table in pihole-FTL.db
+# This needs to be done before we can truncate the network table due to
+# foreign key constraints
+if ! output=$(pihole-FTL sqlite3 -ni "${DBFILE}" "DELETE FROM network_addresses" 2>&1); then
+echo -e "${OVER} ${CROSS} Failed to truncate network_addresses table"
+echo " Database location: ${DBFILE}"
+echo " Output: ${output}"
+return 1
+fi
+# Truncate network table in pihole-FTL.db
+if ! output=$(pihole-FTL sqlite3 -ni "${DBFILE}" "DELETE FROM network" 2>&1); then
+echo -e "${OVER} ${CROSS} Failed to truncate network table"
+echo " Database location: ${DBFILE}"
+echo " Output: ${output}"
+return 1
+fi
+# Flush ARP cache of the host
+if ! output=$(ip -s -s neigh flush all 2>&1); then
+echo -e "${OVER} ${CROSS} Failed to flush ARP cache"
+echo " Output: ${output}"
+return 1
+fi
+# Start FTL again
+if ! output=$(service pihole-FTL restart 2>&1); then
+echo -e "${OVER} ${CROSS} Failed to restart FTL"
+echo " Output: ${output}"
+return 1
+fi
+if [[ "${args[1]}" != "quiet" ]]; then
+echo -e "${OVER} ${TICK} Flushed network table"
+fi
+}
+args=("$@")
+case "${args[0]}" in
+"arpflush" ) flushARP;;
+esac

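The deletion order inside flushARP matters: network_addresses has to be emptied before network because of the foreign-key constraint the comments mention. A reduced sketch of just that part, assuming the default FTL database path used as the fallback above:

#!/usr/bin/env bash
DBFILE="/etc/pihole/pihole-FTL.db"
# Child table first, then the network table its rows reference
pihole-FTL sqlite3 -ni "${DBFILE}" "DELETE FROM network_addresses"
pihole-FTL sqlite3 -ni "${DBFILE}" "DELETE FROM network"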

@@ -10,7 +10,6 @@
 readonly PI_HOLE_FILES_DIR="/etc/.pihole"
 SKIP_INSTALL="true"
-# shellcheck source="./automated install/basic-install.sh"
 source "${PI_HOLE_FILES_DIR}/automated install/basic-install.sh"
 # webInterfaceGitUrl set in basic-install.sh
@@ -26,7 +25,7 @@ source "${PI_HOLE_FILES_DIR}/automated install/basic-install.sh"
 warning1() {
 echo " Please note that changing branches severely alters your Pi-hole subsystems"
 echo " Features that work on the master branch, may not on a development branch"
-echo -e " ${COL_RED}This feature is NOT supported unless a Pi-hole developer explicitly asks!${COL_NC}"
+echo -e " ${COL_LIGHT_RED}This feature is NOT supported unless a Pi-hole developer explicitly asks!${COL_NC}"
 read -r -p " Have you read and understood this? [y/N] " response
 case "${response}" in
 [yY][eE][sS]|[yY])
@@ -55,19 +54,19 @@ checkout() {
 # This is unlikely
 if ! is_repo "${PI_HOLE_FILES_DIR}" ; then
-echo -e " ${COL_RED}Error: Core Pi-hole repo is missing from system!"
+echo -e " ${COL_LIGHT_RED}Error: Core Pi-hole repo is missing from system!"
 echo -e " Please re-run install script from https://github.com/pi-hole/pi-hole${COL_NC}"
 exit 1;
 fi
 if ! is_repo "${webInterfaceDir}" ; then
-echo -e " ${COL_RED}Error: Web Admin repo is missing from system!"
+echo -e " ${COL_LIGHT_RED}Error: Web Admin repo is missing from system!"
 echo -e " Please re-run install script from https://github.com/pi-hole/pi-hole${COL_NC}"
 exit 1;
 fi
 if [[ -z "${1}" ]]; then
-echo -e " ${COL_RED}Invalid option${COL_NC}"
+echo -e " ${COL_LIGHT_RED}Invalid option${COL_NC}"
 echo -e " Try 'pihole checkout --help' for more information."
 exit 1
 fi
@@ -110,7 +109,7 @@ checkout() {
 echo -e "${OVER} ${CROSS} $str"
 exit 1
 fi
-mapfile -t corebranches < <(get_available_branches "${PI_HOLE_FILES_DIR}")
+corebranches=($(get_available_branches "${PI_HOLE_FILES_DIR}"))
 if [[ "${corebranches[*]}" == *"master"* ]]; then
 echo -e "${OVER} ${TICK} $str"
@@ -137,7 +136,7 @@ checkout() {
 echo -e "${OVER} ${CROSS} $str"
 exit 1
 fi
-mapfile -t webbranches < <(get_available_branches "${webInterfaceDir}")
+webbranches=($(get_available_branches "${webInterfaceDir}"))
 if [[ "${webbranches[*]}" == *"master"* ]]; then
 echo -e "${OVER} ${TICK} $str"
@@ -168,7 +167,7 @@ checkout() {
 # Check if requested branch is available
 echo -e " ${INFO} Checking for availability of branch ${COL_CYAN}${2}${COL_NC} on GitHub"
-mapfile -t ftlbranches < <(git ls-remote https://github.com/pi-hole/ftl | grep "refs/heads" | cut -d'/' -f3- -)
+ftlbranches=( $(git ls-remote https://github.com/pi-hole/ftl | grep "refs/heads" | cut -d'/' -f3- -) )
 # If returned array is empty -> connectivity issue
 if [[ ${#ftlbranches[@]} -eq 0 ]]; then
 echo -e " ${CROSS} Unable to fetch branches from GitHub. Please check your Internet connection and try again later."
@@ -210,15 +209,13 @@ checkout() {
 # Update local and remote versions via updatechecker
 /opt/pihole/updatecheck.sh
 else
-local status
-status=$?
-if [ $status -eq 1 ]; then
+if [ $? -eq 1 ]; then
 # Binary for requested branch is not available, may still be
 # int he process of being built or CI build job failed
-printf " %b Binary for requested branch is not available, please try again later.\\n" "${CROSS}"
+printf " %b Binary for requested branch is not available, please try again later.\\n" ${CROSS}
 printf " If the issue persists, please contact Pi-hole Support and ask them to re-generate the binary.\\n"
 exit 1
-elif [ $status -eq 2 ]; then
+elif [ $? -eq 2 ]; then
 printf " %b Unable to download from ftl.pi-hole.net. Please check your Internet connection and try again later.\\n" "${CROSS}"
 exit 1
 else
@@ -238,7 +235,7 @@ checkout() {
 if "${PI_HOLE_FILES_DIR}/automated install/basic-install.sh" --unattended; then
 exit 0
 else
-echo -e " ${COL_RED} Error: Unable to complete update, please contact support${COL_NC}"
+echo -e " ${COL_LIGHT_RED} Error: Unable to complete update, please contact support${COL_NC}"
 exit 1
 fi
 fi

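The branch discovery used by checkout() can be exercised on its own; a sketch using the same pipeline, with mapfile (the master column's form) avoiding the word-splitting of the unquoted array assignment on the v6.0 side:

#!/usr/bin/env bash
# List FTL branch names from the remote without cloning
mapfile -t ftlbranches < <(git ls-remote https://github.com/pi-hole/ftl | grep "refs/heads" | cut -d'/' -f3- -)
printf '%s\n' "${ftlbranches[@]}"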

@@ -8,6 +8,7 @@
 # This file is copyright under the latest version of the EUPL.
 # Please see LICENSE file for your rights under this license.
+# shellcheck source=/dev/null
 # -e option instructs bash to immediately exit if any command [1] has a non-zero exit status
 # -u a reference to any variable you haven't previously defined
@@ -26,7 +27,6 @@ PIHOLE_COLTABLE_FILE="${PIHOLE_SCRIPTS_DIRECTORY}/COL_TABLE"
 # These provide the colors we need for making the log more readable
 if [[ -f ${PIHOLE_COLTABLE_FILE} ]]; then
-# shellcheck source=./advanced/Scripts/COL_TABLE
 source ${PIHOLE_COLTABLE_FILE}
 else
 COL_NC='\e[0m' # No Color
@@ -41,7 +41,7 @@ else
 #OVER="\r\033[K"
 fi
-# shellcheck source=/dev/null
+# shellcheck disable=SC1091
 . /etc/pihole/versions
 # Read the value of an FTL config key. The value is printed to stdout.
@@ -213,7 +213,7 @@ compare_local_version_to_git_version() {
 local local_status
 local_status=$(git status -s)
 # echo this information out to the user in a nice format
-if [ "${local_version}" ]; then
+if [ ${local_version} ]; then
 log_write "${TICK} Version: ${local_version}"
 elif [ -n "${DOCKER_VERSION}" ]; then
 log_write "${TICK} Version: Pi-hole Docker Container ${COL_BOLD}${DOCKER_VERSION}${COL_NC}"
@@ -296,12 +296,91 @@ check_component_versions() {
 check_ftl_version
 }
+os_check() {
+# This function gets a list of supported OS versions from a TXT record at versions.pi-hole.net
+# and determines whether or not the script is running on one of those systems
+local remote_os_domain valid_os valid_version detected_os detected_version cmdResult digReturnCode response
+remote_os_domain=${OS_CHECK_DOMAIN_NAME:-"versions.pi-hole.net"}
+detected_os=$(grep "\bID\b" /etc/os-release | cut -d '=' -f2 | tr -d '"')
+detected_version=$(grep VERSION_ID /etc/os-release | cut -d '=' -f2 | tr -d '"')
+cmdResult="$(dig -4 +short -t txt "${remote_os_domain}" @ns1.pi-hole.net 2>&1; echo $?)"
+#Get the return code of the previous command (last line)
+digReturnCode="${cmdResult##*$'\n'}"
+# Extract dig response
+response="${cmdResult%%$'\n'*}"
+if [ "${digReturnCode}" -ne 0 ]; then
+log_write "${INFO} Distro: ${detected_os^}"
+log_write "${INFO} Version: ${detected_version}"
+log_write "${CROSS} dig IPv4 return code: ${COL_RED}${digReturnCode}${COL_NC}"
+log_write "${CROSS} dig response: ${response}"
+log_write "${INFO} Retrying via IPv6"
+cmdResult="$(dig -6 +short -t txt "${remote_os_domain}" @ns1.pi-hole.net 2>&1; echo $?)"
+#Get the return code of the previous command (last line)
+digReturnCode="${cmdResult##*$'\n'}"
+# Extract dig response
+response="${cmdResult%%$'\n'*}"
+fi
+# If also no success via IPv6
+if [ "${digReturnCode}" -ne 0 ]; then
+log_write "${CROSS} dig IPv6 return code: ${COL_RED}${digReturnCode}${COL_NC}"
+log_write "${CROSS} dig response: ${response}"
+log_write "${CROSS} Error: ${COL_RED}dig command failed - Unable to check OS${COL_NC}"
+else
+IFS=" " read -r -a supportedOS < <(echo "${response}" | tr -d '"')
+for distro_and_versions in "${supportedOS[@]}"
+do
+distro_part="${distro_and_versions%%=*}"
+versions_part="${distro_and_versions##*=}"
+if [[ "${detected_os^^}" =~ ${distro_part^^} ]]; then
+valid_os=true
+IFS="," read -r -a supportedVer <<<"${versions_part}"
+for version in "${supportedVer[@]}"
+do
+if [[ "${detected_version}" =~ $version ]]; then
+valid_version=true
+break
+fi
+done
+break
+fi
+done
+# If it is a docker container, we can assume the OS is supported
+[ -n "${DOCKER_VERSION}" ] && valid_os=true && valid_version=true
+local finalmsg
+if [ "$valid_os" = true ]; then
+log_write "${TICK} Distro: ${COL_GREEN}${detected_os^}${COL_NC}"
+if [ "$valid_version" = true ]; then
+log_write "${TICK} Version: ${COL_GREEN}${detected_version}${COL_NC}"
+finalmsg="${TICK} ${COL_GREEN}Distro and version supported${COL_NC}"
+else
+log_write "${CROSS} Version: ${COL_RED}${detected_version}${COL_NC}"
+finalmsg="${CROSS} Error: ${COL_RED}${detected_os^} is supported but version ${detected_version} is currently unsupported ${COL_NC}(${FAQ_HARDWARE_REQUIREMENTS})${COL_NC}"
+fi
+else
+log_write "${CROSS} Distro: ${COL_RED}${detected_os^}${COL_NC}"
+finalmsg="${CROSS} Error: ${COL_RED}${detected_os^} is not a supported distro ${COL_NC}(${FAQ_HARDWARE_REQUIREMENTS})${COL_NC}"
+fi
+# Print dig response and the final check result
+log_write "${TICK} dig return code: ${COL_GREEN}${digReturnCode}${COL_NC}"
+log_write "${INFO} dig response: ${response}"
+log_write "${finalmsg}"
+fi
+}
 diagnose_operating_system() {
 # error message in a variable so we can easily modify it later (or reuse it)
 local error_msg="Distribution unknown -- most likely you are on an unsupported platform and may run into issues."
-local detected_os
-local detected_version
 # Display the current test that is running
 echo_current_diagnostic "Operating system"
@@ -310,13 +389,8 @@ diagnose_operating_system() {
 # If there is a /etc/*release file, it's probably a supported operating system, so we can
 if ls /etc/*release 1> /dev/null 2>&1; then
-# display the attributes to the user
-detected_os=$(grep "\bID\b" /etc/os-release | cut -d '=' -f2 | tr -d '"')
-detected_version=$(grep VERSION_ID /etc/os-release | cut -d '=' -f2 | tr -d '"')
-log_write "${INFO} Distro: ${detected_os^}"
-log_write "${INFO} Version: ${detected_version}"
+# display the attributes to the user from the function made earlier
+os_check
 else
 # If it doesn't exist, it's not a system we currently support and link to FAQ
 log_write "${CROSS} ${COL_RED}${error_msg}${COL_NC} (${FAQ_HARDWARE_REQUIREMENTS})"
@@ -367,7 +441,7 @@ check_firewalld() {
 # test common required service ports
 local firewalld_enabled_services
 firewalld_enabled_services=$(firewall-cmd --list-services)
-local firewalld_expected_services=("http" "https" "dns" "dhcp" "dhcpv6" "ntp")
+local firewalld_expected_services=("http" "dns" "dhcp" "dhcpv6")
 for i in "${firewalld_expected_services[@]}"; do
 if [[ "${firewalld_enabled_services}" =~ ${i} ]]; then
 log_write "${TICK} ${COL_GREEN} Allow Service: ${i}${COL_NC}";
@@ -388,6 +462,14 @@ check_firewalld() {
 else
 log_write "${CROSS} ${COL_RED} Local Interface Not Detected${COL_NC} (${FAQ_HARDWARE_REQUIREMENTS_FIREWALLD})"
 fi
+# check FTL custom zone port: 4711
+local firewalld_ftl_zone_ports
+firewalld_ftl_zone_ports=$(firewall-cmd --zone=ftl --list-ports)
+if [[ "${firewalld_ftl_zone_ports}" =~ "4711/tcp" ]]; then
+log_write "${TICK} ${COL_GREEN} FTL Port 4711/tcp Detected${COL_NC}";
+else
+log_write "${CROSS} ${COL_RED} FTL Port 4711/tcp Not Detected${COL_NC} (${FAQ_HARDWARE_REQUIREMENTS_FIREWALLD})"
+fi
 else
 log_write "${CROSS} ${COL_RED}FTL Custom Zone Not Detected${COL_NC} (${FAQ_HARDWARE_REQUIREMENTS_FIREWALLD})"
 fi
@@ -406,9 +488,7 @@ run_and_print_command() {
 local output
 output=$(${cmd} 2>&1)
 # If the command was successful,
-local return_code
-return_code=$?
-if [[ "${return_code}" -eq 0 ]]; then
+if [[ $? -eq 0 ]]; then
 # show the output
 log_write "${output}"
 else
@@ -489,25 +569,16 @@ ping_gateway() {
 ping_ipv4_or_ipv6 "${protocol}"
 # Check if we are using IPv4 or IPv6
 # Find the default gateways using IPv4 or IPv6
-local gateway gateway_addr gateway_iface default_route
+local gateway gateway_addr gateway_iface
 log_write "${INFO} Default IPv${protocol} gateway(s):"
-while IFS= read -r default_route; do
-gateway_addr=$(jq -r '.gateway' <<< "${default_route}")
-gateway_iface=$(jq -r '.dev' <<< "${default_route}")
-log_write " ${gateway_addr}%${gateway_iface}"
-done < <(ip -j -"${protocol}" route | jq -c '.[] | select(.dst == "default")')
-# Find the first default route
-default_route=$(ip -j -"${protocol}" route show default)
-if echo "$default_route" | grep 'gateway' | grep -q 'dev'; then
-gateway_addr=$(echo "$default_route" | jq -r -c '.[0].gateway')
-gateway_iface=$(echo "$default_route" | jq -r -c '.[0].dev')
-else
-log_write " Unable to determine gateway address for IPv${protocol}"
-fi
+while IFS= read -r gateway; do
+log_write " $(cut -d ' ' -f 3 <<< "${gateway}")%$(cut -d ' ' -f 5 <<< "${gateway}")"
+done < <(ip -"${protocol}" route | grep default)
+gateway_addr=$(ip -"${protocol}" route | grep default | cut -d ' ' -f 3 | head -n 1)
+gateway_iface=$(ip -"${protocol}" route | grep default | cut -d ' ' -f 5 | head -n 1)
 # If there was at least one gateway
 if [ -n "${gateway_addr}" ]; then
 # Append the interface to the gateway address if it is a link-local address
@@ -593,21 +664,18 @@ check_required_ports() {
 # Add port 53
 ports_configured+=("53")
-local protocol_type port_number service_name
 # Now that we have the values stored,
 for i in "${!ports_in_use[@]}"; do
 # loop through them and assign some local variables
-read -r protocol_type port_number service_name <<< "$(
-awk '{
-p=$1; n=$5; s=$7
-gsub(/users:\(\("/,"",s)
-gsub(/".*/,"",s)
-print p, n, s
-}' <<< "${ports_in_use[$i]}"
-)"
+local service_name
+service_name=$(echo "${ports_in_use[$i]}" | awk '{gsub(/users:\(\("/,"",$7);gsub(/".*/,"",$7);print $7}')
+local protocol_type
+protocol_type=$(echo "${ports_in_use[$i]}" | awk '{print $1}')
+local port_number
+port_number="$(echo "${ports_in_use[$i]}" | awk '{print $5}')" # | awk '{gsub(/^.*:/,"",$5);print $5}')
 # Check if the right services are using the right ports
-if [[ ${ports_configured[*]} =~ ${port_number##*:} ]]; then
+if [[ ${ports_configured[*]} =~ $(echo "${port_number}" | rev | cut -d: -f1 | rev) ]]; then
 compare_port_to_service_assigned "${ftl}" "${service_name}" "${protocol_type}:${port_number}"
 else
 # If it's not a default port that Pi-hole needs, just print it out for the user to see
@@ -675,7 +743,7 @@ dig_at() {
 local record_type="A"
 fi
-# Find a random blocked url that has not been allowlisted and is not ABP style.
+# Find a random blocked url that has not been whitelisted and is not ABP style.
 # This helps emulate queries to different domains that a user might query
 # It will also give extra assurance that Pi-hole is correctly resolving and blocking domains
 local random_url
@@ -725,7 +793,7 @@ dig_at() {
 fi
 # Check if Pi-hole can use itself to block a domain
-if local_dig="$(dig +tries=1 +time=2 -"${protocol}" "${random_url}" @"${local_address}" "${record_type}" -p "$(get_ftl_conf_value "dns.port")")"; then
+if local_dig="$(dig +tries=1 +time=2 -"${protocol}" "${random_url}" @"${local_address}" "${record_type}")"; then
 # If it can, show success
 if [[ "${local_dig}" == *"status: NOERROR"* ]]; then
 local_dig="NOERROR"
@@ -781,7 +849,7 @@ process_status(){
 :
 else
 # non-Docker system
-if service "${i}" status | grep -q -E 'is\srunning|started'; then
+if service "${i}" status | grep -E 'is\srunning' &> /dev/null; then
 status_of_process="active"
 else
 status_of_process="inactive"
@@ -819,27 +887,42 @@ ftl_full_status(){
 make_array_from_file() {
 local filename="${1}"
-# If the file is a directory do nothing since it cannot be parsed
-[[ -d "${filename}" ]] && return
 # The second argument can put a limit on how many line should be read from the file
 # Since some of the files are so large, this is helpful to limit the output
 local limit=${2}
 # A local iterator for testing if we are at the limit above
 local i=0
-# Process the file, strip out comments and blank lines
-local processed
-processed=$(sed -e 's/^\s*#.*$//' -e '/^$/d' "${filename}")
-while IFS= read -r line; do
-# If the string contains "### CHANGED", highlight this part in red
-log_write " ${line//### CHANGED/${COL_RED}### CHANGED${COL_NC}}"
-((i++))
-# if the limit of lines we want to see is exceeded do nothing
-[[ -n ${limit} && $i -eq ${limit} ]] && break
-done <<< "$processed"
+# If the file is a directory
+if [[ -d "${filename}" ]]; then
+# do nothing since it cannot be parsed
+:
+else
+# Otherwise, read the file line by line
+while IFS= read -r line;do
+# Otherwise, strip out comments and blank lines
+new_line=$(echo "${line}" | sed -e 's/^\s*#.*$//' -e '/^$/d')
+# If the line still has content (a non-zero value)
+if [[ -n "${new_line}" ]]; then
+# If the string contains "### CHANGED", highlight this part in red
+if [[ "${new_line}" == *"### CHANGED"* ]]; then
+new_line="${new_line//### CHANGED/${COL_RED}### CHANGED${COL_NC}}"
+fi
+# Finally, write this line to the log
+log_write " ${new_line}"
+fi
+# Increment the iterator +1
+i=$((i+1))
+# but if the limit of lines we want to see is exceeded
+if [[ -z ${limit} ]]; then
+# do nothing
+:
+elif [[ $i -eq ${limit} ]]; then
+break
+fi
+done < "${filename}"
+fi
 }
 parse_file() {
@@ -850,6 +933,7 @@ parse_file() {
 # Get the lines that are in the file(s) and store them in an array for parsing later
 local file_info
 if [[ -f "$filename" ]]; then
+#shellcheck disable=SC2016
 IFS=$'\r\n' command eval 'file_info=( $(cat "${filename}") )'
 else
 read -r -a file_info <<< "$filename"
@@ -912,38 +996,38 @@ list_files_in_dir() {
 fi
 # Store the files found in an array
-local files_found=("${dir_to_parse}"/*)
+mapfile -t files_found < <(ls "${dir_to_parse}")
 # For each file in the array,
 for each_file in "${files_found[@]}"; do
-if [[ -d "${each_file}" ]]; then
+if [[ -d "${dir_to_parse}/${each_file}" ]]; then
 # If it's a directory, do nothing
 :
-elif [[ "${each_file}" == "${PIHOLE_DEBUG_LOG}" ]] || \
-[[ "${each_file}" == "${PIHOLE_RAW_BLOCKLIST_FILES}" ]] || \
-[[ "${each_file}" == "${PIHOLE_INSTALL_LOG_FILE}" ]] || \
-[[ "${each_file}" == "${PIHOLE_LOG}" ]] || \
-[[ "${each_file}" == "${PIHOLE_LOG_GZIPS}" ]]; then
+elif [[ "${dir_to_parse}/${each_file}" == "${PIHOLE_DEBUG_LOG}" ]] || \
+[[ "${dir_to_parse}/${each_file}" == "${PIHOLE_RAW_BLOCKLIST_FILES}" ]] || \
+[[ "${dir_to_parse}/${each_file}" == "${PIHOLE_INSTALL_LOG_FILE}" ]] || \
+[[ "${dir_to_parse}/${each_file}" == "${PIHOLE_LOG}" ]] || \
+[[ "${dir_to_parse}/${each_file}" == "${PIHOLE_LOG_GZIPS}" ]]; then
 :
 elif [[ "${dir_to_parse}" == "${DNSMASQ_D_DIRECTORY}" ]]; then
 # in case of the dnsmasq directory include all files in the debug output
-log_write "\\n${COL_GREEN}$(ls -lhd "${each_file}")${COL_NC}"
-make_array_from_file "${each_file}"
+log_write "\\n${COL_GREEN}$(ls -lhd "${dir_to_parse}"/"${each_file}")${COL_NC}"
+make_array_from_file "${dir_to_parse}/${each_file}"
 else
 # Then, parse the file's content into an array so each line can be analyzed if need be
 for i in "${!REQUIRED_FILES[@]}"; do
-if [[ "${each_file}" == "${REQUIRED_FILES[$i]}" ]]; then
+if [[ "${dir_to_parse}/${each_file}" == "${REQUIRED_FILES[$i]}" ]]; then
 # display the filename
-log_write "\\n${COL_GREEN}$(ls -lhd "${each_file}")${COL_NC}"
+log_write "\\n${COL_GREEN}$(ls -lhd "${dir_to_parse}"/"${each_file}")${COL_NC}"
 # Check if the file we want to view has a limit (because sometimes we just need a little bit of info from the file, not the entire thing)
-case "${each_file}" in
+case "${dir_to_parse}/${each_file}" in
 # If it's Web server log, give the first and last 25 lines
-"${PIHOLE_WEBSERVER_LOG}") head_tail_log "${each_file}" 25
+"${PIHOLE_WEBSERVER_LOG}") head_tail_log "${dir_to_parse}/${each_file}" 25
 ;;
 # Same for the FTL log
-"${PIHOLE_FTL_LOG}") head_tail_log "${each_file}" 35
+"${PIHOLE_FTL_LOG}") head_tail_log "${dir_to_parse}/${each_file}" 35
 ;;
 # parse the file into an array in case we ever need to analyze it line-by-line
-*) make_array_from_file "${each_file}";
+*) make_array_from_file "${dir_to_parse}/${each_file}";
 esac
 else
 # Otherwise, do nothing since it's not a file needed for Pi-hole so we don't care about it
@ -979,7 +1063,6 @@ head_tail_log() {
local filename="${1}" local filename="${1}"
# The number of lines to use for head and tail # The number of lines to use for head and tail
local qty="${2}" local qty="${2}"
local filebasename="${filename##*/}"
local head_line local head_line
local tail_line local tail_line
# Put the current Internal Field Separator into another variable so it can be restored later # Put the current Internal Field Separator into another variable so it can be restored later
@ -988,14 +1071,14 @@ head_tail_log() {
IFS=$'\r\n' IFS=$'\r\n'
local log_head=() local log_head=()
mapfile -t log_head < <(head -n "${qty}" "${filename}") mapfile -t log_head < <(head -n "${qty}" "${filename}")
log_write " ${COL_CYAN}-----head of ${filebasename}------${COL_NC}" log_write " ${COL_CYAN}-----head of $(basename "${filename}")------${COL_NC}"
for head_line in "${log_head[@]}"; do for head_line in "${log_head[@]}"; do
log_write " ${head_line}" log_write " ${head_line}"
done done
log_write "" log_write ""
local log_tail=() local log_tail=()
mapfile -t log_tail < <(tail -n "${qty}" "${filename}") mapfile -t log_tail < <(tail -n "${qty}" "${filename}")
log_write " ${COL_CYAN}-----tail of ${filebasename}------${COL_NC}" log_write " ${COL_CYAN}-----tail of $(basename "${filename}")------${COL_NC}"
for tail_line in "${log_tail[@]}"; do for tail_line in "${log_tail[@]}"; do
log_write " ${tail_line}" log_write " ${tail_line}"
done done
@ -1069,15 +1152,15 @@ check_dhcp_servers() {
} }
show_groups() { show_groups() {
show_db_entries "Groups" "SELECT id,CASE enabled WHEN '0' THEN ' no' WHEN '1' THEN ' yes' ELSE enabled END enabled,name,datetime(date_added,'unixepoch','localtime') date_added,datetime(date_modified,'unixepoch','localtime') date_modified,description FROM \"group\"" "4 7 50 19 19 50" show_db_entries "Groups" "SELECT id,CASE enabled WHEN '0' THEN ' 0' WHEN '1' THEN ' 1' ELSE enabled END enabled,name,datetime(date_added,'unixepoch','localtime') date_added,datetime(date_modified,'unixepoch','localtime') date_modified,description FROM \"group\"" "4 7 50 19 19 50"
} }
show_adlists() { show_adlists() {
show_db_entries "Adlists" "SELECT id,CASE enabled WHEN '0' THEN ' no' WHEN '1' THEN ' yes' ELSE enabled END enabled,GROUP_CONCAT(adlist_by_group.group_id) group_ids, CASE type WHEN '0' THEN 'Block' WHEN '1' THEN 'Allow' ELSE type END type, address,datetime(date_added,'unixepoch','localtime') date_added,datetime(date_modified,'unixepoch','localtime') date_modified,comment FROM adlist LEFT JOIN adlist_by_group ON adlist.id = adlist_by_group.adlist_id GROUP BY id;" "5 7 12 5 100 19 19 50" show_db_entries "Adlists" "SELECT id,CASE enabled WHEN '0' THEN ' 0' WHEN '1' THEN ' 1' ELSE enabled END enabled,GROUP_CONCAT(adlist_by_group.group_id) group_ids,address,datetime(date_added,'unixepoch','localtime') date_added,datetime(date_modified,'unixepoch','localtime') date_modified,comment FROM adlist LEFT JOIN adlist_by_group ON adlist.id = adlist_by_group.adlist_id GROUP BY id;" "5 7 12 100 19 19 50"
} }
show_domainlist() { show_domainlist() {
show_db_entries "Domainlist" "SELECT id,CASE type WHEN '0' THEN 'exact-allow' WHEN '1' THEN 'exact-deny' WHEN '2' THEN 'regex-allow' WHEN '3' THEN 'regex-deny' ELSE type END type,CASE enabled WHEN '0' THEN ' no' WHEN '1' THEN ' yes' ELSE enabled END enabled,GROUP_CONCAT(domainlist_by_group.group_id) group_ids,domain,datetime(date_added,'unixepoch','localtime') date_added,datetime(date_modified,'unixepoch','localtime') date_modified,comment FROM domainlist LEFT JOIN domainlist_by_group ON domainlist.id = domainlist_by_group.domainlist_id GROUP BY id;" "5 11 7 12 100 19 19 50" show_db_entries "Domainlist (0/1 = exact white-/blacklist, 2/3 = regex white-/blacklist)" "SELECT id,CASE type WHEN '0' THEN '0 ' WHEN '1' THEN ' 1 ' WHEN '2' THEN ' 2 ' WHEN '3' THEN ' 3' ELSE type END type,CASE enabled WHEN '0' THEN ' 0' WHEN '1' THEN ' 1' ELSE enabled END enabled,GROUP_CONCAT(domainlist_by_group.group_id) group_ids,domain,datetime(date_added,'unixepoch','localtime') date_added,datetime(date_modified,'unixepoch','localtime') date_modified,comment FROM domainlist LEFT JOIN domainlist_by_group ON domainlist.id = domainlist_by_group.domainlist_id GROUP BY id;" "5 4 7 12 100 19 19 50"
} }
show_clients() { show_clients() {
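On the master side, the show_groups(), show_adlists() and show_domainlist() queries above render numeric columns as readable strings: enabled becomes no/yes and the domainlist type becomes exact-allow/exact-deny/regex-allow/regex-deny, instead of the raw 0/1 codes used on the v6.0 side. The same CASE mapping can be tried read-only against the gravity database; a sketch assuming the default /etc/pihole/gravity.db location:

# Render the enabled flag as yes/no instead of 1/0 (read-only query).
sqlite3 -header -column /etc/pihole/gravity.db \
  "SELECT id,
          CASE enabled WHEN '0' THEN 'no' WHEN '1' THEN 'yes' ELSE enabled END AS enabled,
          name
     FROM \"group\";"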

View File

@ -9,12 +9,10 @@
# Please see LICENSE file for your rights under this license. # Please see LICENSE file for your rights under this license.
colfile="/opt/pihole/COL_TABLE" colfile="/opt/pihole/COL_TABLE"
# shellcheck source="./advanced/Scripts/COL_TABLE"
source ${colfile} source ${colfile}
readonly PI_HOLE_SCRIPT_DIR="/opt/pihole" readonly PI_HOLE_SCRIPT_DIR="/opt/pihole"
utilsfile="${PI_HOLE_SCRIPT_DIR}/utils.sh" utilsfile="${PI_HOLE_SCRIPT_DIR}/utils.sh"
# shellcheck source="./advanced/Scripts/utils.sh"
source "${utilsfile}" source "${utilsfile}"
# In case we're running at the same time as a system logrotate, use a # In case we're running at the same time as a system logrotate, use a
@ -37,46 +35,6 @@ FTLFILE=$(getFTLConfigValue "files.log.ftl")
if [ -z "$FTLFILE" ]; then if [ -z "$FTLFILE" ]; then
FTLFILE="/var/log/pihole/FTL.log" FTLFILE="/var/log/pihole/FTL.log"
fi fi
WEBFILE=$(getFTLConfigValue "files.log.webserver")
if [ -z "$WEBFILE" ]; then
WEBFILE="/var/log/pihole/webserver.log"
fi
# Helper function to handle log rotation for a single file
rotate_log() {
# This function copies x.log over to x.log.1
# and then empties x.log
# Note that moving the file is not an option, as
# dnsmasq would happily continue writing into the
# moved file (it will have the same file handler)
local logfile="$1"
if [[ "$*" != *"quiet"* ]]; then
echo -ne " ${INFO} Rotating ${logfile} ..."
fi
cp -p "${logfile}" "${logfile}.1"
echo " " > "${logfile}"
chmod 640 "${logfile}"
if [[ "$*" != *"quiet"* ]]; then
echo -e "${OVER} ${TICK} Rotated ${logfile} ..."
fi
}
# Helper function to handle log flushing for a single file
flush_log() {
local logfile="$1"
if [[ "$*" != *"quiet"* ]]; then
echo -ne " ${INFO} Flushing ${logfile} ..."
fi
echo " " > "${logfile}"
chmod 640 "${logfile}"
if [ -f "${logfile}.1" ]; then
echo " " > "${logfile}.1"
chmod 640 "${logfile}.1"
fi
if [[ "$*" != *"quiet"* ]]; then
echo -e "${OVER} ${TICK} Flushed ${logfile} ..."
fi
}
if [[ "$*" == *"once"* ]]; then if [[ "$*" == *"once"* ]]; then
# Nightly logrotation # Nightly logrotation
@ -86,19 +44,64 @@ if [[ "$*" == *"once"* ]]; then
if [[ "$*" != *"quiet"* ]]; then if [[ "$*" != *"quiet"* ]]; then
echo -ne " ${INFO} Running logrotate ..." echo -ne " ${INFO} Running logrotate ..."
fi fi
mkdir -p "${STATEFILE%/*}"
/usr/sbin/logrotate --force --state "${STATEFILE}" /etc/pihole/logrotate /usr/sbin/logrotate --force --state "${STATEFILE}" /etc/pihole/logrotate
else else
# Handle rotation for each log file # Copy pihole.log over to pihole.log.1
rotate_log "${LOGFILE}" # and empty out pihole.log
rotate_log "${FTLFILE}" # Note that moving the file is not an option, as
rotate_log "${WEBFILE}" # dnsmasq would happily continue writing into the
# moved file (it will have the same file handler)
if [[ "$*" != *"quiet"* ]]; then
echo -ne " ${INFO} Rotating ${LOGFILE} ..."
fi
cp -p "${LOGFILE}" "${LOGFILE}.1"
echo " " > "${LOGFILE}"
chmod 640 "${LOGFILE}"
if [[ "$*" != *"quiet"* ]]; then
echo -e "${OVER} ${TICK} Rotated ${LOGFILE} ..."
fi
# Copy FTL.log over to FTL.log.1
# and empty out FTL.log
if [[ "$*" != *"quiet"* ]]; then
echo -ne " ${INFO} Rotating ${FTLFILE} ..."
fi
cp -p "${FTLFILE}" "${FTLFILE}.1"
echo " " > "${FTLFILE}"
chmod 640 "${FTLFILE}"
if [[ "$*" != *"quiet"* ]]; then
echo -e "${OVER} ${TICK} Rotated ${FTLFILE} ..."
fi
fi fi
else else
# Manual flushing # Manual flushing
flush_log "${LOGFILE}"
flush_log "${FTLFILE}" # Flush both pihole.log and pihole.log.1 (if existing)
flush_log "${WEBFILE}" if [[ "$*" != *"quiet"* ]]; then
echo -ne " ${INFO} Flushing ${LOGFILE} ..."
fi
echo " " > "${LOGFILE}"
chmod 640 "${LOGFILE}"
if [ -f "${LOGFILE}.1" ]; then
echo " " > "${LOGFILE}.1"
chmod 640 "${LOGFILE}.1"
fi
if [[ "$*" != *"quiet"* ]]; then
echo -e "${OVER} ${TICK} Flushed ${LOGFILE} ..."
fi
# Flush both FTL.log and FTL.log.1 (if existing)
if [[ "$*" != *"quiet"* ]]; then
echo -ne " ${INFO} Flushing ${FTLFILE} ..."
fi
echo " " > "${FTLFILE}"
chmod 640 "${FTLFILE}"
if [ -f "${FTLFILE}.1" ]; then
echo " " > "${FTLFILE}.1"
chmod 640 "${FTLFILE}.1"
fi
if [[ "$*" != *"quiet"* ]]; then
echo -e "${OVER} ${TICK} Flushed ${FTLFILE} ..."
fi
if [[ "$*" != *"quiet"* ]]; then if [[ "$*" != *"quiet"* ]]; then
echo -ne " ${INFO} Flushing database, DNS resolution temporarily unavailable ..." echo -ne " ${INFO} Flushing database, DNS resolution temporarily unavailable ..."
@ -116,3 +119,4 @@ else
echo -e "${OVER} ${TICK} Deleted ${deleted} queries from long-term query database" echo -e "${OVER} ${TICK} Deleted ${deleted} queries from long-term query database"
fi fi
fi fi
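The master side of the logrotate script above factors the repeated logic into rotate_log() and flush_log() helpers, while v6.0 repeats the same steps inline for each log. In both versions the file is copied and then truncated rather than moved, because dnsmasq keeps writing to the original file handle. A stripped-down sketch of that copy-then-truncate rotation (the rotate_log_once name is illustrative):

# Rotate one log in place, keeping a single previous generation as <file>.1.
rotate_log_once() {
    local logfile="${1:-/var/log/pihole/pihole.log}"
    cp -p "${logfile}" "${logfile}.1"   # preserve ownership and mode on the copy
    : > "${logfile}"                    # truncate the live file instead of moving it
    chmod 640 "${logfile}"
}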

View File

@ -1,84 +0,0 @@
#!/usr/bin/env bash
# Pi-hole: A black hole for Internet advertisements
# (c) 2019 Pi-hole, LLC (https://pi-hole.net)
# Network-wide ad blocking via your own hardware.
#
# Network table flush
#
# This file is copyright under the latest version of the EUPL.
# Please see LICENSE file for your rights under this license.
coltable="/opt/pihole/COL_TABLE"
if [[ -f ${coltable} ]]; then
# shellcheck source="./advanced/Scripts/COL_TABLE"
source ${coltable}
fi
readonly PI_HOLE_SCRIPT_DIR="/opt/pihole"
utilsfile="${PI_HOLE_SCRIPT_DIR}/utils.sh"
# shellcheck source=./advanced/Scripts/utils.sh
source "${utilsfile}"
# Source api functions
# shellcheck source="./advanced/Scripts/api.sh"
. "${PI_HOLE_SCRIPT_DIR}/api.sh"
flushNetwork(){
local output
echo -ne " ${INFO} Flushing network table ..."
local data status error
# Authenticate with FTL
LoginAPI
# send query again
data=$(PostFTLData "action/flush/network" "" "status")
# Separate the status from the data
status=$(printf %s "${data#"${data%???}"}")
data=$(printf %s "${data%???}")
# If there is an .error object in the returned data, display it
local error
error=$(jq --compact-output <<< "${data}" '.error')
if [[ $error != "null" && $error != "" ]]; then
echo -e "${OVER} ${CROSS} Failed to flush the network table:"
echo -e " $(jq <<< "${data}" '.error')"
LogoutAPI
exit 1
elif [[ "${status}" == "200" ]]; then
echo -e "${OVER} ${TICK} Flushed network table"
fi
# Delete session
LogoutAPI
}
flushArp(){
# Flush ARP cache of the host
if ! output=$(ip -s -s neigh flush all 2>&1); then
echo -e "${OVER} ${CROSS} Failed to flush ARP cache"
echo " Output: ${output}"
return 1
fi
}
# Process all options (if present)
while [ "$#" -gt 0 ]; do
case "$1" in
"--arp" ) doARP=true ;;
esac
shift
done
flushNetwork
if [[ "${doARP}" == true ]]; then
echo -ne " ${INFO} Flushing ARP cache"
if flushArp; then
echo -e "${OVER} ${TICK} Flushed ARP cache"
fi
fi
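The master-only script above asks PostFTLData() to append the HTTP status to the response body and then splits the two with plain parameter expansion: ${data%???} drops the last three characters, and ${data#"${data%???}"} keeps only those three. A self-contained illustration with a dummy payload:

# Split "<body><3-digit status>" without any external tools.
data='{"took":0.003}200'                      # dummy payload: JSON body plus HTTP status
status=$(printf %s "${data#"${data%???}"}")   # last three characters  -> 200
body=$(printf %s "${data%???}")               # everything before them -> the JSON body
printf 'status=%s body=%s\n' "${status}" "${body}"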

View File

@ -1,4 +1,9 @@
#!/usr/bin/env sh #!/usr/bin/env sh
# shellcheck disable=SC1090
# Ignore warning about `local` being undefined in POSIX
# shellcheck disable=SC3043
# https://github.com/koalaman/shellcheck/wiki/SC3043#exceptions
# Pi-hole: A black hole for Internet advertisements # Pi-hole: A black hole for Internet advertisements
# (c) 2023 Pi-hole, LLC (https://pi-hole.net) # (c) 2023 Pi-hole, LLC (https://pi-hole.net)
@ -17,11 +22,9 @@ domain=""
# Source color table # Source color table
colfile="/opt/pihole/COL_TABLE" colfile="/opt/pihole/COL_TABLE"
# shellcheck source="./advanced/Scripts/COL_TABLE"
. "${colfile}" . "${colfile}"
# Source api functions # Source api functions
# shellcheck source="./advanced/Scripts/api.sh"
. "${PI_HOLE_INSTALL_DIR}/api.sh" . "${PI_HOLE_INSTALL_DIR}/api.sh"
Help() { Help() {
@ -37,16 +40,19 @@ Options:
} }
GenerateOutput() { GenerateOutput() {
local counts data num_gravity num_lists search_type_str local data gravity_data lists_data num_gravity num_lists search_type_str
local gravity_data_csv lists_data_csv line url type color local gravity_data_csv lists_data_csv line current_domain url type color
data="${1}" data="${1}"
# Get count of list and gravity matches # construct a new json for the list results where each object contains the domain and the related type
# Use JQ to count number of entries in lists and gravity lists_data=$(printf %s "${data}" | jq '.search.domains | [.[] | {domain: .domain, type: .type}]')
# (output is number of list matches then number of gravity matches)
counts=$(printf %s "${data}" | jq --raw-output '(.search.domains | length), (.search.gravity | group_by(.address,.type) | length)') # construct a new json for the gravity results where each object contains the adlist URL and the related domains
num_lists=$(echo "$counts" | sed -n '1p') gravity_data=$(printf %s "${data}" | jq '.search.gravity | group_by(.address,.type) | map({ address: (.[0].address), type: (.[0].type), domains: [.[] | .domain] })')
num_gravity=$(echo "$counts" | sed -n '2p')
# number of objects in each json
num_gravity=$(printf %s "${gravity_data}" | jq length)
num_lists=$(printf %s "${lists_data}" | jq length)
if [ "${partial}" = true ]; then if [ "${partial}" = true ]; then
search_type_str="partially" search_type_str="partially"
@ -59,7 +65,7 @@ GenerateOutput() {
if [ "${num_lists}" -gt 0 ]; then if [ "${num_lists}" -gt 0 ]; then
# Convert the data to a csv, each line is a "domain,type" string # Convert the data to a csv, each line is a "domain,type" string
# not using jq's @csv here as it quotes each value individually # not using jq's @csv here as it quotes each value individually
lists_data_csv=$(printf %s "${data}" | jq --raw-output '.search.domains | map([.domain, .type] | join(",")) | join("\n")') lists_data_csv=$(printf %s "${lists_data}" | jq --raw-output '.[] | [.domain, .type] | join(",")')
# Generate output for each csv line, separating line in a domain and type substring at the ',' # Generate output for each csv line, separating line in a domain and type substring at the ','
echo "${lists_data_csv}" | while read -r line; do echo "${lists_data_csv}" | while read -r line; do
@ -68,11 +74,11 @@ GenerateOutput() {
fi fi
# Results from gravity # Results from gravity
printf "%s\n\n" "Found ${num_gravity} lists ${search_type_str} matching '${COL_BLUE}${domain}${COL_NC}'." printf "%s\n\n" "Found ${num_gravity} adlists ${search_type_str} matching '${COL_BLUE}${domain}${COL_NC}'."
if [ "${num_gravity}" -gt 0 ]; then if [ "${num_gravity}" -gt 0 ]; then
# Convert the data to a csv, each line is a "URL,type,domain,domain,...." string # Convert the data to a csv, each line is a "URL,domain,domain,...." string
# not using jq's @csv here as it quotes each value individually # not using jq's @csv here as it quotes each value individually
gravity_data_csv=$(printf %s "${data}" | jq --raw-output '.search.gravity | group_by(.address,.type) | map([.[0].address, .[0].type, (.[] | .domain)] | join(",")) | join("\n")') gravity_data_csv=$(printf %s "${gravity_data}" | jq --raw-output '.[] | [.address, .type, .domains[]] | join(",")')
# Generate line-by-line output for each csv line # Generate line-by-line output for each csv line
echo "${gravity_data_csv}" | while read -r line; do echo "${gravity_data_csv}" | while read -r line; do
@ -94,8 +100,15 @@ GenerateOutput() {
# cut off type, leaving "domain,domain,...." # cut off type, leaving "domain,domain,...."
line=${line#*,} line=${line#*,}
# Replace commas with newlines and format output # print each domain and remove it from the string until nothing is left
echo "${line}" | sed 's/,/\n/g' | sed "s/^/ - ${COL_GREEN}/" | sed "s/$/${COL_NC}/" while [ ${#line} -gt 0 ]; do
current_domain=${line%%,*}
printf ' - %s\n' "${COL_GREEN}${current_domain}${COL_NC}"
# we need to remove the current_domain and the comma in two steps because
# the last domain won't have a trailing comma and the while loop wouldn't exit
line=${line#"${current_domain}"}
line=${line#,}
done
printf "\n\n" printf "\n\n"
done done
fi fi
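Both variants of GenerateOutput() above end up printing each comma-joined list of domains one per line: the master side pipes the line through sed, while the v6.0 side walks the string with POSIX parameter expansion, removing one domain and its comma per iteration. A minimal standalone version of that splitting loop (the sample data is illustrative):

#!/usr/bin/env sh
# Split a comma-joined string one field at a time using only parameter expansion.
line="ads.example.com,track.example.net,pixel.example.org"
while [ ${#line} -gt 0 ]; do
    current_domain=${line%%,*}          # everything up to the first comma
    printf ' - %s\n' "${current_domain}"
    line=${line#"${current_domain}"}    # drop the field just printed
    line=${line#,}                      # drop the separator (absent on the last field)
done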

View File

@ -12,31 +12,26 @@
# Variables # Variables
readonly ADMIN_INTERFACE_GIT_URL="https://github.com/pi-hole/web.git" readonly ADMIN_INTERFACE_GIT_URL="https://github.com/pi-hole/web.git"
readonly ADMIN_INTERFACE_DIR="/var/www/html/admin"
readonly PI_HOLE_GIT_URL="https://github.com/pi-hole/pi-hole.git" readonly PI_HOLE_GIT_URL="https://github.com/pi-hole/pi-hole.git"
readonly PI_HOLE_FILES_DIR="/etc/.pihole" readonly PI_HOLE_FILES_DIR="/etc/.pihole"
# shellcheck disable=SC2034
SKIP_INSTALL=true SKIP_INSTALL=true
# when --check-only is passed to this script, it will not perform the actual update # when --check-only is passed to this script, it will not perform the actual update
CHECK_ONLY=false CHECK_ONLY=false
# shellcheck source="./automated install/basic-install.sh" # shellcheck disable=SC1090
source "${PI_HOLE_FILES_DIR}/automated install/basic-install.sh" source "${PI_HOLE_FILES_DIR}/automated install/basic-install.sh"
# shellcheck source=./advanced/Scripts/COL_TABLE # shellcheck disable=SC1091
source "/opt/pihole/COL_TABLE" source "/opt/pihole/COL_TABLE"
# shellcheck source="./advanced/Scripts/utils.sh"
source "${PI_HOLE_INSTALL_DIR}/utils.sh"
# is_repo() sourced from basic-install.sh # is_repo() sourced from basic-install.sh
# make_repo() sourced from basic-install.sh # make_repo() sourced from basic-install.sh
# update_repo() sourced from basic-install.sh # update_repo() sourced from basic-install.sh
# getGitFiles() sourced from basic-install.sh # getGitFiles() sourced from basic-install.sh
# FTLcheckUpdate() sourced from basic-install.sh # FTLcheckUpdate() sourced from basic-install.sh
# getFTLConfigValue() sourced from utils.sh
# Honour configured paths for the web application.
ADMIN_INTERFACE_DIR=$(getFTLConfigValue "webserver.paths.webroot")$(getFTLConfigValue "webserver.paths.webhome")
readonly ADMIN_INTERFACE_DIR
GitCheckUpdateAvail() { GitCheckUpdateAvail() {
local directory local directory
@ -47,7 +42,7 @@ GitCheckUpdateAvail() {
# Fetch latest changes in this repo # Fetch latest changes in this repo
if ! git fetch --quiet origin ; then if ! git fetch --quiet origin ; then
echo -e "\\n ${COL_RED}Error: Unable to update local repository. Contact Pi-hole Support.${COL_NC}" echo -e "\\n ${COL_LIGHT_RED}Error: Unable to update local repository. Contact Pi-hole Support.${COL_NC}"
exit 1 exit 1
fi fi
@ -76,13 +71,13 @@ GitCheckUpdateAvail() {
if [[ "${#LOCAL}" == 0 ]]; then if [[ "${#LOCAL}" == 0 ]]; then
echo -e "\\n ${COL_RED}Error: Local revision could not be obtained, please contact Pi-hole Support" echo -e "\\n ${COL_LIGHT_RED}Error: Local revision could not be obtained, please contact Pi-hole Support"
echo -e " Additional debugging output:${COL_NC}" echo -e " Additional debugging output:${COL_NC}"
git status git status
exit 1 exit 1
fi fi
if [[ "${#REMOTE}" == 0 ]]; then if [[ "${#REMOTE}" == 0 ]]; then
echo -e "\\n ${COL_RED}Error: Remote revision could not be obtained, please contact Pi-hole Support" echo -e "\\n ${COL_LIGHT_RED}Error: Remote revision could not be obtained, please contact Pi-hole Support"
echo -e " Additional debugging output:${COL_NC}" echo -e " Additional debugging output:${COL_NC}"
git status git status
exit 1 exit 1
@ -103,7 +98,7 @@ GitCheckUpdateAvail() {
} }
main() { main() {
local basicError="\\n ${COL_RED}Unable to complete update, please contact Pi-hole Support${COL_NC}" local basicError="\\n ${COL_LIGHT_RED}Unable to complete update, please contact Pi-hole Support${COL_NC}"
local core_update local core_update
local web_update local web_update
local FTL_update local FTL_update
@ -112,6 +107,8 @@ main() {
web_update=false web_update=false
FTL_update=false FTL_update=false
# Perform an OS check to ensure we're on an appropriate operating system
os_check
# Install packages used by this installation script (necessary if users have removed e.g. git from their systems) # Install packages used by this installation script (necessary if users have removed e.g. git from their systems)
package_manager_detect package_manager_detect
@ -120,7 +117,7 @@ main() {
# This is unlikely # This is unlikely
if ! is_repo "${PI_HOLE_FILES_DIR}" ; then if ! is_repo "${PI_HOLE_FILES_DIR}" ; then
echo -e "\\n ${COL_RED}Error: Core Pi-hole repo is missing from system!" echo -e "\\n ${COL_LIGHT_RED}Error: Core Pi-hole repo is missing from system!"
echo -e " Please re-run install script from https://pi-hole.net${COL_NC}" echo -e " Please re-run install script from https://pi-hole.net${COL_NC}"
exit 1; exit 1;
fi fi
@ -132,11 +129,11 @@ main() {
echo -e " ${INFO} Pi-hole Core:\\t${COL_YELLOW}update available${COL_NC}" echo -e " ${INFO} Pi-hole Core:\\t${COL_YELLOW}update available${COL_NC}"
else else
core_update=false core_update=false
echo -e " ${INFO} Pi-hole Core:\\t${COL_GREEN}up to date${COL_NC}" echo -e " ${INFO} Pi-hole Core:\\t${COL_LIGHT_GREEN}up to date${COL_NC}"
fi fi
if ! is_repo "${ADMIN_INTERFACE_DIR}" ; then if ! is_repo "${ADMIN_INTERFACE_DIR}" ; then
echo -e "\\n ${COL_RED}Error: Web Admin repo is missing from system!" echo -e "\\n ${COL_LIGHT_RED}Error: Web Admin repo is missing from system!"
echo -e " Please re-run install script from https://pi-hole.net${COL_NC}" echo -e " Please re-run install script from https://pi-hole.net${COL_NC}"
exit 1; exit 1;
fi fi
@ -146,7 +143,7 @@ main() {
echo -e " ${INFO} Web Interface:\\t${COL_YELLOW}update available${COL_NC}" echo -e " ${INFO} Web Interface:\\t${COL_YELLOW}update available${COL_NC}"
else else
web_update=false web_update=false
echo -e " ${INFO} Web Interface:\\t${COL_GREEN}up to date${COL_NC}" echo -e " ${INFO} Web Interface:\\t${COL_LIGHT_GREEN}up to date${COL_NC}"
fi fi
local funcOutput local funcOutput
@ -160,18 +157,17 @@ main() {
else else
case $? in case $? in
1) 1)
echo -e " ${INFO} FTL:\\t\\t${COL_GREEN}up to date${COL_NC}" echo -e " ${INFO} FTL:\\t\\t${COL_LIGHT_GREEN}up to date${COL_NC}"
;; ;;
2) 2)
echo -e " ${INFO} FTL:\\t\\t${COL_RED}Branch is not available.${COL_NC}\\n\\t\\t\\tUse ${COL_GREEN}pihole checkout ftl [branchname]${COL_NC} to switch to a valid branch." echo -e " ${INFO} FTL:\\t\\t${COL_LIGHT_RED}Branch is not available.${COL_NC}\\n\\t\\t\\tUse ${COL_LIGHT_GREEN}pihole checkout ftl [branchname]${COL_NC} to switch to a valid branch."
exit 1
;; ;;
3) 3)
echo -e " ${INFO} FTL:\\t\\t${COL_RED}Something has gone wrong, cannot reach download server${COL_NC}" echo -e " ${INFO} FTL:\\t\\t${COL_LIGHT_RED}Something has gone wrong, cannot reach download server${COL_NC}"
exit 1 exit 1
;; ;;
*) *)
echo -e " ${INFO} FTL:\\t\\t${COL_RED}Something has gone wrong, contact support${COL_NC}" echo -e " ${INFO} FTL:\\t\\t${COL_LIGHT_RED}Something has gone wrong, contact support${COL_NC}"
exit 1 exit 1
esac esac
FTL_update=false FTL_update=false
@ -188,7 +184,7 @@ main() {
if [[ ! "${ftlBranch}" == "master" && ! "${ftlBranch}" == "development" ]]; then if [[ ! "${ftlBranch}" == "master" && ! "${ftlBranch}" == "development" ]]; then
# Notify user that they are on a custom branch which might mean they are lost # behind if a branch was merged to development and got abandoned
# behind if a branch was merged to development and got abandoned # behind if a branch was merged to development and got abandoned
printf " %b %bWarning:%b You are using FTL from a custom branch (%s) and might be missing future releases.\\n" "${INFO}" "${COL_RED}" "${COL_NC}" "${ftlBranch}" printf " %b %bWarning:%b You are using FTL from a custom branch (%s) and might be missing future releases.\\n" "${INFO}" "${COL_LIGHT_RED}" "${COL_NC}" "${ftlBranch}"
fi fi
if [[ "${core_update}" == false && "${web_update}" == false && "${FTL_update}" == false ]]; then if [[ "${core_update}" == false && "${web_update}" == false && "${FTL_update}" == false ]]; then
@ -213,7 +209,7 @@ main() {
echo "" echo ""
echo -e " ${INFO} Pi-hole Web Admin files out of date, updating local repo." echo -e " ${INFO} Pi-hole Web Admin files out of date, updating local repo."
getGitFiles "${ADMIN_INTERFACE_DIR}" "${ADMIN_INTERFACE_GIT_URL}" getGitFiles "${ADMIN_INTERFACE_DIR}" "${ADMIN_INTERFACE_GIT_URL}"
echo -e " ${INFO} If you had made any changes in '${ADMIN_INTERFACE_DIR}', they have been stashed using 'git stash'" echo -e " ${INFO} If you had made any changes in '/var/www/html/admin/', they have been stashed using 'git stash'"
fi fi
if [[ "${FTL_update}" == true ]]; then if [[ "${FTL_update}" == true ]]; then
@ -222,7 +218,7 @@ main() {
fi fi
if [[ "${FTL_update}" == true || "${core_update}" == true ]]; then if [[ "${FTL_update}" == true || "${core_update}" == true ]]; then
${PI_HOLE_FILES_DIR}/automated\ install/basic-install.sh --repair --unattended || \ ${PI_HOLE_FILES_DIR}/automated\ install/basic-install.sh --reconfigure --unattended || \
echo -e "${basicError}" && exit 1 echo -e "${basicError}" && exit 1
fi fi
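GitCheckUpdateAvail() in the hunks above fetches origin and compares a local revision against a remote one to decide whether an update is pending; the exact rev-parse calls sit outside the visible hunks, so the sketch below only approximates the idea, and the @{upstream} refspec is an assumption rather than what the script necessarily uses:

# Rough sketch: succeed (exit 0) when the checked-out branch differs from its upstream.
update_available() {
    local directory="${1:-/etc/.pihole}"
    (
        cd "${directory}" || exit 2
        git fetch --quiet origin || exit 2
        local_rev=$(git rev-parse HEAD)            # assumed: local revision
        remote_rev=$(git rev-parse '@{upstream}')  # assumed: remote tracking revision
        [ "${local_rev}" != "${remote_rev}" ]
    )
}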

View File

@ -39,12 +39,9 @@ function get_remote_hash() {
} }
# Source the utils file for addOrEditKeyValPair() # Source the utils file for addOrEditKeyValPair()
# shellcheck source="./advanced/Scripts/utils.sh" # shellcheck disable=SC1091
. /opt/pihole/utils.sh . /opt/pihole/utils.sh
ADMIN_INTERFACE_DIR=$(getFTLConfigValue "webserver.paths.webroot")$(getFTLConfigValue "webserver.paths.webhome")
readonly ADMIN_INTERFACE_DIR
# Remove the below three legacy files if they exist # Remove the below three legacy files if they exist
rm -f "/etc/pihole/GitHubVersions" rm -f "/etc/pihole/GitHubVersions"
rm -f "/etc/pihole/localbranches" rm -f "/etc/pihole/localbranches"
@ -88,13 +85,13 @@ addOrEditKeyValPair "${VERSION_FILE}" "GITHUB_CORE_HASH" "${GITHUB_CORE_HASH}"
# get Web versions # get Web versions
WEB_VERSION="$(get_local_version "${ADMIN_INTERFACE_DIR}")" WEB_VERSION="$(get_local_version /var/www/html/admin)"
addOrEditKeyValPair "${VERSION_FILE}" "WEB_VERSION" "${WEB_VERSION}" addOrEditKeyValPair "${VERSION_FILE}" "WEB_VERSION" "${WEB_VERSION}"
WEB_BRANCH="$(get_local_branch "${ADMIN_INTERFACE_DIR}")" WEB_BRANCH="$(get_local_branch /var/www/html/admin)"
addOrEditKeyValPair "${VERSION_FILE}" "WEB_BRANCH" "${WEB_BRANCH}" addOrEditKeyValPair "${VERSION_FILE}" "WEB_BRANCH" "${WEB_BRANCH}"
WEB_HASH="$(get_local_hash "${ADMIN_INTERFACE_DIR}")" WEB_HASH="$(get_local_hash /var/www/html/admin)"
addOrEditKeyValPair "${VERSION_FILE}" "WEB_HASH" "${WEB_HASH}" addOrEditKeyValPair "${VERSION_FILE}" "WEB_HASH" "${WEB_HASH}"
GITHUB_WEB_VERSION="$(get_remote_version web "${WEB_BRANCH}")" GITHUB_WEB_VERSION="$(get_remote_version web "${WEB_BRANCH}")"

View File

@ -1,4 +1,5 @@
#!/usr/bin/env sh #!/usr/bin/env sh
# shellcheck disable=SC3043 #https://github.com/koalaman/shellcheck/wiki/SC3043#exceptions
# Pi-hole: A black hole for Internet advertisements # Pi-hole: A black hole for Internet advertisements
# (c) 2017 Pi-hole, LLC (https://pi-hole.net) # (c) 2017 Pi-hole, LLC (https://pi-hole.net)
@ -73,9 +74,7 @@ getFTLPID() {
# Example getFTLConfigValue dns.piholePTR # Example getFTLConfigValue dns.piholePTR
####################### #######################
getFTLConfigValue(){ getFTLConfigValue(){
# Pipe to cat to avoid pihole-FTL assuming this is an interactive command pihole-FTL --config -q "${1}"
# returning colored output.
pihole-FTL --config -q "${1}" | cat
} }
####################### #######################
@ -88,17 +87,9 @@ getFTLConfigValue(){
# setFTLConfigValue dns.upstreams '[ "8.8.8.8" , "8.8.4.4" ]' # setFTLConfigValue dns.upstreams '[ "8.8.8.8" , "8.8.4.4" ]'
####################### #######################
setFTLConfigValue(){ setFTLConfigValue(){
local err pihole-FTL --config "${1}" "${2}" >/dev/null
{ pihole-FTL --config "${1}" "${2}" >/dev/null; err="$?"; } || true if [[ $? -eq 5 ]]; then
echo -e " ${CROSS} ${1} set by environment variable. Please unset it to use this function"
case $err in exit 5
0) ;; fi
5)
# FTL returns 5 if the value was set by an environment variable and is therefore read-only
printf " %s %s set by environment variable. Please unset it to use this function\n" "${CROSS}" "${1}";
exit 5;;
*)
printf " %s Failed to set %s. Try with sudo power\n" "${CROSS}" "${1}"
exit 1
esac
} }
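The utils.sh hunks above harden the pihole-FTL --config wrappers: getFTLConfigValue() pipes through cat so FTL does not colourise output for an interactive terminal, and setFTLConfigValue() maps exit code 5 to the case where the key is pinned by an environment variable. Typical calls, taken from the examples already quoted in the function comments (this assumes utils.sh has been sourced and pihole-FTL is installed):

# Read a single FTL configuration value.
piholePTR=$(getFTLConfigValue dns.piholePTR)
echo "dns.piholePTR is currently: ${piholePTR}"

# Write a value; the wrapper exits with 5 if the key is forced by an environment variable.
setFTLConfigValue dns.upstreams '[ "8.8.8.8" , "8.8.4.4" ]'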

View File

@ -8,16 +8,20 @@
# This file is copyright under the latest version of the EUPL. # This file is copyright under the latest version of the EUPL.
# Please see LICENSE file for your rights under this license. # Please see LICENSE file for your rights under this license.
# Source the versions file populated by updatechecker.sh # Ignore warning about `local` being undefined in POSIX
# shellcheck disable=SC3043
# https://github.com/koalaman/shellcheck/wiki/SC3043#exceptions
# Source the versions file populated by updatechecker.sh
cachedVersions="/etc/pihole/versions" cachedVersions="/etc/pihole/versions"
if [ -f ${cachedVersions} ]; then if [ -f ${cachedVersions} ]; then
# shellcheck source=/dev/null # shellcheck disable=SC1090
. "$cachedVersions" . "$cachedVersions"
else else
echo "Could not find /etc/pihole/versions. Running update now." echo "Could not find /etc/pihole/versions. Running update now."
pihole updatechecker pihole updatechecker
# shellcheck source=/dev/null # shellcheck disable=SC1090
. "$cachedVersions" . "$cachedVersions"
fi fi

View File

@ -43,8 +43,8 @@ CREATE TABLE adlist
CREATE TABLE adlist_by_group CREATE TABLE adlist_by_group
( (
adlist_id INTEGER NOT NULL REFERENCES adlist (id) ON DELETE CASCADE, adlist_id INTEGER NOT NULL REFERENCES adlist (id),
group_id INTEGER NOT NULL REFERENCES "group" (id) ON DELETE CASCADE, group_id INTEGER NOT NULL REFERENCES "group" (id),
PRIMARY KEY (adlist_id, group_id) PRIMARY KEY (adlist_id, group_id)
); );
@ -66,7 +66,7 @@ CREATE TABLE info
value TEXT NOT NULL value TEXT NOT NULL
); );
INSERT INTO "info" VALUES('version','20'); INSERT INTO "info" VALUES('version','19');
/* This is a flag to indicate if gravity was restored from a backup /* This is a flag to indicate if gravity was restored from a backup
false = not restored, false = not restored,
failed = restoration failed due to no backup failed = restoration failed due to no backup
@ -75,8 +75,8 @@ INSERT INTO "info" VALUES('gravity_restored','false');
CREATE TABLE domainlist_by_group CREATE TABLE domainlist_by_group
( (
domainlist_id INTEGER NOT NULL REFERENCES domainlist (id) ON DELETE CASCADE, domainlist_id INTEGER NOT NULL REFERENCES domainlist (id),
group_id INTEGER NOT NULL REFERENCES "group" (id) ON DELETE CASCADE, group_id INTEGER NOT NULL REFERENCES "group" (id),
PRIMARY KEY (domainlist_id, group_id) PRIMARY KEY (domainlist_id, group_id)
); );
@ -91,8 +91,8 @@ CREATE TABLE client
CREATE TABLE client_by_group CREATE TABLE client_by_group
( (
client_id INTEGER NOT NULL REFERENCES client (id) ON DELETE CASCADE, client_id INTEGER NOT NULL REFERENCES client (id),
group_id INTEGER NOT NULL REFERENCES "group" (id) ON DELETE CASCADE, group_id INTEGER NOT NULL REFERENCES "group" (id),
PRIMARY KEY (client_id, group_id) PRIMARY KEY (client_id, group_id)
); );
@ -111,7 +111,7 @@ CREATE TRIGGER tr_domainlist_update AFTER UPDATE ON domainlist
UPDATE domainlist SET date_modified = (cast(strftime('%s', 'now') as int)) WHERE domain = NEW.domain; UPDATE domainlist SET date_modified = (cast(strftime('%s', 'now') as int)) WHERE domain = NEW.domain;
END; END;
CREATE VIEW vw_allowlist AS SELECT domain, domainlist.id AS id, domainlist_by_group.group_id AS group_id CREATE VIEW vw_whitelist AS SELECT domain, domainlist.id AS id, domainlist_by_group.group_id AS group_id
FROM domainlist FROM domainlist
LEFT JOIN domainlist_by_group ON domainlist_by_group.domainlist_id = domainlist.id LEFT JOIN domainlist_by_group ON domainlist_by_group.domainlist_id = domainlist.id
LEFT JOIN "group" ON "group".id = domainlist_by_group.group_id LEFT JOIN "group" ON "group".id = domainlist_by_group.group_id
@ -119,7 +119,7 @@ CREATE VIEW vw_allowlist AS SELECT domain, domainlist.id AS id, domainlist_by_gr
AND domainlist.type = 0 AND domainlist.type = 0
ORDER BY domainlist.id; ORDER BY domainlist.id;
CREATE VIEW vw_denylist AS SELECT domain, domainlist.id AS id, domainlist_by_group.group_id AS group_id CREATE VIEW vw_blacklist AS SELECT domain, domainlist.id AS id, domainlist_by_group.group_id AS group_id
FROM domainlist FROM domainlist
LEFT JOIN domainlist_by_group ON domainlist_by_group.domainlist_id = domainlist.id LEFT JOIN domainlist_by_group ON domainlist_by_group.domainlist_id = domainlist.id
LEFT JOIN "group" ON "group".id = domainlist_by_group.group_id LEFT JOIN "group" ON "group".id = domainlist_by_group.group_id
@ -127,7 +127,7 @@ CREATE VIEW vw_denylist AS SELECT domain, domainlist.id AS id, domainlist_by_gro
AND domainlist.type = 1 AND domainlist.type = 1
ORDER BY domainlist.id; ORDER BY domainlist.id;
CREATE VIEW vw_regex_allowlist AS SELECT domain, domainlist.id AS id, domainlist_by_group.group_id AS group_id CREATE VIEW vw_regex_whitelist AS SELECT domain, domainlist.id AS id, domainlist_by_group.group_id AS group_id
FROM domainlist FROM domainlist
LEFT JOIN domainlist_by_group ON domainlist_by_group.domainlist_id = domainlist.id LEFT JOIN domainlist_by_group ON domainlist_by_group.domainlist_id = domainlist.id
LEFT JOIN "group" ON "group".id = domainlist_by_group.group_id LEFT JOIN "group" ON "group".id = domainlist_by_group.group_id
@ -135,7 +135,7 @@ CREATE VIEW vw_regex_allowlist AS SELECT domain, domainlist.id AS id, domainlist
AND domainlist.type = 2 AND domainlist.type = 2
ORDER BY domainlist.id; ORDER BY domainlist.id;
CREATE VIEW vw_regex_denylist AS SELECT domain, domainlist.id AS id, domainlist_by_group.group_id AS group_id CREATE VIEW vw_regex_blacklist AS SELECT domain, domainlist.id AS id, domainlist_by_group.group_id AS group_id
FROM domainlist FROM domainlist
LEFT JOIN domainlist_by_group ON domainlist_by_group.domainlist_id = domainlist.id LEFT JOIN domainlist_by_group ON domainlist_by_group.domainlist_id = domainlist.id
LEFT JOIN "group" ON "group".id = domainlist_by_group.group_id LEFT JOIN "group" ON "group".id = domainlist_by_group.group_id

View File

@ -3,7 +3,7 @@
# Source utils.sh for getFTLConfigValue() # Source utils.sh for getFTLConfigValue()
PI_HOLE_SCRIPT_DIR='/opt/pihole' PI_HOLE_SCRIPT_DIR='/opt/pihole'
utilsfile="${PI_HOLE_SCRIPT_DIR}/utils.sh" utilsfile="${PI_HOLE_SCRIPT_DIR}/utils.sh"
# shellcheck source="./advanced/Scripts/utils.sh" # shellcheck disable=SC1090
. "${utilsfile}" . "${utilsfile}"
# Get file paths # Get file paths

View File

@ -3,40 +3,32 @@
# Source utils.sh for getFTLConfigValue() # Source utils.sh for getFTLConfigValue()
PI_HOLE_SCRIPT_DIR='/opt/pihole' PI_HOLE_SCRIPT_DIR='/opt/pihole'
utilsfile="${PI_HOLE_SCRIPT_DIR}/utils.sh" utilsfile="${PI_HOLE_SCRIPT_DIR}/utils.sh"
# shellcheck source="./advanced/Scripts/utils.sh" # shellcheck disable=SC1090
. "${utilsfile}" . "${utilsfile}"
# Get file paths # Get file paths
FTL_PID_FILE="$(getFTLConfigValue files.pid)" FTL_PID_FILE="$(getFTLConfigValue files.pid)"
FTL_LOG_FILE="$(getFTLConfigValue files.log.ftl)"
PIHOLE_LOG_FILE="$(getFTLConfigValue files.log.dnsmasq)"
WEBSERVER_LOG_FILE="$(getFTLConfigValue files.log.webserver)"
FTL_PID_FILE="${FTL_PID_FILE:-/run/pihole-FTL.pid}"
FTL_LOG_FILE="${FTL_LOG_FILE:-/var/log/pihole/FTL.log}"
PIHOLE_LOG_FILE="${PIHOLE_LOG_FILE:-/var/log/pihole/pihole.log}"
WEBSERVER_LOG_FILE="${WEBSERVER_LOG_FILE:-/var/log/pihole/webserver.log}"
# Ensure that permissions are set so that pihole-FTL can edit all necessary files # Ensure that permissions are set so that pihole-FTL can edit all necessary files
mkdir -p /var/log/pihole # shellcheck disable=SC2174
chown -R pihole:pihole /etc/pihole/ /var/log/pihole/ mkdir -pm 0640 /var/log/pihole
chown -R pihole:pihole /etc/pihole /var/log/pihole
chmod -R 0640 /var/log/pihole
chmod -R 0660 /etc/pihole
# allow all users read version file (and use pihole -v) # Logrotate config file need to be owned by root and must not be writable by group and others
touch /etc/pihole/versions chown root:root /etc/pihole/logrotate
chmod 0644 /etc/pihole/versions chmod 0644 /etc/pihole/logrotate
# allow all users to enter the directories
chmod 0755 /etc/pihole /var/log/pihole
# allow pihole to access subdirs in /etc/pihole (sets execution bit on dirs) # allow pihole to access subdirs in /etc/pihole (sets execution bit on dirs)
find /etc/pihole/ /var/log/pihole/ -type d -exec chmod 0755 {} + # credits https://stackoverflow.com/a/11512211
# Set all files (except TLS-related ones) to u+rw g+r find /etc/pihole -type d -exec chmod 0755 {} \;
find /etc/pihole/ /var/log/pihole/ -type f ! \( -name '*.pem' -o -name '*.crt' \) -exec chmod 0640 {} +
# Set TLS-related files to a more restrictive u+rw *only* (they may contain private keys)
find /etc/pihole/ -type f \( -name '*.pem' -o -name '*.crt' \) -exec chmod 0600 {} +
# Logrotate config file need to be owned by root
chown root:root /etc/pihole/logrotate
# Touch files to ensure they exist (create if non-existing, preserve if existing) # Touch files to ensure they exist (create if non-existing, preserve if existing)
[ -f "${FTL_PID_FILE}" ] || install -D -m 644 -o pihole -g pihole /dev/null "${FTL_PID_FILE}" [ -f "${FTL_PID_FILE}" ] || install -D -m 644 -o pihole -g pihole /dev/null "${FTL_PID_FILE}"
[ -f "${FTL_LOG_FILE}" ] || install -m 640 -o pihole -g pihole /dev/null "${FTL_LOG_FILE}" [ -f /var/log/pihole/FTL.log ] || install -m 640 -o pihole -g pihole /dev/null /var/log/pihole/FTL.log
[ -f "${PIHOLE_LOG_FILE}" ] || install -m 640 -o pihole -g pihole /dev/null "${PIHOLE_LOG_FILE}" [ -f /var/log/pihole/pihole.log ] || install -m 640 -o pihole -g pihole /dev/null /var/log/pihole/pihole.log
[ -f "${WEBSERVER_LOG_FILE}" ] || install -m 640 -o pihole -g pihole /dev/null "${WEBSERVER_LOG_FILE}"
[ -f /etc/pihole/dhcp.leases ] || install -m 644 -o pihole -g pihole /dev/null /etc/pihole/dhcp.leases [ -f /etc/pihole/dhcp.leases ] || install -m 644 -o pihole -g pihole /dev/null /etc/pihole/dhcp.leases
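Both sides of the prestart script above use the install(1) idiom to create any missing runtime files with owner, group and mode set in one step (copying /dev/null yields an empty file, and -D creates missing parent directories). Reduced to a single illustrative line:

# Create an empty, correctly-owned file only if it does not exist yet (requires root).
target="/var/log/pihole/FTL.log"
[ -f "${target}" ] || install -D -m 640 -o pihole -g pihole /dev/null "${target}"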

View File

@ -1,40 +0,0 @@
#!/sbin/openrc-run
# shellcheck shell=sh disable=SC2034
: "${PI_HOLE_SCRIPT_DIR:=/opt/pihole}"
command="/usr/bin/pihole-FTL"
command_user="pihole:pihole"
supervisor=supervise-daemon
command_args_foreground="-f"
command_background=true
pidfile="/run/${RC_SVCNAME}_openrc.pid"
extra_started_commands="reload"
respawn_max=5
respawn_period=60
capabilities="^CAP_NET_BIND_SERVICE,^CAP_NET_RAW,^CAP_NET_ADMIN,^CAP_SYS_NICE,^CAP_IPC_LOCK,^CAP_CHOWN,^CAP_SYS_TIME"
depend() {
want net
provide dns
}
checkconfig() {
$command -f test
}
start_pre() {
sh "${PI_HOLE_SCRIPT_DIR}/pihole-FTL-prestart.sh"
}
stop_post() {
sh "${PI_HOLE_SCRIPT_DIR}/pihole-FTL-poststop.sh"
}
reload() {
checkconfig || return $?
ebegin "Reloading ${RC_SVCNAME}"
start-stop-daemon --signal HUP --pidfile "${pidfile}"
eend $?
}

View File

@ -12,7 +12,7 @@
# Source utils.sh for getFTLConfigValue(), getFTLPID() # Source utils.sh for getFTLConfigValue(), getFTLPID()
PI_HOLE_SCRIPT_DIR="/opt/pihole" PI_HOLE_SCRIPT_DIR="/opt/pihole"
utilsfile="${PI_HOLE_SCRIPT_DIR}/utils.sh" utilsfile="${PI_HOLE_SCRIPT_DIR}/utils.sh"
# shellcheck source="./advanced/Scripts/utils.sh" # shellcheck disable=SC1090
. "${utilsfile}" . "${utilsfile}"
@ -57,16 +57,13 @@ start() {
stop() { stop() {
if is_running; then if is_running; then
kill "${FTL_PID}" kill "${FTL_PID}"
# Give FTL 60 seconds to gracefully stop for i in 1 2 3 4 5; do
i=1
while [ "${i}" -le 60 ]; do
if ! is_running; then if ! is_running; then
break break
fi fi
printf "." printf "."
sleep 1 sleep 1
i=$((i + 1))
done done
echo echo

View File

@ -17,18 +17,18 @@ StartLimitIntervalSec=60s
[Service] [Service]
User=pihole User=pihole
PermissionsStartOnly=true
AmbientCapabilities=CAP_NET_BIND_SERVICE CAP_NET_RAW CAP_NET_ADMIN CAP_SYS_NICE CAP_IPC_LOCK CAP_CHOWN CAP_SYS_TIME AmbientCapabilities=CAP_NET_BIND_SERVICE CAP_NET_RAW CAP_NET_ADMIN CAP_SYS_NICE CAP_IPC_LOCK CAP_CHOWN CAP_SYS_TIME
# Run prestart with elevated permissions ExecStartPre=/opt/pihole/pihole-FTL-prestart.sh
ExecStartPre=+/opt/pihole/pihole-FTL-prestart.sh
ExecStart=/usr/bin/pihole-FTL -f ExecStart=/usr/bin/pihole-FTL -f
Restart=on-failure Restart=on-failure
RestartSec=5s RestartSec=5s
ExecReload=/bin/kill -HUP $MAINPID ExecReload=/bin/kill -HUP $MAINPID
ExecStopPost=+/opt/pihole/pihole-FTL-poststop.sh ExecStopPost=/opt/pihole/pihole-FTL-poststop.sh
# Use graceful shutdown with a reasonable timeout # Use graceful shutdown with a reasonable timeout
TimeoutStopSec=60s TimeoutStopSec=10s
# Make /usr, /boot, /etc and possibly some more folders read-only... # Make /usr, /boot, /etc and possibly some more folders read-only...
ProtectSystem=full ProtectSystem=full
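In the systemd unit above, the master side prefixes ExecStartPre and ExecStopPost with '+', which makes systemd run just those helpers with full privileges while the daemon itself still runs as User=pihole; the v6.0 side relied on the older PermissionsStartOnly=true switch for a similar effect. The relevant master-side lines, with that reading spelled out:

[Service]
User=pihole
# The '+' prefix elevates only this helper; pihole-FTL itself stays unprivileged.
ExecStartPre=+/opt/pihole/pihole-FTL-prestart.sh
ExecStart=/usr/bin/pihole-FTL -f
ExecStopPost=+/opt/pihole/pihole-FTL-poststop.sh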

View File

@ -0,0 +1,51 @@
_pihole() {
local cur prev opts opts_checkout opts_debug opts_logging opts_query opts_update opts_version
COMPREPLY=()
cur="${COMP_WORDS[COMP_CWORD]}"
prev="${COMP_WORDS[COMP_CWORD-1]}"
prev2="${COMP_WORDS[COMP_CWORD-2]}"
case "${prev}" in
"pihole")
opts="allow allow-regex allow-wild deny checkout debug disable enable flush help logging query reconfigure regex reloaddns reloadlists status tail uninstall updateGravity updatePihole version wildcard arpflush api"
COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
;;
"allow"|"deny"|"wildcard"|"regex"|"allow-regex"|"allow-wild")
opts_lists="\not \--delmode \--quiet \--list \--help"
COMPREPLY=( $(compgen -W "${opts_lists}" -- ${cur}) )
;;
"checkout")
opts_checkout="core ftl web master dev"
COMPREPLY=( $(compgen -W "${opts_checkout}" -- ${cur}) )
;;
"debug")
opts_debug="-a"
COMPREPLY=( $(compgen -W "${opts_debug}" -- ${cur}) )
;;
"logging")
opts_logging="on off 'off noflush'"
COMPREPLY=( $(compgen -W "${opts_logging}" -- ${cur}) )
;;
"query")
opts_query="--partial --all"
COMPREPLY=( $(compgen -W "${opts_query}" -- ${cur}) )
;;
"updatePihole"|"-up")
opts_update="--check-only"
COMPREPLY=( $(compgen -W "${opts_update}" -- ${cur}) )
;;
"core"|"admin"|"ftl")
if [[ "$prev2" == "checkout" ]]; then
opts_checkout="master dev"
COMPREPLY=( $(compgen -W "${opts_checkout}" -- ${cur}) )
else
return 1
fi
;;
*)
return 1
;;
esac
return 0
}
complete -F _pihole pihole

View File

@ -1,9 +0,0 @@
#!/bin/bash
#
# Bash completion script for pihole-FTL
#
# This completion script provides tab completion for pihole-FTL CLI flags and commands.
# It uses the `pihole-FTL --complete` command to generate the completion options.
_complete_FTL() { mapfile -t COMPREPLY < <(pihole-FTL --complete "${COMP_WORDS[@]}"); }
complete -F _complete_FTL pihole-FTL

View File

@ -1,59 +0,0 @@
#!/bin/bash
#
# Bash completion script for pihole
#
_pihole() {
local cur prev prev2 opts opts_lists opts_checkout opts_debug opts_logging opts_query opts_update opts_networkflush
COMPREPLY=()
cur="${COMP_WORDS[COMP_CWORD]}"
prev="${COMP_WORDS[COMP_CWORD-1]}"
prev2="${COMP_WORDS[COMP_CWORD-2]}"
case "${prev}" in
"pihole")
opts="allow allow-regex allow-wild deny checkout debug disable enable flush help logging query repair regex reloaddns reloadlists setpassword status tail uninstall updateGravity updatePihole version wildcard networkflush api"
mapfile -t COMPREPLY < <(compgen -W "${opts}" -- "${cur}")
;;
"allow"|"deny"|"wildcard"|"regex"|"allow-regex"|"allow-wild")
opts_lists="\not \--delmode \--quiet \--list \--help"
mapfile -t COMPREPLY < <(compgen -W "${opts_lists}" -- "${cur}")
;;
"checkout")
opts_checkout="core ftl web master dev"
mapfile -t COMPREPLY < <(compgen -W "${opts_checkout}" -- "${cur}")
;;
"debug")
opts_debug="-a"
mapfile -t COMPREPLY < <(compgen -W "${opts_debug}" -- "${cur}")
;;
"logging")
opts_logging="on off 'off noflush'"
mapfile -t COMPREPLY < <(compgen -W "${opts_logging}" -- "${cur}")
;;
"query")
opts_query="--partial --all"
mapfile -t COMPREPLY < <(compgen -W "${opts_query}" -- "${cur}")
;;
"updatePihole"|"-up")
opts_update="--check-only"
mapfile -t COMPREPLY < <(compgen -W "${opts_update}" -- "${cur}")
;;
"networkflush")
opts_networkflush="--arp"
mapfile -t COMPREPLY < <(compgen -W "${opts_networkflush}" -- "${cur}")
;;
"core"|"web"|"ftl")
if [[ "$prev2" == "checkout" ]]; then
opts_checkout="master development"
mapfile -t COMPREPLY < <(compgen -W "${opts_checkout}" -- "${cur}")
else
return 1
fi
;;
*)
return 1
;;
esac
return 0
}
complete -F _pihole pihole
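The master-side completion script above fills COMPREPLY with mapfile -t reading from a process substitution of compgen, instead of the unquoted array assignment used in the v6.0 script, which keeps the generated candidates from being word-split or glob-expanded. The core pattern reduced to a toy completer (the mytool command and its word list are hypothetical):

# Toy completer: offer three fixed subcommands for a hypothetical "mytool" command.
_mytool() {
    local cur="${COMP_WORDS[COMP_CWORD]}"
    mapfile -t COMPREPLY < <(compgen -W "start stop status" -- "${cur}")
}
complete -F _mytool mytool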

File diff suppressed because it is too large

View File

@ -8,17 +8,13 @@
# This file is copyright under the latest version of the EUPL. # This file is copyright under the latest version of the EUPL.
# Please see LICENSE file for your rights under this license. # Please see LICENSE file for your rights under this license.
# shellcheck source="./advanced/Scripts/COL_TABLE"
source "/opt/pihole/COL_TABLE" source "/opt/pihole/COL_TABLE"
# shellcheck source="./advanced/Scripts/utils.sh"
source "/opt/pihole/utils.sh"
# getFTLConfigValue() from utils.sh
while true; do while true; do
read -rp " ${QST} Are you sure you would like to remove ${COL_BOLD}Pi-hole${COL_NC}? [y/N] " answer read -rp " ${QST} Are you sure you would like to remove ${COL_WHITE}Pi-hole${COL_NC}? [y/N] " answer
case ${answer} in case ${answer} in
[Yy]* ) break;; [Yy]* ) break;;
* ) echo -e "${OVER} ${COL_GREEN}Uninstall has been canceled${COL_NC}"; exit 0;; * ) echo -e "${OVER} ${COL_LIGHT_GREEN}Uninstall has been canceled${COL_NC}"; exit 0;;
esac esac
done done
@ -27,200 +23,141 @@ str="Root user check"
if [[ ${EUID} -eq 0 ]]; then if [[ ${EUID} -eq 0 ]]; then
echo -e " ${TICK} ${str}" echo -e " ${TICK} ${str}"
else else
echo -e " ${CROSS} ${str} # Check if sudo is actually installed
Script called with non-root privileges # If it isn't, exit because the uninstall can not complete
The Pi-hole requires elevated privileges to uninstall" if [ -x "$(command -v sudo)" ]; then
exit 1 export SUDO="sudo"
else
echo -e " ${CROSS} ${str}
Script called with non-root privileges
The Pi-hole requires elevated privileges to uninstall"
exit 1
fi
fi fi
# Get paths for admin interface, log files and database files, readonly PI_HOLE_FILES_DIR="/etc/.pihole"
# to allow deletion where user has specified a non-default location
ADMIN_INTERFACE_DIR=$(getFTLConfigValue "webserver.paths.webroot")$(getFTLConfigValue "webserver.paths.webhome")
FTL_LOG=$(getFTLConfigValue "files.log.ftl")
DNSMASQ_LOG=$(getFTLConfigValue "files.log.dnsmasq")
WEBSERVER_LOG=$(getFTLConfigValue "files.log.webserver")
PIHOLE_DB=$(getFTLConfigValue "files.database")
GRAVITY_DB=$(getFTLConfigValue "files.gravity")
MACVENDOR_DB=$(getFTLConfigValue "files.macvendor")
PI_HOLE_LOCAL_REPO="/etc/.pihole"
# Setting SKIP_INSTALL="true" to source the installer functions without running them
SKIP_INSTALL="true" SKIP_INSTALL="true"
# shellcheck source="./automated install/basic-install.sh" source "${PI_HOLE_FILES_DIR}/automated install/basic-install.sh"
source "${PI_HOLE_LOCAL_REPO}/automated install/basic-install.sh"
# Functions and Variables sources from basic-install: # package_manager_detect() sourced from basic-install.sh
# package_manager_detect(), disable_service(), stop_service(), package_manager_detect
# restart service() and is_command()
# PI_HOLE_CONFIG_DIR PI_HOLE_INSTALL_DIR PI_HOLE_LOCAL_REPO
removeMetaPackage() { removeMetaPackage() {
# Purge Pi-hole meta package # Purge Pi-hole meta package
echo "" echo ""
echo -ne " ${INFO} Removing Pi-hole meta package..."; echo -ne " ${INFO} Removing Pi-hole meta package...";
eval "${PKG_REMOVE}" "pihole-meta" &> /dev/null; eval "${SUDO}" "${PKG_REMOVE}" "pihole-meta" &> /dev/null;
echo -e "${OVER} ${INFO} Removed Pi-hole meta package"; echo -e "${OVER} ${INFO} Removed Pi-hole meta package";
} }
removeWebInterface() { removePiholeFiles() {
# Remove the web interface of Pi-hole # Only web directories/files that are created by Pi-hole should be removed
echo -ne " ${INFO} Removing Web Interface..." echo -ne " ${INFO} Removing Web Interface..."
rm -rf "${ADMIN_INTERFACE_DIR:-/var/www/html/admin/}" &> /dev/null ${SUDO} rm -rf /var/www/html/admin &> /dev/null
echo -e "${OVER} ${TICK} Removed Web Interface"
}
removeFTL() {
# Remove FTL and stop any running FTL service
if is_command "pihole-FTL"; then
# service stop & disable from basic_install.sh
stop_service pihole-FTL
disable_service pihole-FTL
echo -ne " ${INFO} Removing pihole-FTL..." # If the web directory is empty after removing these files, then the parent html directory can be removed.
rm -f /etc/systemd/system/pihole-FTL.service &> /dev/null if [ -d "/var/www/html" ]; then
if [[ -d '/etc/systemd/system/pihole-FTL.service.d' ]]; then if [[ ! "$(ls -A /var/www/html)" ]]; then
read -rp " ${QST} FTL service override directory /etc/systemd/system/pihole-FTL.service.d detected. Do you wish to remove this from your system? [y/N] " answer ${SUDO} rm -rf /var/www/html &> /dev/null
case $answer in
[yY]*)
echo -ne " ${INFO} Removing /etc/systemd/system/pihole-FTL.service.d..."
rm -R /etc/systemd/system/pihole-FTL.service.d &> /dev/null
echo -e "${OVER} ${INFO} Removed /etc/systemd/system/pihole-FTL.service.d"
;;
*) echo -e " ${INFO} Leaving /etc/systemd/system/pihole-FTL.service.d in place.";;
esac
fi
rm -f /etc/init.d/pihole-FTL &> /dev/null
rm -f /usr/bin/pihole-FTL &> /dev/null
echo -e "${OVER} ${TICK} Removed pihole-FTL"
# Force systemd reload after service files are removed
if is_command "systemctl"; then
echo -ne " ${INFO} Restarting systemd..."
systemctl daemon-reload
echo -e "${OVER} ${TICK} Restarted systemd..."
fi fi
fi fi
} echo -e "${OVER} ${TICK} Removed Web Interface"
removeCronFiles() {
# Attempt to preserve backwards compatibility with older versions # Attempt to preserve backwards compatibility with older versions
# to guarantee no additional changes were made to /etc/crontab after # to guarantee no additional changes were made to /etc/crontab after
# the installation of pihole, /etc/crontab.pihole should be permanently # the installation of pihole, /etc/crontab.pihole should be permanently
# preserved. # preserved.
if [[ -f /etc/crontab.orig ]]; then if [[ -f /etc/crontab.orig ]]; then
mv /etc/crontab /etc/crontab.pihole ${SUDO} mv /etc/crontab /etc/crontab.pihole
mv /etc/crontab.orig /etc/crontab ${SUDO} mv /etc/crontab.orig /etc/crontab
restart_service cron ${SUDO} service cron restart
echo -e " ${TICK} Restored the default system cron" echo -e " ${TICK} Restored the default system cron"
echo -e " ${INFO} A backup of the most recent crontab is saved at /etc/crontab.pihole"
fi fi
# Attempt to preserve backwards compatibility with older versions # Attempt to preserve backwards compatibility with older versions
if [[ -f /etc/cron.d/pihole ]];then if [[ -f /etc/cron.d/pihole ]];then
rm -f /etc/cron.d/pihole &> /dev/null ${SUDO} rm -f /etc/cron.d/pihole &> /dev/null
echo -e " ${TICK} Removed /etc/cron.d/pihole" echo -e " ${TICK} Removed /etc/cron.d/pihole"
fi fi
}
removePiholeFiles() {
# Remove databases (including user specified non-default paths)
rm -f "${PIHOLE_DB:-/etc/pihole/pihole-FTL.db}" &> /dev/null
rm -f "${GRAVITY_DB:-/etc/pihole/gravity.db}" &> /dev/null
rm -f "${MACVENDOR_DB:-/etc/pihole/macvendor.db}" &> /dev/null
# Remove pihole config, repo and local files
rm -rf "${PI_HOLE_CONFIG_DIR:-/etc/pihole}" &> /dev/null
rm -rf "${PI_HOLE_LOCAL_REPO:-/etc/.pihole}" &> /dev/null
rm -rf "${PI_HOLE_INSTALL_DIR:-/opt/pihole}" &> /dev/null
# Remove log files (including user specified non-default paths)
# and rotated logs
# Explicitly escape spaces, in case of trailing space in path before wildcard
rm -f "$(printf '%q' "${FTL_LOG:-/var/log/pihole/FTL.log}")*" &> /dev/null
rm -f "$(printf '%q' "${DNSMASQ_LOG:-/var/log/pihole/pihole.log}")*" &> /dev/null
rm -f "$(printf '%q' "${WEBSERVER_LOG:-/var/log/pihole/webserver.log}")*" &> /dev/null
# remove any remnant log-files from old versions
rm -rf /var/log/*pihole* &> /dev/null
# remove log directory
rm -rf /var/log/pihole &> /dev/null
# remove the pihole command
rm -f /usr/local/bin/pihole &> /dev/null
# remove Pi-hole's bash completion
rm -f /etc/bash_completion.d/pihole &> /dev/null
rm -f /etc/bash_completion.d/pihole-FTL &> /dev/null
# Remove pihole from sudoers for compatibility with old versions
rm -f /etc/sudoers.d/pihole &> /dev/null
${SUDO} rm -rf /var/log/*pihole* &> /dev/null
${SUDO} rm -rf /var/log/pihole/*pihole* &> /dev/null
${SUDO} rm -rf /etc/pihole/ &> /dev/null
${SUDO} rm -rf /etc/.pihole/ &> /dev/null
${SUDO} rm -rf /opt/pihole/ &> /dev/null
${SUDO} rm -f /usr/local/bin/pihole &> /dev/null
${SUDO} rm -f /etc/bash_completion.d/pihole &> /dev/null
${SUDO} rm -f /etc/sudoers.d/pihole &> /dev/null
echo -e " ${TICK} Removed config files" echo -e " ${TICK} Removed config files"
}
removeManPage() { # Restore Resolved
# If the pihole manpage exists, then delete if [[ -e /etc/systemd/resolved.conf.orig ]] || [[ -e /etc/systemd/resolved.conf.d/90-pi-hole-disable-stub-listener.conf ]]; then
if [[ -f /usr/local/share/man/man8/pihole.8 ]]; then ${SUDO} cp -p /etc/systemd/resolved.conf.orig /etc/systemd/resolved.conf &> /dev/null || true
rm -f /usr/local/share/man/man8/pihole.8 /usr/local/share/man/man8/pihole-FTL.8 /usr/local/share/man/man5/pihole-FTL.conf.5 ${SUDO} rm -f /etc/systemd/resolved.conf.d/90-pi-hole-disable-stub-listener.conf
# Rebuild man-db if present systemctl reload-or-restart systemd-resolved
if is_command "mandb"; then fi
mandb -q &>/dev/null
# Remove FTL
if command -v pihole-FTL &> /dev/null; then
echo -ne " ${INFO} Removing pihole-FTL..."
if [[ -x "$(command -v systemctl)" ]]; then
systemctl stop pihole-FTL
else
service pihole-FTL stop
fi fi
${SUDO} rm -f /etc/systemd/system/pihole-FTL.service
if [[ -d '/etc/systemd/system/pihole-FTL.service.d' ]]; then
read -rp " ${QST} FTL service override directory /etc/systemd/system/pihole-FTL.service.d detected. Do you wish to remove this from your system? [y/N] " answer
case $answer in
[yY]*)
echo -ne " ${INFO} Removing /etc/systemd/system/pihole-FTL.service.d..."
${SUDO} rm -R /etc/systemd/system/pihole-FTL.service.d
echo -e "${OVER} ${INFO} Removed /etc/systemd/system/pihole-FTL.service.d"
;;
*) echo -e " ${INFO} Leaving /etc/systemd/system/pihole-FTL.service.d in place.";;
esac
fi
${SUDO} rm -f /etc/init.d/pihole-FTL
${SUDO} rm -f /usr/bin/pihole-FTL
echo -e "${OVER} ${TICK} Removed pihole-FTL"
fi
# If the pihole manpage exists, then delete and rebuild man-db
if [[ -f /usr/local/share/man/man8/pihole.8 ]]; then
${SUDO} rm -f /usr/local/share/man/man8/pihole.8 /usr/local/share/man/man8/pihole-FTL.8 /usr/local/share/man/man5/pihole-FTL.conf.5
${SUDO} mandb -q &>/dev/null
echo -e " ${TICK} Removed pihole man page" echo -e " ${TICK} Removed pihole man page"
fi fi
}
removeUser() {
# If the pihole user exists, then remove # If the pihole user exists, then remove
if id "pihole" &> /dev/null; then if id "pihole" &> /dev/null; then
if userdel -r pihole 2> /dev/null; then if ${SUDO} userdel -r pihole 2> /dev/null; then
echo -e " ${TICK} Removed 'pihole' user" echo -e " ${TICK} Removed 'pihole' user"
else else
echo -e " ${CROSS} Unable to remove 'pihole' user" echo -e " ${CROSS} Unable to remove 'pihole' user"
fi fi
fi fi
# If the pihole group exists, then remove # If the pihole group exists, then remove
if getent group "pihole" &> /dev/null; then if getent group "pihole" &> /dev/null; then
if groupdel pihole 2> /dev/null; then if ${SUDO} groupdel pihole 2> /dev/null; then
echo -e " ${TICK} Removed 'pihole' group" echo -e " ${TICK} Removed 'pihole' group"
else else
echo -e " ${CROSS} Unable to remove 'pihole' group" echo -e " ${CROSS} Unable to remove 'pihole' group"
fi fi
fi fi
}
restoreResolved() {
# Restore Resolved from saved configuration, if present
if [[ -e /etc/systemd/resolved.conf.orig ]] || [[ -e /etc/systemd/resolved.conf.d/90-pi-hole-disable-stub-listener.conf ]]; then
cp -p /etc/systemd/resolved.conf.orig /etc/systemd/resolved.conf &> /dev/null || true
rm -f /etc/systemd/resolved.conf.d/90-pi-hole-disable-stub-listener.conf &> /dev/null
systemctl reload-or-restart systemd-resolved
fi
}
completionMessage() {
echo -e "\\n We're sorry to see you go, but thanks for checking out Pi-hole! echo -e "\\n We're sorry to see you go, but thanks for checking out Pi-hole!
If you need help, reach out to us on GitHub, Discourse, Reddit or Twitter If you need help, reach out to us on GitHub, Discourse, Reddit or Twitter
Reinstall at any time: ${COL_BOLD}curl -sSL https://install.pi-hole.net | bash${COL_NC} Reinstall at any time: ${COL_WHITE}curl -sSL https://install.pi-hole.net | bash${COL_NC}
${COL_RED}Please reset the DNS on your router/clients to restore internet connectivity${COL_NC} ${COL_LIGHT_RED}Please reset the DNS on your router/clients to restore internet connectivity${COL_NC}
${INFO} Pi-hole's meta package has been removed, use the 'autoremove' function from your package manager to remove unused dependencies${COL_NC} ${INFO} Pi-hole's meta package has been removed, use the 'autoremove' function from your package manager to remove unused dependencies${COL_NC}
${COL_GREEN}Uninstallation Complete! ${COL_NC}" ${COL_LIGHT_GREEN}Uninstallation Complete! ${COL_NC}"
} }
######### SCRIPT ########### ######### SCRIPT ###########
# The ordering here allows clean uninstallation with nothing
# removed before anything that depends upon it.
# eg removeFTL relies on scripts removed by removePiholeFiles
# removeUser relies on commands removed by removeMetaPackage
package_manager_detect
removeWebInterface
removeCronFiles
restoreResolved
removeManPage
removeFTL
removeUser
removeMetaPackage removeMetaPackage
removePiholeFiles removePiholeFiles
completionMessage

View File

@ -1,4 +1,5 @@
#!/usr/bin/env bash #!/usr/bin/env bash
# shellcheck disable=SC1090
# Pi-hole: A black hole for Internet advertisements # Pi-hole: A black hole for Internet advertisements
# (c) 2017 Pi-hole, LLC (https://pi-hole.net) # (c) 2017 Pi-hole, LLC (https://pi-hole.net)
@ -15,13 +16,13 @@ export LC_ALL=C
PI_HOLE_SCRIPT_DIR="/opt/pihole" PI_HOLE_SCRIPT_DIR="/opt/pihole"
# Source utils.sh for GetFTLConfigValue # Source utils.sh for GetFTLConfigValue
utilsfile="${PI_HOLE_SCRIPT_DIR}/utils.sh" utilsfile="${PI_HOLE_SCRIPT_DIR}/utils.sh"
# shellcheck source=./advanced/Scripts/utils.sh # shellcheck disable=SC1090
. "${utilsfile}" . "${utilsfile}"
coltable="${PI_HOLE_SCRIPT_DIR}/COL_TABLE" coltable="${PI_HOLE_SCRIPT_DIR}/COL_TABLE"
# shellcheck source=./advanced/Scripts/COL_TABLE # shellcheck disable=SC1090
. "${coltable}" . "${coltable}"
# shellcheck source=./advanced/Scripts/database_migration/gravity-db.sh # shellcheck disable=SC1091
. "/etc/.pihole/advanced/Scripts/database_migration/gravity-db.sh" . "/etc/.pihole/advanced/Scripts/database_migration/gravity-db.sh"
basename="pihole" basename="pihole"
@ -50,14 +51,14 @@ etag_support=false
# Check gravity temp directory # Check gravity temp directory
if [ ! -d "${GRAVITY_TMPDIR}" ] || [ ! -w "${GRAVITY_TMPDIR}" ]; then if [ ! -d "${GRAVITY_TMPDIR}" ] || [ ! -w "${GRAVITY_TMPDIR}" ]; then
echo -e " ${COL_RED}Gravity temporary directory does not exist or is not a writeable directory, falling back to /tmp. ${COL_NC}" echo -e " ${COL_LIGHT_RED}Gravity temporary directory does not exist or is not a writeable directory, falling back to /tmp. ${COL_NC}"
GRAVITY_TMPDIR="/tmp" GRAVITY_TMPDIR="/tmp"
fi fi
# Set this only after sourcing pihole-FTL.conf as the gravity database path may # Set this only after sourcing pihole-FTL.conf as the gravity database path may
# have changed # have changed
gravityDBfile="${GRAVITYDB}" gravityDBfile="${GRAVITYDB}"
gravityDBfile_default="${piholeDir}/gravity.db" gravityDBfile_default="/etc/pihole/gravity.db"
gravityTEMPfile="${GRAVITYDB}_temp" gravityTEMPfile="${GRAVITYDB}_temp"
gravityDIR="$(dirname -- "${gravityDBfile}")" gravityDIR="$(dirname -- "${gravityDBfile}")"
gravityOLDfile="${gravityDIR}/gravity_old.db" gravityOLDfile="${gravityDIR}/gravity_old.db"
@ -118,18 +119,15 @@ gravity_swap_databases() {
# Swap databases and remove or conditionally rename old database # Swap databases and remove or conditionally rename old database
# Number of available blocks on disk # Number of available blocks on disk
# Busybox Compat: `stat` long flags unsupported availableBlocks=$(stat -f --format "%a" "${gravityDIR}")
# -f flag is short form of --file-system.
# -c flag is short form of --format.
availableBlocks=$(stat -f -c "%a" "${gravityDIR}")
# Number of blocks, used by gravity.db # Number of blocks, used by gravity.db
gravityBlocks=$(stat -c "%b" "${gravityDBfile}") gravityBlocks=$(stat --format "%b" "${gravityDBfile}")
# Only keep the old database if available disk space is at least twice the size of the existing gravity.db. # Only keep the old database if available disk space is at least twice the size of the existing gravity.db.
# Better be safe than sorry... # Better be safe than sorry...
oldAvail=false oldAvail=false
if [ "${availableBlocks}" -gt "$((gravityBlocks * 2))" ] && [ -f "${gravityDBfile}" ]; then if [ "${availableBlocks}" -gt "$((gravityBlocks * 2))" ] && [ -f "${gravityDBfile}" ]; then
oldAvail=true oldAvail=true
cp -p "${gravityDBfile}" "${gravityOLDfile}" cp "${gravityDBfile}" "${gravityOLDfile}"
fi fi
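The disk-space guard above can be tried in isolation; a minimal sketch using the same short stat flags (hypothetical paths, assumes GNU or BusyBox stat):
# Free blocks on the filesystem holding the database directory (hypothetical path)
availableBlocks=$(stat -f -c "%a" "/etc/pihole")
# Blocks allocated to the database file itself (hypothetical path)
gravityBlocks=$(stat -c "%b" "/etc/pihole/gravity.db")
# Keep an old copy only if at least twice the database size is free
if [ "${availableBlocks}" -gt "$((gravityBlocks * 2))" ]; then
    echo "enough free space to keep gravity_old.db"
fi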
# Drop the gravity and antigravity tables + subsequent VACUUM the current # Drop the gravity and antigravity tables + subsequent VACUUM the current
@ -142,7 +140,7 @@ gravity_swap_databases() {
else else
# Check if the backup directory exists # Check if the backup directory exists
if [ ! -d "${gravityBCKdir}" ]; then if [ ! -d "${gravityBCKdir}" ]; then
mkdir -p "${gravityBCKdir}" && chown pihole:pihole "${gravityBCKdir}" mkdir -p "${gravityBCKdir}"
fi fi
# If multiple gravityBCKfile's are present (appended with a number), rotate them # If multiple gravityBCKfile's are present (appended with a number), rotate them
@ -308,7 +306,7 @@ migrate_to_database() {
fi fi
# Check if gravity database needs to be updated # Check if gravity database needs to be updated
upgrade_gravityDB "${gravityDBfile}" upgrade_gravityDB "${gravityDBfile}" "${piholeDir}"
# Migrate list files to new database # Migrate list files to new database
if [ -e "${adListFile}" ]; then if [ -e "${adListFile}" ]; then
@ -336,7 +334,7 @@ migrate_to_database() {
fi fi
# Check if gravity database needs to be updated # Check if gravity database needs to be updated
upgrade_gravityDB "${gravityDBfile}" upgrade_gravityDB "${gravityDBfile}" "${piholeDir}"
} }
# Determine if DNS resolution is available before proceeding # Determine if DNS resolution is available before proceeding
@ -351,24 +349,17 @@ gravity_CheckDNSResolutionAvailable() {
echo -e " ${CROSS} DNS resolution is currently unavailable" echo -e " ${CROSS} DNS resolution is currently unavailable"
fi fi
str="Waiting up to 120 seconds for DNS resolution..." str="Waiting until DNS resolution is available..."
echo -ne " ${INFO} ${str}" echo -ne " ${INFO} ${str}"
until getent hosts github.com &> /dev/null; do
# Default DNS timeout is two seconds, plus 1 second for each dot > 120 seconds # Append one dot for each second waiting
for ((i = 0; i < 40; i++)); do str="${str}."
if getent hosts github.com &> /dev/null; then echo -ne " ${OVER} ${INFO} ${str}"
# If we reach this point, DNS resolution is available sleep 1
echo -e "${OVER} ${TICK} DNS resolution is available"
return 0
fi
# Append one dot for each second waiting
echo -ne "."
sleep 1
done done
# DNS resolution is still unavailable after 120 seconds # If we reach this point, DNS resolution is available
return 1 echo -e "${OVER} ${TICK} DNS resolution is available"
} }
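For reference, the 120-second bound in master's rewritten loop is 40 iterations of roughly a two-second resolution timeout (per the comment above) plus a one-second sleep. A standalone sketch of the same wait pattern (hypothetical probe domain):
for ((i = 0; i < 40; i++)); do
    if getent hosts github.com &> /dev/null; then
        echo "DNS resolution is available"
        break
    fi
    echo -n "."
    sleep 1
done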
# Function: try_restore_backup # Function: try_restore_backup
@ -427,7 +418,7 @@ gravity_DownloadBlocklists() {
echo -e " ${INFO} Storing gravity database in ${COL_BOLD}${gravityDBfile}${COL_NC}" echo -e " ${INFO} Storing gravity database in ${COL_BOLD}${gravityDBfile}${COL_NC}"
fi fi
local url domain str compression adlist_type directory success local url domain str target compression adlist_type directory success
echo "" echo ""
# Prepare new gravity database # Prepare new gravity database
@ -576,12 +567,12 @@ gravity_DownloadBlocklists() {
if [[ "${check_url}" =~ ${regex} ]]; then if [[ "${check_url}" =~ ${regex} ]]; then
echo -e " ${CROSS} Invalid Target" echo -e " ${CROSS} Invalid Target"
else else
timeit gravity_DownloadBlocklistFromUrl "${url}" "${sourceIDs[$i]}" "${saveLocation}" "${compression}" "${adlist_type}" "${domain}" timeit gravity_DownloadBlocklistFromUrl "${url}" "${sourceIDs[$i]}" "${saveLocation}" "${target}" "${compression}" "${adlist_type}" "${domain}"
fi fi
echo "" echo ""
done done
DownloadBlocklists_done=true gravity_Blackbody=true
} }
compareLists() { compareLists() {
@ -610,11 +601,9 @@ compareLists() {
# Download specified URL and perform checks on HTTP status and file content # Download specified URL and perform checks on HTTP status and file content
gravity_DownloadBlocklistFromUrl() { gravity_DownloadBlocklistFromUrl() {
local url="${1}" adlistID="${2}" saveLocation="${3}" compression="${4}" gravity_type="${5}" domain="${6}" local url="${1}" adlistID="${2}" saveLocation="${3}" target="${4}" compression="${5}" gravity_type="${6}" domain="${7}"
local listCurlBuffer str httpCode success="" ip customUpstreamResolver="" local modifiedOptions="" listCurlBuffer str httpCode success="" ip cmd_ext
local file_path permissions ip_addr port blocked=false download=true local file_path permissions ip_addr port blocked=false download=true
# modifiedOptions is an array to store all the options used to check if the adlist has been changed upstream
local modifiedOptions=()
# Create temp file to store content on disk instead of RAM # Create temp file to store content on disk instead of RAM
# We don't use '--suffix' here because not all implementations of mktemp support it, e.g. on Alpine # We don't use '--suffix' here because not all implementations of mktemp support it, e.g. on Alpine
@ -631,14 +620,14 @@ gravity_DownloadBlocklistFromUrl() {
# Save HTTP ETag to the specified file. An ETag is a caching related header, # Save HTTP ETag to the specified file. An ETag is a caching related header,
# usually returned in a response. If no ETag is sent by the server, an empty # usually returned in a response. If no ETag is sent by the server, an empty
# file is created and can later be used consistently. # file is created and can later be used consistently.
modifiedOptions=("${modifiedOptions[@]}" --etag-save "${saveLocation}".etag) modifiedOptions="--etag-save ${saveLocation}.etag"
if [[ -f "${saveLocation}.etag" ]]; then if [[ -f "${saveLocation}.etag" ]]; then
# This option makes a conditional HTTP request for the specific ETag read # This option makes a conditional HTTP request for the specific ETag read
# from the given file by sending a custom If-None-Match header using the # from the given file by sending a custom If-None-Match header using the
# stored ETag. This way, the server will only send the file if it has # stored ETag. This way, the server will only send the file if it has
# changed since the last request. # changed since the last request.
modifiedOptions=("${modifiedOptions[@]}" --etag-compare "${saveLocation}".etag) modifiedOptions="${modifiedOptions} --etag-compare ${saveLocation}.etag"
fi fi
fi fi
@ -651,13 +640,39 @@ gravity_DownloadBlocklistFromUrl() {
# Interestingly, this option is not supported by raw.githubusercontent.com # Interestingly, this option is not supported by raw.githubusercontent.com
# URLs, however, it is still supported by many older web servers which may # URLs, however, it is still supported by many older web servers which may
# not support the HTTP ETag method so we keep it as a fallback. # not support the HTTP ETag method so we keep it as a fallback.
modifiedOptions=("${modifiedOptions[@]}" -z "${saveLocation}") modifiedOptions="${modifiedOptions} -z ${saveLocation}"
fi fi
fi fi
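A minimal sketch of the conditional-download behaviour described above, run outside gravity (hypothetical URL and cache path; assumes a curl build new enough for --etag-save/--etag-compare):
etag_file="/tmp/list.etag"            # hypothetical cache location
url="https://example.com/hosts.txt"   # hypothetical list URL
opts=(--etag-save "${etag_file}")
# On later runs send If-None-Match so the server can answer 304 Not Modified
[ -f "${etag_file}" ] && opts=("${opts[@]}" --etag-compare "${etag_file}")
httpCode=$(curl -s -L "${opts[@]}" -w "%{http_code}" -o /tmp/list.txt "${url}")
echo "HTTP status: ${httpCode}"       # 304 means the cached copy is still current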
str="Status:" str="Status:"
echo -ne " ${INFO} ${str} Pending..." echo -ne " ${INFO} ${str} Pending..."
blocked=false blocked=false
case $(getFTLConfigValue dns.blocking.mode) in
"IP-NODATA-AAAA" | "IP")
# Get IP address of this domain
ip="$(dig "${domain}" +short)"
# Check if this IP matches any IP of the system
if [[ -n "${ip}" && $(grep -Ec "inet(|6) ${ip}" <<<"$(ip a)") -gt 0 ]]; then
blocked=true
fi
;;
"NXDOMAIN")
if [[ $(dig "${domain}" | grep "NXDOMAIN" -c) -ge 1 ]]; then
blocked=true
fi
;;
"NODATA")
if [[ $(dig "${domain}" | grep "NOERROR" -c) -ge 1 ]] && [[ -z $(dig +short "${domain}") ]]; then
blocked=true
fi
;;
"NULL" | *)
if [[ $(dig "${domain}" +short | grep "0.0.0.0" -c) -ge 1 ]]; then
blocked=true
fi
;;
esac
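The per-mode checks above can be reproduced by hand to see which answer shape each blocking mode produces; a rough sketch (hypothetical test domain, assumes dig is installed and queries go through Pi-hole):
domain="ads.example.com"                      # hypothetical blocked domain
dig "${domain}" +short | grep -c "0.0.0.0"    # NULL mode: answer is 0.0.0.0
dig "${domain}" | grep -c "NXDOMAIN"          # NXDOMAIN mode: status line reports NXDOMAIN
dig "${domain}" | grep -c "NOERROR"           # NODATA mode: status is NOERROR ...
dig +short "${domain}" | wc -c                # ... but the answer section is empty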
# Check if this domain is blocked by Pi-hole but only if the domain is not a # Check if this domain is blocked by Pi-hole but only if the domain is not a
# local file or empty # local file or empty
if [[ $url != "file"* ]] && [[ -n "${domain}" ]]; then if [[ $url != "file"* ]] && [[ -n "${domain}" ]]; then
@ -717,7 +732,7 @@ gravity_DownloadBlocklistFromUrl() {
fi fi
echo -e "${OVER} ${CROSS} ${str} ${domain} is blocked by one of your lists. Using DNS server ${upstream} instead" echo -e "${OVER} ${CROSS} ${str} ${domain} is blocked by one of your lists. Using DNS server ${upstream} instead"
echo -ne " ${INFO} ${str} Pending..." echo -ne " ${INFO} ${str} Pending..."
customUpstreamResolver="--resolve $domain:$port:$ip" cmd_ext="--resolve $domain:$port:$ip"
fi fi
fi fi
@ -750,12 +765,13 @@ gravity_DownloadBlocklistFromUrl() {
# Check for allowed protocols # Check for allowed protocols
if [[ $url != "http"* && $url != "https"* && $url != "file"* && $url != "ftp"* && $url != "ftps"* && $url != "sftp"* ]]; then if [[ $url != "http"* && $url != "https"* && $url != "file"* && $url != "ftp"* && $url != "ftps"* && $url != "sftp"* ]]; then
echo -e "${OVER} ${CROSS} ${str} Invalid protocol specified. Ignoring list." echo -e "${OVER} ${CROSS} ${str} Invalid protocol specified. Ignoring list."
echo -e " Ensure your URL starts with a valid protocol like http:// , https:// or file:// ." echo -e "Ensure your URL starts with a valid protocol like http:// , https:// or file:// ."
download=false download=false
fi fi
if [[ "${download}" == true ]]; then if [[ "${download}" == true ]]; then
httpCode=$(curl --connect-timeout ${curl_connect_timeout} -s -L ${compression:+${compression}} ${customUpstreamResolver:+${customUpstreamResolver}} "${modifiedOptions[@]}" -w "%{http_code}" "${url}" -o "${listCurlBuffer}" 2>/dev/null) # shellcheck disable=SC2086
httpCode=$(curl --connect-timeout ${curl_connect_timeout} -s -L ${compression} ${cmd_ext} ${modifiedOptions} -w "%{http_code}" "${url}" -o "${listCurlBuffer}" 2>/dev/null)
fi fi
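The master side switches from the space-separated option string used in v6.0 (which needed the shellcheck SC2086 suppression) to a bash array, avoiding word-splitting surprises once a value contains spaces; a small illustration (hypothetical file name):
opts_string="--etag-save /tmp/my list.etag"   # splits into three words when expanded unquoted
opts_array=(--etag-save "/tmp/my list.etag")  # stays two arguments
printf '<%s>\n' ${opts_string}                # shows the unwanted split
printf '<%s>\n' "${opts_array[@]}"            # each element preserved intact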
case $url in case $url in
@ -805,11 +821,11 @@ gravity_DownloadBlocklistFromUrl() {
done="true" done="true"
# Check if $listCurlBuffer is a non-zero length file # Check if $listCurlBuffer is a non-zero length file
elif [[ -s "${listCurlBuffer}" ]]; then elif [[ -s "${listCurlBuffer}" ]]; then
# Move the downloaded list to the final location # Determine if blocklist is non-standard and parse as appropriate
mv "${listCurlBuffer}" "${saveLocation}" gravity_ParseFileIntoDomains "${listCurlBuffer}" "${saveLocation}"
# Ensure the file has the correct permissions # Remove curl buffer file after its use
fix_owner_permissions "${saveLocation}" rm "${listCurlBuffer}"
# Compare lists if they are identical # Compare lists if are they identical
compareLists "${adlistID}" "${saveLocation}" compareLists "${adlistID}" "${saveLocation}"
# Add domains to database table file # Add domains to database table file
pihole-FTL "${gravity_type}" parseList "${saveLocation}" "${gravityTEMPfile}" "${adlistID}" pihole-FTL "${gravity_type}" parseList "${saveLocation}" "${gravityTEMPfile}" "${adlistID}"
@ -824,13 +840,13 @@ gravity_DownloadBlocklistFromUrl() {
if [[ "${done}" != "true" ]]; then if [[ "${done}" != "true" ]]; then
# Determine if cached list has read permission # Determine if cached list has read permission
if [[ -r "${saveLocation}" ]]; then if [[ -r "${saveLocation}" ]]; then
echo -e " ${CROSS} List download failed: ${COL_GREEN}using previously cached list${COL_NC}" echo -e " ${CROSS} List download failed: ${COL_LIGHT_GREEN}using previously cached list${COL_NC}"
# Set list status to "download-failed/cached" # Set list status to "download-failed/cached"
database_adlist_status "${adlistID}" "3" database_adlist_status "${adlistID}" "3"
# Add domains to database table file # Add domains to database table file
pihole-FTL "${gravity_type}" parseList "${saveLocation}" "${gravityTEMPfile}" "${adlistID}" pihole-FTL "${gravity_type}" parseList "${saveLocation}" "${gravityTEMPfile}" "${adlistID}"
else else
echo -e " ${CROSS} List download failed: ${COL_RED}no cached list available${COL_NC}" echo -e " ${CROSS} List download failed: ${COL_LIGHT_RED}no cached list available${COL_NC}"
# Manually reset these two numbers because we do not call parseList here # Manually reset these two numbers because we do not call parseList here
database_adlist_number "${adlistID}" 0 0 database_adlist_number "${adlistID}" 0 0
database_adlist_status "${adlistID}" "4" database_adlist_status "${adlistID}" "4"
@ -838,6 +854,37 @@ gravity_DownloadBlocklistFromUrl() {
fi fi
} }
# Parse source files into domains format
gravity_ParseFileIntoDomains() {
local src="${1}" destination="${2}"
# Remove comments and print only the domain name
# Most of the lists downloaded are already in hosts file format but the spacing/formatting is not contiguous
# This helps with that and makes it easier to read
# It also helps with debugging so each stage of the script can be researched more in depth
# 1) Convert all characters to lowercase
tr '[:upper:]' '[:lower:]' <"${src}" >"${destination}"
# 2) Remove carriage returns
# 3) Remove lines starting with ! (ABP Comments)
# 4) Remove lines starting with [ (ABP Header)
# 5) Remove lines containing ABP extended CSS selectors ("##", "#$#", "#@#", "#?#") and Adguard JavaScript (#%#) preceded by a letter
# 6) Remove comments (text starting with "#", include possible spaces before the hash sign)
# 7) Remove leading tabs, spaces, etc. (Also removes leading IP addresses)
# 8) Remove empty lines
sed -i -r \
-e 's/\r$//' \
-e 's/\s*!.*//g' \
-e 's/\s*\[.*//g' \
-e '/[a-z]\#[$?@%]{0,3}\#/d' \
-e 's/\s*#.*//g' \
-e 's/^.*\s+//g' \
-e '/^$/d' "${destination}"
fix_owner_permissions "${destination}"
}
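gravity_ParseFileIntoDomains exists only on the v6.0 side; to see what its tr and sed pipeline does, it can be exercised against a tiny sample list (hypothetical file names):
printf '0.0.0.0 Ads.Example.COM  # tracker\n! ABP comment line\n\n' > /tmp/sample.list
tr '[:upper:]' '[:lower:]' < /tmp/sample.list > /tmp/sample.domains
sed -i -r -e 's/\r$//' -e 's/\s*!.*//g' -e 's/\s*\[.*//g' \
    -e '/[a-z]\#[$?@%]{0,3}\#/d' -e 's/\s*#.*//g' \
    -e 's/^.*\s+//g' -e '/^$/d' /tmp/sample.domains
cat /tmp/sample.domains   # prints: ads.example.com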
# Report number of entries in a table # Report number of entries in a table
gravity_Table_Count() { gravity_Table_Count() {
local table="${1}" local table="${1}"
@ -854,7 +901,7 @@ gravity_Table_Count() {
fi fi
} }
# Output count of denied and allowed domains and regex filters # Output count of blacklisted domains and regex filters
gravity_ShowCount() { gravity_ShowCount() {
# Here we use the table "gravity" instead of the view "vw_gravity" for speed. # Here we use the table "gravity" instead of the view "vw_gravity" for speed.
# It's safe to replace it here, because right after a gravity run both will show exactly the same number of domains. # It's safe to replace it here, because right after a gravity run both will show exactly the same number of domains.
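Both objects can be queried directly to confirm they agree right after a run; a quick check against the default database path noted earlier (hypothetical if GRAVITYDB was relocated, assumes the sqlite3 CLI is installed):
sqlite3 /etc/pihole/gravity.db "SELECT COUNT(*) FROM gravity;"
sqlite3 /etc/pihole/gravity.db "SELECT COUNT(*) FROM vw_gravity;"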
@ -867,7 +914,7 @@ gravity_ShowCount() {
# Trap Ctrl-C # Trap Ctrl-C
gravity_Trap() { gravity_Trap() {
trap '{ echo -e "\\n\\n ${INFO} ${COL_RED}User-abort detected${COL_NC}"; gravity_Cleanup "error"; }' INT trap '{ echo -e "\\n\\n ${INFO} ${COL_LIGHT_RED}User-abort detected${COL_NC}"; gravity_Cleanup "error"; }' INT
} }
# Clean up after Gravity upon exit or cancellation # Clean up after Gravity upon exit or cancellation
@ -885,13 +932,13 @@ gravity_Cleanup() {
# invalid_domains location # invalid_domains location
rm "${GRAVITY_TMPDIR}"/*.ph-non-domains 2>/dev/null rm "${GRAVITY_TMPDIR}"/*.ph-non-domains 2>/dev/null
# Ensure this function only runs when gravity_DownloadBlocklists() has completed # Ensure this function only runs when gravity_SetDownloadOptions() has completed
if [[ "${DownloadBlocklists_done:-}" == true ]]; then if [[ "${gravity_Blackbody:-}" == true ]]; then
# Remove any unused .domains/.etag/.sha files # Remove any unused .domains files
for file in "${listsCacheDir}"/*."${domainsExtension}"; do for file in "${piholeDir}"/*."${domainsExtension}"; do
# If list is not in active array, then remove it and all associated files # If list is not in active array, then remove it
if [[ ! "${activeDomains[*]}" == *"${file}"* ]]; then if [[ ! "${activeDomains[*]}" == *"${file}"* ]]; then
rm -f "${file}"* 2>/dev/null || rm -f "${file}" 2>/dev/null ||
echo -e " ${CROSS} Failed to remove ${file##*/}" echo -e " ${CROSS} Failed to remove ${file##*/}"
fi fi
done done
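The removal test above leans on bash substring matching against the flattened array ${activeDomains[*]}; a minimal sketch of the idiom (hypothetical file names):
activeDomains=(/etc/pihole/listsCache/list.1.example.com.domains)
file="/etc/pihole/listsCache/list.9.stale.example.net.domains"
if [[ ! "${activeDomains[*]}" == *"${file}"* ]]; then
    echo "${file} is not referenced by any enabled adlist, safe to remove"
fi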
@ -1025,7 +1072,7 @@ migrate_to_listsCache_dir() {
# If not, we need to migrate the old files to the new directory # If not, we need to migrate the old files to the new directory
local str="Migrating the list's cache directory to new location" local str="Migrating the list's cache directory to new location"
echo -ne " ${INFO} ${str}..." echo -ne " ${INFO} ${str}..."
mkdir -p "${listsCacheDir}" && chown pihole:pihole "${listsCacheDir}" mkdir -p "${listsCacheDir}"
# Move the old files to the new directory # Move the old files to the new directory
if mv "${piholeDir}"/list.* "${listsCacheDir}/" 2>/dev/null; then if mv "${piholeDir}"/list.* "${listsCacheDir}/" 2>/dev/null; then
@ -1035,7 +1082,7 @@ migrate_to_listsCache_dir() {
fi fi
# Update the list's paths in the corresponding .sha1 files to the new location # Update the list's paths in the corresponding .sha1 files to the new location
sed -i "s|${piholeDir}/|${listsCacheDir}/|g" "${listsCacheDir}"/*.sha1 2>/dev/null sed -i "s|${piholeDir}/|${listsCacheDir}/|g" "${listsCacheDir}"/*.sha1
} }
helpFunc() { helpFunc() {
@ -1084,19 +1131,13 @@ for var in "$@"; do
"-t" | "--timeit") timed=true ;; "-t" | "--timeit") timed=true ;;
"-r" | "--repair") repairSelector "$3" ;; "-r" | "--repair") repairSelector "$3" ;;
"-u" | "--upgrade") "-u" | "--upgrade")
upgrade_gravityDB "${gravityDBfile}" upgrade_gravityDB "${gravityDBfile}" "${piholeDir}"
exit 0 exit 0
;; ;;
"-h" | "--help") helpFunc ;; "-h" | "--help") helpFunc ;;
esac esac
done done
# Check if DNS is available, no need to do any database manipulation if we're not able to download adlists
if ! timeit gravity_CheckDNSResolutionAvailable; then
echo -e " ${CROSS} No DNS resolution available. Please contact support."
exit 1
fi
# Remove OLD (backup) gravity file, if it exists # Remove OLD (backup) gravity file, if it exists
if [[ -f "${gravityOLDfile}" ]]; then if [[ -f "${gravityOLDfile}" ]]; then
rm "${gravityOLDfile}" rm "${gravityOLDfile}"
@ -1130,13 +1171,18 @@ fi
if [[ "${forceDelete:-}" == true ]]; then if [[ "${forceDelete:-}" == true ]]; then
str="Deleting existing list cache" str="Deleting existing list cache"
echo -ne " ${INFO} ${str}..." echo -ne "${INFO} ${str}..."
rm "${listsCacheDir}/list.*" 2>/dev/null || true rm "${listsCacheDir}/list.*" 2>/dev/null || true
echo -e "${OVER} ${TICK} ${str}" echo -e "${OVER} ${TICK} ${str}"
fi fi
# Gravity downloads blocklists next # Gravity downloads blocklists next
if ! timeit gravity_CheckDNSResolutionAvailable; then
echo -e " ${CROSS} Can not complete gravity update, no DNS is available. Please contact support."
exit 1
fi
if ! gravity_DownloadBlocklists; then if ! gravity_DownloadBlocklists; then
echo -e " ${CROSS} Unable to create gravity database. Please try again later. If the problem persists, please contact support." echo -e " ${CROSS} Unable to create gravity database. Please try again later. If the problem persists, please contact support."
exit 1 exit 1

View File

@ -23,7 +23,7 @@ pihole -r
.br .br
\fBpihole -g\fR \fBpihole -g\fR
.br .br
\fBpihole\fR \fB-q\fR [options] \fBpihole\fR -\fBq\fR [options]
.br .br
\fBpihole\fR \fB-l\fR (\fBon|off|off noflush\fR) \fBpihole\fR \fB-l\fR (\fBon|off|off noflush\fR)
.br .br
@ -43,7 +43,7 @@ pihole -r
.br .br
\fBpihole\fR \fBcheckout\fR repo [branch] \fBpihole\fR \fBcheckout\fR repo [branch]
.br .br
\fBpihole\fR \fBapi\fR [verbose] endpoint \fBpihole\fR \api\fR endpoint
.br .br
\fBpihole\fR \fBhelp\fR \fBpihole\fR \fBhelp\fR
.br .br
@ -105,9 +105,9 @@ Available commands and options:
Flush the Pi-hole log Flush the Pi-hole log
.br .br
\fB-r, repair\fR \fB-r, reconfigure\fR
.br .br
Repair Pi-hole subsystems Reconfigure or Repair Pi-hole subsystems
.br .br
\fB-t, tail\fR [arg] \fB-t, tail\fR [arg]
@ -234,14 +234,10 @@ Available commands and options:
branchname Update subsystems to the specified branchname branchname Update subsystems to the specified branchname
.br .br
\fBapi\fR [verbose] endpoint \fBapi\fR endpoint
.br .br
Query the Pi-hole API at <endpoint> Query the Pi-hole API at <endpoint>
.br .br
verbose Show authentication and status messages
.br
.SH "EXAMPLE" .SH "EXAMPLE"
Some usage examples Some usage examples
@ -268,7 +264,7 @@ Allow-/denylist manipulation
\fBpihole --regex "ad.*\\.example\\.com$"\fR \fBpihole --regex "ad.*\\.example\\.com$"\fR
.br .br
Adds "ad.*\\.example\\.com$" to the regex denylist. Adds "ad.*\\.example\\.com$" to the regex blacklist.
Would block all subdomains of example.com which start with "ad" Would block all subdomains of example.com which start with "ad"
.br .br
@ -317,10 +313,9 @@ Switching Pi-hole subsystem branches
Switch to core development branch Switch to core development branch
.br .br
\fBpihole networkflush\fR \fBpihole arpflush\fR
.br .br
Flush information stored in Pi-hole's network table Flush information stored in Pi-hole's network tables
Add '--arp' to additionally flush the ARP table
.br .br
\fBpihole api stats/summary\fR \fBpihole api stats/summary\fR
@ -328,11 +323,6 @@ Switching Pi-hole subsystem branches
Queries FTL for the stats/summary endpoint Queries FTL for the stats/summary endpoint
.br .br
\fBpihole api verbose stats/summary\fR
.br
Same as above, but shows authentication and status messages
.br
.SH "COLOPHON" .SH "COLOPHON"
Get sucked into the latest news and community activity by entering Pi-hole's orbit. Information about Pi-hole, and the latest version of the software can be found at https://pi-hole.net. Get sucked into the latest news and community activity by entering Pi-hole's orbit. Information about Pi-hole, and the latest version of the software can be found at https://pi-hole.net.

135
pihole
View File

@ -9,7 +9,7 @@
# This file is copyright under the latest version of the EUPL. # This file is copyright under the latest version of the EUPL.
# Please see LICENSE file for your rights under this license. # Please see LICENSE file for your rights under this license.
PI_HOLE_SCRIPT_DIR="/opt/pihole" readonly PI_HOLE_SCRIPT_DIR="/opt/pihole"
# PI_HOLE_BIN_DIR is not readonly here because in some functions (checkout), # PI_HOLE_BIN_DIR is not readonly here because in some functions (checkout),
# they might get set again when the installer is sourced. This causes an # they might get set again when the installer is sourced. This causes an
@ -17,16 +17,13 @@ PI_HOLE_SCRIPT_DIR="/opt/pihole"
PI_HOLE_BIN_DIR="/usr/local/bin" PI_HOLE_BIN_DIR="/usr/local/bin"
readonly colfile="${PI_HOLE_SCRIPT_DIR}/COL_TABLE" readonly colfile="${PI_HOLE_SCRIPT_DIR}/COL_TABLE"
# shellcheck source=./advanced/Scripts/COL_TABLE
source "${colfile}" source "${colfile}"
utilsfile="${PI_HOLE_SCRIPT_DIR}/utils.sh" readonly utilsfile="${PI_HOLE_SCRIPT_DIR}/utils.sh"
# shellcheck source=./advanced/Scripts/utils.sh
source "${utilsfile}" source "${utilsfile}"
# Source api functions # Source api functions
readonly apifile="${PI_HOLE_SCRIPT_DIR}/api.sh" readonly apifile="${PI_HOLE_SCRIPT_DIR}/api.sh"
# shellcheck source=./advanced/Scripts/api.sh
source "${apifile}" source "${apifile}"
versionsfile="/etc/pihole/versions" versionsfile="/etc/pihole/versions"
@ -34,7 +31,6 @@ if [ -f "${versionsfile}" ]; then
# Only source versionsfile if the file exists # Only source versionsfile if the file exists
# fixes a warning during installation where versionsfile does not exist yet # fixes a warning during installation where versionsfile does not exist yet
# but gravity calls `pihole -status` and thereby sourcing the file # but gravity calls `pihole -status` and thereby sourcing the file
# shellcheck source=/dev/null
source "${versionsfile}" source "${versionsfile}"
fi fi
@ -96,18 +92,8 @@ flushFunc() {
exit 0 exit 0
} }
# Deprecated function, should be removed in the future
# use networkFlush instead
arpFunc() { arpFunc() {
shift "${PI_HOLE_SCRIPT_DIR}"/piholeARPTable.sh "$@"
echo -e " ${INFO} The 'arpflush' command is deprecated, use 'networkflush' instead"
"${PI_HOLE_SCRIPT_DIR}"/piholeNetworkFlush.sh "$@"
exit 0
}
networkFlush() {
shift
"${PI_HOLE_SCRIPT_DIR}"/piholeNetworkFlush.sh "$@"
exit 0 exit 0
} }
@ -121,11 +107,11 @@ updatePiholeFunc() {
fi fi
} }
repairPiholeFunc() { reconfigurePiholeFunc() {
if [ -n "${DOCKER_VERSION}" ]; then if [ -n "${DOCKER_VERSION}" ]; then
unsupportedFunc unsupportedFunc
else else
/etc/.pihole/automated\ install/basic-install.sh --repair /etc/.pihole/automated\ install/basic-install.sh --reconfigure
exit 0; exit 0;
fi fi
} }
@ -157,11 +143,10 @@ uninstallFunc() {
versionFunc() { versionFunc() {
exec "${PI_HOLE_SCRIPT_DIR}"/version.sh exec "${PI_HOLE_SCRIPT_DIR}"/version.sh
exit 0
} }
reloadDNS() { reloadDNS() {
local svcOption svc str output status pid icon FTL_PID_FILE sigrtmin local svcOption svc str output status pid icon FTL_PID_FILE
svcOption="${1:-reload}" svcOption="${1:-reload}"
# get the current path to the pihole-FTL.pid # get the current path to the pihole-FTL.pid
@ -180,10 +165,7 @@ reloadDNS() {
str="FTL is not running" str="FTL is not running"
icon="${INFO}" icon="${INFO}"
else else
sigrtmin="$(pihole-FTL sigrtmin 2>/dev/null)" svc="kill -RTMIN ${pid}"
# Make sure sigrtmin is a number, otherwise fallback to RTMIN
[[ "${sigrtmin}" =~ ^[0-9]+$ ]] || unset sigrtmin
svc="kill -${sigrtmin:-RTMIN} ${pid}"
str="Reloading DNS lists" str="Reloading DNS lists"
icon="${TICK}" icon="${TICK}"
fi fi
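The master side asks FTL for its real-time signal number and falls back to the symbolic RTMIN name when the query yields anything non-numeric; a dry-run sketch of the fallback expansion (hypothetical PID):
pid=12345                                          # hypothetical pihole-FTL PID
sigrtmin="$(pihole-FTL sigrtmin 2>/dev/null)"      # e.g. "34" on glibc systems
[[ "${sigrtmin}" =~ ^[0-9]+$ ]] || unset sigrtmin  # discard empty or non-numeric output
echo "would run: kill -${sigrtmin:-RTMIN} ${pid}"  # ${var:-default} supplies RTMIN when unset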
@ -252,7 +234,7 @@ Time:
fi fi
if [[ ${error} == true ]];then if [[ ${error} == true ]];then
echo -e " ${COL_RED}Unknown format for blocking timer!${COL_NC}" echo -e " ${COL_LIGHT_RED}Unknown format for blocking timer!${COL_NC}"
echo -e " Try 'pihole disable --help' for more information." echo -e " Try 'pihole disable --help' for more information."
exit 1 exit 1
fi fi
@ -265,20 +247,17 @@ Time:
data=$(PostFTLData "dns/blocking" "{ \"blocking\": ${1}, \"timer\": ${tt} }") data=$(PostFTLData "dns/blocking" "{ \"blocking\": ${1}, \"timer\": ${tt} }")
# Check the response # Check the response
local extra timer local extra=" forever"
extra=" forever" local timer="$(echo "${data}"| jq --raw-output '.timer' )"
timer="$(echo "${data}"| jq --raw-output '.timer' )"
if [[ "${timer}" != "null" ]]; then if [[ "${timer}" != "null" ]]; then
extra=" for ${timer}s" extra=" for ${timer}s"
fi fi
local str local str="Pi-hole $(echo "${data}" | jq --raw-output '.blocking')${extra}"
str="Pi-hole $(echo "${data}" | jq --raw-output '.blocking')${extra}"
# Logout from the API # Logout from the API
LogoutAPI LogoutAPI
echo -e "${OVER} ${TICK} ${str}" echo -e "${OVER} ${TICK} ${str}"
exit 0
} }
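The response handling above only needs two fields from the returned JSON; a small jq illustration with a made-up payload of the same shape:
data='{"blocking": "disabled", "timer": 300}'               # hypothetical API response
timer="$(echo "${data}" | jq --raw-output '.timer')"        # -> 300
state="$(echo "${data}" | jq --raw-output '.blocking')"     # -> disabled
if [[ "${timer}" != "null" ]]; then extra=" for ${timer}s"; else extra=" forever"; fi
echo "Pi-hole ${state}${extra}"                             # -> Pi-hole disabled for 300s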
piholeLogging() { piholeLogging() {
@ -308,7 +287,7 @@ Options:
echo -e " ${INFO} Enabling logging..." echo -e " ${INFO} Enabling logging..."
local str="Logging has been enabled!" local str="Logging has been enabled!"
else else
echo -e " ${COL_RED}Invalid option${COL_NC} echo -e " ${COL_LIGHT_RED}Invalid option${COL_NC}
Try 'pihole logging --help' for more information." Try 'pihole logging --help' for more information."
exit 1 exit 1
fi fi
@ -396,22 +375,20 @@ statusFunc() {
tailFunc() { tailFunc() {
# Warn user if Pi-hole's logging is disabled # Warn user if Pi-hole's logging is disabled
local logging_enabled local logging_enabled=$(getFTLConfigValue dns.queryLogging)
logging_enabled=$(getFTLConfigValue dns.queryLogging)
if [[ "${logging_enabled}" != "true" ]]; then if [[ "${logging_enabled}" != "true" ]]; then
echo " ${CROSS} Warning: Query logging is disabled" echo " ${CROSS} Warning: Query logging is disabled"
fi fi
echo -e " ${INFO} Press Ctrl-C to exit" echo -e " ${INFO} Press Ctrl-C to exit"
# Get logfile path # Get logfile path
LOGFILE=$(getFTLConfigValue files.log.dnsmasq) readonly LOGFILE=$(getFTLConfigValue files.log.dnsmasq)
readonly LOGFILE
# Strip date from each line # Strip date from each line
# Color blocklist/denylist/wildcard entries as red # Color blocklist/denylist/wildcard entries as red
# Color A/AAAA/DHCP strings as white # Color A/AAAA/DHCP strings as white
# Color everything else as gray # Color everything else as gray
tail -f $LOGFILE | grep --line-buffered -- "${1}" | sed -E \ tail -f $LOGFILE | grep --line-buffered "${1}" | sed -E \
-e "s,($(date +'%b %d ')| dnsmasq\[[0-9]*\]),,g" \ -e "s,($(date +'%b %d ')| dnsmasq\[[0-9]*\]),,g" \
-e "s,(.*(denied |gravity blocked ).*),${COL_RED}&${COL_NC}," \ -e "s,(.*(denied |gravity blocked ).*),${COL_RED}&${COL_NC}," \
-e "s,.*(query\\[A|DHCP).*,${COL_NC}&${COL_NC}," \ -e "s,.*(query\\[A|DHCP).*,${COL_NC}&${COL_NC}," \
@ -421,10 +398,7 @@ tailFunc() {
piholeCheckoutFunc() { piholeCheckoutFunc() {
if [ -n "${DOCKER_VERSION}" ]; then if [ -n "${DOCKER_VERSION}" ]; then
echo -e "${CROSS} Function not supported in Docker images" unsupportedFunc
echo "Please build a custom image following the steps at"
echo "https://github.com/pi-hole/docker-pi-hole?tab=readme-ov-file#building-the-image-locally"
exit 0
else else
if [[ "$2" == "-h" ]] || [[ "$2" == "--help" ]]; then if [[ "$2" == "-h" ]] || [[ "$2" == "--help" ]]; then
echo "Switch Pi-hole subsystems to a different GitHub branch echo "Switch Pi-hole subsystems to a different GitHub branch
@ -446,7 +420,6 @@ piholeCheckoutFunc() {
exit 0 exit 0
fi fi
#shellcheck source=./advanced/Scripts/piholeCheckout.sh
source "${PI_HOLE_SCRIPT_DIR}"/piholeCheckout.sh source "${PI_HOLE_SCRIPT_DIR}"/piholeCheckout.sh
shift shift
checkout "$@" checkout "$@"
@ -503,12 +476,11 @@ Debugging Options:
Add '-c' or '--check-database' to include a Pi-hole database integrity check Add '-c' or '--check-database' to include a Pi-hole database integrity check
Add '-a' to automatically upload the log to tricorder.pi-hole.net Add '-a' to automatically upload the log to tricorder.pi-hole.net
-f, flush Flush the Pi-hole log -f, flush Flush the Pi-hole log
-r, repair Repair Pi-hole subsystems -r, reconfigure Reconfigure or Repair Pi-hole subsystems
-t, tail [arg] View the live output of the Pi-hole log. -t, tail [arg] View the live output of the Pi-hole log.
Add an optional argument to filter the log Add an optional argument to filter the log
(regular expressions are supported) (regular expressions are supported)
api <endpoint> Query the Pi-hole API at <endpoint> api <endpoint> Query the Pi-hole API at <endpoint>
Precede <endpoint> with 'verbose' option to show authentication and status messages
Options: Options:
@ -534,8 +506,7 @@ Options:
reloadlists Update the lists WITHOUT flushing the cache or restarting the DNS server reloadlists Update the lists WITHOUT flushing the cache or restarting the DNS server
checkout Switch Pi-hole subsystems to a different GitHub branch checkout Switch Pi-hole subsystems to a different GitHub branch
Add '-h' for more info on checkout usage Add '-h' for more info on checkout usage
networkflush Flush information stored in Pi-hole's network tables arpflush Flush information stored in Pi-hole's network tables";
Add '--arp' to additionally flush the ARP table ";
exit 0 exit 0
} }
@ -544,7 +515,7 @@ if [[ $# = 0 ]]; then
fi fi
# functions that do not require sudo power # functions that do not require sudo power
need_root= need_root=1
case "${1}" in case "${1}" in
"-h" | "help" | "--help" ) helpFunc;; "-h" | "help" | "--help" ) helpFunc;;
"-v" | "version" ) versionFunc;; "-v" | "version" ) versionFunc;;
@ -552,32 +523,31 @@ case "${1}" in
"-q" | "query" ) queryFunc "$@";; "-q" | "query" ) queryFunc "$@";;
"status" ) statusFunc "$2";; "status" ) statusFunc "$2";;
"tricorder" ) tricorderFunc;; "tricorder" ) tricorderFunc;;
"allow" | "allowlist" ) listFunc "$@";;
"deny" | "denylist" ) listFunc "$@";;
"--wild" | "wildcard" ) listFunc "$@";;
"--regex" | "regex" ) listFunc "$@";;
"--allow-regex" | "allow-regex" ) listFunc "$@";;
"--allow-wild" | "allow-wild" ) listFunc "$@";;
"enable" ) piholeEnable true "$2";;
"disable" ) piholeEnable false "$2";;
"api" ) shift; apiFunc "$@"; exit 0;;
# we need to add all arguments that require sudo power to not trigger the * argument # we need to add all arguments that require sudo power to not trigger the * argument
"-f" | "flush" ) need_root=true;; "allow" | "allowlist" ) need_root=0;;
"-up" | "updatePihole" ) need_root=true;; "deny" | "denylist" ) need_root=0;;
"-r" | "repair" ) need_root=true;; "--wild" | "wildcard" ) need_root=0;;
"-l" | "logging" ) need_root=true;; "--regex" | "regex" ) need_root=0;;
"uninstall" ) need_root=true;; "--allow-regex" | "allow-regex" ) need_root=0;;
"-d" | "debug" ) need_root=true;; "--allow-wild" | "allow-wild" ) need_root=0;;
"-g" | "updateGravity" ) need_root=true;; "-f" | "flush" ) ;;
"reloaddns" ) need_root=true;; "-up" | "updatePihole" ) ;;
"reloadlists" ) need_root=true;; "-r" | "reconfigure" ) ;;
"setpassword" ) need_root=true;; "-l" | "logging" ) ;;
"checkout" ) need_root=true;; "uninstall" ) ;;
"updatechecker" ) need_root=true;; "enable" ) need_root=0;;
"arpflush" ) need_root=true;; # Deprecated, use networkflush instead "disable" ) need_root=0;;
"networkflush" ) need_root=true;; "-d" | "debug" ) ;;
"-t" | "tail" ) need_root=true;; "-g" | "updateGravity" ) ;;
"reloaddns" ) ;;
"reloadlists" ) ;;
"setpassword" ) ;;
"checkout" ) ;;
"updatechecker" ) ;;
"arpflush" ) ;;
"-t" | "tail" ) ;;
"api" ) need_root=0;;
* ) helpFunc;; * ) helpFunc;;
esac esac
@ -587,31 +557,38 @@ if [[ -z ${USER} ]]; then
USER=$(whoami) USER=$(whoami)
fi fi
# Check if the current user is not root and if the command # Check if the current user is neither root nor pihole and if the command
# requires root. If so, exit with an error message. # requires root. If so, exit with an error message.
# Add an exception for the user "pihole" to allow the webserver running gravity if [[ $EUID -ne 0 && ${USER} != "pihole" && need_root -eq 1 ]];then
if [[ ( $EUID -ne 0 && ${USER} != "pihole" ) && -n "${need_root}" ]]; then echo -e " ${CROSS} The Pi-hole command requires root privileges, try:"
echo -e " ${CROSS} This Pi-hole command requires root privileges, try:"
echo -e " ${COL_GREEN}sudo pihole $*${COL_NC}" echo -e " ${COL_GREEN}sudo pihole $*${COL_NC}"
exit 1 exit 1
fi fi
# Handle redirecting to specific functions based on arguments # Handle redirecting to specific functions based on arguments
case "${1}" in case "${1}" in
"allow" | "allowlist" ) listFunc "$@";;
"deny" | "denylist" ) listFunc "$@";;
"--wild" | "wildcard" ) listFunc "$@";;
"--regex" | "regex" ) listFunc "$@";;
"--allow-regex" | "allow-regex" ) listFunc "$@";;
"--allow-wild" | "allow-wild" ) listFunc "$@";;
"-d" | "debug" ) debugFunc "$@";; "-d" | "debug" ) debugFunc "$@";;
"-f" | "flush" ) flushFunc "$@";; "-f" | "flush" ) flushFunc "$@";;
"-up" | "updatePihole" ) updatePiholeFunc "$@";; "-up" | "updatePihole" ) updatePiholeFunc "$@";;
"-r" | "repair" ) repairPiholeFunc;; "-r" | "reconfigure" ) reconfigurePiholeFunc;;
"-g" | "updateGravity" ) updateGravityFunc "$@";; "-g" | "updateGravity" ) updateGravityFunc "$@";;
"-l" | "logging" ) piholeLogging "$@";; "-l" | "logging" ) piholeLogging "$@";;
"uninstall" ) uninstallFunc;; "uninstall" ) uninstallFunc;;
"enable" ) piholeEnable true "$2";;
"disable" ) piholeEnable false "$2";;
"reloaddns" ) reloadDNS "reload";; "reloaddns" ) reloadDNS "reload";;
"reloadlists" ) reloadDNS "reload-lists";; "reloadlists" ) reloadDNS "reload-lists";;
"setpassword" ) SetWebPassword "$@";; "setpassword" ) SetWebPassword "$@";;
"checkout" ) piholeCheckoutFunc "$@";; "checkout" ) piholeCheckoutFunc "$@";;
"updatechecker" ) shift; updateCheckFunc "$@";; "updatechecker" ) shift; updateCheckFunc "$@";;
"arpflush" ) arpFunc "$@";; # Deprecated, use networkflush instead "arpflush" ) arpFunc "$@";;
"networkflush" ) networkFlush "$@";;
"-t" | "tail" ) tailFunc "$2";; "-t" | "tail" ) tailFunc "$2";;
"api" ) apiFunc "$2";;
* ) helpFunc;; * ) helpFunc;;
esac esac

View File

@ -1,18 +0,0 @@
FROM alpine:3.21
ENV GITDIR=/etc/.pihole
ENV SCRIPTDIR=/opt/pihole
RUN sed -i 's/#\(.*\/community\)/\1/' /etc/apk/repositories
RUN apk --no-cache add bash coreutils curl git jq openrc shadow
RUN mkdir -p $GITDIR $SCRIPTDIR /etc/pihole
ADD . $GITDIR
RUN cp $GITDIR/advanced/Scripts/*.sh $GITDIR/gravity.sh $GITDIR/pihole $GITDIR/automated\ install/*.sh $GITDIR/advanced/Scripts/COL_TABLE $SCRIPTDIR/
ENV PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:$SCRIPTDIR
RUN true && \
chmod +x $SCRIPTDIR/*
ENV SKIP_INSTALL=true
#sed '/# Start the installer/Q' /opt/pihole/basic-install.sh > /opt/pihole/stub_basic-install.sh && \

View File

@ -1,18 +0,0 @@
FROM alpine:3.22
ENV GITDIR=/etc/.pihole
ENV SCRIPTDIR=/opt/pihole
RUN sed -i 's/#\(.*\/community\)/\1/' /etc/apk/repositories
RUN apk --no-cache add bash coreutils curl git jq openrc shadow
RUN mkdir -p $GITDIR $SCRIPTDIR /etc/pihole
ADD . $GITDIR
RUN cp $GITDIR/advanced/Scripts/*.sh $GITDIR/gravity.sh $GITDIR/pihole $GITDIR/automated\ install/*.sh $GITDIR/advanced/Scripts/COL_TABLE $SCRIPTDIR/
ENV PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:$SCRIPTDIR
RUN true && \
chmod +x $SCRIPTDIR/*
ENV SKIP_INSTALL=true
#sed '/# Start the installer/Q' /opt/pihole/basic-install.sh > /opt/pihole/stub_basic-install.sh && \

View File

@ -1,19 +0,0 @@
FROM quay.io/centos/centos:stream10
# Disable SELinux
RUN echo "SELINUX=disabled" > /etc/selinux/config
RUN yum install -y --allowerasing curl git initscripts
ENV GITDIR=/etc/.pihole
ENV SCRIPTDIR=/opt/pihole
RUN mkdir -p $GITDIR $SCRIPTDIR /etc/pihole
ADD . $GITDIR
RUN cp $GITDIR/advanced/Scripts/*.sh $GITDIR/gravity.sh $GITDIR/pihole $GITDIR/automated\ install/*.sh $GITDIR/advanced/Scripts/COL_TABLE $SCRIPTDIR/
ENV PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:$SCRIPTDIR
RUN true && \
chmod +x $SCRIPTDIR/*
ENV SKIP_INSTALL=true
#sed '/# Start the installer/Q' /opt/pihole/basic-install.sh > /opt/pihole/stub_basic-install.sh && \

View File

@ -15,5 +15,6 @@ RUN true && \
chmod +x $SCRIPTDIR/* chmod +x $SCRIPTDIR/*
ENV SKIP_INSTALL=true ENV SKIP_INSTALL=true
ENV OS_CHECK_DOMAIN_NAME=dev-supportedos.pi-hole.net
#sed '/# Start the installer/Q' /opt/pihole/basic-install.sh > /opt/pihole/stub_basic-install.sh && \ #sed '/# Start the installer/Q' /opt/pihole/basic-install.sh > /opt/pihole/stub_basic-install.sh && \

View File

@ -12,5 +12,6 @@ RUN true && \
chmod +x $SCRIPTDIR/* chmod +x $SCRIPTDIR/*
ENV SKIP_INSTALL=true ENV SKIP_INSTALL=true
ENV OS_CHECK_DOMAIN_NAME=dev-supportedos.pi-hole.net
#sed '/# Start the installer/Q' /opt/pihole/basic-install.sh > /opt/pihole/stub_basic-install.sh && \ #sed '/# Start the installer/Q' /opt/pihole/basic-install.sh > /opt/pihole/stub_basic-install.sh && \

View File

@ -12,5 +12,6 @@ RUN true && \
chmod +x $SCRIPTDIR/* chmod +x $SCRIPTDIR/*
ENV SKIP_INSTALL=true ENV SKIP_INSTALL=true
ENV OS_CHECK_DOMAIN_NAME=dev-supportedos.pi-hole.net
#sed '/# Start the installer/Q' /opt/pihole/basic-install.sh > /opt/pihole/stub_basic-install.sh && \ #sed '/# Start the installer/Q' /opt/pihole/basic-install.sh > /opt/pihole/stub_basic-install.sh && \

View File

@ -1,16 +0,0 @@
FROM buildpack-deps:trixie-scm
ENV GITDIR=/etc/.pihole
ENV SCRIPTDIR=/opt/pihole
RUN mkdir -p $GITDIR $SCRIPTDIR /etc/pihole
ADD . $GITDIR
RUN cp $GITDIR/advanced/Scripts/*.sh $GITDIR/gravity.sh $GITDIR/pihole $GITDIR/automated\ install/*.sh $GITDIR/advanced/Scripts/COL_TABLE $SCRIPTDIR/
ENV PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:$SCRIPTDIR
RUN true && \
chmod +x $SCRIPTDIR/*
ENV SKIP_INSTALL=true
#sed '/# Start the installer/Q' /opt/pihole/basic-install.sh > /opt/pihole/stub_basic-install.sh && \

View File

@ -13,5 +13,6 @@ RUN true && \
chmod +x $SCRIPTDIR/* chmod +x $SCRIPTDIR/*
ENV SKIP_INSTALL=true ENV SKIP_INSTALL=true
ENV OS_CHECK_DOMAIN_NAME=dev-supportedos.pi-hole.net
#sed '/# Start the installer/Q' /opt/pihole/basic-install.sh > /opt/pihole/stub_basic-install.sh && \ #sed '/# Start the installer/Q' /opt/pihole/basic-install.sh > /opt/pihole/stub_basic-install.sh && \

View File

@ -13,5 +13,6 @@ RUN true && \
chmod +x $SCRIPTDIR/* chmod +x $SCRIPTDIR/*
ENV SKIP_INSTALL=true ENV SKIP_INSTALL=true
ENV OS_CHECK_DOMAIN_NAME=dev-supportedos.pi-hole.net
#sed '/# Start the installer/Q' /opt/pihole/basic-install.sh > /opt/pihole/stub_basic-install.sh && \ #sed '/# Start the installer/Q' /opt/pihole/basic-install.sh > /opt/pihole/stub_basic-install.sh && \

View File

@ -1,17 +0,0 @@
FROM fedora:42
RUN dnf install -y git initscripts
ENV GITDIR=/etc/.pihole
ENV SCRIPTDIR=/opt/pihole
RUN mkdir -p $GITDIR $SCRIPTDIR /etc/pihole
ADD . $GITDIR
RUN cp $GITDIR/advanced/Scripts/*.sh $GITDIR/gravity.sh $GITDIR/pihole $GITDIR/automated\ install/*.sh $GITDIR/advanced/Scripts/COL_TABLE $SCRIPTDIR/
ENV PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:$SCRIPTDIR
RUN true && \
chmod +x $SCRIPTDIR/*
ENV SKIP_INSTALL=true
#sed '/# Start the installer/Q' /opt/pihole/basic-install.sh > /opt/pihole/stub_basic-install.sh && \

View File

@ -1,17 +0,0 @@
FROM fedora:43
RUN dnf install -y git initscripts
ENV GITDIR=/etc/.pihole
ENV SCRIPTDIR=/opt/pihole
RUN mkdir -p $GITDIR $SCRIPTDIR /etc/pihole
ADD . $GITDIR
RUN cp $GITDIR/advanced/Scripts/*.sh $GITDIR/gravity.sh $GITDIR/pihole $GITDIR/automated\ install/*.sh $GITDIR/advanced/Scripts/COL_TABLE $SCRIPTDIR/
ENV PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:$SCRIPTDIR
RUN true && \
chmod +x $SCRIPTDIR/*
ENV SKIP_INSTALL=true
#sed '/# Start the installer/Q' /opt/pihole/basic-install.sh > /opt/pihole/stub_basic-install.sh && \

View File

@ -12,5 +12,6 @@ RUN true && \
chmod +x $SCRIPTDIR/* chmod +x $SCRIPTDIR/*
ENV SKIP_INSTALL=true ENV SKIP_INSTALL=true
ENV OS_CHECK_DOMAIN_NAME=dev-supportedos.pi-hole.net
#sed '/# Start the installer/Q' /opt/pihole/basic-install.sh > /opt/pihole/stub_basic-install.sh && \ #sed '/# Start the installer/Q' /opt/pihole/basic-install.sh > /opt/pihole/stub_basic-install.sh && \

View File

@ -13,5 +13,6 @@ RUN true && \
chmod +x $SCRIPTDIR/* chmod +x $SCRIPTDIR/*
ENV SKIP_INSTALL=true ENV SKIP_INSTALL=true
ENV OS_CHECK_DOMAIN_NAME=dev-supportedos.pi-hole.net
#sed '/# Start the installer/Q' /opt/pihole/basic-install.sh > /opt/pihole/stub_basic-install.sh && \ #sed '/# Start the installer/Q' /opt/pihole/basic-install.sh > /opt/pihole/stub_basic-install.sh && \

View File

@ -13,5 +13,6 @@ RUN true && \
chmod +x $SCRIPTDIR/* chmod +x $SCRIPTDIR/*
ENV SKIP_INSTALL=true ENV SKIP_INSTALL=true
ENV OS_CHECK_DOMAIN_NAME=dev-supportedos.pi-hole.net
#sed '/# Start the installer/Q' /opt/pihole/basic-install.sh > /opt/pihole/stub_basic-install.sh && \ #sed '/# Start the installer/Q' /opt/pihole/basic-install.sh > /opt/pihole/stub_basic-install.sh && \

View File

@ -1,6 +1,6 @@
pyyaml == 6.0.3 pyyaml == 6.0.2
pytest == 9.0.1 pytest == 8.3.4
pytest-xdist == 3.8.0 pytest-xdist == 3.6.1
pytest-testinfra == 10.2.2 pytest-testinfra == 10.1.1
tox == 4.32.0 tox == 4.24.1
pytest-clarity == 1.0.1 pytest-clarity == 1.0.1

View File

@ -22,7 +22,6 @@ def test_supported_package_manager(host):
# break supported package managers # break supported package managers
host.run("rm -rf /usr/bin/apt-get") host.run("rm -rf /usr/bin/apt-get")
host.run("rm -rf /usr/bin/rpm") host.run("rm -rf /usr/bin/rpm")
host.run("rm -rf /sbin/apk")
package_manager_detect = host.run( package_manager_detect = host.run(
""" """
source /opt/pihole/basic-install.sh source /opt/pihole/basic-install.sh
@ -78,21 +77,10 @@ def test_installPihole_fresh_install_readableFiles(host):
}, },
host, host,
) )
mock_command_2(
"rc-service",
{
"rc-service pihole-FTL enable": ("", "0"),
"rc-service pihole-FTL restart": ("", "0"),
"rc-service pihole-FTL start": ("", "0"),
"*": ('echo "rc-service call with $@"', "0"),
},
host,
)
# try to install man # try to install man
host.run("command -v apt-get > /dev/null && apt-get install -qq man") host.run("command -v apt-get > /dev/null && apt-get install -qq man")
host.run("command -v dnf > /dev/null && dnf install -y man") host.run("command -v dnf > /dev/null && dnf install -y man")
host.run("command -v yum > /dev/null && yum install -y man") host.run("command -v yum > /dev/null && yum install -y man")
host.run("command -v apk > /dev/null && apk add mandoc man-pages")
# Workaround to get FTLv6 installed until it reaches master branch # Workaround to get FTLv6 installed until it reaches master branch
host.run('echo "' + FTL_BRANCH + '" > /etc/pihole/ftlbranch') host.run('echo "' + FTL_BRANCH + '" > /etc/pihole/ftlbranch')
install = host.run( install = host.run(
@ -101,8 +89,10 @@ def test_installPihole_fresh_install_readableFiles(host):
export DEBIAN_FRONTEND=noninteractive export DEBIAN_FRONTEND=noninteractive
umask 0027 umask 0027
runUnattended=true runUnattended=true
useUpdateVars=true
source /opt/pihole/basic-install.sh > /dev/null source /opt/pihole/basic-install.sh > /dev/null
runUnattended=true runUnattended=true
useUpdateVars=true
main main
/opt/pihole/pihole-FTL-prestart.sh /opt/pihole/pihole-FTL-prestart.sh
""" """
@ -115,7 +105,7 @@ def test_installPihole_fresh_install_readableFiles(host):
maninstalled = False maninstalled = False
piholeuser = "pihole" piholeuser = "pihole"
exit_status_success = 0 exit_status_success = 0
test_cmd = 'su -s /bin/bash -c "test -{0} {1}" -p {2}' test_cmd = 'su --shell /bin/bash --command "test -{0} {1}" -p {2}'
# check files in /etc/pihole for read, write and execute permission # check files in /etc/pihole for read, write and execute permission
check_etc = test_cmd.format("r", "/etc/pihole", piholeuser) check_etc = test_cmd.format("r", "/etc/pihole", piholeuser)
actual_rc = host.run(check_etc).rc actual_rc = host.run(check_etc).rc
@ -137,6 +127,10 @@ def test_installPihole_fresh_install_readableFiles(host):
check_localversion = test_cmd.format("r", "/etc/pihole/versions", piholeuser) check_localversion = test_cmd.format("r", "/etc/pihole/versions", piholeuser)
actual_rc = host.run(check_localversion).rc actual_rc = host.run(check_localversion).rc
assert exit_status_success == actual_rc assert exit_status_success == actual_rc
# readable logrotate
check_logrotate = test_cmd.format("r", "/etc/pihole/logrotate", piholeuser)
actual_rc = host.run(check_logrotate).rc
assert exit_status_success == actual_rc
# readable macvendor.db # readable macvendor.db
check_macvendor = test_cmd.format("r", "/etc/pihole/macvendor.db", piholeuser) check_macvendor = test_cmd.format("r", "/etc/pihole/macvendor.db", piholeuser)
actual_rc = host.run(check_macvendor).rc actual_rc = host.run(check_macvendor).rc
@ -255,7 +249,6 @@ def test_FTL_detect_no_errors(host, arch, detected_string, supported):
{ {
"-A /bin/sh": ("Tag_CPU_arch: " + arch, "0"), "-A /bin/sh": ("Tag_CPU_arch: " + arch, "0"),
"-A /usr/bin/sh": ("Tag_CPU_arch: " + arch, "0"), "-A /usr/bin/sh": ("Tag_CPU_arch: " + arch, "0"),
"-A /usr/sbin/sh": ("Tag_CPU_arch: " + arch, "0"),
}, },
host, host,
) )
@ -476,6 +469,50 @@ def test_validate_ip(host):
test_address("0.0.0.0#00001", False) test_address("0.0.0.0#00001", False)
def test_os_check_fails(host):
"""Confirms install fails on unsupported OS"""
host.run(
"""
source /opt/pihole/basic-install.sh
package_manager_detect
build_dependency_package
install_dependent_packages
cat <<EOT > /etc/os-release
ID=UnsupportedOS
VERSION_ID="2"
EOT
"""
)
detectOS = host.run(
"""t
source /opt/pihole/basic-install.sh
os_check
"""
)
expected_stdout = "Unsupported OS detected: UnsupportedOS"
assert expected_stdout in detectOS.stdout
def test_os_check_passes(host):
"""Confirms OS meets the requirements"""
host.run(
"""
source /opt/pihole/basic-install.sh
package_manager_detect
build_dependency_package
install_dependent_packages
"""
)
detectOS = host.run(
"""
source /opt/pihole/basic-install.sh
os_check
"""
)
expected_stdout = "Supported OS detected"
assert expected_stdout in detectOS.stdout
def test_package_manager_has_pihole_deps(host): def test_package_manager_has_pihole_deps(host):
"""Confirms OS is able to install the required packages for Pi-hole""" """Confirms OS is able to install the required packages for Pi-hole"""
mock_command("dialog", {"*": ("", "0")}, host) mock_command("dialog", {"*": ("", "0")}, host)
@ -483,7 +520,6 @@ def test_package_manager_has_pihole_deps(host):
""" """
source /opt/pihole/basic-install.sh source /opt/pihole/basic-install.sh
package_manager_detect package_manager_detect
update_package_cache
build_dependency_package build_dependency_package
install_dependent_packages install_dependent_packages
""" """
@ -500,7 +536,6 @@ def test_meta_package_uninstall(host):
""" """
source /opt/pihole/basic-install.sh source /opt/pihole/basic-install.sh
package_manager_detect package_manager_detect
update_package_cache
build_dependency_package build_dependency_package
install_dependent_packages install_dependent_packages
""" """

View File

@ -1,10 +0,0 @@
[tox]
envlist = py3
[testenv:py3]
allowlist_externals = docker
deps = -rrequirements.txt
setenv =
COLUMNS=120
commands = docker buildx build --load --progress plain -f _alpine_3_21.Dockerfile -t pytest_pihole:test_container ../
pytest {posargs:-vv -n auto} ./test_any_automated_install.py ./test_any_utils.py

View File

@ -1,10 +0,0 @@
[tox]
envlist = py3
[testenv:py3]
allowlist_externals = docker
deps = -rrequirements.txt
setenv =
COLUMNS=120
commands = docker buildx build --load --progress plain -f _alpine_3_22.Dockerfile -t pytest_pihole:test_container ../
pytest {posargs:-vv -n auto} ./test_any_automated_install.py ./test_any_utils.py

View File

@ -1,10 +0,0 @@
[tox]
envlist = py3
[testenv:py3]
allowlist_externals = docker
deps = -rrequirements.txt
setenv =
COLUMNS=120
commands = docker buildx build --load --progress plain -f _centos_10.Dockerfile -t pytest_pihole:test_container ../
pytest {posargs:-vv -n auto} ./test_any_automated_install.py ./test_any_utils.py ./test_centos_fedora_common_support.py

View File

@ -1,10 +0,0 @@
[tox]
envlist = py3
[testenv:py3]
allowlist_externals = docker
deps = -rrequirements.txt
setenv =
COLUMNS=120
commands = docker buildx build --load --progress plain -f _debian_13.Dockerfile -t pytest_pihole:test_container ../
pytest {posargs:-vv -n auto} ./test_any_automated_install.py ./test_any_utils.py

View File

@ -1,10 +0,0 @@
[tox]
envlist = py3
[testenv]
allowlist_externals = docker
deps = -rrequirements.txt
setenv =
COLUMNS=120
commands = docker buildx build --load --progress plain -f _fedora_42.Dockerfile -t pytest_pihole:test_container ../
pytest {posargs:-vv -n auto} ./test_any_automated_install.py ./test_any_utils.py ./test_centos_fedora_common_support.py

View File

@ -1,10 +0,0 @@
[tox]
envlist = py3
[testenv]
allowlist_externals = docker
deps = -rrequirements.txt
setenv =
COLUMNS=120
commands = docker buildx build --load --progress plain -f _fedora_43.Dockerfile -t pytest_pihole:test_container ../
pytest {posargs:-vv -n auto} ./test_any_automated_install.py ./test_any_utils.py ./test_centos_fedora_common_support.py