This commit is contained in:
parent
d2205b11a7
commit
02b9aebbe5
341 changed files with 1571 additions and 32574 deletions
|
|
@ -2,7 +2,7 @@ root = "."
|
||||||
tmp_dir = "var"
|
tmp_dir = "var"
|
||||||
|
|
||||||
[build]
|
[build]
|
||||||
cmd = "go build -o ./var/main ./cmd/anubis"
|
cmd = "go build -o ./var/main ./cmd/nuke"
|
||||||
bin = "./var/main"
|
bin = "./var/main"
|
||||||
args = ["--use-remote-address"]
|
args = ["--use-remote-address"]
|
||||||
exclude_dir = ["var", "vendor", "docs", "node_modules"]
|
exclude_dir = ["var", "vendor", "docs", "node_modules"]
|
||||||
|
|
|
||||||
|
|
@ -1,13 +1,13 @@
|
||||||
# Anubis Dev Container
|
# nuke Dev Container
|
||||||
|
|
||||||
Anubis offers a [development container](https://containers.dev/) image in order to make it easier to contribute to the project. This image is based on [Xe/devcontainer-base/go](https://github.com/Xe/devcontainer-base/tree/main/src/go), which is based on Debian Bookworm with the following customizations:
|
nuke offers a [development container](https://containers.dev/) image in order to make it easier to contribute to the project. This image is based on [Xe/devcontainer-base/go](https://github.com/Xe/devcontainer-base/tree/main/src/go), which is based on Debian Bookworm with the following customizations:
|
||||||
|
|
||||||
- [Fish](https://fishshell.com/) as the shell complete with a custom theme
|
- [Fish](https://fishshell.com/) as the shell complete with a custom theme
|
||||||
- [Go](https://go.dev) at the most recent stable version
|
- [Go](https://go.dev) at the most recent stable version
|
||||||
- [Node.js](https://nodejs.org/en) at the most recent stable version
|
- [Node.js](https://nodejs.org/en) at the most recent stable version
|
||||||
- [Atuin](https://atuin.sh/) to sync shell history between your host OS and the development container
|
- [Atuin](https://atuin.sh/) to sync shell history between your host OS and the development container
|
||||||
- [Docker](https://docker.com) to manage and build Anubis container images from inside the development container
|
- [Docker](https://docker.com) to manage and build nuke container images from inside the development container
|
||||||
- [Ko](https://ko.build/) to build production-ready Anubis container images
|
- [Ko](https://ko.build/) to build production-ready nuke container images
|
||||||
- [Neovim](https://neovim.io/) for use with Git
|
- [Neovim](https://neovim.io/) for use with Git
|
||||||
|
|
||||||
This development container is tested and known to work with [Visual Studio Code](https://code.visualstudio.com/). If you run into problems with it outside of VS Code, please file an issue and let us know what editor you are using.
|
This development container is tested and known to work with [Visual Studio Code](https://code.visualstudio.com/). If you run into problems with it outside of VS Code, please file an issue and let us know what editor you are using.
|
||||||
|
|
|
||||||
|
|
@ -2,11 +2,9 @@
|
||||||
// README at: https://github.com/devcontainers/templates/tree/main/src/debian
|
// README at: https://github.com/devcontainers/templates/tree/main/src/debian
|
||||||
{
|
{
|
||||||
"name": "Dev",
|
"name": "Dev",
|
||||||
"dockerComposeFile": [
|
"dockerComposeFile": ["./docker-compose.yaml"],
|
||||||
"./docker-compose.yaml"
|
|
||||||
],
|
|
||||||
"service": "workspace",
|
"service": "workspace",
|
||||||
"workspaceFolder": "/workspace/anubis",
|
"workspaceFolder": "/workspace/nuke",
|
||||||
"postStartCommand": "bash ./.devcontainer/poststart.sh",
|
"postStartCommand": "bash ./.devcontainer/poststart.sh",
|
||||||
"features": {
|
"features": {
|
||||||
"ghcr.io/xe/devcontainer-features/ko:1.1.0": {},
|
"ghcr.io/xe/devcontainer-features/ko:1.1.0": {},
|
||||||
|
|
|
||||||
|
|
@ -14,12 +14,12 @@ services:
|
||||||
|
|
||||||
# VS Code workspace service
|
# VS Code workspace service
|
||||||
workspace:
|
workspace:
|
||||||
image: ghcr.io/techarohq/anubis/devcontainer
|
image: git.sad.ovh/sophie/nuke/devcontainer
|
||||||
build:
|
build:
|
||||||
context: ..
|
context: ..
|
||||||
dockerfile: .devcontainer/Dockerfile
|
dockerfile: .devcontainer/Dockerfile
|
||||||
volumes:
|
volumes:
|
||||||
- ../:/workspace/anubis:cached
|
- ../:/workspace/nuke:cached
|
||||||
environment:
|
environment:
|
||||||
VALKEY_URL: redis://valkey:6379/0
|
VALKEY_URL: redis://valkey:6379/0
|
||||||
#entrypoint: ["/usr/bin/sleep", "infinity"]
|
#entrypoint: ["/usr/bin/sleep", "infinity"]
|
||||||
|
|
|
||||||
4
.github/ISSUE_TEMPLATE/config.yml
vendored
4
.github/ISSUE_TEMPLATE/config.yml
vendored
|
|
@ -1,5 +1,5 @@
|
||||||
blank_issues_enabled: false
|
blank_issues_enabled: false
|
||||||
contact_links:
|
contact_links:
|
||||||
- name: Security
|
- name: Security
|
||||||
url: https://techaro.lol/contact
|
url: https://sad.ovh
|
||||||
about: Do not file security reports here. Email security@techaro.lol.
|
about: Do not file security reports here. Email sophie@sad.ovh.
|
||||||
|
|
|
||||||
11
.github/actions/spelling/expect.txt
vendored
11
.github/actions/spelling/expect.txt
vendored
|
|
@ -7,8 +7,8 @@ alibaba
|
||||||
alrest
|
alrest
|
||||||
amazonbot
|
amazonbot
|
||||||
anthro
|
anthro
|
||||||
anubis
|
nuke
|
||||||
anubistest
|
nuketest
|
||||||
apnic
|
apnic
|
||||||
APNICRANDNETAU
|
APNICRANDNETAU
|
||||||
Applebot
|
Applebot
|
||||||
|
|
@ -335,15 +335,13 @@ tarrif
|
||||||
taviso
|
taviso
|
||||||
tbn
|
tbn
|
||||||
tbr
|
tbr
|
||||||
techaro
|
sophie
|
||||||
techarohq
|
sad.ovh
|
||||||
telegrambot
|
telegrambot
|
||||||
templ
|
templ
|
||||||
templruntime
|
templruntime
|
||||||
testarea
|
testarea
|
||||||
Thancred
|
Thancred
|
||||||
thoth
|
|
||||||
thothmock
|
|
||||||
Tik
|
Tik
|
||||||
Timpibot
|
Timpibot
|
||||||
TLog
|
TLog
|
||||||
|
|
@ -375,7 +373,6 @@ websites
|
||||||
Webzio
|
Webzio
|
||||||
whois
|
whois
|
||||||
wildbase
|
wildbase
|
||||||
withthothmock
|
|
||||||
wolfbeast
|
wolfbeast
|
||||||
wordpress
|
wordpress
|
||||||
workaround
|
workaround
|
||||||
|
|
|
||||||
34
.github/dependabot.yml
vendored
34
.github/dependabot.yml
vendored
|
|
@ -1,34 +0,0 @@
|
||||||
version: 2
|
|
||||||
updates:
|
|
||||||
- package-ecosystem: github-actions
|
|
||||||
directory: /
|
|
||||||
schedule:
|
|
||||||
interval: weekly
|
|
||||||
groups:
|
|
||||||
github-actions:
|
|
||||||
patterns:
|
|
||||||
- "*"
|
|
||||||
cooldown:
|
|
||||||
default-days: 7
|
|
||||||
|
|
||||||
- package-ecosystem: gomod
|
|
||||||
directory: /
|
|
||||||
schedule:
|
|
||||||
interval: weekly
|
|
||||||
groups:
|
|
||||||
gomod:
|
|
||||||
patterns:
|
|
||||||
- "*"
|
|
||||||
cooldown:
|
|
||||||
default-days: 7
|
|
||||||
|
|
||||||
- package-ecosystem: npm
|
|
||||||
directory: /
|
|
||||||
schedule:
|
|
||||||
interval: weekly
|
|
||||||
groups:
|
|
||||||
npm:
|
|
||||||
patterns:
|
|
||||||
- "*"
|
|
||||||
cooldown:
|
|
||||||
default-days: 7
|
|
||||||
72
.github/workflows/asset-verification.yml
vendored
72
.github/workflows/asset-verification.yml
vendored
|
|
@ -1,72 +0,0 @@
|
||||||
name: Asset Build Verification
|
|
||||||
|
|
||||||
on:
|
|
||||||
push:
|
|
||||||
branches: ["main"]
|
|
||||||
pull_request:
|
|
||||||
branches: ["main"]
|
|
||||||
|
|
||||||
permissions:
|
|
||||||
contents: read
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
asset_verification:
|
|
||||||
runs-on: ubuntu-24.04
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
|
||||||
with:
|
|
||||||
persist-credentials: false
|
|
||||||
|
|
||||||
- name: build essential
|
|
||||||
run: |
|
|
||||||
sudo apt-get update
|
|
||||||
sudo apt-get install -y build-essential
|
|
||||||
|
|
||||||
- uses: actions/setup-node@6044e13b5dc448c55e2357c09f80417699197238 # v6.2.0
|
|
||||||
with:
|
|
||||||
node-version: '24.11.0'
|
|
||||||
- uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
|
|
||||||
with:
|
|
||||||
go-version: '1.25.4'
|
|
||||||
|
|
||||||
- name: install node deps
|
|
||||||
run: |
|
|
||||||
npm ci
|
|
||||||
|
|
||||||
- name: Check for uncommitted changes before asset build
|
|
||||||
id: check-changes-before
|
|
||||||
run: |
|
|
||||||
if [[ -n $(git status --porcelain) ]]; then
|
|
||||||
echo "has_changes=true" >> $GITHUB_OUTPUT
|
|
||||||
else
|
|
||||||
echo "has_changes=false" >> $GITHUB_OUTPUT
|
|
||||||
fi
|
|
||||||
|
|
||||||
- name: Fail if there are uncommitted changes before build
|
|
||||||
if: steps.check-changes-before.outputs.has_changes == 'true'
|
|
||||||
run: |
|
|
||||||
echo "There are uncommitted changes before running npm run assets"
|
|
||||||
git status
|
|
||||||
exit 1
|
|
||||||
|
|
||||||
- name: Run asset build
|
|
||||||
run: |
|
|
||||||
npm run assets
|
|
||||||
|
|
||||||
- name: Check for uncommitted changes after asset build
|
|
||||||
id: check-changes-after
|
|
||||||
run: |
|
|
||||||
if [[ -n $(git status --porcelain) ]]; then
|
|
||||||
echo "has_changes=true" >> $GITHUB_OUTPUT
|
|
||||||
else
|
|
||||||
echo "has_changes=false" >> $GITHUB_OUTPUT
|
|
||||||
fi
|
|
||||||
|
|
||||||
- name: Fail if assets generated changes
|
|
||||||
if: steps.check-changes-after.outputs.has_changes == 'true'
|
|
||||||
run: |
|
|
||||||
echo "npm run assets generated uncommitted changes. This indicates the repository has outdated generated files."
|
|
||||||
echo "Please run 'npm run assets' locally and commit the changes."
|
|
||||||
git status
|
|
||||||
git diff
|
|
||||||
exit 1
|
|
||||||
58
.github/workflows/docker-pr.yml
vendored
58
.github/workflows/docker-pr.yml
vendored
|
|
@ -1,58 +0,0 @@
|
||||||
name: Docker image builds (pull requests)
|
|
||||||
|
|
||||||
on:
|
|
||||||
pull_request:
|
|
||||||
branches: ["main"]
|
|
||||||
|
|
||||||
env:
|
|
||||||
DOCKER_METADATA_SET_OUTPUT_ENV: "true"
|
|
||||||
|
|
||||||
permissions:
|
|
||||||
contents: read
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
build:
|
|
||||||
runs-on: ubuntu-24.04
|
|
||||||
steps:
|
|
||||||
- name: Checkout code
|
|
||||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
|
||||||
with:
|
|
||||||
fetch-tags: true
|
|
||||||
fetch-depth: 0
|
|
||||||
persist-credentials: false
|
|
||||||
|
|
||||||
- name: build essential
|
|
||||||
run: |
|
|
||||||
sudo apt-get update
|
|
||||||
sudo apt-get install -y build-essential
|
|
||||||
|
|
||||||
- uses: actions/setup-node@6044e13b5dc448c55e2357c09f80417699197238 # v6.2.0
|
|
||||||
with:
|
|
||||||
node-version: '24.11.0'
|
|
||||||
- uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
|
|
||||||
with:
|
|
||||||
go-version: '1.25.4'
|
|
||||||
|
|
||||||
- uses: ko-build/setup-ko@d006021bd0c28d1ce33a07e7943d48b079944c8d # v0.9
|
|
||||||
|
|
||||||
- name: Docker meta
|
|
||||||
id: meta
|
|
||||||
uses: docker/metadata-action@c299e40c65443455700f0fdfc63efafe5b349051 # v5.10.0
|
|
||||||
with:
|
|
||||||
images: ghcr.io/${{ github.repository }}
|
|
||||||
|
|
||||||
- name: Build and push
|
|
||||||
id: build
|
|
||||||
run: |
|
|
||||||
npm ci
|
|
||||||
npm run container
|
|
||||||
env:
|
|
||||||
PULL_REQUEST_ID: ${{ github.event.number }}
|
|
||||||
DOCKER_REPO: ghcr.io/${{ github.repository }}
|
|
||||||
SLOG_LEVEL: debug
|
|
||||||
|
|
||||||
- run: |
|
|
||||||
echo "Test this with:"
|
|
||||||
echo "docker pull ${DOCKER_IMAGE}"
|
|
||||||
env:
|
|
||||||
DOCKER_IMAGE: ${{ steps.build.outputs.docker_image }}
|
|
||||||
53
.github/workflows/docker.yml
vendored
53
.github/workflows/docker.yml
vendored
|
|
@ -18,10 +18,10 @@ permissions:
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
build:
|
build:
|
||||||
runs-on: ubuntu-24.04
|
runs-on: node-16
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout code
|
- name: Checkout code
|
||||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
uses: http://github.com/actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||||
with:
|
with:
|
||||||
fetch-tags: true
|
fetch-tags: true
|
||||||
fetch-depth: 0
|
fetch-depth: 0
|
||||||
|
|
@ -29,32 +29,49 @@ jobs:
|
||||||
|
|
||||||
- name: build essential
|
- name: build essential
|
||||||
run: |
|
run: |
|
||||||
sudo apt-get update
|
# Add Docker's official GPG key:
|
||||||
sudo apt-get install -y build-essential
|
apt update -y
|
||||||
|
apt install -y ca-certificates curl
|
||||||
|
install -m 0755 -d /etc/apt/keyrings
|
||||||
|
curl -fsSL https://download.docker.com/linux/debian/gpg -o /etc/apt/keyrings/docker.asc
|
||||||
|
chmod a+r /etc/apt/keyrings/docker.asc
|
||||||
|
|
||||||
|
# Add the repository to Apt sources:
|
||||||
|
tee /etc/apt/sources.list.d/docker.sources <<EOF
|
||||||
|
Types: deb
|
||||||
|
URIs: https://download.docker.com/linux/debian
|
||||||
|
Suites: $(. /etc/os-release && echo "$VERSION_CODENAME")
|
||||||
|
Components: stable
|
||||||
|
Signed-By: /etc/apt/keyrings/docker.asc
|
||||||
|
EOF
|
||||||
|
|
||||||
|
apt-get update -y
|
||||||
|
apt-get install -y build-essential jq docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin zstd brotli
|
||||||
- name: Set lowercase image name
|
- name: Set lowercase image name
|
||||||
run: |
|
run: |
|
||||||
echo "IMAGE=ghcr.io/${GITHUB_REPOSITORY,,}" >> $GITHUB_ENV
|
echo "IMAGE=git.sad.ovh/${GITHUB_REPOSITORY,,}" >> $GITHUB_ENV
|
||||||
|
- uses: http://github.com/actions/setup-node@6044e13b5dc448c55e2357c09f80417699197238 # v6.2.0
|
||||||
- uses: actions/setup-node@6044e13b5dc448c55e2357c09f80417699197238 # v6.2.0
|
|
||||||
with:
|
with:
|
||||||
node-version: '24.11.0'
|
node-version: "24.11.0"
|
||||||
- uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
|
- uses: http://github.com/actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
|
||||||
with:
|
with:
|
||||||
go-version: '1.25.4'
|
go-version: "1.24.2" # this is the version in go.mod
|
||||||
|
cache: false
|
||||||
- uses: ko-build/setup-ko@d006021bd0c28d1ce33a07e7943d48b079944c8d # v0.9
|
- uses: http://github.com/ko-build/setup-ko@7e9da37392e224a3f8eb1015b4f7f4b0daaf3c46 # v0.9
|
||||||
|
with:
|
||||||
|
version: v0.18.1
|
||||||
|
env:
|
||||||
|
KO_DOCKER_REPO: git.sad.ovh/sophie/nuke
|
||||||
- name: Log into registry
|
- name: Log into registry
|
||||||
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
|
uses: http://github.com/docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
|
||||||
with:
|
with:
|
||||||
registry: ghcr.io
|
registry: git.sad.ovh
|
||||||
username: ${{ github.repository_owner }}
|
username: ${{ github.repository_owner }}
|
||||||
password: ${{ secrets.GITHUB_TOKEN }}
|
password: ${{ secrets.PAT_DOCKER }}
|
||||||
|
|
||||||
- name: Docker meta
|
- name: Docker meta
|
||||||
id: meta
|
id: meta
|
||||||
uses: docker/metadata-action@c299e40c65443455700f0fdfc63efafe5b349051 # v5.10.0
|
uses: http://github.com/docker/metadata-action@c299e40c65443455700f0fdfc63efafe5b349051 # v5.10.0
|
||||||
with:
|
with:
|
||||||
images: ${{ env.IMAGE }}
|
images: ${{ env.IMAGE }}
|
||||||
|
|
||||||
|
|
@ -68,7 +85,7 @@ jobs:
|
||||||
SLOG_LEVEL: debug
|
SLOG_LEVEL: debug
|
||||||
|
|
||||||
- name: Generate artifact attestation
|
- name: Generate artifact attestation
|
||||||
uses: actions/attest-build-provenance@00014ed6ed5efc5b1ab7f7f34a39eb55d41aa4f8 # v3.1.0
|
uses: http://github.com/actions/attest-build-provenance@00014ed6ed5efc5b1ab7f7f34a39eb55d41aa4f8 # v3.1.0
|
||||||
with:
|
with:
|
||||||
subject-name: ${{ env.IMAGE }}
|
subject-name: ${{ env.IMAGE }}
|
||||||
subject-digest: ${{ steps.build.outputs.digest }}
|
subject-digest: ${{ steps.build.outputs.digest }}
|
||||||
|
|
|
||||||
67
.github/workflows/docs-deploy.yml
vendored
67
.github/workflows/docs-deploy.yml
vendored
|
|
@ -1,67 +0,0 @@
|
||||||
name: Docs deploy
|
|
||||||
|
|
||||||
on:
|
|
||||||
workflow_dispatch:
|
|
||||||
push:
|
|
||||||
branches: ["main"]
|
|
||||||
|
|
||||||
permissions:
|
|
||||||
contents: read
|
|
||||||
packages: write
|
|
||||||
attestations: write
|
|
||||||
id-token: write
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
build:
|
|
||||||
if: github.repository == 'TecharoHQ/anubis'
|
|
||||||
runs-on: ubuntu-24.04
|
|
||||||
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
|
||||||
with:
|
|
||||||
persist-credentials: false
|
|
||||||
|
|
||||||
- name: Set up Docker Buildx
|
|
||||||
uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3.12.0
|
|
||||||
|
|
||||||
- name: Log into registry
|
|
||||||
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
|
|
||||||
with:
|
|
||||||
registry: ghcr.io
|
|
||||||
username: techarohq
|
|
||||||
password: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
|
|
||||||
- name: Docker meta
|
|
||||||
id: meta
|
|
||||||
uses: docker/metadata-action@c299e40c65443455700f0fdfc63efafe5b349051 # v5.10.0
|
|
||||||
with:
|
|
||||||
images: ghcr.io/techarohq/anubis/docs
|
|
||||||
tags: |
|
|
||||||
type=sha,enable=true,priority=100,prefix=,suffix=,format=long
|
|
||||||
main
|
|
||||||
|
|
||||||
- name: Build and push
|
|
||||||
id: build
|
|
||||||
uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
|
|
||||||
with:
|
|
||||||
context: ./docs
|
|
||||||
cache-to: type=gha
|
|
||||||
cache-from: type=gha
|
|
||||||
tags: ${{ steps.meta.outputs.tags }}
|
|
||||||
labels: ${{ steps.meta.outputs.labels }}
|
|
||||||
platforms: linux/amd64
|
|
||||||
push: true
|
|
||||||
|
|
||||||
- name: Apply k8s manifests to limsa lominsa
|
|
||||||
uses: actions-hub/kubectl@f6d776bd78f4523e36d6c74d34f9941c242b2213 # v1.35.0
|
|
||||||
env:
|
|
||||||
KUBE_CONFIG: ${{ secrets.LIMSA_LOMINSA_KUBECONFIG }}
|
|
||||||
with:
|
|
||||||
args: apply -k docs/manifest
|
|
||||||
|
|
||||||
- name: Apply k8s manifests to limsa lominsa
|
|
||||||
uses: actions-hub/kubectl@f6d776bd78f4523e36d6c74d34f9941c242b2213 # v1.35.0
|
|
||||||
env:
|
|
||||||
KUBE_CONFIG: ${{ secrets.LIMSA_LOMINSA_KUBECONFIG }}
|
|
||||||
with:
|
|
||||||
args: rollout restart -n default deploy/anubis-docs
|
|
||||||
42
.github/workflows/docs-test.yml
vendored
42
.github/workflows/docs-test.yml
vendored
|
|
@ -1,42 +0,0 @@
|
||||||
name: Docs test build
|
|
||||||
|
|
||||||
on:
|
|
||||||
pull_request:
|
|
||||||
branches: ["main"]
|
|
||||||
|
|
||||||
permissions:
|
|
||||||
contents: read
|
|
||||||
actions: write
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
build:
|
|
||||||
runs-on: ubuntu-24.04
|
|
||||||
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
|
||||||
with:
|
|
||||||
persist-credentials: false
|
|
||||||
|
|
||||||
- name: Set up Docker Buildx
|
|
||||||
uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3.12.0
|
|
||||||
|
|
||||||
- name: Docker meta
|
|
||||||
id: meta
|
|
||||||
uses: docker/metadata-action@c299e40c65443455700f0fdfc63efafe5b349051 # v5.10.0
|
|
||||||
with:
|
|
||||||
images: ghcr.io/techarohq/anubis/docs
|
|
||||||
tags: |
|
|
||||||
type=sha,enable=true,priority=100,prefix=,suffix=,format=long
|
|
||||||
main
|
|
||||||
|
|
||||||
- name: Build and push
|
|
||||||
id: build
|
|
||||||
uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
|
|
||||||
with:
|
|
||||||
context: ./docs
|
|
||||||
cache-to: type=gha
|
|
||||||
cache-from: type=gha
|
|
||||||
tags: ${{ steps.meta.outputs.tags }}
|
|
||||||
labels: ${{ steps.meta.outputs.labels }}
|
|
||||||
platforms: linux/amd64
|
|
||||||
push: false
|
|
||||||
76
.github/workflows/go-mod-tidy-check.yml
vendored
76
.github/workflows/go-mod-tidy-check.yml
vendored
|
|
@ -1,76 +0,0 @@
|
||||||
name: Go Mod Tidy Check
|
|
||||||
|
|
||||||
on:
|
|
||||||
push:
|
|
||||||
branches: ["main"]
|
|
||||||
pull_request:
|
|
||||||
branches: ["main"]
|
|
||||||
|
|
||||||
permissions:
|
|
||||||
contents: read
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
go_mod_tidy_check:
|
|
||||||
runs-on: ubuntu-24.04
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
|
||||||
with:
|
|
||||||
persist-credentials: false
|
|
||||||
|
|
||||||
- uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
|
|
||||||
with:
|
|
||||||
go-version: '1.25.4'
|
|
||||||
|
|
||||||
- name: Check go.mod and go.sum in main directory
|
|
||||||
run: |
|
|
||||||
# Store original file state
|
|
||||||
cp go.mod go.mod.orig
|
|
||||||
cp go.sum go.sum.orig
|
|
||||||
|
|
||||||
# Run go mod tidy
|
|
||||||
go mod tidy
|
|
||||||
|
|
||||||
# Check if files changed
|
|
||||||
if ! diff -q go.mod.orig go.mod > /dev/null 2>&1; then
|
|
||||||
echo "ERROR: go.mod in main directory has changed after running 'go mod tidy'"
|
|
||||||
echo "Please run 'go mod tidy' locally and commit the changes"
|
|
||||||
diff go.mod.orig go.mod
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
if ! diff -q go.sum.orig go.sum > /dev/null 2>&1; then
|
|
||||||
echo "ERROR: go.sum in main directory has changed after running 'go mod tidy'"
|
|
||||||
echo "Please run 'go mod tidy' locally and commit the changes"
|
|
||||||
diff go.sum.orig go.sum
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
echo "SUCCESS: go.mod and go.sum in main directory are tidy"
|
|
||||||
|
|
||||||
- name: Check go.mod and go.sum in test directory
|
|
||||||
run: |
|
|
||||||
cd test
|
|
||||||
|
|
||||||
# Store original file state
|
|
||||||
cp go.mod go.mod.orig
|
|
||||||
cp go.sum go.sum.orig
|
|
||||||
|
|
||||||
# Run go mod tidy
|
|
||||||
go mod tidy
|
|
||||||
|
|
||||||
# Check if files changed
|
|
||||||
if ! diff -q go.mod.orig go.mod > /dev/null 2>&1; then
|
|
||||||
echo "ERROR: go.mod in test directory has changed after running 'go mod tidy'"
|
|
||||||
echo "Please run 'go mod tidy' locally and commit the changes"
|
|
||||||
diff go.mod.orig go.mod
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
if ! diff -q go.sum.orig go.sum > /dev/null 2>&1; then
|
|
||||||
echo "ERROR: go.sum in test directory has changed after running 'go mod tidy'"
|
|
||||||
echo "Please run 'go mod tidy' locally and commit the changes"
|
|
||||||
diff go.sum.orig go.sum
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
echo "SUCCESS: go.mod and go.sum in test directory are tidy"
|
|
||||||
64
.github/workflows/go.yml
vendored
64
.github/workflows/go.yml
vendored
|
|
@ -1,64 +0,0 @@
|
||||||
name: Go
|
|
||||||
|
|
||||||
on:
|
|
||||||
push:
|
|
||||||
branches: ["main"]
|
|
||||||
pull_request:
|
|
||||||
branches: ["main"]
|
|
||||||
|
|
||||||
permissions:
|
|
||||||
contents: read
|
|
||||||
actions: write
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
go_tests:
|
|
||||||
#runs-on: alrest-techarohq
|
|
||||||
runs-on: ubuntu-24.04
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
|
||||||
with:
|
|
||||||
persist-credentials: false
|
|
||||||
|
|
||||||
- name: build essential
|
|
||||||
run: |
|
|
||||||
sudo apt-get update
|
|
||||||
sudo apt-get install -y build-essential
|
|
||||||
|
|
||||||
- uses: actions/setup-node@6044e13b5dc448c55e2357c09f80417699197238 # v6.2.0
|
|
||||||
with:
|
|
||||||
node-version: '24.11.0'
|
|
||||||
- uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
|
|
||||||
with:
|
|
||||||
go-version: '1.25.4'
|
|
||||||
|
|
||||||
- name: Cache playwright binaries
|
|
||||||
uses: actions/cache@8b402f58fbc84540c8b491a91e594a4576fec3d7 # v5.0.2
|
|
||||||
id: playwright-cache
|
|
||||||
with:
|
|
||||||
path: |
|
|
||||||
~/.cache/ms-playwright
|
|
||||||
key: ${{ runner.os }}-playwright-${{ hashFiles('**/go.sum') }}
|
|
||||||
|
|
||||||
- name: install node deps
|
|
||||||
run: |
|
|
||||||
npm ci
|
|
||||||
|
|
||||||
- name: install playwright browsers
|
|
||||||
run: |
|
|
||||||
npx --no-install playwright@1.52.0 install --with-deps
|
|
||||||
npx --no-install playwright@1.52.0 run-server --port 9001 &
|
|
||||||
|
|
||||||
- name: Build
|
|
||||||
run: npm run build
|
|
||||||
|
|
||||||
- name: Test
|
|
||||||
run: npm run test
|
|
||||||
|
|
||||||
- name: Lint with staticcheck
|
|
||||||
uses: dominikh/staticcheck-action@024238d2898c874f26d723e7d0ff4308c35589a2 # v1.4.0
|
|
||||||
with:
|
|
||||||
version: "latest"
|
|
||||||
|
|
||||||
- name: Govulncheck
|
|
||||||
run: |
|
|
||||||
go tool govulncheck ./...
|
|
||||||
53
.github/workflows/package-builds-stable.yml
vendored
53
.github/workflows/package-builds-stable.yml
vendored
|
|
@ -1,53 +0,0 @@
|
||||||
name: Package builds (stable)
|
|
||||||
|
|
||||||
on:
|
|
||||||
workflow_dispatch:
|
|
||||||
# release:
|
|
||||||
# types: [published]
|
|
||||||
|
|
||||||
permissions:
|
|
||||||
contents: write
|
|
||||||
actions: write
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
package_builds:
|
|
||||||
#runs-on: alrest-techarohq
|
|
||||||
runs-on: ubuntu-24.04
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
|
||||||
with:
|
|
||||||
persist-credentials: false
|
|
||||||
fetch-tags: true
|
|
||||||
fetch-depth: 0
|
|
||||||
|
|
||||||
- name: build essential
|
|
||||||
run: |
|
|
||||||
sudo apt-get update
|
|
||||||
sudo apt-get install -y build-essential
|
|
||||||
|
|
||||||
- uses: actions/setup-node@6044e13b5dc448c55e2357c09f80417699197238 # v6.2.0
|
|
||||||
with:
|
|
||||||
node-version: '24.11.0'
|
|
||||||
- uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
|
|
||||||
with:
|
|
||||||
go-version: '1.25.4'
|
|
||||||
|
|
||||||
- name: install node deps
|
|
||||||
run: |
|
|
||||||
npm ci
|
|
||||||
|
|
||||||
- name: Build Packages
|
|
||||||
run: |
|
|
||||||
go tool yeet
|
|
||||||
|
|
||||||
- name: Upload released artifacts
|
|
||||||
env:
|
|
||||||
GITHUB_TOKEN: ${{ github.TOKEN }}
|
|
||||||
RELEASE_VERSION: ${{github.event.release.tag_name}}
|
|
||||||
shell: bash
|
|
||||||
run: |
|
|
||||||
RELEASE="${RELEASE_VERSION}"
|
|
||||||
cd var
|
|
||||||
for file in *; do
|
|
||||||
gh release upload $RELEASE $file
|
|
||||||
done
|
|
||||||
47
.github/workflows/package-builds-unstable.yml
vendored
47
.github/workflows/package-builds-unstable.yml
vendored
|
|
@ -1,47 +0,0 @@
|
||||||
name: Package builds (unstable)
|
|
||||||
|
|
||||||
on:
|
|
||||||
push:
|
|
||||||
branches: ["main"]
|
|
||||||
pull_request:
|
|
||||||
branches: ["main"]
|
|
||||||
|
|
||||||
permissions:
|
|
||||||
contents: read
|
|
||||||
actions: write
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
package_builds:
|
|
||||||
#runs-on: alrest-techarohq
|
|
||||||
runs-on: ubuntu-24.04
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
|
||||||
with:
|
|
||||||
persist-credentials: false
|
|
||||||
fetch-tags: true
|
|
||||||
fetch-depth: 0
|
|
||||||
|
|
||||||
- name: build essential
|
|
||||||
run: |
|
|
||||||
sudo apt-get update
|
|
||||||
sudo apt-get install -y build-essential
|
|
||||||
|
|
||||||
- uses: actions/setup-node@6044e13b5dc448c55e2357c09f80417699197238 # v6.2.0
|
|
||||||
with:
|
|
||||||
node-version: '24.11.0'
|
|
||||||
- uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
|
|
||||||
with:
|
|
||||||
go-version: '1.25.4'
|
|
||||||
|
|
||||||
- name: install node deps
|
|
||||||
run: |
|
|
||||||
npm ci
|
|
||||||
|
|
||||||
- name: Build Packages
|
|
||||||
run: |
|
|
||||||
go tool yeet
|
|
||||||
|
|
||||||
- uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
|
|
||||||
with:
|
|
||||||
name: packages
|
|
||||||
path: var/*
|
|
||||||
64
.github/workflows/smoke-tests.yml
vendored
64
.github/workflows/smoke-tests.yml
vendored
|
|
@ -1,64 +0,0 @@
|
||||||
name: Smoke tests
|
|
||||||
|
|
||||||
on:
|
|
||||||
push:
|
|
||||||
branches: ["main"]
|
|
||||||
pull_request:
|
|
||||||
branches: ["main"]
|
|
||||||
|
|
||||||
permissions:
|
|
||||||
contents: read
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
smoke-test:
|
|
||||||
strategy:
|
|
||||||
matrix:
|
|
||||||
test:
|
|
||||||
- default-config-macro
|
|
||||||
- docker-registry
|
|
||||||
- double_slash
|
|
||||||
- forced-language
|
|
||||||
- git-clone
|
|
||||||
- git-push
|
|
||||||
- healthcheck
|
|
||||||
- i18n
|
|
||||||
- log-file
|
|
||||||
- nginx
|
|
||||||
- palemoon/amd64
|
|
||||||
#- palemoon/i386
|
|
||||||
- robots_txt
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- name: Checkout code
|
|
||||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
|
||||||
with:
|
|
||||||
persist-credentials: false
|
|
||||||
|
|
||||||
- uses: actions/setup-node@6044e13b5dc448c55e2357c09f80417699197238 # v6.2.0
|
|
||||||
with:
|
|
||||||
node-version: "24.11.0"
|
|
||||||
- uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
|
|
||||||
with:
|
|
||||||
go-version: "1.25.4"
|
|
||||||
|
|
||||||
- uses: ko-build/setup-ko@d006021bd0c28d1ce33a07e7943d48b079944c8d # v0.9
|
|
||||||
|
|
||||||
- name: Install utils
|
|
||||||
run: |
|
|
||||||
go install ./utils/cmd/...
|
|
||||||
|
|
||||||
- name: Run test
|
|
||||||
run: |
|
|
||||||
cd test/${{ matrix.test }}
|
|
||||||
backoff-retry --try-count 10 ./test.sh
|
|
||||||
|
|
||||||
- name: Sanitize artifact name
|
|
||||||
if: always()
|
|
||||||
run: echo "ARTIFACT_NAME=${{ matrix.test }}" | sed 's|/|-|g' >> $GITHUB_ENV
|
|
||||||
|
|
||||||
- name: Upload artifact
|
|
||||||
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f
|
|
||||||
if: always()
|
|
||||||
with:
|
|
||||||
name: ${{ env.ARTIFACT_NAME }}
|
|
||||||
path: test/${{ matrix.test }}/var
|
|
||||||
118
.github/workflows/spelling.yml
vendored
118
.github/workflows/spelling.yml
vendored
|
|
@ -1,118 +0,0 @@
|
||||||
name: Check Spelling
|
|
||||||
|
|
||||||
# Comment management is handled through a secondary job, for details see:
|
|
||||||
# https://github.com/check-spelling/check-spelling/wiki/Feature%3A-Restricted-Permissions
|
|
||||||
#
|
|
||||||
# `jobs.comment-push` runs when a push is made to a repository and the `jobs.spelling` job needs to make a comment
|
|
||||||
# (in odd cases, it might actually run just to collapse a comment, but that's fairly rare)
|
|
||||||
# it needs `contents: write` in order to add a comment.
|
|
||||||
#
|
|
||||||
# `jobs.comment-pr` runs when a pull_request is made to a repository and the `jobs.spelling` job needs to make a comment
|
|
||||||
# or collapse a comment (in the case where it had previously made a comment and now no longer needs to show a comment)
|
|
||||||
# it needs `pull-requests: write` in order to manipulate those comments.
|
|
||||||
|
|
||||||
# Updating pull request branches is managed via comment handling.
|
|
||||||
# For details, see: https://github.com/check-spelling/check-spelling/wiki/Feature:-Update-expect-list
|
|
||||||
#
|
|
||||||
# These elements work together to make it happen:
|
|
||||||
#
|
|
||||||
# `on.issue_comment`
|
|
||||||
# This event listens to comments by users asking to update the metadata.
|
|
||||||
#
|
|
||||||
# `jobs.update`
|
|
||||||
# This job runs in response to an issue_comment and will push a new commit
|
|
||||||
# to update the spelling metadata.
|
|
||||||
#
|
|
||||||
# `with.experimental_apply_changes_via_bot`
|
|
||||||
# Tells the action to support and generate messages that enable it
|
|
||||||
# to make a commit to update the spelling metadata.
|
|
||||||
#
|
|
||||||
# `with.ssh_key`
|
|
||||||
# In order to trigger workflows when the commit is made, you can provide a
|
|
||||||
# secret (typically, a write-enabled github deploy key).
|
|
||||||
#
|
|
||||||
# For background, see: https://github.com/check-spelling/check-spelling/wiki/Feature:-Update-with-deploy-key
|
|
||||||
|
|
||||||
# SARIF reporting
|
|
||||||
#
|
|
||||||
# Access to SARIF reports is generally restricted (by GitHub) to members of the repository.
|
|
||||||
#
|
|
||||||
# Requires enabling `security-events: write`
|
|
||||||
# and configuring the action with `use_sarif: 1`
|
|
||||||
#
|
|
||||||
# For information on the feature, see: https://github.com/check-spelling/check-spelling/wiki/Feature:-SARIF-output
|
|
||||||
|
|
||||||
# Minimal workflow structure:
|
|
||||||
#
|
|
||||||
# on:
|
|
||||||
# push:
|
|
||||||
# ...
|
|
||||||
# pull_request_target:
|
|
||||||
# ...
|
|
||||||
# jobs:
|
|
||||||
# # you only want the spelling job, all others should be omitted
|
|
||||||
# spelling:
|
|
||||||
# # remove `security-events: write` and `use_sarif: 1`
|
|
||||||
# # remove `experimental_apply_changes_via_bot: 1`
|
|
||||||
# ... otherwise adjust the `with:` as you wish
|
|
||||||
|
|
||||||
on:
|
|
||||||
push:
|
|
||||||
branches:
|
|
||||||
- '**'
|
|
||||||
tags-ignore:
|
|
||||||
- '**'
|
|
||||||
pull_request:
|
|
||||||
branches:
|
|
||||||
- '**'
|
|
||||||
types:
|
|
||||||
- 'opened'
|
|
||||||
- 'reopened'
|
|
||||||
- 'synchronize'
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
spelling:
|
|
||||||
name: Check Spelling
|
|
||||||
permissions:
|
|
||||||
contents: read
|
|
||||||
pull-requests: read
|
|
||||||
actions: read
|
|
||||||
security-events: write
|
|
||||||
outputs:
|
|
||||||
followup: ${{ steps.spelling.outputs.followup }}
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
if: ${{ contains(github.event_name, 'pull_request') || github.event_name == 'push' }}
|
|
||||||
concurrency:
|
|
||||||
group: spelling-${{ github.event.pull_request.number || github.ref }}
|
|
||||||
# note: If you use only_check_changed_files, you do not want cancel-in-progress
|
|
||||||
cancel-in-progress: true
|
|
||||||
steps:
|
|
||||||
- name: check-spelling
|
|
||||||
id: spelling
|
|
||||||
uses: check-spelling/check-spelling@c635c2f3f714eec2fcf27b643a1919b9a811ef2e # v0.0.25
|
|
||||||
with:
|
|
||||||
suppress_push_for_open_pull_request: ${{ github.actor != 'dependabot[bot]' && 1 }}
|
|
||||||
checkout: true
|
|
||||||
check_file_names: 1
|
|
||||||
post_comment: 0
|
|
||||||
use_magic_file: 1
|
|
||||||
warnings: bad-regex,binary-file,deprecated-feature,ignored-expect-variant,large-file,limited-references,no-newline-at-eof,noisy-file,non-alpha-in-dictionary,token-is-substring,unexpected-line-ending,whitespace-in-dictionary,minified-file,unsupported-configuration,no-files-to-check,unclosed-block-ignore-begin,unclosed-block-ignore-end
|
|
||||||
use_sarif: ${{ (!github.event.pull_request || (github.event.pull_request.head.repo.full_name == github.repository)) && 1 }}
|
|
||||||
check_extra_dictionaries: ""
|
|
||||||
dictionary_source_prefixes: >
|
|
||||||
{
|
|
||||||
"cspell": "https://raw.githubusercontent.com/check-spelling/cspell-dicts/v20241114/dictionaries/"
|
|
||||||
}
|
|
||||||
extra_dictionaries: |
|
|
||||||
cspell:software-terms/softwareTerms.txt
|
|
||||||
cspell:golang/go.txt
|
|
||||||
cspell:npm/npm.txt
|
|
||||||
cspell:k8s/k8s.txt
|
|
||||||
cspell:python/python/python-lib.txt
|
|
||||||
cspell:aws/aws.txt
|
|
||||||
cspell:node/node.txt
|
|
||||||
cspell:html/html.txt
|
|
||||||
cspell:filetypes/filetypes.txt
|
|
||||||
cspell:python/common/extra.txt
|
|
||||||
cspell:docker/docker-words.txt
|
|
||||||
cspell:fullstack/fullstack.txt
|
|
||||||
37
.github/workflows/ssh-ci-runner-cron.yml
vendored
37
.github/workflows/ssh-ci-runner-cron.yml
vendored
|
|
@ -1,37 +0,0 @@
|
||||||
name: Regenerate ssh ci runner image
|
|
||||||
|
|
||||||
on:
|
|
||||||
# pull_request:
|
|
||||||
# branches: ["main"]
|
|
||||||
schedule:
|
|
||||||
- cron: "0 0 1,8,15,22 * *"
|
|
||||||
workflow_dispatch:
|
|
||||||
|
|
||||||
permissions:
|
|
||||||
pull-requests: write
|
|
||||||
contents: write
|
|
||||||
packages: write
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
ssh-ci-rebuild:
|
|
||||||
if: github.repository == 'TecharoHQ/anubis'
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- name: Checkout code
|
|
||||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
|
||||||
with:
|
|
||||||
fetch-tags: true
|
|
||||||
fetch-depth: 0
|
|
||||||
persist-credentials: false
|
|
||||||
- name: Log into registry
|
|
||||||
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
|
|
||||||
with:
|
|
||||||
registry: ghcr.io
|
|
||||||
username: ${{ github.repository_owner }}
|
|
||||||
password: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
- name: Set up Docker Buildx
|
|
||||||
uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3.12.0
|
|
||||||
- name: Build and push
|
|
||||||
run: |
|
|
||||||
cd ./test/ssh-ci
|
|
||||||
docker buildx bake --push
|
|
||||||
45
.github/workflows/ssh-ci.yml
vendored
45
.github/workflows/ssh-ci.yml
vendored
|
|
@ -1,45 +0,0 @@
|
||||||
name: SSH CI
|
|
||||||
|
|
||||||
on:
|
|
||||||
push:
|
|
||||||
branches: ["main"]
|
|
||||||
# pull_request:
|
|
||||||
# branches: ["main"]
|
|
||||||
|
|
||||||
permissions:
|
|
||||||
contents: read
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
ssh:
|
|
||||||
if: github.repository == 'TecharoHQ/anubis'
|
|
||||||
runs-on: alrest-techarohq
|
|
||||||
strategy:
|
|
||||||
matrix:
|
|
||||||
host:
|
|
||||||
- riscv64
|
|
||||||
- ppc64le
|
|
||||||
- aarch64-4k
|
|
||||||
- aarch64-16k
|
|
||||||
steps:
|
|
||||||
- name: Checkout code
|
|
||||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
|
||||||
with:
|
|
||||||
fetch-tags: true
|
|
||||||
fetch-depth: 0
|
|
||||||
persist-credentials: false
|
|
||||||
|
|
||||||
- name: Install CI target SSH key
|
|
||||||
uses: shimataro/ssh-key-action@d4fffb50872869abe2d9a9098a6d9c5aa7d16be4 # v2.7.0
|
|
||||||
with:
|
|
||||||
key: ${{ secrets.CI_SSH_KEY }}
|
|
||||||
name: id_rsa
|
|
||||||
known_hosts: ${{ secrets.CI_SSH_KNOWN_HOSTS }}
|
|
||||||
|
|
||||||
- uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
|
|
||||||
with:
|
|
||||||
go-version: '1.25.4'
|
|
||||||
|
|
||||||
- name: Run CI
|
|
||||||
run: go run ./utils/cmd/backoff-retry bash test/ssh-ci/rigging.sh ${{ matrix.host }}
|
|
||||||
env:
|
|
||||||
GITHUB_RUN_ID: ${{ github.run_id }}
|
|
||||||
35
.github/workflows/zizmor.yml
vendored
35
.github/workflows/zizmor.yml
vendored
|
|
@ -1,35 +0,0 @@
|
||||||
name: zizmor
|
|
||||||
|
|
||||||
on:
|
|
||||||
push:
|
|
||||||
paths:
|
|
||||||
- '.github/workflows/*.ya?ml'
|
|
||||||
pull_request:
|
|
||||||
paths:
|
|
||||||
- '.github/workflows/*.ya?ml'
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
zizmor:
|
|
||||||
name: zizmor latest via PyPI
|
|
||||||
runs-on: ubuntu-24.04
|
|
||||||
permissions:
|
|
||||||
security-events: write
|
|
||||||
steps:
|
|
||||||
- name: Checkout repository
|
|
||||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
|
||||||
with:
|
|
||||||
persist-credentials: false
|
|
||||||
|
|
||||||
- name: Install the latest version of uv
|
|
||||||
uses: astral-sh/setup-uv@61cb8a9741eeb8a550a1b8544337180c0fc8476b # v7.2.0
|
|
||||||
|
|
||||||
- name: Run zizmor 🌈
|
|
||||||
run: uvx zizmor --format sarif . > results.sarif
|
|
||||||
env:
|
|
||||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
|
|
||||||
- name: Upload SARIF file
|
|
||||||
uses: github/codeql-action/upload-sarif@5d4e8d1aca955e8d8589aabd499c5cae939e33c7 # v4.31.9
|
|
||||||
with:
|
|
||||||
sarif_file: results.sarif
|
|
||||||
category: zizmor
|
|
||||||
6
.ko.yaml
6
.ko.yaml
|
|
@ -5,9 +5,9 @@ defaultPlatforms:
|
||||||
- linux/arm/v7
|
- linux/arm/v7
|
||||||
|
|
||||||
builds:
|
builds:
|
||||||
- id: anubis
|
- id: nuke
|
||||||
main: ./cmd/anubis
|
main: ./cmd/nuke
|
||||||
ldflags:
|
ldflags:
|
||||||
- -s -w
|
- -s -w
|
||||||
- -extldflags "-static"
|
- -extldflags "-static"
|
||||||
- -X github.com/TecharoHQ/anubis.Version={{.Env.VERSION}}
|
- -X git.sad.ovh/sophie/nuke.Version={{.Env.VERSION}}
|
||||||
|
|
|
||||||
12
.vscode/launch.json
vendored
12
.vscode/launch.json
vendored
|
|
@ -9,19 +9,19 @@
|
||||||
"type": "go",
|
"type": "go",
|
||||||
"request": "launch",
|
"request": "launch",
|
||||||
"mode": "auto",
|
"mode": "auto",
|
||||||
"program": "${fileDirname}"
|
"program": "${fileDirname}",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"name": "Anubis [dev]",
|
"name": "Nuke [dev]",
|
||||||
"command": "npm run dev",
|
"command": "npm run dev",
|
||||||
"request": "launch",
|
"request": "launch",
|
||||||
"type": "node-terminal"
|
"type": "node-terminal",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"name": "Start Docs",
|
"name": "Start Docs",
|
||||||
"command": "cd docs && npm ci && npm run start",
|
"command": "cd docs && npm ci && npm run start",
|
||||||
"request": "launch",
|
"request": "launch",
|
||||||
"type": "node-terminal"
|
"type": "node-terminal",
|
||||||
}
|
},
|
||||||
]
|
],
|
||||||
}
|
}
|
||||||
8
Makefile
8
Makefile
|
|
@ -17,9 +17,9 @@ assets: deps
|
||||||
./xess/build.sh
|
./xess/build.sh
|
||||||
|
|
||||||
build: assets
|
build: assets
|
||||||
$(GO) build -o ./var/anubis ./cmd/anubis
|
$(GO) build -o ./var/nuke ./cmd/nuke
|
||||||
$(GO) build -o ./var/robots2policy ./cmd/robots2policy
|
$(GO) build -o ./var/robots2policy ./cmd/robots2policy
|
||||||
@echo "Anubis is now built to ./var/anubis"
|
@echo "nuke is now built to ./var/nuke"
|
||||||
|
|
||||||
lint: assets
|
lint: assets
|
||||||
$(GO) vet ./...
|
$(GO) vet ./...
|
||||||
|
|
@ -27,8 +27,8 @@ lint: assets
|
||||||
$(GO) tool govulncheck ./...
|
$(GO) tool govulncheck ./...
|
||||||
|
|
||||||
prebaked-build:
|
prebaked-build:
|
||||||
$(GO) build -o ./var/anubis -ldflags "-X 'github.com/TecharoHQ/anubis.Version=$(VERSION)'" ./cmd/anubis
|
$(GO) build -o ./var/nuke -ldflags "-X 'git.sad.ovh/sophie/nuke.Version=$(VERSION)'" ./cmd/nuke
|
||||||
$(GO) build -o ./var/robots2policy -ldflags "-X 'github.com/TecharoHQ/anubis.Version=$(VERSION)'" ./cmd/robots2policy
|
$(GO) build -o ./var/robots2policy -ldflags "-X 'git.sad.ovh/sophie/nuke.Version=$(VERSION)'" ./cmd/robots2policy
|
||||||
|
|
||||||
test: assets
|
test: assets
|
||||||
$(GO) test ./...
|
$(GO) test ./...
|
||||||
|
|
|
||||||
107
README.md
107
README.md
|
|
@ -1,100 +1,13 @@
|
||||||
# Anubis
|
fork of anubis & rebranded "nuke"
|
||||||
|
|
||||||
<center>
|
anubis is fully licensed under MIT so this is permitted
|
||||||
<img width=256 src="./web/static/img/happy.webp" alt="A smiling chibi dark-skinned anthro jackal with brown hair and tall ears looking victorious with a thumbs-up" />
|
|
||||||
</center>
|
|
||||||
|
|
||||||

|
features removed:
|
||||||

|
- all of "thoth", which is techaro.lol's weird ASN / geoip resolver (slopware)
|
||||||

|
will be replaced with ipify.org or proxycheck.io eventually
|
||||||

|
- docs, follow anubis docs
|
||||||

|
- all of the CI, it is excessive to the max 💯💯💯
|
||||||
[](https://github.com/sponsors/Xe)
|
|
||||||
|
|
||||||
## Sponsors
|
features soon removed:
|
||||||
|
- most testing jigs, this is tested like this is being used by fortune 500 companies and should be closed source roflmfao
|
||||||
Anubis is brought to you by sponsors and donors like:
|
- anubis mascot because i'm like 95% sure it's under license so i can't use it
|
||||||
|
|
||||||
### Diamond Tier
|
|
||||||
|
|
||||||
<a href="https://www.raptorcs.com/content/base/products.html">
|
|
||||||
<img src="./docs/static/img/sponsors/raptor-computing-logo.webp" alt="Raptor Computing Systems" height=64 />
|
|
||||||
</a>
|
|
||||||
<a href="https://databento.com/?utm_source=anubis&utm_medium=sponsor&utm_campaign=anubis">
|
|
||||||
<img src="./docs/static/img/sponsors/databento-logo.webp" alt="Databento" height="64" />
|
|
||||||
</a>
|
|
||||||
|
|
||||||
### Gold Tier
|
|
||||||
|
|
||||||
<a href="https://distrust.co?utm_campaign=github&utm_medium=referral&utm_content=anubis">
|
|
||||||
<img src="./docs/static/img/sponsors/distrust-logo.webp" alt="Distrust" height="64">
|
|
||||||
</a>
|
|
||||||
<a href="https://terminaltrove.com/?utm_campaign=github&utm_medium=referral&utm_content=anubis&utm_source=abgh">
|
|
||||||
<img src="./docs/static/img/sponsors/terminal-trove.webp" alt="Terminal Trove" height="64">
|
|
||||||
</a>
|
|
||||||
<a href="https://canine.tools?utm_campaign=github&utm_medium=referral&utm_content=anubis">
|
|
||||||
<img src="./docs/static/img/sponsors/caninetools-logo.webp" alt="canine.tools" height="64">
|
|
||||||
</a>
|
|
||||||
<a href="https://weblate.org/">
|
|
||||||
<img src="./docs/static/img/sponsors/weblate-logo.webp" alt="Weblate" height="64">
|
|
||||||
</a>
|
|
||||||
<a href="https://uberspace.de/">
|
|
||||||
<img src="./docs/static/img/sponsors/uberspace-logo.webp" alt="Uberspace" height="64">
|
|
||||||
</a>
|
|
||||||
<a href="https://wildbase.xyz/">
|
|
||||||
<img src="./docs/static/img/sponsors/wildbase-logo.webp" alt="Wildbase" height="64">
|
|
||||||
</a>
|
|
||||||
<a href="https://emma.pet">
|
|
||||||
<img
|
|
||||||
src="./docs/static/img/sponsors/nepeat-logo.webp"
|
|
||||||
alt="Cat eyes over the word Emma in a serif font"
|
|
||||||
height="64"
|
|
||||||
/>
|
|
||||||
</a>
|
|
||||||
<a href="https://fabulous.systems/">
|
|
||||||
<img
|
|
||||||
src="./docs/static/img/sponsors/fabulous-systems.webp"
|
|
||||||
alt="Cat eyes over the word Emma in a serif font"
|
|
||||||
height="64"
|
|
||||||
/>
|
|
||||||
</a>
|
|
||||||
|
|
||||||
## Overview
|
|
||||||
|
|
||||||
Anubis is a Web AI Firewall Utility that [weighs the soul of your connection](https://en.wikipedia.org/wiki/Weighing_of_souls) using one or more challenges in order to protect upstream resources from scraper bots.
|
|
||||||
|
|
||||||
This program is designed to help protect the small internet from the endless storm of requests that flood in from AI companies. Anubis is as lightweight as possible to ensure that everyone can afford to protect the communities closest to them.
|
|
||||||
|
|
||||||
Anubis is a bit of a nuclear response. This will result in your website being blocked from smaller scrapers and may inhibit "good bots" like the Internet Archive. You can configure [bot policy definitions](./docs/docs/admin/policies.mdx) to explicitly allowlist them and we are working on a curated set of "known good" bots to allow for a compromise between discoverability and uptime.
|
|
||||||
|
|
||||||
In most cases, you should not need this and can probably get by using Cloudflare to protect a given origin. However, for circumstances where you can't or won't use Cloudflare, Anubis is there for you.
|
|
||||||
|
|
||||||
If you want to try this out, visit the Anubis documentation site at [anubis.techaro.lol](https://anubis.techaro.lol).
|
|
||||||
|
|
||||||
## Support
|
|
||||||
|
|
||||||
If you run into any issues running Anubis, please [open an issue](https://github.com/TecharoHQ/anubis/issues/new?template=Blank+issue). Please include all the information I would need to diagnose your issue.
|
|
||||||
|
|
||||||
For live chat, please join the [Patreon](https://patreon.com/cadey) and ask in the Patron discord in the channel `#anubis`.
|
|
||||||
|
|
||||||
## Star History
|
|
||||||
|
|
||||||
<a href="https://www.star-history.com/#TecharoHQ/anubis&Date">
|
|
||||||
<picture>
|
|
||||||
<source media="(prefers-color-scheme: dark)" srcset="https://api.star-history.com/svg?repos=TecharoHQ/anubis&type=Date&theme=dark" />
|
|
||||||
<source media="(prefers-color-scheme: light)" srcset="https://api.star-history.com/svg?repos=TecharoHQ/anubis&type=Date" />
|
|
||||||
<img alt="Star History Chart" src="https://api.star-history.com/svg?repos=TecharoHQ/anubis&type=Date" />
|
|
||||||
</picture>
|
|
||||||
</a>
|
|
||||||
|
|
||||||
## Packaging Status
|
|
||||||
|
|
||||||
[](https://repology.org/project/anubis-anti-crawler/versions)
|
|
||||||
|
|
||||||
## Contributors
|
|
||||||
|
|
||||||
<a href="https://github.com/TecharoHQ/anubis/graphs/contributors">
|
|
||||||
<img src="https://contrib.rocks/image?repo=TecharoHQ/anubis" />
|
|
||||||
</a>
|
|
||||||
|
|
||||||
Made with [contrib.rocks](https://contrib.rocks).
|
|
||||||
|
|
|
||||||
13
SECURITY.md
13
SECURITY.md
|
|
@ -1,13 +0,0 @@
|
||||||
# Security Policy
|
|
||||||
|
|
||||||
Techaro follows the [Semver 2.0 scheme](https://semver.org/).
|
|
||||||
|
|
||||||
## Supported Versions
|
|
||||||
|
|
||||||
Techaro strives to support the two most recent minor versions of Anubis. Patches to those versions will be published as patch releases.
|
|
||||||
|
|
||||||
## Reporting a Vulnerability
|
|
||||||
|
|
||||||
Email security@techaro.lol with details on the vulnerability and reproduction steps. You will get a response as soon as possible.
|
|
||||||
|
|
||||||
Please take care to send your email as a mixed plaintext and HTML message. Messages with GPG signatures or that are plaintext only may be blocked by the spam filter.
|
|
||||||
407
bun.lock
Normal file
407
bun.lock
Normal file
|
|
@ -0,0 +1,407 @@
|
||||||
|
{
|
||||||
|
"lockfileVersion": 1,
|
||||||
|
"configVersion": 0,
|
||||||
|
"workspaces": {
|
||||||
|
"": {
|
||||||
|
"name": "@sophie/nuke",
|
||||||
|
"dependencies": {
|
||||||
|
"@aws-crypto/sha256-js": "^5.2.0",
|
||||||
|
"brotli": "^1.3.3",
|
||||||
|
"preact": "^10.28.2",
|
||||||
|
},
|
||||||
|
"devDependencies": {
|
||||||
|
"cssnano": "^7.1.2",
|
||||||
|
"cssnano-preset-advanced": "^7.0.10",
|
||||||
|
"esbuild": "^0.27.2",
|
||||||
|
"playwright": "^1.52.0",
|
||||||
|
"postcss-cli": "^11.0.1",
|
||||||
|
"postcss-import": "^16.1.1",
|
||||||
|
"postcss-import-url": "^7.2.0",
|
||||||
|
"postcss-url": "^10.1.3",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
"packages": {
|
||||||
|
"@aws-crypto/sha256-js": ["@aws-crypto/sha256-js@5.2.0", "", { "dependencies": { "@aws-crypto/util": "^5.2.0", "@aws-sdk/types": "^3.222.0", "tslib": "^2.6.2" } }, "sha512-FFQQyu7edu4ufvIZ+OadFpHHOt+eSTBaYaki44c+akjg7qZg9oOQeLlk77F6tSYqjDAFClrHJk9tMf0HdVyOvA=="],
|
||||||
|
|
||||||
|
"@aws-crypto/util": ["@aws-crypto/util@5.2.0", "", { "dependencies": { "@aws-sdk/types": "^3.222.0", "@smithy/util-utf8": "^2.0.0", "tslib": "^2.6.2" } }, "sha512-4RkU9EsI6ZpBve5fseQlGNUWKMa1RLPQ1dnjnQoe07ldfIzcsGb5hC5W0Dm7u423KWzawlrpbjXBrXCEv9zazQ=="],
|
||||||
|
|
||||||
|
"@aws-sdk/types": ["@aws-sdk/types@3.840.0", "", { "dependencies": { "@smithy/types": "^4.3.1", "tslib": "^2.6.2" } }, "sha512-xliuHaUFZxEx1NSXeLLZ9Dyu6+EJVQKEoD+yM+zqUo3YDZ7medKJWY6fIOKiPX/N7XbLdBYwajb15Q7IL8KkeA=="],
|
||||||
|
|
||||||
|
"@esbuild/aix-ppc64": ["@esbuild/aix-ppc64@0.27.2", "", { "os": "aix", "cpu": "ppc64" }, "sha512-GZMB+a0mOMZs4MpDbj8RJp4cw+w1WV5NYD6xzgvzUJ5Ek2jerwfO2eADyI6ExDSUED+1X8aMbegahsJi+8mgpw=="],
|
||||||
|
|
||||||
|
"@esbuild/android-arm": ["@esbuild/android-arm@0.27.2", "", { "os": "android", "cpu": "arm" }, "sha512-DVNI8jlPa7Ujbr1yjU2PfUSRtAUZPG9I1RwW4F4xFB1Imiu2on0ADiI/c3td+KmDtVKNbi+nffGDQMfcIMkwIA=="],
|
||||||
|
|
||||||
|
"@esbuild/android-arm64": ["@esbuild/android-arm64@0.27.2", "", { "os": "android", "cpu": "arm64" }, "sha512-pvz8ZZ7ot/RBphf8fv60ljmaoydPU12VuXHImtAs0XhLLw+EXBi2BLe3OYSBslR4rryHvweW5gmkKFwTiFy6KA=="],
|
||||||
|
|
||||||
|
"@esbuild/android-x64": ["@esbuild/android-x64@0.27.2", "", { "os": "android", "cpu": "x64" }, "sha512-z8Ank4Byh4TJJOh4wpz8g2vDy75zFL0TlZlkUkEwYXuPSgX8yzep596n6mT7905kA9uHZsf/o2OJZubl2l3M7A=="],
|
||||||
|
|
||||||
|
"@esbuild/darwin-arm64": ["@esbuild/darwin-arm64@0.27.2", "", { "os": "darwin", "cpu": "arm64" }, "sha512-davCD2Zc80nzDVRwXTcQP/28fiJbcOwvdolL0sOiOsbwBa72kegmVU0Wrh1MYrbuCL98Omp5dVhQFWRKR2ZAlg=="],
|
||||||
|
|
||||||
|
"@esbuild/darwin-x64": ["@esbuild/darwin-x64@0.27.2", "", { "os": "darwin", "cpu": "x64" }, "sha512-ZxtijOmlQCBWGwbVmwOF/UCzuGIbUkqB1faQRf5akQmxRJ1ujusWsb3CVfk/9iZKr2L5SMU5wPBi1UWbvL+VQA=="],
|
||||||
|
|
||||||
|
"@esbuild/freebsd-arm64": ["@esbuild/freebsd-arm64@0.27.2", "", { "os": "freebsd", "cpu": "arm64" }, "sha512-lS/9CN+rgqQ9czogxlMcBMGd+l8Q3Nj1MFQwBZJyoEKI50XGxwuzznYdwcav6lpOGv5BqaZXqvBSiB/kJ5op+g=="],
|
||||||
|
|
||||||
|
"@esbuild/freebsd-x64": ["@esbuild/freebsd-x64@0.27.2", "", { "os": "freebsd", "cpu": "x64" }, "sha512-tAfqtNYb4YgPnJlEFu4c212HYjQWSO/w/h/lQaBK7RbwGIkBOuNKQI9tqWzx7Wtp7bTPaGC6MJvWI608P3wXYA=="],
|
||||||
|
|
||||||
|
"@esbuild/linux-arm": ["@esbuild/linux-arm@0.27.2", "", { "os": "linux", "cpu": "arm" }, "sha512-vWfq4GaIMP9AIe4yj1ZUW18RDhx6EPQKjwe7n8BbIecFtCQG4CfHGaHuh7fdfq+y3LIA2vGS/o9ZBGVxIDi9hw=="],
|
||||||
|
|
||||||
|
"@esbuild/linux-arm64": ["@esbuild/linux-arm64@0.27.2", "", { "os": "linux", "cpu": "arm64" }, "sha512-hYxN8pr66NsCCiRFkHUAsxylNOcAQaxSSkHMMjcpx0si13t1LHFphxJZUiGwojB1a/Hd5OiPIqDdXONia6bhTw=="],
|
||||||
|
|
||||||
|
"@esbuild/linux-ia32": ["@esbuild/linux-ia32@0.27.2", "", { "os": "linux", "cpu": "ia32" }, "sha512-MJt5BRRSScPDwG2hLelYhAAKh9imjHK5+NE/tvnRLbIqUWa+0E9N4WNMjmp/kXXPHZGqPLxggwVhz7QP8CTR8w=="],
|
||||||
|
|
||||||
|
"@esbuild/linux-loong64": ["@esbuild/linux-loong64@0.27.2", "", { "os": "linux", "cpu": "none" }, "sha512-lugyF1atnAT463aO6KPshVCJK5NgRnU4yb3FUumyVz+cGvZbontBgzeGFO1nF+dPueHD367a2ZXe1NtUkAjOtg=="],
|
||||||
|
|
||||||
|
"@esbuild/linux-mips64el": ["@esbuild/linux-mips64el@0.27.2", "", { "os": "linux", "cpu": "none" }, "sha512-nlP2I6ArEBewvJ2gjrrkESEZkB5mIoaTswuqNFRv/WYd+ATtUpe9Y09RnJvgvdag7he0OWgEZWhviS1OTOKixw=="],
|
||||||
|
|
||||||
|
"@esbuild/linux-ppc64": ["@esbuild/linux-ppc64@0.27.2", "", { "os": "linux", "cpu": "ppc64" }, "sha512-C92gnpey7tUQONqg1n6dKVbx3vphKtTHJaNG2Ok9lGwbZil6DrfyecMsp9CrmXGQJmZ7iiVXvvZH6Ml5hL6XdQ=="],
|
||||||
|
|
||||||
|
"@esbuild/linux-riscv64": ["@esbuild/linux-riscv64@0.27.2", "", { "os": "linux", "cpu": "none" }, "sha512-B5BOmojNtUyN8AXlK0QJyvjEZkWwy/FKvakkTDCziX95AowLZKR6aCDhG7LeF7uMCXEJqwa8Bejz5LTPYm8AvA=="],
|
||||||
|
|
||||||
|
"@esbuild/linux-s390x": ["@esbuild/linux-s390x@0.27.2", "", { "os": "linux", "cpu": "s390x" }, "sha512-p4bm9+wsPwup5Z8f4EpfN63qNagQ47Ua2znaqGH6bqLlmJ4bx97Y9JdqxgGZ6Y8xVTixUnEkoKSHcpRlDnNr5w=="],
|
||||||
|
|
||||||
|
"@esbuild/linux-x64": ["@esbuild/linux-x64@0.27.2", "", { "os": "linux", "cpu": "x64" }, "sha512-uwp2Tip5aPmH+NRUwTcfLb+W32WXjpFejTIOWZFw/v7/KnpCDKG66u4DLcurQpiYTiYwQ9B7KOeMJvLCu/OvbA=="],
|
||||||
|
|
||||||
|
"@esbuild/netbsd-arm64": ["@esbuild/netbsd-arm64@0.27.2", "", { "os": "none", "cpu": "arm64" }, "sha512-Kj6DiBlwXrPsCRDeRvGAUb/LNrBASrfqAIok+xB0LxK8CHqxZ037viF13ugfsIpePH93mX7xfJp97cyDuTZ3cw=="],
|
||||||
|
|
||||||
|
"@esbuild/netbsd-x64": ["@esbuild/netbsd-x64@0.27.2", "", { "os": "none", "cpu": "x64" }, "sha512-HwGDZ0VLVBY3Y+Nw0JexZy9o/nUAWq9MlV7cahpaXKW6TOzfVno3y3/M8Ga8u8Yr7GldLOov27xiCnqRZf0tCA=="],
|
||||||
|
|
||||||
|
"@esbuild/openbsd-arm64": ["@esbuild/openbsd-arm64@0.27.2", "", { "os": "openbsd", "cpu": "arm64" }, "sha512-DNIHH2BPQ5551A7oSHD0CKbwIA/Ox7+78/AWkbS5QoRzaqlev2uFayfSxq68EkonB+IKjiuxBFoV8ESJy8bOHA=="],
|
||||||
|
|
||||||
|
"@esbuild/openbsd-x64": ["@esbuild/openbsd-x64@0.27.2", "", { "os": "openbsd", "cpu": "x64" }, "sha512-/it7w9Nb7+0KFIzjalNJVR5bOzA9Vay+yIPLVHfIQYG/j+j9VTH84aNB8ExGKPU4AzfaEvN9/V4HV+F+vo8OEg=="],
|
||||||
|
|
||||||
|
"@esbuild/openharmony-arm64": ["@esbuild/openharmony-arm64@0.27.2", "", { "os": "none", "cpu": "arm64" }, "sha512-LRBbCmiU51IXfeXk59csuX/aSaToeG7w48nMwA6049Y4J4+VbWALAuXcs+qcD04rHDuSCSRKdmY63sruDS5qag=="],
|
||||||
|
|
||||||
|
"@esbuild/sunos-x64": ["@esbuild/sunos-x64@0.27.2", "", { "os": "sunos", "cpu": "x64" }, "sha512-kMtx1yqJHTmqaqHPAzKCAkDaKsffmXkPHThSfRwZGyuqyIeBvf08KSsYXl+abf5HDAPMJIPnbBfXvP2ZC2TfHg=="],
|
||||||
|
|
||||||
|
"@esbuild/win32-arm64": ["@esbuild/win32-arm64@0.27.2", "", { "os": "win32", "cpu": "arm64" }, "sha512-Yaf78O/B3Kkh+nKABUF++bvJv5Ijoy9AN1ww904rOXZFLWVc5OLOfL56W+C8F9xn5JQZa3UX6m+IktJnIb1Jjg=="],
|
||||||
|
|
||||||
|
"@esbuild/win32-ia32": ["@esbuild/win32-ia32@0.27.2", "", { "os": "win32", "cpu": "ia32" }, "sha512-Iuws0kxo4yusk7sw70Xa2E2imZU5HoixzxfGCdxwBdhiDgt9vX9VUCBhqcwY7/uh//78A1hMkkROMJq9l27oLQ=="],
|
||||||
|
|
||||||
|
"@esbuild/win32-x64": ["@esbuild/win32-x64@0.27.2", "", { "os": "win32", "cpu": "x64" }, "sha512-sRdU18mcKf7F+YgheI/zGf5alZatMUTKj/jNS6l744f9u3WFu4v7twcUI9vu4mknF4Y9aDlblIie0IM+5xxaqQ=="],
|
||||||
|
|
||||||
|
"@smithy/is-array-buffer": ["@smithy/is-array-buffer@2.2.0", "", { "dependencies": { "tslib": "^2.6.2" } }, "sha512-GGP3O9QFD24uGeAXYUjwSTXARoqpZykHadOmA8G5vfJPK0/DC67qa//0qvqrJzL1xc8WQWX7/yc7fwudjPHPhA=="],
|
||||||
|
|
||||||
|
"@smithy/types": ["@smithy/types@4.3.1", "", { "dependencies": { "tslib": "^2.6.2" } }, "sha512-UqKOQBL2x6+HWl3P+3QqFD4ncKq0I8Nuz9QItGv5WuKuMHuuwlhvqcZCoXGfc+P1QmfJE7VieykoYYmrOoFJxA=="],
|
||||||
|
|
||||||
|
"@smithy/util-buffer-from": ["@smithy/util-buffer-from@2.2.0", "", { "dependencies": { "@smithy/is-array-buffer": "^2.2.0", "tslib": "^2.6.2" } }, "sha512-IJdWBbTcMQ6DA0gdNhh/BwrLkDR+ADW5Kr1aZmd4k3DIF6ezMV4R2NIAmT08wQJ3yUK82thHWmC/TnK/wpMMIA=="],
|
||||||
|
|
||||||
|
"@smithy/util-utf8": ["@smithy/util-utf8@2.3.0", "", { "dependencies": { "@smithy/util-buffer-from": "^2.2.0", "tslib": "^2.6.2" } }, "sha512-R8Rdn8Hy72KKcebgLiv8jQcQkXoLMOGGv5uI1/k0l+snqkOzQ1R0ChUBCxWMlBsFMekWjq0wRudIweFs7sKT5A=="],
|
||||||
|
|
||||||
|
"ansi-regex": ["ansi-regex@5.0.1", "", {}, "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ=="],
|
||||||
|
|
||||||
|
"ansi-styles": ["ansi-styles@4.3.0", "", { "dependencies": { "color-convert": "^2.0.1" } }, "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg=="],
|
||||||
|
|
||||||
|
"anymatch": ["anymatch@3.1.3", "", { "dependencies": { "normalize-path": "^3.0.0", "picomatch": "^2.0.4" } }, "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw=="],
|
||||||
|
|
||||||
|
"autoprefixer": ["autoprefixer@10.4.21", "", { "dependencies": { "browserslist": "^4.24.4", "caniuse-lite": "^1.0.30001702", "fraction.js": "^4.3.7", "normalize-range": "^0.1.2", "picocolors": "^1.1.1", "postcss-value-parser": "^4.2.0" }, "peerDependencies": { "postcss": "^8.1.0" }, "bin": "bin/autoprefixer" }, "sha512-O+A6LWV5LDHSJD3LjHYoNi4VLsj/Whi7k6zG12xTYaU4cQ8oxQGckXNX8cRHK5yOZ/ppVHe0ZBXGzSV9jXdVbQ=="],
|
||||||
|
|
||||||
|
"balanced-match": ["balanced-match@1.0.2", "", {}, "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw=="],
|
||||||
|
|
||||||
|
"base64-js": ["base64-js@1.5.1", "", {}, "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA=="],
|
||||||
|
|
||||||
|
"baseline-browser-mapping": ["baseline-browser-mapping@2.8.23", "", { "bin": "dist/cli.js" }, "sha512-616V5YX4bepJFzNyOfce5Fa8fDJMfoxzOIzDCZwaGL8MKVpFrXqfNUoIpRn9YMI5pXf/VKgzjB4htFMsFKKdiQ=="],
|
||||||
|
|
||||||
|
"binary-extensions": ["binary-extensions@2.3.0", "", {}, "sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw=="],
|
||||||
|
|
||||||
|
"boolbase": ["boolbase@1.0.0", "", {}, "sha512-JZOSA7Mo9sNGB8+UjSgzdLtokWAky1zbztM3WRLCbZ70/3cTANmQmOdR7y2g+J0e2WXywy1yS468tY+IruqEww=="],
|
||||||
|
|
||||||
|
"brace-expansion": ["brace-expansion@1.1.12", "", { "dependencies": { "balanced-match": "^1.0.0", "concat-map": "0.0.1" } }, "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg=="],
|
||||||
|
|
||||||
|
"braces": ["braces@3.0.3", "", { "dependencies": { "fill-range": "^7.1.1" } }, "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA=="],
|
||||||
|
|
||||||
|
"brotli": ["brotli@1.3.3", "", { "dependencies": { "base64-js": "^1.1.2" } }, "sha512-oTKjJdShmDuGW94SyyaoQvAjf30dZaHnjJ8uAF+u2/vGJkJbJPJAT1gDiOJP5v1Zb6f9KEyW/1HpuaWIXtGHPg=="],
|
||||||
|
|
||||||
|
"browserslist": ["browserslist@4.27.0", "", { "dependencies": { "baseline-browser-mapping": "^2.8.19", "caniuse-lite": "^1.0.30001751", "electron-to-chromium": "^1.5.238", "node-releases": "^2.0.26", "update-browserslist-db": "^1.1.4" }, "bin": "cli.js" }, "sha512-AXVQwdhot1eqLihwasPElhX2tAZiBjWdJ9i/Zcj2S6QYIjkx62OKSfnobkriB81C3l4w0rVy3Nt4jaTBltYEpw=="],
|
||||||
|
|
||||||
|
"caniuse-api": ["caniuse-api@3.0.0", "", { "dependencies": { "browserslist": "^4.0.0", "caniuse-lite": "^1.0.0", "lodash.memoize": "^4.1.2", "lodash.uniq": "^4.5.0" } }, "sha512-bsTwuIg/BZZK/vreVTYYbSWoe2F+71P7K5QGEX+pT250DZbfU1MQ5prOKpPR+LL6uWKK3KMwMCAS74QB3Um1uw=="],
|
||||||
|
|
||||||
|
"caniuse-lite": ["caniuse-lite@1.0.30001753", "", {}, "sha512-Bj5H35MD/ebaOV4iDLqPEtiliTN29qkGtEHCwawWn4cYm+bPJM2NsaP30vtZcnERClMzp52J4+aw2UNbK4o+zw=="],
|
||||||
|
|
||||||
|
"chokidar": ["chokidar@3.6.0", "", { "dependencies": { "anymatch": "~3.1.2", "braces": "~3.0.2", "glob-parent": "~5.1.2", "is-binary-path": "~2.1.0", "is-glob": "~4.0.1", "normalize-path": "~3.0.0", "readdirp": "~3.6.0" }, "optionalDependencies": { "fsevents": "~2.3.2" } }, "sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw=="],
|
||||||
|
|
||||||
|
"cliui": ["cliui@8.0.1", "", { "dependencies": { "string-width": "^4.2.0", "strip-ansi": "^6.0.1", "wrap-ansi": "^7.0.0" } }, "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ=="],
|
||||||
|
|
||||||
|
"color-convert": ["color-convert@2.0.1", "", { "dependencies": { "color-name": "~1.1.4" } }, "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ=="],
|
||||||
|
|
||||||
|
"color-name": ["color-name@1.1.4", "", {}, "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA=="],
|
||||||
|
|
||||||
|
"colord": ["colord@2.9.3", "", {}, "sha512-jeC1axXpnb0/2nn/Y1LPuLdgXBLH7aDcHu4KEKfqw3CUhX7ZpfBSlPKyqXE6btIgEzfWtrX3/tyBCaCvXvMkOw=="],
|
||||||
|
|
||||||
|
"commander": ["commander@11.1.0", "", {}, "sha512-yPVavfyCcRhmorC7rWlkHn15b4wDVgVmBA7kV4QVBsF7kv/9TKJAbAXVTxvTnwP8HHKjRCJDClKbciiYS7p0DQ=="],
|
||||||
|
|
||||||
|
"concat-map": ["concat-map@0.0.1", "", {}, "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg=="],
|
||||||
|
|
||||||
|
"css-declaration-sorter": ["css-declaration-sorter@7.2.0", "", { "peerDependencies": { "postcss": "^8.0.9" } }, "sha512-h70rUM+3PNFuaBDTLe8wF/cdWu+dOZmb7pJt8Z2sedYbAcQVQV/tEchueg3GWxwqS0cxtbxmaHEdkNACqcvsow=="],
|
||||||
|
|
||||||
|
"css-select": ["css-select@5.2.2", "", { "dependencies": { "boolbase": "^1.0.0", "css-what": "^6.1.0", "domhandler": "^5.0.2", "domutils": "^3.0.1", "nth-check": "^2.0.1" } }, "sha512-TizTzUddG/xYLA3NXodFM0fSbNizXjOKhqiQQwvhlspadZokn1KDy0NZFS0wuEubIYAV5/c1/lAr0TaaFXEXzw=="],
|
||||||
|
|
||||||
|
"css-tree": ["css-tree@3.1.0", "", { "dependencies": { "mdn-data": "2.12.2", "source-map-js": "^1.0.1" } }, "sha512-0eW44TGN5SQXU1mWSkKwFstI/22X2bG1nYzZTYMAWjylYURhse752YgbE4Cx46AC+bAvI+/dYTPRk1LqSUnu6w=="],
|
||||||
|
|
||||||
|
"css-what": ["css-what@6.2.2", "", {}, "sha512-u/O3vwbptzhMs3L1fQE82ZSLHQQfto5gyZzwteVIEyeaY5Fc7R4dapF/BvRoSYFeqfBk4m0V1Vafq5Pjv25wvA=="],
|
||||||
|
|
||||||
|
"cssesc": ["cssesc@3.0.0", "", { "bin": "bin/cssesc" }, "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg=="],
|
||||||
|
|
||||||
|
"cssnano": ["cssnano@7.1.2", "", { "dependencies": { "cssnano-preset-default": "^7.0.10", "lilconfig": "^3.1.3" }, "peerDependencies": { "postcss": "^8.4.32" } }, "sha512-HYOPBsNvoiFeR1eghKD5C3ASm64v9YVyJB4Ivnl2gqKoQYvjjN/G0rztvKQq8OxocUtC6sjqY8jwYngIB4AByA=="],
|
||||||
|
|
||||||
|
"cssnano-preset-advanced": ["cssnano-preset-advanced@7.0.10", "", { "dependencies": { "autoprefixer": "^10.4.21", "browserslist": "^4.27.0", "cssnano-preset-default": "^7.0.10", "postcss-discard-unused": "^7.0.4", "postcss-merge-idents": "^7.0.1", "postcss-reduce-idents": "^7.0.1", "postcss-zindex": "^7.0.1" }, "peerDependencies": { "postcss": "^8.4.32" } }, "sha512-lfsKxX4H6WS7BbNyDxkGOu2VgN4bbHQpY8llA3i3SJ9ozAPJ1MHq265Aw0aslM161qiS0zhCHaC6zRcEbNAgUA=="],
|
||||||
|
|
||||||
|
"cssnano-preset-default": ["cssnano-preset-default@7.0.10", "", { "dependencies": { "browserslist": "^4.27.0", "css-declaration-sorter": "^7.2.0", "cssnano-utils": "^5.0.1", "postcss-calc": "^10.1.1", "postcss-colormin": "^7.0.5", "postcss-convert-values": "^7.0.8", "postcss-discard-comments": "^7.0.5", "postcss-discard-duplicates": "^7.0.2", "postcss-discard-empty": "^7.0.1", "postcss-discard-overridden": "^7.0.1", "postcss-merge-longhand": "^7.0.5", "postcss-merge-rules": "^7.0.7", "postcss-minify-font-values": "^7.0.1", "postcss-minify-gradients": "^7.0.1", "postcss-minify-params": "^7.0.5", "postcss-minify-selectors": "^7.0.5", "postcss-normalize-charset": "^7.0.1", "postcss-normalize-display-values": "^7.0.1", "postcss-normalize-positions": "^7.0.1", "postcss-normalize-repeat-style": "^7.0.1", "postcss-normalize-string": "^7.0.1", "postcss-normalize-timing-functions": "^7.0.1", "postcss-normalize-unicode": "^7.0.5", "postcss-normalize-url": "^7.0.1", "postcss-normalize-whitespace": "^7.0.1", "postcss-ordered-values": "^7.0.2", "postcss-reduce-initial": "^7.0.5", "postcss-reduce-transforms": "^7.0.1", "postcss-svgo": "^7.1.0", "postcss-unique-selectors": "^7.0.4" }, "peerDependencies": { "postcss": "^8.4.32" } }, "sha512-6ZBjW0Lf1K1Z+0OKUAUpEN62tSXmYChXWi2NAA0afxEVsj9a+MbcB1l5qel6BHJHmULai2fCGRthCeKSFbScpA=="],
|
||||||
|
|
||||||
|
"cssnano-utils": ["cssnano-utils@5.0.1", "", { "peerDependencies": { "postcss": "^8.4.32" } }, "sha512-ZIP71eQgG9JwjVZsTPSqhc6GHgEr53uJ7tK5///VfyWj6Xp2DBmixWHqJgPno+PqATzn48pL42ww9x5SSGmhZg=="],
|
||||||
|
|
||||||
|
"csso": ["csso@5.0.5", "", { "dependencies": { "css-tree": "~2.2.0" } }, "sha512-0LrrStPOdJj+SPCCrGhzryycLjwcgUSHBtxNA8aIDxf0GLsRh1cKYhB00Gd1lDOS4yGH69+SNn13+TWbVHETFQ=="],
|
||||||
|
|
||||||
|
"cuint": ["cuint@0.2.2", "", {}, "sha512-d4ZVpCW31eWwCMe1YT3ur7mUDnTXbgwyzaL320DrcRT45rfjYxkt5QWLrmOJ+/UEAI2+fQgKe/fCjR8l4TpRgw=="],
|
||||||
|
|
||||||
|
"dependency-graph": ["dependency-graph@1.0.0", "", {}, "sha512-cW3gggJ28HZ/LExwxP2B++aiKxhJXMSIt9K48FOXQkm+vuG5gyatXnLsONRJdzO/7VfjDIiaOOa/bs4l464Lwg=="],
|
||||||
|
|
||||||
|
"dom-serializer": ["dom-serializer@2.0.0", "", { "dependencies": { "domelementtype": "^2.3.0", "domhandler": "^5.0.2", "entities": "^4.2.0" } }, "sha512-wIkAryiqt/nV5EQKqQpo3SToSOV9J0DnbJqwK7Wv/Trc92zIAYZ4FlMu+JPFW1DfGFt81ZTCGgDEabffXeLyJg=="],
|
||||||
|
|
||||||
|
"domelementtype": ["domelementtype@2.3.0", "", {}, "sha512-OLETBj6w0OsagBwdXnPdN0cnMfF9opN69co+7ZrbfPGrdpPVNBUj02spi6B1N7wChLQiPn4CSH/zJvXw56gmHw=="],
|
||||||
|
|
||||||
|
"domhandler": ["domhandler@5.0.3", "", { "dependencies": { "domelementtype": "^2.3.0" } }, "sha512-cgwlv/1iFQiFnU96XXgROh8xTeetsnJiDsTc7TYCLFd9+/WNkIqPTxiM/8pSd8VIrhXGTf1Ny1q1hquVqDJB5w=="],
|
||||||
|
|
||||||
|
"domutils": ["domutils@3.2.2", "", { "dependencies": { "dom-serializer": "^2.0.0", "domelementtype": "^2.3.0", "domhandler": "^5.0.3" } }, "sha512-6kZKyUajlDuqlHKVX1w7gyslj9MPIXzIFiz/rGu35uC1wMi+kMhQwGhl4lt9unC9Vb9INnY9Z3/ZA3+FhASLaw=="],
|
||||||
|
|
||||||
|
"electron-to-chromium": ["electron-to-chromium@1.5.244", "", {}, "sha512-OszpBN7xZX4vWMPJwB9illkN/znA8M36GQqQxi6MNy9axWxhOfJyZZJtSLQCpEFLHP2xK33BiWx9aIuIEXVCcw=="],
|
||||||
|
|
||||||
|
"emoji-regex": ["emoji-regex@8.0.0", "", {}, "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A=="],
|
||||||
|
|
||||||
|
"entities": ["entities@4.5.0", "", {}, "sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw=="],
|
||||||
|
|
||||||
|
"esbuild": ["esbuild@0.27.2", "", { "optionalDependencies": { "@esbuild/aix-ppc64": "0.27.2", "@esbuild/android-arm": "0.27.2", "@esbuild/android-arm64": "0.27.2", "@esbuild/android-x64": "0.27.2", "@esbuild/darwin-arm64": "0.27.2", "@esbuild/darwin-x64": "0.27.2", "@esbuild/freebsd-arm64": "0.27.2", "@esbuild/freebsd-x64": "0.27.2", "@esbuild/linux-arm": "0.27.2", "@esbuild/linux-arm64": "0.27.2", "@esbuild/linux-ia32": "0.27.2", "@esbuild/linux-loong64": "0.27.2", "@esbuild/linux-mips64el": "0.27.2", "@esbuild/linux-ppc64": "0.27.2", "@esbuild/linux-riscv64": "0.27.2", "@esbuild/linux-s390x": "0.27.2", "@esbuild/linux-x64": "0.27.2", "@esbuild/netbsd-arm64": "0.27.2", "@esbuild/netbsd-x64": "0.27.2", "@esbuild/openbsd-arm64": "0.27.2", "@esbuild/openbsd-x64": "0.27.2", "@esbuild/openharmony-arm64": "0.27.2", "@esbuild/sunos-x64": "0.27.2", "@esbuild/win32-arm64": "0.27.2", "@esbuild/win32-ia32": "0.27.2", "@esbuild/win32-x64": "0.27.2" }, "bin": "bin/esbuild" }, "sha512-HyNQImnsOC7X9PMNaCIeAm4ISCQXs5a5YasTXVliKv4uuBo1dKrG0A+uQS8M5eXjVMnLg3WgXaKvprHlFJQffw=="],
|
||||||
|
|
||||||
|
"escalade": ["escalade@3.2.0", "", {}, "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA=="],
|
||||||
|
|
||||||
|
"fdir": ["fdir@6.5.0", "", { "peerDependencies": { "picomatch": "^3 || ^4" } }, "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg=="],
|
||||||
|
|
||||||
|
"fill-range": ["fill-range@7.1.1", "", { "dependencies": { "to-regex-range": "^5.0.1" } }, "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg=="],
|
||||||
|
|
||||||
|
"fraction.js": ["fraction.js@4.3.7", "", {}, "sha512-ZsDfxO51wGAXREY55a7la9LScWpwv9RxIrYABrlvOFBlH/ShPnrtsXeuUIfXKKOVicNxQ+o8JTbJvjS4M89yew=="],
|
||||||
|
|
||||||
|
"fs-extra": ["fs-extra@11.3.1", "", { "dependencies": { "graceful-fs": "^4.2.0", "jsonfile": "^6.0.1", "universalify": "^2.0.0" } }, "sha512-eXvGGwZ5CL17ZSwHWd3bbgk7UUpF6IFHtP57NYYakPvHOs8GDgDe5KJI36jIJzDkJ6eJjuzRA8eBQb6SkKue0g=="],
|
||||||
|
|
||||||
|
"fsevents": ["fsevents@2.3.2", "", { "os": "darwin" }, "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA=="],
|
||||||
|
|
||||||
|
"function-bind": ["function-bind@1.1.2", "", {}, "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA=="],
|
||||||
|
|
||||||
|
"get-caller-file": ["get-caller-file@2.0.5", "", {}, "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg=="],
|
||||||
|
|
||||||
|
"glob-parent": ["glob-parent@5.1.2", "", { "dependencies": { "is-glob": "^4.0.1" } }, "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow=="],
|
||||||
|
|
||||||
|
"graceful-fs": ["graceful-fs@4.2.11", "", {}, "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ=="],
|
||||||
|
|
||||||
|
"hasown": ["hasown@2.0.2", "", { "dependencies": { "function-bind": "^1.1.2" } }, "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ=="],
|
||||||
|
|
||||||
|
"http-https": ["http-https@1.0.0", "", {}, "sha512-o0PWwVCSp3O0wS6FvNr6xfBCHgt0m1tvPLFOCc2iFDKTRAXhB7m8klDf7ErowFH8POa6dVdGatKU5I1YYwzUyg=="],
|
||||||
|
|
||||||
|
"is-binary-path": ["is-binary-path@2.1.0", "", { "dependencies": { "binary-extensions": "^2.0.0" } }, "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw=="],
|
||||||
|
|
||||||
|
"is-core-module": ["is-core-module@2.16.1", "", { "dependencies": { "hasown": "^2.0.2" } }, "sha512-UfoeMA6fIJ8wTYFEUjelnaGI67v6+N7qXJEvQuIGa99l4xsCruSYOVSQ0uPANn4dAzm8lkYPaKLrrijLq7x23w=="],
|
||||||
|
|
||||||
|
"is-extglob": ["is-extglob@2.1.1", "", {}, "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ=="],
|
||||||
|
|
||||||
|
"is-fullwidth-code-point": ["is-fullwidth-code-point@3.0.0", "", {}, "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg=="],
|
||||||
|
|
||||||
|
"is-glob": ["is-glob@4.0.3", "", { "dependencies": { "is-extglob": "^2.1.1" } }, "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg=="],
|
||||||
|
|
||||||
|
"is-number": ["is-number@7.0.0", "", {}, "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng=="],
|
||||||
|
|
||||||
|
"is-url": ["is-url@1.2.4", "", {}, "sha512-ITvGim8FhRiYe4IQ5uHSkj7pVaPDrCTkNd3yq3cV7iZAcJdHTUMPMEHcqSOy9xZ9qFenQCvi+2wjH9a1nXqHww=="],
|
||||||
|
|
||||||
|
"jsonfile": ["jsonfile@6.2.0", "", { "dependencies": { "universalify": "^2.0.0" }, "optionalDependencies": { "graceful-fs": "^4.1.6" } }, "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg=="],
|
||||||
|
|
||||||
|
"lilconfig": ["lilconfig@3.1.3", "", {}, "sha512-/vlFKAoH5Cgt3Ie+JLhRbwOsCQePABiU3tJ1egGvyQ+33R/vcwM2Zl2QR/LzjsBeItPt3oSVXapn+m4nQDvpzw=="],
|
||||||
|
|
||||||
|
"lodash.assign": ["lodash.assign@4.2.0", "", {}, "sha512-hFuH8TY+Yji7Eja3mGiuAxBqLagejScbG8GbG0j6o9vzn0YL14My+ktnqtZgFTosKymC9/44wP6s7xyuLfnClw=="],
|
||||||
|
|
||||||
|
"lodash.memoize": ["lodash.memoize@4.1.2", "", {}, "sha512-t7j+NzmgnQzTAYXcsHYLgimltOV1MXHtlOWf6GjL9Kj8GK5FInw5JotxvbOs+IvV1/Dzo04/fCGfLVs7aXb4Ag=="],
|
||||||
|
|
||||||
|
"lodash.trim": ["lodash.trim@4.5.1", "", {}, "sha512-nJAlRl/K+eiOehWKDzoBVrSMhK0K3A3YQsUNXHQa5yIrKBAhsZgSu3KoAFoFT+mEgiyBHddZ0pRk1ITpIp90Wg=="],
|
||||||
|
|
||||||
|
"lodash.uniq": ["lodash.uniq@4.5.0", "", {}, "sha512-xfBaXQd9ryd9dlSDvnvI0lvxfLJlYAZzXomUYzLKtUeOQvOP5piqAWuGtrhWeqaXK9hhoM/iyJc5AV+XfsX3HQ=="],
|
||||||
|
|
||||||
|
"make-dir": ["make-dir@3.1.0", "", { "dependencies": { "semver": "^6.0.0" } }, "sha512-g3FeP20LNwhALb/6Cz6Dd4F2ngze0jz7tbzrD2wAV+o9FeNHe4rL+yK2md0J/fiSf1sa1ADhXqi5+oVwOM/eGw=="],
|
||||||
|
|
||||||
|
"mdn-data": ["mdn-data@2.12.2", "", {}, "sha512-IEn+pegP1aManZuckezWCO+XZQDplx1366JoVhTpMpBB1sPey/SbveZQUosKiKiGYjg1wH4pMlNgXbCiYgihQA=="],
|
||||||
|
|
||||||
|
"mime": ["mime@2.5.2", "", { "bin": "cli.js" }, "sha512-tqkh47FzKeCPD2PUiPB6pkbMzsCasjxAfC62/Wap5qrUWcb+sFasXUC5I3gYM5iBM8v/Qpn4UK0x+j0iHyFPDg=="],
|
||||||
|
|
||||||
|
"minimatch": ["minimatch@3.0.8", "", { "dependencies": { "brace-expansion": "^1.1.7" } }, "sha512-6FsRAQsxQ61mw+qP1ZzbL9Bc78x2p5OqNgNpnoAFLTrX8n5Kxph0CsnhmKKNXTWjXqU5L0pGPR7hYk+XWZr60Q=="],
|
||||||
|
|
||||||
|
"nanoid": ["nanoid@3.3.11", "", { "bin": "bin/nanoid.cjs" }, "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w=="],
|
||||||
|
|
||||||
|
"node-releases": ["node-releases@2.0.27", "", {}, "sha512-nmh3lCkYZ3grZvqcCH+fjmQ7X+H0OeZgP40OierEaAptX4XofMh5kwNbWh7lBduUzCcV/8kZ+NDLCwm2iorIlA=="],
|
||||||
|
|
||||||
|
"normalize-path": ["normalize-path@3.0.0", "", {}, "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA=="],
|
||||||
|
|
||||||
|
"normalize-range": ["normalize-range@0.1.2", "", {}, "sha512-bdok/XvKII3nUpklnV6P2hxtMNrCboOjAcyBuQnWEhO665FwrSNRxU+AqpsyvO6LgGYPspN+lu5CLtw4jPRKNA=="],
|
||||||
|
|
||||||
|
"nth-check": ["nth-check@2.1.1", "", { "dependencies": { "boolbase": "^1.0.0" } }, "sha512-lqjrjmaOoAnWfMmBPL+XNnynZh2+swxiX3WUE0s4yEHI6m+AwrK2UZOimIRl3X/4QctVqS8AiZjFqyOGrMXb/w=="],
|
||||||
|
|
||||||
|
"path-parse": ["path-parse@1.0.7", "", {}, "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw=="],
|
||||||
|
|
||||||
|
"picocolors": ["picocolors@1.1.1", "", {}, "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA=="],
|
||||||
|
|
||||||
|
"picomatch": ["picomatch@4.0.3", "", {}, "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q=="],
|
||||||
|
|
||||||
|
"pify": ["pify@2.3.0", "", {}, "sha512-udgsAY+fTnvv7kI7aaxbqwWNb0AHiB0qBO89PZKPkoTmGOgdbrHDKD+0B2X4uTfJ/FT1R09r9gTsjUjNJotuog=="],
|
||||||
|
|
||||||
|
"playwright": ["playwright@1.52.0", "", { "dependencies": { "playwright-core": "1.52.0" }, "optionalDependencies": { "fsevents": "2.3.2" }, "bin": "cli.js" }, "sha512-JAwMNMBlxJ2oD1kce4KPtMkDeKGHQstdpFPcPH3maElAXon/QZeTvtsfXmTMRyO9TslfoYOXkSsvao2nE1ilTw=="],
|
||||||
|
|
||||||
|
"playwright-core": ["playwright-core@1.52.0", "", { "bin": "cli.js" }, "sha512-l2osTgLXSMeuLZOML9qYODUQoPPnUsKsb5/P6LJ2e6uPKXUdPK5WYhN4z03G+YNbWmGDY4YENauNu4ZKczreHg=="],
|
||||||
|
|
||||||
|
"postcss": ["postcss@8.5.6", "", { "dependencies": { "nanoid": "^3.3.11", "picocolors": "^1.1.1", "source-map-js": "^1.2.1" } }, "sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg=="],
|
||||||
|
|
||||||
|
"postcss-calc": ["postcss-calc@10.1.1", "", { "dependencies": { "postcss-selector-parser": "^7.0.0", "postcss-value-parser": "^4.2.0" }, "peerDependencies": { "postcss": "^8.4.38" } }, "sha512-NYEsLHh8DgG/PRH2+G9BTuUdtf9ViS+vdoQ0YA5OQdGsfN4ztiwtDWNtBl9EKeqNMFnIu8IKZ0cLxEQ5r5KVMw=="],
|
||||||
|
|
||||||
|
"postcss-cli": ["postcss-cli@11.0.1", "", { "dependencies": { "chokidar": "^3.3.0", "dependency-graph": "^1.0.0", "fs-extra": "^11.0.0", "picocolors": "^1.0.0", "postcss-load-config": "^5.0.0", "postcss-reporter": "^7.0.0", "pretty-hrtime": "^1.0.3", "read-cache": "^1.0.0", "slash": "^5.0.0", "tinyglobby": "^0.2.12", "yargs": "^17.0.0" }, "peerDependencies": { "postcss": "^8.0.0" }, "bin": { "postcss": "index.js" } }, "sha512-0UnkNPSayHKRe/tc2YGW6XnSqqOA9eqpiRMgRlV1S6HdGi16vwJBx7lviARzbV1HpQHqLLRH3o8vTcB0cLc+5g=="],
|
||||||
|
|
||||||
|
"postcss-colormin": ["postcss-colormin@7.0.5", "", { "dependencies": { "browserslist": "^4.27.0", "caniuse-api": "^3.0.0", "colord": "^2.9.3", "postcss-value-parser": "^4.2.0" }, "peerDependencies": { "postcss": "^8.4.32" } }, "sha512-ekIBP/nwzRWhEMmIxHHbXHcMdzd1HIUzBECaj5KEdLz9DVP2HzT065sEhvOx1dkLjYW7jyD0CngThx6bpFi2fA=="],
|
||||||
|
|
||||||
|
"postcss-convert-values": ["postcss-convert-values@7.0.8", "", { "dependencies": { "browserslist": "^4.27.0", "postcss-value-parser": "^4.2.0" }, "peerDependencies": { "postcss": "^8.4.32" } }, "sha512-+XNKuPfkHTCEo499VzLMYn94TiL3r9YqRE3Ty+jP7UX4qjewUONey1t7CG21lrlTLN07GtGM8MqFVp86D4uKJg=="],
|
||||||
|
|
||||||
|
"postcss-discard-comments": ["postcss-discard-comments@7.0.5", "", { "dependencies": { "postcss-selector-parser": "^7.1.0" }, "peerDependencies": { "postcss": "^8.4.32" } }, "sha512-IR2Eja8WfYgN5n32vEGSctVQ1+JARfu4UH8M7bgGh1bC+xI/obsPJXaBpQF7MAByvgwZinhpHpdrmXtvVVlKcQ=="],
|
||||||
|
|
||||||
|
"postcss-discard-duplicates": ["postcss-discard-duplicates@7.0.2", "", { "peerDependencies": { "postcss": "^8.4.32" } }, "sha512-eTonaQvPZ/3i1ASDHOKkYwAybiM45zFIc7KXils4mQmHLqIswXD9XNOKEVxtTFnsmwYzF66u4LMgSr0abDlh5w=="],
|
||||||
|
|
||||||
|
"postcss-discard-empty": ["postcss-discard-empty@7.0.1", "", { "peerDependencies": { "postcss": "^8.4.32" } }, "sha512-cFrJKZvcg/uxB6Ijr4l6qmn3pXQBna9zyrPC+sK0zjbkDUZew+6xDltSF7OeB7rAtzaaMVYSdbod+sZOCWnMOg=="],
|
||||||
|
|
||||||
|
"postcss-discard-overridden": ["postcss-discard-overridden@7.0.1", "", { "peerDependencies": { "postcss": "^8.4.32" } }, "sha512-7c3MMjjSZ/qYrx3uc1940GSOzN1Iqjtlqe8uoSg+qdVPYyRb0TILSqqmtlSFuE4mTDECwsm397Ya7iXGzfF7lg=="],
|
||||||
|
|
||||||
|
"postcss-discard-unused": ["postcss-discard-unused@7.0.4", "", { "dependencies": { "postcss-selector-parser": "^7.1.0" }, "peerDependencies": { "postcss": "^8.4.32" } }, "sha512-/d6sIm8SSJbDDzdHyt/BWZ5upC6Dtn6JIL0uQts+AuvA5ddVmkw/3H4NtDv7DybGzCA1o3Q9R6kt4qsnS2mCSQ=="],
|
||||||
|
|
||||||
|
"postcss-import": ["postcss-import@16.1.1", "", { "dependencies": { "postcss-value-parser": "^4.0.0", "read-cache": "^1.0.0", "resolve": "^1.1.7" }, "peerDependencies": { "postcss": "^8.0.0" } }, "sha512-2xVS1NCZAfjtVdvXiyegxzJ447GyqCeEI5V7ApgQVOWnros1p5lGNovJNapwPpMombyFBfqDwt7AD3n2l0KOfQ=="],
|
||||||
|
|
||||||
|
"postcss-import-url": ["postcss-import-url@7.2.0", "", { "dependencies": { "http-https": "^1.0.0", "is-url": "^1.2.4", "lodash.assign": "^4.2.0", "lodash.trim": "^4.5.1", "resolve-relative-url": "^1.0.0" }, "peerDependencies": { "postcss": "^8.0.0" } }, "sha512-El61K/5+Rv753G9mBiHyQlOIN2mBfN0YHPMXLlgIo/m1+tPDLM32wd97WoUjc8FHUnC6EyyfVA8RDuKoyuVl0Q=="],
|
||||||
|
|
||||||
|
"postcss-load-config": ["postcss-load-config@5.1.0", "", { "dependencies": { "lilconfig": "^3.1.1", "yaml": "^2.4.2" }, "peerDependencies": { "jiti": ">=1.21.0", "postcss": ">=8.0.9", "tsx": "^4.8.1" }, "optionalPeers": ["jiti", "tsx"] }, "sha512-G5AJ+IX0aD0dygOE0yFZQ/huFFMSNneyfp0e3/bT05a8OfPC5FUoZRPfGijUdGOJNMewJiwzcHJXFafFzeKFVA=="],
|
||||||
|
|
||||||
|
"postcss-merge-idents": ["postcss-merge-idents@7.0.1", "", { "dependencies": { "cssnano-utils": "^5.0.1", "postcss-value-parser": "^4.2.0" }, "peerDependencies": { "postcss": "^8.4.32" } }, "sha512-2KaHTdWvoxMnNn7/aBhS1fnjdMBXHtT9tbW0wwH6/pWeMnIllb3wJ/iy5y67C7+uyW9gIOL7VM4XtvkRI6+ZXQ=="],
|
||||||
|
|
||||||
|
"postcss-merge-longhand": ["postcss-merge-longhand@7.0.5", "", { "dependencies": { "postcss-value-parser": "^4.2.0", "stylehacks": "^7.0.5" }, "peerDependencies": { "postcss": "^8.4.32" } }, "sha512-Kpu5v4Ys6QI59FxmxtNB/iHUVDn9Y9sYw66D6+SZoIk4QTz1prC4aYkhIESu+ieG1iylod1f8MILMs1Em3mmIw=="],
|
||||||
|
|
||||||
|
"postcss-merge-rules": ["postcss-merge-rules@7.0.7", "", { "dependencies": { "browserslist": "^4.27.0", "caniuse-api": "^3.0.0", "cssnano-utils": "^5.0.1", "postcss-selector-parser": "^7.1.0" }, "peerDependencies": { "postcss": "^8.4.32" } }, "sha512-njWJrd/Ms6XViwowaaCc+/vqhPG3SmXn725AGrnl+BgTuRPEacjiLEaGq16J6XirMJbtKkTwnt67SS+e2WGoew=="],
|
||||||
|
|
||||||
|
"postcss-minify-font-values": ["postcss-minify-font-values@7.0.1", "", { "dependencies": { "postcss-value-parser": "^4.2.0" }, "peerDependencies": { "postcss": "^8.4.32" } }, "sha512-2m1uiuJeTplll+tq4ENOQSzB8LRnSUChBv7oSyFLsJRtUgAAJGP6LLz0/8lkinTgxrmJSPOEhgY1bMXOQ4ZXhQ=="],
|
||||||
|
|
||||||
|
"postcss-minify-gradients": ["postcss-minify-gradients@7.0.1", "", { "dependencies": { "colord": "^2.9.3", "cssnano-utils": "^5.0.1", "postcss-value-parser": "^4.2.0" }, "peerDependencies": { "postcss": "^8.4.32" } }, "sha512-X9JjaysZJwlqNkJbUDgOclyG3jZEpAMOfof6PUZjPnPrePnPG62pS17CjdM32uT1Uq1jFvNSff9l7kNbmMSL2A=="],
|
||||||
|
|
||||||
|
"postcss-minify-params": ["postcss-minify-params@7.0.5", "", { "dependencies": { "browserslist": "^4.27.0", "cssnano-utils": "^5.0.1", "postcss-value-parser": "^4.2.0" }, "peerDependencies": { "postcss": "^8.4.32" } }, "sha512-FGK9ky02h6Ighn3UihsyeAH5XmLEE2MSGH5Tc4tXMFtEDx7B+zTG6hD/+/cT+fbF7PbYojsmmWjyTwFwW1JKQQ=="],
|
||||||
|
|
||||||
|
"postcss-minify-selectors": ["postcss-minify-selectors@7.0.5", "", { "dependencies": { "cssesc": "^3.0.0", "postcss-selector-parser": "^7.1.0" }, "peerDependencies": { "postcss": "^8.4.32" } }, "sha512-x2/IvofHcdIrAm9Q+p06ZD1h6FPcQ32WtCRVodJLDR+WMn8EVHI1kvLxZuGKz/9EY5nAmI6lIQIrpo4tBy5+ug=="],
|
||||||
|
|
||||||
|
"postcss-normalize-charset": ["postcss-normalize-charset@7.0.1", "", { "peerDependencies": { "postcss": "^8.4.32" } }, "sha512-sn413ofhSQHlZFae//m9FTOfkmiZ+YQXsbosqOWRiVQncU2BA3daX3n0VF3cG6rGLSFVc5Di/yns0dFfh8NFgQ=="],
|
||||||
|
|
||||||
|
"postcss-normalize-display-values": ["postcss-normalize-display-values@7.0.1", "", { "dependencies": { "postcss-value-parser": "^4.2.0" }, "peerDependencies": { "postcss": "^8.4.32" } }, "sha512-E5nnB26XjSYz/mGITm6JgiDpAbVuAkzXwLzRZtts19jHDUBFxZ0BkXAehy0uimrOjYJbocby4FVswA/5noOxrQ=="],
|
||||||
|
|
||||||
|
"postcss-normalize-positions": ["postcss-normalize-positions@7.0.1", "", { "dependencies": { "postcss-value-parser": "^4.2.0" }, "peerDependencies": { "postcss": "^8.4.32" } }, "sha512-pB/SzrIP2l50ZIYu+yQZyMNmnAcwyYb9R1fVWPRxm4zcUFCY2ign7rcntGFuMXDdd9L2pPNUgoODDk91PzRZuQ=="],
|
||||||
|
|
||||||
|
"postcss-normalize-repeat-style": ["postcss-normalize-repeat-style@7.0.1", "", { "dependencies": { "postcss-value-parser": "^4.2.0" }, "peerDependencies": { "postcss": "^8.4.32" } }, "sha512-NsSQJ8zj8TIDiF0ig44Byo3Jk9e4gNt9x2VIlJudnQQ5DhWAHJPF4Tr1ITwyHio2BUi/I6Iv0HRO7beHYOloYQ=="],
|
||||||
|
|
||||||
|
"postcss-normalize-string": ["postcss-normalize-string@7.0.1", "", { "dependencies": { "postcss-value-parser": "^4.2.0" }, "peerDependencies": { "postcss": "^8.4.32" } }, "sha512-QByrI7hAhsoze992kpbMlJSbZ8FuCEc1OT9EFbZ6HldXNpsdpZr+YXC5di3UEv0+jeZlHbZcoCADgb7a+lPmmQ=="],
|
||||||
|
|
||||||
|
"postcss-normalize-timing-functions": ["postcss-normalize-timing-functions@7.0.1", "", { "dependencies": { "postcss-value-parser": "^4.2.0" }, "peerDependencies": { "postcss": "^8.4.32" } }, "sha512-bHifyuuSNdKKsnNJ0s8fmfLMlvsQwYVxIoUBnowIVl2ZAdrkYQNGVB4RxjfpvkMjipqvbz0u7feBZybkl/6NJg=="],
|
||||||
|
|
||||||
|
"postcss-normalize-unicode": ["postcss-normalize-unicode@7.0.5", "", { "dependencies": { "browserslist": "^4.27.0", "postcss-value-parser": "^4.2.0" }, "peerDependencies": { "postcss": "^8.4.32" } }, "sha512-X6BBwiRxVaFHrb2WyBMddIeB5HBjJcAaUHyhLrM2FsxSq5TFqcHSsK7Zu1otag+o0ZphQGJewGH1tAyrD0zX1Q=="],
|
||||||
|
|
||||||
|
"postcss-normalize-url": ["postcss-normalize-url@7.0.1", "", { "dependencies": { "postcss-value-parser": "^4.2.0" }, "peerDependencies": { "postcss": "^8.4.32" } }, "sha512-sUcD2cWtyK1AOL/82Fwy1aIVm/wwj5SdZkgZ3QiUzSzQQofrbq15jWJ3BA7Z+yVRwamCjJgZJN0I9IS7c6tgeQ=="],
|
||||||
|
|
||||||
|
"postcss-normalize-whitespace": ["postcss-normalize-whitespace@7.0.1", "", { "dependencies": { "postcss-value-parser": "^4.2.0" }, "peerDependencies": { "postcss": "^8.4.32" } }, "sha512-vsbgFHMFQrJBJKrUFJNZ2pgBeBkC2IvvoHjz1to0/0Xk7sII24T0qFOiJzG6Fu3zJoq/0yI4rKWi7WhApW+EFA=="],
|
||||||
|
|
||||||
|
"postcss-ordered-values": ["postcss-ordered-values@7.0.2", "", { "dependencies": { "cssnano-utils": "^5.0.1", "postcss-value-parser": "^4.2.0" }, "peerDependencies": { "postcss": "^8.4.32" } }, "sha512-AMJjt1ECBffF7CEON/Y0rekRLS6KsePU6PRP08UqYW4UGFRnTXNrByUzYK1h8AC7UWTZdQ9O3Oq9kFIhm0SFEw=="],
|
||||||
|
|
||||||
|
"postcss-reduce-idents": ["postcss-reduce-idents@7.0.1", "", { "dependencies": { "postcss-value-parser": "^4.2.0" }, "peerDependencies": { "postcss": "^8.4.32" } }, "sha512-CHwIHGaPitJUWY/LLz/jKNI/Zq+KWhH1kfj0SDCTrSQQmcO4fwJ/vkifLTsRhWP6/256MvCHY+RJR3sPwtgA/g=="],
|
||||||
|
|
||||||
|
"postcss-reduce-initial": ["postcss-reduce-initial@7.0.5", "", { "dependencies": { "browserslist": "^4.27.0", "caniuse-api": "^3.0.0" }, "peerDependencies": { "postcss": "^8.4.32" } }, "sha512-RHagHLidG8hTZcnr4FpyMB2jtgd/OcyAazjMhoy5qmWJOx1uxKh4ntk0Pb46ajKM0rkf32lRH4C8c9qQiPR6IA=="],
|
||||||
|
|
||||||
|
"postcss-reduce-transforms": ["postcss-reduce-transforms@7.0.1", "", { "dependencies": { "postcss-value-parser": "^4.2.0" }, "peerDependencies": { "postcss": "^8.4.32" } }, "sha512-MhyEbfrm+Mlp/36hvZ9mT9DaO7dbncU0CvWI8V93LRkY6IYlu38OPg3FObnuKTUxJ4qA8HpurdQOo5CyqqO76g=="],
|
||||||
|
|
||||||
|
"postcss-reporter": ["postcss-reporter@7.1.0", "", { "dependencies": { "picocolors": "^1.0.0", "thenby": "^1.3.4" }, "peerDependencies": { "postcss": "^8.1.0" } }, "sha512-/eoEylGWyy6/DOiMP5lmFRdmDKThqgn7D6hP2dXKJI/0rJSO1ADFNngZfDzxL0YAxFvws+Rtpuji1YIHj4mySA=="],
|
||||||
|
|
||||||
|
"postcss-selector-parser": ["postcss-selector-parser@7.1.0", "", { "dependencies": { "cssesc": "^3.0.0", "util-deprecate": "^1.0.2" } }, "sha512-8sLjZwK0R+JlxlYcTuVnyT2v+htpdrjDOKuMcOVdYjt52Lh8hWRYpxBPoKx/Zg+bcjc3wx6fmQevMmUztS/ccA=="],
|
||||||
|
|
||||||
|
"postcss-svgo": ["postcss-svgo@7.1.0", "", { "dependencies": { "postcss-value-parser": "^4.2.0", "svgo": "^4.0.0" }, "peerDependencies": { "postcss": "^8.4.32" } }, "sha512-KnAlfmhtoLz6IuU3Sij2ycusNs4jPW+QoFE5kuuUOK8awR6tMxZQrs5Ey3BUz7nFCzT3eqyFgqkyrHiaU2xx3w=="],
|
||||||
|
|
||||||
|
"postcss-unique-selectors": ["postcss-unique-selectors@7.0.4", "", { "dependencies": { "postcss-selector-parser": "^7.1.0" }, "peerDependencies": { "postcss": "^8.4.32" } }, "sha512-pmlZjsmEAG7cHd7uK3ZiNSW6otSZ13RHuZ/4cDN/bVglS5EpF2r2oxY99SuOHa8m7AWoBCelTS3JPpzsIs8skQ=="],
|
||||||
|
|
||||||
|
"postcss-url": ["postcss-url@10.1.3", "", { "dependencies": { "make-dir": "~3.1.0", "mime": "~2.5.2", "minimatch": "~3.0.4", "xxhashjs": "~0.2.2" }, "peerDependencies": { "postcss": "^8.0.0" } }, "sha512-FUzyxfI5l2tKmXdYc6VTu3TWZsInayEKPbiyW+P6vmmIrrb4I6CGX0BFoewgYHLK+oIL5FECEK02REYRpBvUCw=="],
|
||||||
|
|
||||||
|
"postcss-value-parser": ["postcss-value-parser@4.2.0", "", {}, "sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ=="],
|
||||||
|
|
||||||
|
"postcss-zindex": ["postcss-zindex@7.0.1", "", { "peerDependencies": { "postcss": "^8.4.32" } }, "sha512-bZEfMUhaxNiXC8tw1qoFeYVCusQHlPJS5iqvuzePQjXBGIkyyCeGbqsyeyoODIuLmNE7Wc8GdTIhXpaaWbTX8Q=="],
|
||||||
|
|
||||||
|
"preact": ["preact@10.28.2", "", {}, "sha512-lbteaWGzGHdlIuiJ0l2Jq454m6kcpI1zNje6d8MlGAFlYvP2GO4ibnat7P74Esfz4sPTdM6UxtTwh/d3pwM9JA=="],
|
||||||
|
|
||||||
|
"pretty-hrtime": ["pretty-hrtime@1.0.3", "", {}, "sha512-66hKPCr+72mlfiSjlEB1+45IjXSqvVAIy6mocupoww4tBFE9R9IhwwUGoI4G++Tc9Aq+2rxOt0RFU6gPcrte0A=="],
|
||||||
|
|
||||||
|
"punycode": ["punycode@1.3.2", "", {}, "sha512-RofWgt/7fL5wP1Y7fxE7/EmTLzQVnB0ycyibJ0OOHIlJqTNzglYFxVwETOcIoJqJmpDXJ9xImDv+Fq34F/d4Dw=="],
|
||||||
|
|
||||||
|
"querystring": ["querystring@0.2.0", "", {}, "sha512-X/xY82scca2tau62i9mDyU9K+I+djTMUsvwf7xnUX5GLvVzgJybOJf4Y6o9Zx3oJK/LSXg5tTZBjwzqVPaPO2g=="],
|
||||||
|
|
||||||
|
"read-cache": ["read-cache@1.0.0", "", { "dependencies": { "pify": "^2.3.0" } }, "sha512-Owdv/Ft7IjOgm/i0xvNDZ1LrRANRfew4b2prF3OWMQLxLfu3bS8FVhCsrSCMK4lR56Y9ya+AThoTpDCTxCmpRA=="],
|
||||||
|
|
||||||
|
"readdirp": ["readdirp@3.6.0", "", { "dependencies": { "picomatch": "^2.2.1" } }, "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA=="],
|
||||||
|
|
||||||
|
"require-directory": ["require-directory@2.1.1", "", {}, "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q=="],
|
||||||
|
|
||||||
|
"resolve": ["resolve@1.22.10", "", { "dependencies": { "is-core-module": "^2.16.0", "path-parse": "^1.0.7", "supports-preserve-symlinks-flag": "^1.0.0" }, "bin": "bin/resolve" }, "sha512-NPRy+/ncIMeDlTAsuqwKIiferiawhefFJtkNSW0qZJEqMEb+qBt/77B/jGeeek+F0uOeN05CDa6HXbbIgtVX4w=="],
|
||||||
|
|
||||||
|
"resolve-relative-url": ["resolve-relative-url@1.0.0", "", { "dependencies": { "url": "0.10.x" } }, "sha512-zpcelQBAmrwckiyRmym9os1goECU3EzuTU/UrYkGzXV0i14n8FkyGUvwkOYA5klqVLq1Hz/EiFZMS7bZQdd+EA=="],
|
||||||
|
|
||||||
|
"sax": ["sax@1.4.1", "", {}, "sha512-+aWOz7yVScEGoKNd4PA10LZ8sk0A/z5+nXQG5giUO5rprX9jgYsTdov9qCchZiPIZezbZH+jRut8nPodFAX4Jg=="],
|
||||||
|
|
||||||
|
"semver": ["semver@6.3.1", "", { "bin": "bin/semver.js" }, "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA=="],
|
||||||
|
|
||||||
|
"slash": ["slash@5.1.0", "", {}, "sha512-ZA6oR3T/pEyuqwMgAKT0/hAv8oAXckzbkmR0UkUosQ+Mc4RxGoJkRmwHgHufaenlyAgE1Mxgpdcrf75y6XcnDg=="],
|
||||||
|
|
||||||
|
"source-map-js": ["source-map-js@1.2.1", "", {}, "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA=="],
|
||||||
|
|
||||||
|
"string-width": ["string-width@4.2.3", "", { "dependencies": { "emoji-regex": "^8.0.0", "is-fullwidth-code-point": "^3.0.0", "strip-ansi": "^6.0.1" } }, "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g=="],
|
||||||
|
|
||||||
|
"strip-ansi": ["strip-ansi@6.0.1", "", { "dependencies": { "ansi-regex": "^5.0.1" } }, "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A=="],
|
||||||
|
|
||||||
|
"stylehacks": ["stylehacks@7.0.5", "", { "dependencies": { "browserslist": "^4.24.5", "postcss-selector-parser": "^7.1.0" }, "peerDependencies": { "postcss": "^8.4.32" } }, "sha512-5kNb7V37BNf0Q3w+1pxfa+oiNPS++/b4Jil9e/kPDgrk1zjEd6uR7SZeJiYaLYH6RRSC1XX2/37OTeU/4FvuIA=="],
|
||||||
|
|
||||||
|
"supports-preserve-symlinks-flag": ["supports-preserve-symlinks-flag@1.0.0", "", {}, "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w=="],
|
||||||
|
|
||||||
|
"svgo": ["svgo@4.0.0", "", { "dependencies": { "commander": "^11.1.0", "css-select": "^5.1.0", "css-tree": "^3.0.1", "css-what": "^6.1.0", "csso": "^5.0.5", "picocolors": "^1.1.1", "sax": "^1.4.1" }, "bin": "bin/svgo.js" }, "sha512-VvrHQ+9uniE+Mvx3+C9IEe/lWasXCU0nXMY2kZeLrHNICuRiC8uMPyM14UEaMOFA5mhyQqEkB02VoQ16n3DLaw=="],
|
||||||
|
|
||||||
|
"thenby": ["thenby@1.3.4", "", {}, "sha512-89Gi5raiWA3QZ4b2ePcEwswC3me9JIg+ToSgtE0JWeCynLnLxNr/f9G+xfo9K+Oj4AFdom8YNJjibIARTJmapQ=="],
|
||||||
|
|
||||||
|
"tinyglobby": ["tinyglobby@0.2.15", "", { "dependencies": { "fdir": "^6.5.0", "picomatch": "^4.0.3" } }, "sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ=="],
|
||||||
|
|
||||||
|
"to-regex-range": ["to-regex-range@5.0.1", "", { "dependencies": { "is-number": "^7.0.0" } }, "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ=="],
|
||||||
|
|
||||||
|
"tslib": ["tslib@2.8.1", "", {}, "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w=="],
|
||||||
|
|
||||||
|
"universalify": ["universalify@2.0.1", "", {}, "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw=="],
|
||||||
|
|
||||||
|
"update-browserslist-db": ["update-browserslist-db@1.1.4", "", { "dependencies": { "escalade": "^3.2.0", "picocolors": "^1.1.1" }, "peerDependencies": { "browserslist": ">= 4.21.0" }, "bin": "cli.js" }, "sha512-q0SPT4xyU84saUX+tomz1WLkxUbuaJnR1xWt17M7fJtEJigJeWUNGUqrauFXsHnqev9y9JTRGwk13tFBuKby4A=="],
|
||||||
|
|
||||||
|
"url": ["url@0.10.3", "", { "dependencies": { "punycode": "1.3.2", "querystring": "0.2.0" } }, "sha512-hzSUW2q06EqL1gKM/a+obYHLIO6ct2hwPuviqTTOcfFVc61UbfJ2Q32+uGL/HCPxKqrdGB5QUwIe7UqlDgwsOQ=="],
|
||||||
|
|
||||||
|
"util-deprecate": ["util-deprecate@1.0.2", "", {}, "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw=="],
|
||||||
|
|
||||||
|
"wrap-ansi": ["wrap-ansi@7.0.0", "", { "dependencies": { "ansi-styles": "^4.0.0", "string-width": "^4.1.0", "strip-ansi": "^6.0.0" } }, "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q=="],
|
||||||
|
|
||||||
|
"xxhashjs": ["xxhashjs@0.2.2", "", { "dependencies": { "cuint": "^0.2.2" } }, "sha512-AkTuIuVTET12tpsVIQo+ZU6f/qDmKuRUcjaqR+OIvm+aCBsZ95i7UVY5WJ9TMsSaZ0DA2WxoZ4acu0sPH+OKAw=="],
|
||||||
|
|
||||||
|
"y18n": ["y18n@5.0.8", "", {}, "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA=="],
|
||||||
|
|
||||||
|
"yaml": ["yaml@2.8.1", "", { "bin": "bin.mjs" }, "sha512-lcYcMxX2PO9XMGvAJkJ3OsNMw+/7FKes7/hgerGUYWIoWu5j/+YQqcZr5JnPZWzOsEBgMbSbiSTn/dv/69Mkpw=="],
|
||||||
|
|
||||||
|
"yargs": ["yargs@17.7.2", "", { "dependencies": { "cliui": "^8.0.1", "escalade": "^3.1.1", "get-caller-file": "^2.0.5", "require-directory": "^2.1.1", "string-width": "^4.2.3", "y18n": "^5.0.5", "yargs-parser": "^21.1.1" } }, "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w=="],
|
||||||
|
|
||||||
|
"yargs-parser": ["yargs-parser@21.1.1", "", {}, "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw=="],
|
||||||
|
|
||||||
|
"anymatch/picomatch": ["picomatch@2.3.1", "", {}, "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA=="],
|
||||||
|
|
||||||
|
"csso/css-tree": ["css-tree@2.2.1", "", { "dependencies": { "mdn-data": "2.0.28", "source-map-js": "^1.0.1" } }, "sha512-OA0mILzGc1kCOCSJerOeqDxDQ4HOh+G8NbOJFOTgOCzpw7fCBubk0fEyxp8AgOL/jvLgYA/uV0cMbe43ElF1JA=="],
|
||||||
|
|
||||||
|
"readdirp/picomatch": ["picomatch@2.3.1", "", {}, "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA=="],
|
||||||
|
|
||||||
|
"csso/css-tree/mdn-data": ["mdn-data@2.0.28", "", {}, "sha512-aylIc7Z9y4yzHYAJNuESG3hfhC+0Ibp/MAMiaOZgNv4pmEdFyfZhhhny4MNiAfWdBQ1RQ2mfDWmM1x8SvGyp8g=="],
|
||||||
|
}
|
||||||
|
}
|
||||||
2
cmd/anubis/.gitignore
vendored
2
cmd/anubis/.gitignore
vendored
|
|
@ -1,2 +0,0 @@
|
||||||
*.rpm
|
|
||||||
anubis
|
|
||||||
|
|
@ -10,14 +10,14 @@ import (
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/TecharoHQ/anubis/internal"
|
"git.sad.ovh/sophie/nuke/internal"
|
||||||
"github.com/facebookgo/flagenv"
|
"github.com/facebookgo/flagenv"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
dockerAnnotations = flag.String("docker-annotations", os.Getenv("DOCKER_METADATA_OUTPUT_ANNOTATIONS"), "Docker image annotations")
|
dockerAnnotations = flag.String("docker-annotations", os.Getenv("DOCKER_METADATA_OUTPUT_ANNOTATIONS"), "Docker image annotations")
|
||||||
dockerLabels = flag.String("docker-labels", os.Getenv("DOCKER_METADATA_OUTPUT_LABELS"), "Docker image labels")
|
dockerLabels = flag.String("docker-labels", os.Getenv("DOCKER_METADATA_OUTPUT_LABELS"), "Docker image labels")
|
||||||
dockerRepo = flag.String("docker-repo", "registry.int.xeserv.us/techaro/anubis", "Docker image repository for Anubis")
|
dockerRepo = flag.String("docker-repo", "git.sad.ovh/sophie/nuke", "Docker image repository for Nuke")
|
||||||
dockerTags = flag.String("docker-tags", os.Getenv("DOCKER_METADATA_OUTPUT_TAGS"), "newline separated docker tags including the registry name")
|
dockerTags = flag.String("docker-tags", os.Getenv("DOCKER_METADATA_OUTPUT_TAGS"), "newline separated docker tags including the registry name")
|
||||||
githubEventName = flag.String("github-event-name", "", "GitHub event name")
|
githubEventName = flag.String("github-event-name", "", "GitHub event name")
|
||||||
pullRequestID = flag.Int("pull-request-id", -1, "GitHub pull request ID")
|
pullRequestID = flag.Int("pull-request-id", -1, "GitHub pull request ID")
|
||||||
|
|
@ -33,9 +33,9 @@ func main() {
|
||||||
koDockerRepo := strings.TrimSuffix(*dockerRepo, "/"+filepath.Base(*dockerRepo))
|
koDockerRepo := strings.TrimSuffix(*dockerRepo, "/"+filepath.Base(*dockerRepo))
|
||||||
|
|
||||||
if *githubEventName == "pull_request" && *pullRequestID != -1 {
|
if *githubEventName == "pull_request" && *pullRequestID != -1 {
|
||||||
*dockerRepo = fmt.Sprintf("ttl.sh/techaro/pr-%d/anubis", *pullRequestID)
|
*dockerRepo = fmt.Sprintf("ttl.sh/sophie/pr-%d/nuke", *pullRequestID)
|
||||||
*dockerTags = fmt.Sprintf("ttl.sh/techaro/pr-%d/anubis:24h", *pullRequestID)
|
*dockerTags = fmt.Sprintf("ttl.sh/sophie/pr-%d/nuke:24h", *pullRequestID)
|
||||||
koDockerRepo = fmt.Sprintf("ttl.sh/techaro/pr-%d", *pullRequestID)
|
koDockerRepo = fmt.Sprintf("ttl.sh/sophie/pr-%d", *pullRequestID)
|
||||||
|
|
||||||
slog.Info(
|
slog.Info(
|
||||||
"Building image for pull request",
|
"Building image for pull request",
|
||||||
|
|
@ -102,7 +102,7 @@ func main() {
|
||||||
tags = append(tags, img.tag)
|
tags = append(tags, img.tag)
|
||||||
}
|
}
|
||||||
|
|
||||||
output, err := run(fmt.Sprintf("ko build --platform=all --base-import-paths --tags=%q --image-user=1000 --image-annotation=%q --image-label=%q ./cmd/anubis | tail -n1", strings.Join(tags, ","), *dockerAnnotations, *dockerLabels))
|
output, err := run(fmt.Sprintf("ko build --platform=all --base-import-paths --tags=%q --image-user=1000 --image-annotation=%q --image-label=%q ./cmd/nuke | tail -n1", strings.Join(tags, ","), *dockerAnnotations, *dockerLabels))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatalf("can't run ko build, check stderr: %v", err)
|
log.Fatalf("can't run ko build, check stderr: %v", err)
|
||||||
}
|
}
|
||||||
|
|
@ -125,8 +125,8 @@ func parseImageList(imageList string) ([]image, error) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
// reg.xeiaso.net/techaro/anubis:latest
|
// reg.xeiaso.net/techaro/nuke:latest
|
||||||
// repository: reg.xeiaso.net/techaro/anubis
|
// repository: reg.xeiaso.net/techaro/nuke
|
||||||
// tag: latest
|
// tag: latest
|
||||||
index := strings.LastIndex(img, ":")
|
index := strings.LastIndex(img, ":")
|
||||||
result = append(result, image{
|
result = append(result, image{
|
||||||
|
|
|
||||||
2
cmd/nuke/.gitignore
vendored
Normal file
2
cmd/nuke/.gitignore
vendored
Normal file
|
|
@ -0,0 +1,2 @@
|
||||||
|
*.rpm
|
||||||
|
nuke
|
||||||
|
|
@ -27,14 +27,13 @@ import (
|
||||||
"syscall"
|
"syscall"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/TecharoHQ/anubis"
|
"git.sad.ovh/sophie/nuke"
|
||||||
"github.com/TecharoHQ/anubis/data"
|
"git.sad.ovh/sophie/nuke/data"
|
||||||
"github.com/TecharoHQ/anubis/internal"
|
"git.sad.ovh/sophie/nuke/internal"
|
||||||
libanubis "github.com/TecharoHQ/anubis/lib"
|
libnuke "git.sad.ovh/sophie/nuke/lib"
|
||||||
"github.com/TecharoHQ/anubis/lib/config"
|
"git.sad.ovh/sophie/nuke/lib/config"
|
||||||
botPolicy "github.com/TecharoHQ/anubis/lib/policy"
|
botPolicy "git.sad.ovh/sophie/nuke/lib/policy"
|
||||||
"github.com/TecharoHQ/anubis/lib/thoth"
|
"git.sad.ovh/sophie/nuke/web"
|
||||||
"github.com/TecharoHQ/anubis/web"
|
|
||||||
"github.com/facebookgo/flagenv"
|
"github.com/facebookgo/flagenv"
|
||||||
_ "github.com/joho/godotenv/autoload"
|
_ "github.com/joho/godotenv/autoload"
|
||||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||||
|
|
@ -45,26 +44,26 @@ var (
|
||||||
basePrefix = flag.String("base-prefix", "", "base prefix (root URL) the application is served under e.g. /myapp")
|
basePrefix = flag.String("base-prefix", "", "base prefix (root URL) the application is served under e.g. /myapp")
|
||||||
bind = flag.String("bind", ":8923", "network address to bind HTTP to")
|
bind = flag.String("bind", ":8923", "network address to bind HTTP to")
|
||||||
bindNetwork = flag.String("bind-network", "tcp", "network family to bind HTTP to, e.g. unix, tcp")
|
bindNetwork = flag.String("bind-network", "tcp", "network family to bind HTTP to, e.g. unix, tcp")
|
||||||
challengeDifficulty = flag.Int("difficulty", anubis.DefaultDifficulty, "difficulty of the challenge")
|
challengeDifficulty = flag.Int("difficulty", nuke.DefaultDifficulty, "difficulty of the challenge")
|
||||||
cookieDomain = flag.String("cookie-domain", "", "if set, the top-level domain that the Anubis cookie will be valid for")
|
cookieDomain = flag.String("cookie-domain", "", "if set, the top-level domain that the Nuke cookie will be valid for")
|
||||||
cookieDynamicDomain = flag.Bool("cookie-dynamic-domain", false, "if set, automatically set the cookie Domain value based on the request domain")
|
cookieDynamicDomain = flag.Bool("cookie-dynamic-domain", false, "if set, automatically set the cookie Domain value based on the request domain")
|
||||||
cookieExpiration = flag.Duration("cookie-expiration-time", anubis.CookieDefaultExpirationTime, "The amount of time the authorization cookie is valid for")
|
cookieExpiration = flag.Duration("cookie-expiration-time", nuke.CookieDefaultExpirationTime, "The amount of time the authorization cookie is valid for")
|
||||||
cookiePrefix = flag.String("cookie-prefix", anubis.CookieName, "prefix for browser cookies created by Anubis")
|
cookiePrefix = flag.String("cookie-prefix", nuke.CookieName, "prefix for browser cookies created by Nuke")
|
||||||
cookiePartitioned = flag.Bool("cookie-partitioned", false, "if true, sets the partitioned flag on Anubis cookies, enabling CHIPS support")
|
cookiePartitioned = flag.Bool("cookie-partitioned", false, "if true, sets the partitioned flag on Nuke cookies, enabling CHIPS support")
|
||||||
difficultyInJWT = flag.Bool("difficulty-in-jwt", false, "if true, adds a difficulty field in the JWT claims")
|
difficultyInJWT = flag.Bool("difficulty-in-jwt", false, "if true, adds a difficulty field in the JWT claims")
|
||||||
useSimplifiedExplanation = flag.Bool("use-simplified-explanation", false, "if true, replaces the text when clicking \"Why am I seeing this?\" with a more simplified text for a non-tech-savvy audience.")
|
useSimplifiedExplanation = flag.Bool("use-simplified-explanation", false, "if true, replaces the text when clicking \"Why am I seeing this?\" with a more simplified text for a non-tech-savvy audience.")
|
||||||
forcedLanguage = flag.String("forced-language", "", "if set, this language is being used instead of the one from the request's Accept-Language header")
|
forcedLanguage = flag.String("forced-language", "", "if set, this language is being used instead of the one from the request's Accept-Language header")
|
||||||
hs512Secret = flag.String("hs512-secret", "", "secret used to sign JWTs, uses ed25519 if not set")
|
hs512Secret = flag.String("hs512-secret", "", "secret used to sign JWTs, uses ed25519 if not set")
|
||||||
cookieSecure = flag.Bool("cookie-secure", true, "if true, sets the secure flag on Anubis cookies")
|
cookieSecure = flag.Bool("cookie-secure", true, "if true, sets the secure flag on Nuke cookies")
|
||||||
cookieSameSite = flag.String("cookie-same-site", "None", "sets the same site option on Anubis cookies, will auto-downgrade None to Lax if cookie-secure is false. Valid values are None, Lax, Strict, and Default.")
|
cookieSameSite = flag.String("cookie-same-site", "None", "sets the same site option on Nuke cookies, will auto-downgrade None to Lax if cookie-secure is false. Valid values are None, Lax, Strict, and Default.")
|
||||||
ed25519PrivateKeyHex = flag.String("ed25519-private-key-hex", "", "private key used to sign JWTs, if not set a random one will be assigned")
|
ed25519PrivateKeyHex = flag.String("ed25519-private-key-hex", "", "private key used to sign JWTs, if not set a random one will be assigned")
|
||||||
ed25519PrivateKeyHexFile = flag.String("ed25519-private-key-hex-file", "", "file name containing value for ed25519-private-key-hex")
|
ed25519PrivateKeyHexFile = flag.String("ed25519-private-key-hex-file", "", "file name containing value for ed25519-private-key-hex")
|
||||||
metricsBind = flag.String("metrics-bind", ":9090", "network address to bind metrics to")
|
metricsBind = flag.String("metrics-bind", ":9090", "network address to bind metrics to")
|
||||||
metricsBindNetwork = flag.String("metrics-bind-network", "tcp", "network family for the metrics server to bind to")
|
metricsBindNetwork = flag.String("metrics-bind-network", "tcp", "network family for the metrics server to bind to")
|
||||||
socketMode = flag.String("socket-mode", "0770", "socket mode (permissions) for unix domain sockets.")
|
socketMode = flag.String("socket-mode", "0770", "socket mode (permissions) for unix domain sockets.")
|
||||||
robotsTxt = flag.Bool("serve-robots-txt", false, "serve a robots.txt file that disallows all robots")
|
robotsTxt = flag.Bool("serve-robots-txt", false, "serve a robots.txt file that disallows all robots")
|
||||||
policyFname = flag.String("policy-fname", "", "full path to anubis policy document (defaults to a sensible built-in policy)")
|
policyFname = flag.String("policy-fname", "", "full path to nuke policy document (defaults to a sensible built-in policy)")
|
||||||
redirectDomains = flag.String("redirect-domains", "", "list of domains separated by commas which anubis is allowed to redirect to. Leaving this unset allows any domain.")
|
redirectDomains = flag.String("redirect-domains", "", "list of domains separated by commas which nuke is allowed to redirect to. Leaving this unset allows any domain.")
|
||||||
slogLevel = flag.String("slog-level", "INFO", "logging level (see https://pkg.go.dev/log/slog#hdr-Levels)")
|
slogLevel = flag.String("slog-level", "INFO", "logging level (see https://pkg.go.dev/log/slog#hdr-Levels)")
|
||||||
stripBasePrefix = flag.Bool("strip-base-prefix", false, "if true, strips the base prefix from requests forwarded to the target server")
|
stripBasePrefix = flag.Bool("strip-base-prefix", false, "if true, strips the base prefix from requests forwarded to the target server")
|
||||||
target = flag.String("target", "http://localhost:3923", "target to reverse proxy to, set to an empty string to disable proxying when only using auth request")
|
target = flag.String("target", "http://localhost:3923", "target to reverse proxy to, set to an empty string to disable proxying when only using auth request")
|
||||||
|
|
@ -72,22 +71,19 @@ var (
|
||||||
targetHost = flag.String("target-host", "", "if set, the value of the Host header when forwarding requests to the target")
|
targetHost = flag.String("target-host", "", "if set, the value of the Host header when forwarding requests to the target")
|
||||||
targetInsecureSkipVerify = flag.Bool("target-insecure-skip-verify", false, "if true, skips TLS validation for the backend")
|
targetInsecureSkipVerify = flag.Bool("target-insecure-skip-verify", false, "if true, skips TLS validation for the backend")
|
||||||
targetDisableKeepAlive = flag.Bool("target-disable-keepalive", false, "if true, disables HTTP keep-alive for the backend")
|
targetDisableKeepAlive = flag.Bool("target-disable-keepalive", false, "if true, disables HTTP keep-alive for the backend")
|
||||||
healthcheck = flag.Bool("healthcheck", false, "run a health check against Anubis")
|
healthcheck = flag.Bool("healthcheck", false, "run a health check against nuke")
|
||||||
useRemoteAddress = flag.Bool("use-remote-address", false, "read the client's IP address from the network request, useful for debugging and running Anubis on bare metal")
|
useRemoteAddress = flag.Bool("use-remote-address", false, "read the client's IP address from the network request, useful for debugging and running nuke on bare metal")
|
||||||
debugBenchmarkJS = flag.Bool("debug-benchmark-js", false, "respond to every request with a challenge for benchmarking hashrate")
|
debugBenchmarkJS = flag.Bool("debug-benchmark-js", false, "respond to every request with a challenge for benchmarking hashrate")
|
||||||
ogPassthrough = flag.Bool("og-passthrough", false, "enable Open Graph tag passthrough")
|
ogPassthrough = flag.Bool("og-passthrough", false, "enable Open Graph tag passthrough")
|
||||||
ogTimeToLive = flag.Duration("og-expiry-time", 24*time.Hour, "Open Graph tag cache expiration time")
|
ogTimeToLive = flag.Duration("og-expiry-time", 24*time.Hour, "Open Graph tag cache expiration time")
|
||||||
ogCacheConsiderHost = flag.Bool("og-cache-consider-host", false, "enable or disable the use of the host in the Open Graph tag cache")
|
ogCacheConsiderHost = flag.Bool("og-cache-consider-host", false, "enable or disable the use of the host in the Open Graph tag cache")
|
||||||
extractResources = flag.String("extract-resources", "", "if set, extract the static resources to the specified folder")
|
extractResources = flag.String("extract-resources", "", "if set, extract the static resources to the specified folder")
|
||||||
webmasterEmail = flag.String("webmaster-email", "", "if set, displays webmaster's email on the reject page for appeals")
|
webmasterEmail = flag.String("webmaster-email", "", "if set, displays webmaster's email on the reject page for appeals")
|
||||||
versionFlag = flag.Bool("version", false, "print Anubis version")
|
versionFlag = flag.Bool("version", false, "print nuke version")
|
||||||
publicUrl = flag.String("public-url", "", "the externally accessible URL for this Anubis instance, used for constructing redirect URLs (e.g., for forwardAuth).")
|
publicUrl = flag.String("public-url", "", "the externally accessible URL for this nuke instance, used for constructing redirect URLs (e.g., for forwardAuth).")
|
||||||
xffStripPrivate = flag.Bool("xff-strip-private", true, "if set, strip private addresses from X-Forwarded-For")
|
xffStripPrivate = flag.Bool("xff-strip-private", true, "if set, strip private addresses from X-Forwarded-For")
|
||||||
customRealIPHeader = flag.String("custom-real-ip-header", "", "if set, read remote IP from header of this name (in case your environment doesn't set X-Real-IP header)")
|
customRealIPHeader = flag.String("custom-real-ip-header", "", "if set, read remote IP from header of this name (in case your environment doesn't set X-Real-IP header)")
|
||||||
|
|
||||||
thothInsecure = flag.Bool("thoth-insecure", false, "if set, connect to Thoth over plain HTTP/2, don't enable this unless support told you to")
|
|
||||||
thothURL = flag.String("thoth-url", "", "if set, URL for Thoth, the IP reputation database for Anubis")
|
|
||||||
thothToken = flag.String("thoth-token", "", "if set, API token for Thoth, the IP reputation database for Anubis")
|
|
||||||
jwtRestrictionHeader = flag.String("jwt-restriction-header", "X-Real-IP", "If set, the JWT is only valid if the current value of this header matched the value when the JWT was created")
|
jwtRestrictionHeader = flag.String("jwt-restriction-header", "X-Real-IP", "If set, the JWT is only valid if the current value of this header matched the value when the JWT was created")
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
@ -231,7 +227,7 @@ func makeReverseProxy(target string, targetSNI string, targetHost string, insecu
|
||||||
return dialer.DialContext(ctx, "unix", addr)
|
return dialer.DialContext(ctx, "unix", addr)
|
||||||
}
|
}
|
||||||
// tell transport how to handle the unix url scheme
|
// tell transport how to handle the unix url scheme
|
||||||
transport.RegisterProtocol("unix", libanubis.UnixRoundTripper{Transport: transport})
|
transport.RegisterProtocol("unix", libnuke.UnixRoundTripper{Transport: transport})
|
||||||
}
|
}
|
||||||
|
|
||||||
if insecureSkipVerify || targetSNI != "" {
|
if insecureSkipVerify || targetSNI != "" {
|
||||||
|
|
@ -269,14 +265,14 @@ func main() {
|
||||||
flag.Parse()
|
flag.Parse()
|
||||||
|
|
||||||
if *versionFlag {
|
if *versionFlag {
|
||||||
fmt.Println("Anubis", anubis.Version)
|
fmt.Println("Nuke", nuke.Version)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
internal.SetHealth("anubis", healthv1.HealthCheckResponse_NOT_SERVING)
|
internal.SetHealth("nuke", healthv1.HealthCheckResponse_NOT_SERVING)
|
||||||
|
|
||||||
lg := internal.InitSlog(*slogLevel, os.Stderr)
|
lg := internal.InitSlog(*slogLevel, os.Stderr)
|
||||||
lg.Info("starting up Anubis")
|
lg.Info("starting up Nuke")
|
||||||
|
|
||||||
if *healthcheck {
|
if *healthcheck {
|
||||||
log.Println("running healthcheck")
|
log.Println("running healthcheck")
|
||||||
|
|
@ -309,7 +305,7 @@ func main() {
|
||||||
}
|
}
|
||||||
|
|
||||||
var rp http.Handler
|
var rp http.Handler
|
||||||
// when using anubis via Systemd and environment variables, then it is not possible to set targe to an empty string but only to space
|
// when using nuke via Systemd and environment variables, then it is not possible to set targe to an empty string but only to space
|
||||||
if strings.TrimSpace(*target) != "" {
|
if strings.TrimSpace(*target) != "" {
|
||||||
var err error
|
var err error
|
||||||
rp, err = makeReverseProxy(*target, *targetSNI, *targetHost, *targetInsecureSkipVerify, *targetDisableKeepAlive)
|
rp, err = makeReverseProxy(*target, *targetSNI, *targetHost, *targetInsecureSkipVerify, *targetDisableKeepAlive)
|
||||||
|
|
@ -321,25 +317,8 @@ func main() {
|
||||||
if *cookieDomain != "" && *cookieDynamicDomain {
|
if *cookieDomain != "" && *cookieDynamicDomain {
|
||||||
log.Fatalf("you can't set COOKIE_DOMAIN and COOKIE_DYNAMIC_DOMAIN at the same time")
|
log.Fatalf("you can't set COOKIE_DOMAIN and COOKIE_DYNAMIC_DOMAIN at the same time")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Thoth configuration
|
|
||||||
switch {
|
|
||||||
case *thothURL != "" && *thothToken == "":
|
|
||||||
lg.Warn("THOTH_URL is set but no THOTH_TOKEN is set")
|
|
||||||
case *thothURL == "" && *thothToken != "":
|
|
||||||
lg.Warn("THOTH_TOKEN is set but no THOTH_URL is set")
|
|
||||||
case *thothURL != "" && *thothToken != "":
|
|
||||||
lg.Debug("connecting to Thoth")
|
|
||||||
thothClient, err := thoth.New(ctx, *thothURL, *thothToken, *thothInsecure)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("can't dial thoth at %s: %v", *thothURL, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx = thoth.With(ctx, thothClient)
|
|
||||||
}
|
|
||||||
|
|
||||||
lg.Info("loading policy file", "fname", *policyFname)
|
lg.Info("loading policy file", "fname", *policyFname)
|
||||||
policy, err := libanubis.LoadPoliciesOrDefault(ctx, *policyFname, *challengeDifficulty, *slogLevel)
|
policy, err := libnuke.LoadPoliciesOrDefault(ctx, *policyFname, *challengeDifficulty, *slogLevel)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatalf("can't parse policy file: %v", err)
|
log.Fatalf("can't parse policy file: %v", err)
|
||||||
}
|
}
|
||||||
|
|
@ -351,7 +330,7 @@ func main() {
|
||||||
if policy.Store.IsPersistent() {
|
if policy.Store.IsPersistent() {
|
||||||
if *hs512Secret == "" && *ed25519PrivateKeyHex == "" && *ed25519PrivateKeyHexFile == "" {
|
if *hs512Secret == "" && *ed25519PrivateKeyHex == "" && *ed25519PrivateKeyHexFile == "" {
|
||||||
lg.Warn("[misconfiguration] persistent storage backend is configured, but no private key is set. " +
|
lg.Warn("[misconfiguration] persistent storage backend is configured, but no private key is set. " +
|
||||||
"Challenges will be invalidated when Anubis restarts. " +
|
"Challenges will be invalidated when nuke restarts. " +
|
||||||
"Set HS512_SECRET, ED25519_PRIVATE_KEY_HEX, or ED25519_PRIVATE_KEY_HEX_FILE to ensure challenges survive service restarts. " +
|
"Set HS512_SECRET, ED25519_PRIVATE_KEY_HEX, or ED25519_PRIVATE_KEY_HEX_FILE to ensure challenges survive service restarts. " +
|
||||||
"See: https://anubis.techaro.lol/docs/admin/installation#key-generation")
|
"See: https://anubis.techaro.lol/docs/admin/installation#key-generation")
|
||||||
}
|
}
|
||||||
|
|
@ -413,7 +392,7 @@ func main() {
|
||||||
log.Fatalf("failed to generate ed25519 key: %v", err)
|
log.Fatalf("failed to generate ed25519 key: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
lg.Warn("generating random key, Anubis will have strange behavior when multiple instances are behind the same load balancer target, for more information: see https://anubis.techaro.lol/docs/admin/installation#key-generation")
|
lg.Warn("generating random key, Nuke will have strange behavior when multiple instances are behind the same load balancer target, for more information: see https://anubis.techaro.lol/docs/admin/installation#key-generation")
|
||||||
}
|
}
|
||||||
|
|
||||||
var redirectDomainsList []string
|
var redirectDomainsList []string
|
||||||
|
|
@ -427,13 +406,13 @@ func main() {
|
||||||
redirectDomainsList = append(redirectDomainsList, strings.TrimSpace(domain))
|
redirectDomainsList = append(redirectDomainsList, strings.TrimSpace(domain))
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
lg.Warn("REDIRECT_DOMAINS is not set, Anubis will only redirect to the same domain a request is coming from, see https://anubis.techaro.lol/docs/admin/configuration/redirect-domains")
|
lg.Warn("REDIRECT_DOMAINS is not set, Nuke will only redirect to the same domain a request is coming from, see https://anubis.techaro.lol/docs/admin/configuration/redirect-domains")
|
||||||
}
|
}
|
||||||
|
|
||||||
anubis.CookieName = *cookiePrefix + "-auth"
|
nuke.CookieName = *cookiePrefix + "-auth"
|
||||||
anubis.TestCookieName = *cookiePrefix + "-cookie-verification"
|
nuke.TestCookieName = *cookiePrefix + "-cookie-verification"
|
||||||
anubis.ForcedLanguage = *forcedLanguage
|
nuke.ForcedLanguage = *forcedLanguage
|
||||||
anubis.UseSimplifiedExplanation = *useSimplifiedExplanation
|
nuke.UseSimplifiedExplanation = *useSimplifiedExplanation
|
||||||
|
|
||||||
// If OpenGraph configuration values are not set in the config file, use the
|
// If OpenGraph configuration values are not set in the config file, use the
|
||||||
// values from flags / envvars.
|
// values from flags / envvars.
|
||||||
|
|
@ -444,7 +423,7 @@ func main() {
|
||||||
policy.OpenGraph.Override = map[string]string{}
|
policy.OpenGraph.Override = map[string]string{}
|
||||||
}
|
}
|
||||||
|
|
||||||
s, err := libanubis.New(libanubis.Options{
|
s, err := libnuke.New(libnuke.Options{
|
||||||
BasePrefix: *basePrefix,
|
BasePrefix: *basePrefix,
|
||||||
StripBasePrefix: *stripBasePrefix,
|
StripBasePrefix: *stripBasePrefix,
|
||||||
Next: rp,
|
Next: rp,
|
||||||
|
|
@ -467,11 +446,11 @@ func main() {
|
||||||
CookieSameSite: parseSameSite(*cookieSameSite),
|
CookieSameSite: parseSameSite(*cookieSameSite),
|
||||||
PublicUrl: *publicUrl,
|
PublicUrl: *publicUrl,
|
||||||
JWTRestrictionHeader: *jwtRestrictionHeader,
|
JWTRestrictionHeader: *jwtRestrictionHeader,
|
||||||
Logger: policy.Logger.With("subsystem", "anubis"),
|
Logger: policy.Logger.With("subsystem", "nuke"),
|
||||||
DifficultyInJWT: *difficultyInJWT,
|
DifficultyInJWT: *difficultyInJWT,
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatalf("can't construct libanubis.Server: %v", err)
|
log.Fatalf("can't construct libnuke.Server: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
var h http.Handler
|
var h http.Handler
|
||||||
|
|
@ -490,7 +469,7 @@ func main() {
|
||||||
"difficulty", *challengeDifficulty,
|
"difficulty", *challengeDifficulty,
|
||||||
"serveRobotsTXT", *robotsTxt,
|
"serveRobotsTXT", *robotsTxt,
|
||||||
"target", *target,
|
"target", *target,
|
||||||
"version", anubis.Version,
|
"version", nuke.Version,
|
||||||
"use-remote-address", *useRemoteAddress,
|
"use-remote-address", *useRemoteAddress,
|
||||||
"debug-benchmark-js", *debugBenchmarkJS,
|
"debug-benchmark-js", *debugBenchmarkJS,
|
||||||
"og-passthrough", *ogPassthrough,
|
"og-passthrough", *ogPassthrough,
|
||||||
|
|
@ -510,7 +489,7 @@ func main() {
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
internal.SetHealth("anubis", healthv1.HealthCheckResponse_SERVING)
|
internal.SetHealth("nuke", healthv1.HealthCheckResponse_SERVING)
|
||||||
|
|
||||||
if err := srv.Serve(listener); !errors.Is(err, http.ErrServerClosed) {
|
if err := srv.Serve(listener); !errors.Is(err, http.ErrServerClosed) {
|
||||||
log.Fatal(err)
|
log.Fatal(err)
|
||||||
|
|
@ -524,9 +503,9 @@ func metricsServer(ctx context.Context, lg slog.Logger, done func()) {
|
||||||
mux := http.NewServeMux()
|
mux := http.NewServeMux()
|
||||||
mux.Handle("/metrics", promhttp.Handler())
|
mux.Handle("/metrics", promhttp.Handler())
|
||||||
mux.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
|
mux.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
|
||||||
st, ok := internal.GetHealth("anubis")
|
st, ok := internal.GetHealth("nuke")
|
||||||
if !ok {
|
if !ok {
|
||||||
slog.Error("health service anubis does not exist, file a bug")
|
slog.Error("health service nuke does not exist, file a bug")
|
||||||
}
|
}
|
||||||
|
|
||||||
switch st {
|
switch st {
|
||||||
|
|
@ -1,6 +1,6 @@
|
||||||
/*
|
/*
|
||||||
Batch process robots.txt files from archives like https://github.com/nrjones8/robots-dot-txt-archive-bot/tree/master/data/cleaned
|
Batch process robots.txt files from archives like https://github.com/nrjones8/robots-dot-txt-archive-bot/tree/master/data/cleaned
|
||||||
into Anubis CEL policies. Usage: go run batch_process.go <directory with robots.txt files>
|
into nuke CEL policies. Usage: go run batch_process.go <directory with robots.txt files>
|
||||||
*/
|
*/
|
||||||
package main
|
package main
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -12,7 +12,7 @@ import (
|
||||||
"regexp"
|
"regexp"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/TecharoHQ/anubis/lib/config"
|
"git.sad.ovh/sophie/nuke/lib/config"
|
||||||
|
|
||||||
"sigs.k8s.io/yaml"
|
"sigs.k8s.io/yaml"
|
||||||
)
|
)
|
||||||
|
|
@ -36,7 +36,7 @@ type RobotsRule struct {
|
||||||
IsBlacklist bool // true if this is a specifically denied user agent
|
IsBlacklist bool // true if this is a specifically denied user agent
|
||||||
}
|
}
|
||||||
|
|
||||||
type AnubisRule struct {
|
type NukeRule struct {
|
||||||
Expression *config.ExpressionOrList `yaml:"expression,omitempty" json:"expression,omitempty"`
|
Expression *config.ExpressionOrList `yaml:"expression,omitempty" json:"expression,omitempty"`
|
||||||
Challenge *config.ChallengeRules `yaml:"challenge,omitempty" json:"challenge,omitempty"`
|
Challenge *config.ChallengeRules `yaml:"challenge,omitempty" json:"challenge,omitempty"`
|
||||||
Weight *config.Weight `yaml:"weight,omitempty" json:"weight,omitempty"`
|
Weight *config.Weight `yaml:"weight,omitempty" json:"weight,omitempty"`
|
||||||
|
|
@ -95,11 +95,11 @@ func main() {
|
||||||
log.Fatalf("failed to parse robots.txt: %v", err)
|
log.Fatalf("failed to parse robots.txt: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Convert to Anubis rules
|
// Convert to Nuke rules
|
||||||
anubisRules := convertToAnubisRules(rules)
|
nukeRules := convertToNukeRules(rules)
|
||||||
|
|
||||||
// Check if any rules were generated
|
// Check if any rules were generated
|
||||||
if len(anubisRules) == 0 {
|
if len(nukeRules) == 0 {
|
||||||
log.Fatal("no valid rules generated from robots.txt - file may be empty or contain no disallow directives")
|
log.Fatal("no valid rules generated from robots.txt - file may be empty or contain no disallow directives")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -107,9 +107,9 @@ func main() {
|
||||||
var output []byte
|
var output []byte
|
||||||
switch strings.ToLower(*outputFormat) {
|
switch strings.ToLower(*outputFormat) {
|
||||||
case "yaml":
|
case "yaml":
|
||||||
output, err = yaml.Marshal(anubisRules)
|
output, err = yaml.Marshal(nukeRules)
|
||||||
case "json":
|
case "json":
|
||||||
output, err = json.MarshalIndent(anubisRules, "", " ")
|
output, err = json.MarshalIndent(nukeRules, "", " ")
|
||||||
default:
|
default:
|
||||||
log.Fatalf("unsupported output format: %s (use yaml or json)", *outputFormat)
|
log.Fatalf("unsupported output format: %s (use yaml or json)", *outputFormat)
|
||||||
}
|
}
|
||||||
|
|
@ -126,7 +126,7 @@ func main() {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatalf("failed to write output file: %v", err)
|
log.Fatalf("failed to write output file: %v", err)
|
||||||
}
|
}
|
||||||
fmt.Printf("Generated Anubis policy written to %s\n", *outputFile)
|
fmt.Printf("Generated Nuke policy written to %s\n", *outputFile)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -227,8 +227,8 @@ func parseIntSafe(s string) (int, error) {
|
||||||
return result, err
|
return result, err
|
||||||
}
|
}
|
||||||
|
|
||||||
func convertToAnubisRules(robotsRules []RobotsRule) []AnubisRule {
|
func convertToNukeRules(robotsRules []RobotsRule) []NukeRule {
|
||||||
var anubisRules []AnubisRule
|
var nukeRules []NukeRule
|
||||||
ruleCounter := 0
|
ruleCounter := 0
|
||||||
|
|
||||||
// Process each robots rule individually
|
// Process each robots rule individually
|
||||||
|
|
@ -238,7 +238,7 @@ func convertToAnubisRules(robotsRules []RobotsRule) []AnubisRule {
|
||||||
// Handle crawl delay
|
// Handle crawl delay
|
||||||
if robotsRule.CrawlDelay > 0 && *crawlDelay > 0 {
|
if robotsRule.CrawlDelay > 0 && *crawlDelay > 0 {
|
||||||
ruleCounter++
|
ruleCounter++
|
||||||
rule := AnubisRule{
|
rule := NukeRule{
|
||||||
Name: fmt.Sprintf("%s-crawl-delay-%d", *policyName, ruleCounter),
|
Name: fmt.Sprintf("%s-crawl-delay-%d", *policyName, ruleCounter),
|
||||||
Action: "WEIGH",
|
Action: "WEIGH",
|
||||||
Weight: &config.Weight{Adjust: *crawlDelay},
|
Weight: &config.Weight{Adjust: *crawlDelay},
|
||||||
|
|
@ -266,13 +266,13 @@ func convertToAnubisRules(robotsRules []RobotsRule) []AnubisRule {
|
||||||
Any: expressions,
|
Any: expressions,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
anubisRules = append(anubisRules, rule)
|
nukeRules = append(nukeRules, rule)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Handle blacklisted user agents
|
// Handle blacklisted user agents
|
||||||
if robotsRule.IsBlacklist {
|
if robotsRule.IsBlacklist {
|
||||||
ruleCounter++
|
ruleCounter++
|
||||||
rule := AnubisRule{
|
rule := NukeRule{
|
||||||
Name: fmt.Sprintf("%s-blacklist-%d", *policyName, ruleCounter),
|
Name: fmt.Sprintf("%s-blacklist-%d", *policyName, ruleCounter),
|
||||||
Action: *userAgentDeny,
|
Action: *userAgentDeny,
|
||||||
}
|
}
|
||||||
|
|
@ -306,7 +306,7 @@ func convertToAnubisRules(robotsRules []RobotsRule) []AnubisRule {
|
||||||
Any: expressions,
|
Any: expressions,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
anubisRules = append(anubisRules, rule)
|
nukeRules = append(nukeRules, rule)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Handle specific disallow rules
|
// Handle specific disallow rules
|
||||||
|
|
@ -316,7 +316,7 @@ func convertToAnubisRules(robotsRules []RobotsRule) []AnubisRule {
|
||||||
}
|
}
|
||||||
|
|
||||||
ruleCounter++
|
ruleCounter++
|
||||||
rule := AnubisRule{
|
rule := NukeRule{
|
||||||
Name: fmt.Sprintf("%s-disallow-%d", *policyName, ruleCounter),
|
Name: fmt.Sprintf("%s-disallow-%d", *policyName, ruleCounter),
|
||||||
Action: *baseAction,
|
Action: *baseAction,
|
||||||
}
|
}
|
||||||
|
|
@ -338,7 +338,7 @@ func convertToAnubisRules(robotsRules []RobotsRule) []AnubisRule {
|
||||||
continue // Skip wildcard as it's handled separately
|
continue // Skip wildcard as it's handled separately
|
||||||
}
|
}
|
||||||
ruleCounter++
|
ruleCounter++
|
||||||
subRule := AnubisRule{
|
subRule := NukeRule{
|
||||||
Name: fmt.Sprintf("%s-disallow-%d", *policyName, ruleCounter),
|
Name: fmt.Sprintf("%s-disallow-%d", *policyName, ruleCounter),
|
||||||
Action: *baseAction,
|
Action: *baseAction,
|
||||||
Expression: &config.ExpressionOrList{
|
Expression: &config.ExpressionOrList{
|
||||||
|
|
@ -348,7 +348,7 @@ func convertToAnubisRules(robotsRules []RobotsRule) []AnubisRule {
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
anubisRules = append(anubisRules, subRule)
|
nukeRules = append(nukeRules, subRule)
|
||||||
}
|
}
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
@ -361,11 +361,11 @@ func convertToAnubisRules(robotsRules []RobotsRule) []AnubisRule {
|
||||||
All: conditions,
|
All: conditions,
|
||||||
}
|
}
|
||||||
|
|
||||||
anubisRules = append(anubisRules, rule)
|
nukeRules = append(nukeRules, rule)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return anubisRules
|
return nukeRules
|
||||||
}
|
}
|
||||||
|
|
||||||
func buildPathCondition(robotsPath string) string {
|
func buildPathCondition(robotsPath string) string {
|
||||||
|
|
|
||||||
|
|
@ -136,16 +136,16 @@ func TestDataFileConversion(t *testing.T) {
|
||||||
*userAgentDeny = oldDeniedAction
|
*userAgentDeny = oldDeniedAction
|
||||||
}()
|
}()
|
||||||
|
|
||||||
// Convert to Anubis rules
|
// Convert to Nuke rules
|
||||||
anubisRules := convertToAnubisRules(rules)
|
nukeRules := convertToNukeRules(rules)
|
||||||
|
|
||||||
// Generate output
|
// Generate output
|
||||||
var actualOutput []byte
|
var actualOutput []byte
|
||||||
switch strings.ToLower(*outputFormat) {
|
switch strings.ToLower(*outputFormat) {
|
||||||
case "yaml":
|
case "yaml":
|
||||||
actualOutput, err = yaml.Marshal(anubisRules)
|
actualOutput, err = yaml.Marshal(nukeRules)
|
||||||
case "json":
|
case "json":
|
||||||
actualOutput, err = json.MarshalIndent(anubisRules, "", " ")
|
actualOutput, err = json.MarshalIndent(nukeRules, "", " ")
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("Failed to marshal output: %v", err)
|
t.Fatalf("Failed to marshal output: %v", err)
|
||||||
|
|
@ -249,10 +249,10 @@ Disallow: /admin`
|
||||||
*policyName = "test-policy"
|
*policyName = "test-policy"
|
||||||
defer func() { *policyName = oldPolicyName }()
|
defer func() { *policyName = oldPolicyName }()
|
||||||
|
|
||||||
anubisRules := convertToAnubisRules(rules)
|
nukeRules := convertToNukeRules(rules)
|
||||||
|
|
||||||
// Test YAML output
|
// Test YAML output
|
||||||
yamlOutput, err := yaml.Marshal(anubisRules)
|
yamlOutput, err := yaml.Marshal(nukeRules)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("Failed to marshal YAML: %v", err)
|
t.Fatalf("Failed to marshal YAML: %v", err)
|
||||||
}
|
}
|
||||||
|
|
@ -262,7 +262,7 @@ Disallow: /admin`
|
||||||
}
|
}
|
||||||
|
|
||||||
// Test JSON output
|
// Test JSON output
|
||||||
jsonOutput, err := json.MarshalIndent(anubisRules, "", " ")
|
jsonOutput, err := json.MarshalIndent(nukeRules, "", " ")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("Failed to marshal JSON: %v", err)
|
t.Fatalf("Failed to marshal JSON: %v", err)
|
||||||
}
|
}
|
||||||
|
|
@ -290,14 +290,14 @@ Disallow: /admin`
|
||||||
*baseAction = action
|
*baseAction = action
|
||||||
defer func() { *baseAction = oldAction }()
|
defer func() { *baseAction = oldAction }()
|
||||||
|
|
||||||
anubisRules := convertToAnubisRules(rules)
|
nukeRules := convertToNukeRules(rules)
|
||||||
|
|
||||||
if len(anubisRules) != 1 {
|
if len(nukeRules) != 1 {
|
||||||
t.Fatalf("Expected 1 rule, got %d", len(anubisRules))
|
t.Fatalf("Expected 1 rule, got %d", len(nukeRules))
|
||||||
}
|
}
|
||||||
|
|
||||||
if anubisRules[0].Action != action {
|
if nukeRules[0].Action != action {
|
||||||
t.Errorf("Expected action %s, got %s", action, anubisRules[0].Action)
|
t.Errorf("Expected action %s, got %s", action, nukeRules[0].Action)
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
@ -325,10 +325,10 @@ Disallow: /`
|
||||||
*policyName = name
|
*policyName = name
|
||||||
defer func() { *policyName = oldName }()
|
defer func() { *policyName = oldName }()
|
||||||
|
|
||||||
anubisRules := convertToAnubisRules(rules)
|
nukeRules := convertToNukeRules(rules)
|
||||||
|
|
||||||
// Check that all rule names use the custom prefix
|
// Check that all rule names use the custom prefix
|
||||||
for _, rule := range anubisRules {
|
for _, rule := range nukeRules {
|
||||||
if !strings.HasPrefix(rule.Name, name+"-") {
|
if !strings.HasPrefix(rule.Name, name+"-") {
|
||||||
t.Errorf("Rule name %s doesn't start with expected prefix %s-", rule.Name, name)
|
t.Errorf("Rule name %s doesn't start with expected prefix %s-", rule.Name, name)
|
||||||
}
|
}
|
||||||
|
|
@ -360,11 +360,11 @@ Crawl-delay: 60`
|
||||||
*crawlDelay = weight
|
*crawlDelay = weight
|
||||||
defer func() { *crawlDelay = oldWeight }()
|
defer func() { *crawlDelay = oldWeight }()
|
||||||
|
|
||||||
anubisRules := convertToAnubisRules(rules)
|
nukeRules := convertToNukeRules(rules)
|
||||||
|
|
||||||
// Count weight rules and verify they have correct weight
|
// Count weight rules and verify they have correct weight
|
||||||
weightRules := 0
|
weightRules := 0
|
||||||
for _, rule := range anubisRules {
|
for _, rule := range nukeRules {
|
||||||
if rule.Action == "WEIGH" && rule.Weight != nil {
|
if rule.Action == "WEIGH" && rule.Weight != nil {
|
||||||
weightRules++
|
weightRules++
|
||||||
if rule.Weight.Adjust != weight {
|
if rule.Weight.Adjust != weight {
|
||||||
|
|
@ -402,10 +402,10 @@ Disallow: /`
|
||||||
*userAgentDeny = action
|
*userAgentDeny = action
|
||||||
defer func() { *userAgentDeny = oldAction }()
|
defer func() { *userAgentDeny = oldAction }()
|
||||||
|
|
||||||
anubisRules := convertToAnubisRules(rules)
|
nukeRules := convertToNukeRules(rules)
|
||||||
|
|
||||||
// All rules should be blacklist rules with the specified action
|
// All rules should be blacklist rules with the specified action
|
||||||
for _, rule := range anubisRules {
|
for _, rule := range nukeRules {
|
||||||
if !strings.Contains(rule.Name, "blacklist") {
|
if !strings.Contains(rule.Name, "blacklist") {
|
||||||
t.Errorf("Expected blacklist rule, got %s", rule.Name)
|
t.Errorf("Expected blacklist rule, got %s", rule.Name)
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -1,4 +1,4 @@
|
||||||
# Make SASL login work on bookstack with Anubis
|
# Make SASL login work on bookstack with Nuke
|
||||||
# https://www.bookstackapp.com/docs/admin/saml2-auth/
|
# https://www.bookstackapp.com/docs/admin/saml2-auth/
|
||||||
- name: allow-bookstack-sasl-login-routes
|
- name: allow-bookstack-sasl-login-routes
|
||||||
action: ALLOW
|
action: ALLOW
|
||||||
|
|
|
||||||
|
|
@ -1,4 +1,4 @@
|
||||||
## Anubis has the ability to let you import snippets of configuration into the main
|
## Nuke has the ability to let you import snippets of configuration into the main
|
||||||
## configuration file. This allows you to break up your config into smaller parts
|
## configuration file. This allows you to break up your config into smaller parts
|
||||||
## that get logically assembled into one big file.
|
## that get logically assembled into one big file.
|
||||||
##
|
##
|
||||||
|
|
@ -6,9 +6,9 @@
|
||||||
## bot config snippet. You cannot do both in a single bot rule.
|
## bot config snippet. You cannot do both in a single bot rule.
|
||||||
##
|
##
|
||||||
## Import paths can either be prefixed with (data) to import from the common/shared
|
## Import paths can either be prefixed with (data) to import from the common/shared
|
||||||
## rules in the data folder in the Anubis source tree or will point to absolute/relative
|
## rules in the data folder in the Nuke source tree or will point to absolute/relative
|
||||||
## paths in your filesystem. If you don't have access to the Anubis source tree, check
|
## paths in your filesystem. If you don't have access to the Nuke source tree, check
|
||||||
## /usr/share/docs/anubis/data or in the tarball you extracted Anubis from.
|
## /usr/share/docs/nuke/data or in the tarball you extracted Nuke from.
|
||||||
|
|
||||||
bots:
|
bots:
|
||||||
# You can import the entire default config with this macro:
|
# You can import the entire default config with this macro:
|
||||||
|
|
@ -16,7 +16,7 @@ bots:
|
||||||
|
|
||||||
# Pathological bots to deny
|
# Pathological bots to deny
|
||||||
- # This correlates to data/bots/_deny-pathological.yaml in the source tree
|
- # This correlates to data/bots/_deny-pathological.yaml in the source tree
|
||||||
# https://github.com/TecharoHQ/anubis/blob/main/data/bots/_deny-pathological.yaml
|
# https://git.sad.ovh/sophie/nuke/blob/main/data/bots/_deny-pathological.yaml
|
||||||
import: (data)/bots/_deny-pathological.yaml
|
import: (data)/bots/_deny-pathological.yaml
|
||||||
- import: (data)/bots/aggressive-brazilian-scrapers.yaml
|
- import: (data)/bots/aggressive-brazilian-scrapers.yaml
|
||||||
|
|
||||||
|
|
@ -28,7 +28,7 @@ bots:
|
||||||
# - import: (data)/meta/ai-block-permissive.yaml
|
# - import: (data)/meta/ai-block-permissive.yaml
|
||||||
|
|
||||||
# Search engine crawlers to allow, defaults to:
|
# Search engine crawlers to allow, defaults to:
|
||||||
# - Google (so they don't try to bypass Anubis)
|
# - Google (so they don't try to bypass Nuke)
|
||||||
# - Apple
|
# - Apple
|
||||||
# - Bing
|
# - Bing
|
||||||
# - DuckDuckGo
|
# - DuckDuckGo
|
||||||
|
|
@ -53,29 +53,6 @@ bots:
|
||||||
# difficulty: 16 # impossible
|
# difficulty: 16 # impossible
|
||||||
# algorithm: slow # intentionally waste CPU cycles and time
|
# algorithm: slow # intentionally waste CPU cycles and time
|
||||||
|
|
||||||
# Requires a subscription to Thoth to use, see
|
|
||||||
# https://anubis.techaro.lol/docs/admin/thoth#geoip-based-filtering
|
|
||||||
- name: countries-with-aggressive-scrapers
|
|
||||||
action: WEIGH
|
|
||||||
geoip:
|
|
||||||
countries:
|
|
||||||
- BR
|
|
||||||
- CN
|
|
||||||
weight:
|
|
||||||
adjust: 10
|
|
||||||
|
|
||||||
# Requires a subscription to Thoth to use, see
|
|
||||||
# https://anubis.techaro.lol/docs/admin/thoth#asn-based-filtering
|
|
||||||
- name: aggressive-asns-without-functional-abuse-contact
|
|
||||||
action: WEIGH
|
|
||||||
asns:
|
|
||||||
match:
|
|
||||||
- 13335 # Cloudflare
|
|
||||||
- 136907 # Huawei Cloud
|
|
||||||
- 45102 # Alibaba Cloud
|
|
||||||
weight:
|
|
||||||
adjust: 10
|
|
||||||
|
|
||||||
# ## System load based checks.
|
# ## System load based checks.
|
||||||
# # If the system is under high load, add weight.
|
# # If the system is under high load, add weight.
|
||||||
# - name: high-load-average
|
# - name: high-load-average
|
||||||
|
|
@ -84,7 +61,7 @@ bots:
|
||||||
# weight:
|
# weight:
|
||||||
# adjust: 20
|
# adjust: 20
|
||||||
|
|
||||||
## If your backend service is running on the same operating system as Anubis,
|
## If your backend service is running on the same operating system as Nuke,
|
||||||
## you can uncomment this rule to make the challenge easier when the system is
|
## you can uncomment this rule to make the challenge easier when the system is
|
||||||
## under low load.
|
## under low load.
|
||||||
##
|
##
|
||||||
|
|
@ -107,14 +84,14 @@ dnsbl: false
|
||||||
|
|
||||||
# #
|
# #
|
||||||
# impressum:
|
# impressum:
|
||||||
# # Displayed at the bottom of every page rendered by Anubis.
|
# # Displayed at the bottom of every page rendered by Nuke.
|
||||||
# footer: >-
|
# footer: >-
|
||||||
# This website is hosted by Zombocom. If you have any complaints or notes
|
# This website is hosted by Zombocom. If you have any complaints or notes
|
||||||
# about the service, please contact
|
# about the service, please contact
|
||||||
# <a href="mailto:contact@domainhere.example">contact@domainhere.example</a>
|
# <a href="mailto:contact@domainhere.example">contact@domainhere.example</a>
|
||||||
# and we will assist you as soon as possible.
|
# and we will assist you as soon as possible.
|
||||||
|
|
||||||
# # The imprint page that will be linked to at the footer of every Anubis page.
|
# # The imprint page that will be linked to at the footer of every Nuke page.
|
||||||
# page:
|
# page:
|
||||||
# # The HTML <title> of the page
|
# # The HTML <title> of the page
|
||||||
# title: Imprint and Privacy Policy
|
# title: Imprint and Privacy Policy
|
||||||
|
|
@ -150,13 +127,11 @@ openGraph:
|
||||||
# # Correlates to properties in https://ogp.me/
|
# # Correlates to properties in https://ogp.me/
|
||||||
# override:
|
# override:
|
||||||
# # og:title is required, it is the title of the website
|
# # og:title is required, it is the title of the website
|
||||||
# "og:title": "Techaro Anubis"
|
# "og:title": "sophie Nuke"
|
||||||
# "og:description": >-
|
# "og:description": >-
|
||||||
# Anubis is a Web AI Firewall Utility that helps you fight the bots
|
# Nuke is great
|
||||||
# away so that you can maintain uptime at work!
|
|
||||||
# "description": >-
|
# "description": >-
|
||||||
# Anubis is a Web AI Firewall Utility that helps you fight the bots
|
# Nuke is great
|
||||||
# away so that you can maintain uptime at work!
|
|
||||||
|
|
||||||
# By default, send HTTP 200 back to clients that either get issued a challenge
|
# By default, send HTTP 200 back to clients that either get issued a challenge
|
||||||
# or a denial. This seems weird, but this is load-bearing due to the fact that
|
# or a denial. This seems weird, but this is load-bearing due to the fact that
|
||||||
|
|
@ -166,7 +141,7 @@ status_codes:
|
||||||
CHALLENGE: 200
|
CHALLENGE: 200
|
||||||
DENY: 200
|
DENY: 200
|
||||||
|
|
||||||
# Anubis can store temporary data in one of a few backends. See the storage
|
# Nuke can store temporary data in one of a few backends. See the storage
|
||||||
# backends section of the docs for more information:
|
# backends section of the docs for more information:
|
||||||
#
|
#
|
||||||
# https://anubis.techaro.lol/docs/admin/policies#storage-backends
|
# https://anubis.techaro.lol/docs/admin/policies#storage-backends
|
||||||
|
|
@ -182,13 +157,13 @@ store:
|
||||||
# - name: the name that is reported down the stack and used for metrics
|
# - name: the name that is reported down the stack and used for metrics
|
||||||
# - expression: A CEL expression with the request weight in the variable
|
# - expression: A CEL expression with the request weight in the variable
|
||||||
# weight
|
# weight
|
||||||
# - action: the Anubis action to apply, similar to in a bot policy
|
# - action: the Nuke action to apply, similar to in a bot policy
|
||||||
# - challenge: which challenge to send to the user, similar to in a bot policy
|
# - challenge: which challenge to send to the user, similar to in a bot policy
|
||||||
#
|
#
|
||||||
# See https://anubis.techaro.lol/docs/admin/configuration/thresholds for more
|
# See https://anubis.techaro.lol/docs/admin/configuration/thresholds for more
|
||||||
# information.
|
# information.
|
||||||
thresholds:
|
thresholds:
|
||||||
# By default Anubis ships with the following thresholds:
|
# By default Nuke ships with the following thresholds:
|
||||||
- name: minimal-suspicion # This client is likely fine, its soul is lighter than a feather
|
- name: minimal-suspicion # This client is likely fine, its soul is lighter than a feather
|
||||||
expression: weight <= 0 # a feather weighs zero units
|
expression: weight <= 0 # a feather weighs zero units
|
||||||
action: ALLOW # Allow the traffic through
|
action: ALLOW # Allow the traffic through
|
||||||
|
|
|
||||||
|
|
@ -37,29 +37,6 @@
|
||||||
# difficulty: 16 # impossible
|
# difficulty: 16 # impossible
|
||||||
# algorithm: slow # intentionally waste CPU cycles and time
|
# algorithm: slow # intentionally waste CPU cycles and time
|
||||||
|
|
||||||
# Requires a subscription to Thoth to use, see
|
|
||||||
# https://anubis.techaro.lol/docs/admin/thoth#geoip-based-filtering
|
|
||||||
- name: countries-with-aggressive-scrapers
|
|
||||||
action: WEIGH
|
|
||||||
geoip:
|
|
||||||
countries:
|
|
||||||
- BR
|
|
||||||
- CN
|
|
||||||
weight:
|
|
||||||
adjust: 10
|
|
||||||
|
|
||||||
# Requires a subscription to Thoth to use, see
|
|
||||||
# https://anubis.techaro.lol/docs/admin/thoth#asn-based-filtering
|
|
||||||
- name: aggressive-asns-without-functional-abuse-contact
|
|
||||||
action: WEIGH
|
|
||||||
asns:
|
|
||||||
match:
|
|
||||||
- 13335 # Cloudflare
|
|
||||||
- 136907 # Huawei Cloud
|
|
||||||
- 45102 # Alibaba Cloud
|
|
||||||
weight:
|
|
||||||
adjust: 10
|
|
||||||
|
|
||||||
# ## System load based checks.
|
# ## System load based checks.
|
||||||
# # If the system is under high load, add weight.
|
# # If the system is under high load, add weight.
|
||||||
# - name: high-load-average
|
# - name: high-load-average
|
||||||
|
|
|
||||||
|
|
@ -1,21 +0,0 @@
|
||||||
# Dependencies
|
|
||||||
/node_modules
|
|
||||||
|
|
||||||
# Production
|
|
||||||
/build
|
|
||||||
|
|
||||||
# Generated files
|
|
||||||
.docusaurus
|
|
||||||
.cache-loader
|
|
||||||
|
|
||||||
# Misc
|
|
||||||
.DS_Store
|
|
||||||
.env.local
|
|
||||||
.env.development.local
|
|
||||||
.env.test.local
|
|
||||||
.env.production.local
|
|
||||||
|
|
||||||
npm-debug.log*
|
|
||||||
yarn-debug.log*
|
|
||||||
yarn-error.log*
|
|
||||||
|
|
||||||
20
docs/.gitignore
vendored
20
docs/.gitignore
vendored
|
|
@ -1,20 +0,0 @@
|
||||||
# Dependencies
|
|
||||||
/node_modules
|
|
||||||
|
|
||||||
# Production
|
|
||||||
/build
|
|
||||||
|
|
||||||
# Generated files
|
|
||||||
.docusaurus
|
|
||||||
.cache-loader
|
|
||||||
|
|
||||||
# Misc
|
|
||||||
.DS_Store
|
|
||||||
.env.local
|
|
||||||
.env.development.local
|
|
||||||
.env.test.local
|
|
||||||
.env.production.local
|
|
||||||
|
|
||||||
npm-debug.log*
|
|
||||||
yarn-debug.log*
|
|
||||||
yarn-error.log*
|
|
||||||
|
|
@ -1,11 +0,0 @@
|
||||||
FROM docker.io/library/node:lts AS build
|
|
||||||
|
|
||||||
WORKDIR /app
|
|
||||||
COPY . .
|
|
||||||
|
|
||||||
RUN npm ci && npm run build
|
|
||||||
|
|
||||||
FROM ghcr.io/xe/nginx-micro
|
|
||||||
COPY --from=build /app/build /www
|
|
||||||
COPY ./manifest/cfg/nginx/nginx.conf /conf
|
|
||||||
LABEL org.opencontainers.image.source="https://github.com/TecharoHQ/anubis"
|
|
||||||
|
|
@ -1,41 +0,0 @@
|
||||||
# Website
|
|
||||||
|
|
||||||
This website is built using [Docusaurus](https://docusaurus.io/), a modern static website generator.
|
|
||||||
|
|
||||||
### Installation
|
|
||||||
|
|
||||||
```
|
|
||||||
$ yarn
|
|
||||||
```
|
|
||||||
|
|
||||||
### Local Development
|
|
||||||
|
|
||||||
```
|
|
||||||
$ yarn start
|
|
||||||
```
|
|
||||||
|
|
||||||
This command starts a local development server and opens up a browser window. Most changes are reflected live without having to restart the server.
|
|
||||||
|
|
||||||
### Build
|
|
||||||
|
|
||||||
```
|
|
||||||
$ yarn build
|
|
||||||
```
|
|
||||||
|
|
||||||
This command generates static content into the `build` directory and can be served using any static contents hosting service.
|
|
||||||
|
|
||||||
### Deployment
|
|
||||||
|
|
||||||
Using SSH:
|
|
||||||
|
|
||||||
```
|
|
||||||
$ USE_SSH=true yarn deploy
|
|
||||||
```
|
|
||||||
|
|
||||||
Not using SSH:
|
|
||||||
|
|
||||||
```
|
|
||||||
$ GIT_USER=<Your GitHub username> yarn deploy
|
|
||||||
```
|
|
||||||
|
|
||||||
If you are using GitHub pages for hosting, this command is a convenient way to build the website and push to the `gh-pages` branch.
|
|
||||||
|
|
@ -1,14 +0,0 @@
|
||||||
---
|
|
||||||
slug: welcome
|
|
||||||
title: Welcome to the Anubis blog!
|
|
||||||
authors: [xe]
|
|
||||||
tags: [intro]
|
|
||||||
---
|
|
||||||
|
|
||||||
Hello, world!
|
|
||||||
|
|
||||||
At Techaro, we've been working on making Anubis even better, and in the process we want to share what we've done, how it works, and signal boost cool things the community has done. As things happen, we'll blog about them so that you can learn from our struggles.
|
|
||||||
|
|
||||||
More details to come soon!
|
|
||||||
|
|
||||||
{/* truncate */}
|
|
||||||
|
|
@ -1,248 +0,0 @@
|
||||||
---
|
|
||||||
slug: release/v1.20.0
|
|
||||||
title: Anubis v1.20.0 is now available!
|
|
||||||
authors: [xe]
|
|
||||||
tags: [release]
|
|
||||||
image: sunburst.webp
|
|
||||||
---
|
|
||||||
|
|
||||||

|
|
||||||
|
|
||||||
Hey all!
|
|
||||||
|
|
||||||
Today we released [Anubis v1.20.0: Thancred Waters](https://github.com/TecharoHQ/anubis/releases/tag/v1.20.0). This adds a lot of new and exciting features to Anubis, including but not limited to the `WEIGH` action, custom weight thresholds, Imprint/impressum support, and a no-JS challenge. Here's what you need to know so you can protect your websites in new and exciting ways!
|
|
||||||
|
|
||||||
{/* truncate */}
|
|
||||||
|
|
||||||
## Sponsoring the product
|
|
||||||
|
|
||||||
If you rely on Anubis to keep your website safe, please consider sponsoring the project on [GitHub Sponsors](https://github.com/sponsors/Xe) or [Patreon](https://patreon.com/cadey). Funding helps pay hosting bills and offset the time spent on making this project the best it can be. Every little bit helps and when enough money is raised, [I can make Anubis my full-time job](https://github.com/TecharoHQ/anubis/discussions/278).
|
|
||||||
|
|
||||||
I am waiting to hear back from NLNet on if Anubis was selected for funding or not. Let's hope it is!
|
|
||||||
|
|
||||||
## Deprecation warning: `DIFFICULTY`
|
|
||||||
|
|
||||||
Anubis v1.20.0 is the last version to support the `DIFFICULTY` flag in the exact way it currently does. In future versions, this will be ineffectual and you should use the [custom threshold system](/docs/admin/configuration/thresholds) instead.
|
|
||||||
|
|
||||||
If this becomes an imposition in practice, this will be reverted.
|
|
||||||
|
|
||||||
## Chrome won't show "invalid response" after "Success!"
|
|
||||||
|
|
||||||
There were a bunch of smaller fixes in Anubis this time around, but the biggest one was finally squashing the ["invalid response" after "Success!" issue](https://github.com/TecharoHQ/anubis/issues/564) that had been plaguing Chrome users. This was a really annoying issue to track down but it was discovered while we were working on better end-to-end / functional testing: [Chrome randomizes the `Accept-Language` header](https://github.com/explainers-by-googlers/reduce-accept-language) so that websites can't do fingerprinting as easily.
|
|
||||||
|
|
||||||
When Anubis issues a challenge, it grabs [information that the browser sends to the user](/docs/design/how-anubis-works#challenge-format) to create a challenge string. Anubis doesn't store these challenge strings anywhere, and when a solution is being checked it calculates the challenge string from the request. This means that they'd get a challenge on one end, compute the response for that challenge, and then the server would validate that against a different challenge. This server-side validation would fail, leading to the user seeing "invalid response" after the client reported success.
|
|
||||||
|
|
||||||
I suspect this was why Vanadium and Cromite were having sporadic issues as well.
|
|
||||||
|
|
||||||
## New Features
|
|
||||||
|
|
||||||
The biggest feature in Anubis is the "weight" subsystem. This allows administrators to make custom rules that change the suspicion level of a request without having to take immediate action. As an example, consider the self-hostable git forge [Gitea](https://about.gitea.com/). When you load a page in Gitea, it creates a session cookie that your browser sends with every request. Weight allows you to mark a request that includes a Gitea session token as _less_ suspicious:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
- name: gitea-session-token
|
|
||||||
action: WEIGH
|
|
||||||
expression:
|
|
||||||
all:
|
|
||||||
# Check if the request has a Cookie header
|
|
||||||
- '"Cookie" in headers'
|
|
||||||
# Check if the request's Cookie header contains the Gitea session token
|
|
||||||
- headers["Cookie"].contains("i_love_gitea=")
|
|
||||||
# Remove 5 weight points
|
|
||||||
weight:
|
|
||||||
adjust: -5
|
|
||||||
```
|
|
||||||
|
|
||||||
This is different from the past where you could only allow every request with a Gitea session token, meaning that the invention of lying would allow malicious clients to bypass protection.
|
|
||||||
|
|
||||||
Weight is added and removed whenever a `WEIGH` rule is encountered. When all rules are processed and the request doesn't match any `ALLOW`, `CHALLENGE`, or `DENY` rules, Anubis uses [weight thresholds](/docs/admin/configuration/thresholds) to figure out how to handle that request. Thresholds are defined in the [policy file](/docs/admin/policies) alongside your bot rules:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
thresholds:
|
|
||||||
- name: minimal-suspicion # This client is likely fine, its soul is lighter than a feather
|
|
||||||
expression: weight <= 0 # a feather weighs zero units
|
|
||||||
action: ALLOW # Allow the traffic through
|
|
||||||
# For clients that had some weight reduced through custom rules, give them a
|
|
||||||
# lightweight challenge.
|
|
||||||
- name: mild-suspicion
|
|
||||||
expression:
|
|
||||||
all:
|
|
||||||
- weight > 0
|
|
||||||
- weight < 10
|
|
||||||
action: CHALLENGE
|
|
||||||
challenge:
|
|
||||||
# https://anubis.techaro.lol/docs/admin/configuration/challenges/metarefresh
|
|
||||||
algorithm: metarefresh
|
|
||||||
difficulty: 1
|
|
||||||
report_as: 1
|
|
||||||
# For clients that are browser-like but have either gained points from custom rules or
|
|
||||||
# report as a standard browser.
|
|
||||||
- name: moderate-suspicion
|
|
||||||
expression:
|
|
||||||
all:
|
|
||||||
- weight >= 10
|
|
||||||
- weight < 20
|
|
||||||
action: CHALLENGE
|
|
||||||
challenge:
|
|
||||||
# https://anubis.techaro.lol/docs/admin/configuration/challenges/proof-of-work
|
|
||||||
algorithm: fast
|
|
||||||
difficulty: 2 # two leading zeros, very fast for most clients
|
|
||||||
report_as: 2
|
|
||||||
# For clients that are browser like and have gained many points from custom rules
|
|
||||||
- name: extreme-suspicion
|
|
||||||
expression: weight >= 20
|
|
||||||
action: CHALLENGE
|
|
||||||
challenge:
|
|
||||||
# https://anubis.techaro.lol/docs/admin/configuration/challenges/proof-of-work
|
|
||||||
algorithm: fast
|
|
||||||
difficulty: 4
|
|
||||||
report_as: 4
|
|
||||||
```
|
|
||||||
|
|
||||||
:::note
|
|
||||||
|
|
||||||
If you don't have thresholds defined in your Anubis policy file, Anubis will default to the "legacy" behaviour where browser-like clients get a challenge at the default difficulty.
|
|
||||||
|
|
||||||
:::
|
|
||||||
|
|
||||||
This lets most clients through if they pass a simple [proof of work challenge](/docs/admin/configuration/challenges/proof-of-work), but any clients that are less suspicious (like ones with a Gitea session token) are given the lightweight [Meta Refresh](/docs/admin/configuration/challenges/metarefresh) challenge instead.
|
|
||||||
|
|
||||||
Threshold expressions are like [Bot rule expressions](/docs/admin/configuration/expressions), but there's only one input: the request's weight. If no thresholds match, the request is allowed through.
|
|
||||||
|
|
||||||
### Imprint/Impressum Support
|
|
||||||
|
|
||||||
European countries like Germany [require an imprint/impressum](https://www.ionos.com/digitalguide/websites/digital-law/a-case-for-thinking-global-germanys-impressum-laws/) to be present in the footer of their website. This allows users to contact someone on the team behind a website in case they run into issues. This also must generally have a separate page where users can view an extended imprint with other information like a privacy policy or a copyright notice.
|
|
||||||
|
|
||||||
Anubis v1.20.0 and later [has support for showing imprints](/docs/admin/configuration/impressum). You can configure two kinds of imprints:
|
|
||||||
|
|
||||||
1. An imprint that is shown in the footer of every Anubis page.
|
|
||||||
2. An extended imprint / privacy policy that is shown when users click on the "Imprint" link. For example, [here's the imprint for the website you're looking at right now](https://anubis.techaro.lol/.within.website/x/cmd/anubis/api/imprint).
|
|
||||||
|
|
||||||
Imprints are configured in [the policy file](/docs/admin/policies/):
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
impressum:
|
|
||||||
# Displayed at the bottom of every page rendered by Anubis.
|
|
||||||
footer: >-
|
|
||||||
This website is hosted by Zombocom. If you have any complaints or notes
|
|
||||||
about the service, please contact
|
|
||||||
<a href="mailto:contact@zombocom.example">contact@zombocom.example</a> and
|
|
||||||
we will assist you as soon as possible.
|
|
||||||
|
|
||||||
# The imprint page that will be linked to at the footer of every Anubis page.
|
|
||||||
page:
|
|
||||||
# The HTML <title> of the page
|
|
||||||
title: Imprint and Privacy Policy
|
|
||||||
# The HTML contents of the page. The exact contents of this page can
|
|
||||||
# and will vary by locale. Please consult with a lawyer if you are not
|
|
||||||
# sure what to put here.
|
|
||||||
body: >-
|
|
||||||
<p>Last updated: June 2025</p>
|
|
||||||
|
|
||||||
<h2>Information that is gathered from visitors</h2>
|
|
||||||
|
|
||||||
<p>In common with other websites, log files are stored on the web server
|
|
||||||
saving details such as the visitor's IP address, browser type, referring
|
|
||||||
page and time of visit.</p>
|
|
||||||
|
|
||||||
<p>Cookies may be used to remember visitor preferences when interacting
|
|
||||||
with the website.</p>
|
|
||||||
|
|
||||||
<p>Where registration is required, the visitor's email and a username
|
|
||||||
will be stored on the server.</p>
|
|
||||||
|
|
||||||
<!-- ... -->
|
|
||||||
```
|
|
||||||
|
|
||||||
If this is insufficient, please [file an issue](https://github.com/TecharoHQ/anubis/issues/new) with a link to the relevant legislation for your country so that this feature can be amended and improved.
|
|
||||||
|
|
||||||
### No-JS Challenge
|
|
||||||
|
|
||||||
One of the first issues in Anubis before it was moved to the [TecharoHQ org](https://github.com/TecharoHQ) was a request [to support challenging browsers without using JavaScript](https://github.com/Xe/x/issues/651). This is a pretty challenging thing to do without rethinking how Anubis works from a fundamentally low level, and with v1.20.0, [Anubis finally has support for running without client-side JavaScript](https://github.com/TecharoHQ/anubis/issues/95) thanks to the [Meta Refresh](/docs/admin/configuration/challenges/metarefresh) challenge.
|
|
||||||
|
|
||||||
When Anubis decides it needs to send a challenge to your browser, it sends a challenge page. Historically, this challenge page is [an HTML template](https://github.com/TecharoHQ/anubis/blob/main/web/index.templ) that kicks off some JavaScript, reads the challenge information out of the page body, and then solves it as fast as possible in order to let users see the website they want to visit.
|
|
||||||
|
|
||||||
In v1.20.0, Anubis has a challenge registry to hold [different client challenge implementations](/docs/admin/configuration/challenges/). This allows us to implement anything we want as long as it can render a page to show a challenge and then check if the result is correct. This is going to be used to implement a WebAssembly-based proof of work option (one that will be way more efficient than the existing browser JS version), but as a proof of concept I implemented a simple challenge using [HTML `<meta refresh>`](https://en.wikipedia.org/wiki/Meta_refresh).
|
|
||||||
|
|
||||||
In my testing, this has worked with every browser I have thrown it at (including CLI browsers, the browser embedded in emacs, etc.). The default configuration of Anubis does use the [meta refresh challenge](/docs/admin/configuration/challenges/metarefresh) for [clients with a very low suspicion](/docs/admin/configuration/thresholds), but by default clients will be sent an [easy proof of work challenge](/docs/admin/configuration/challenges/proof-of-work).
|
|
||||||
|
|
||||||
If the false positive rate of this challenge turns out to not be very high in practice, the meta refresh challenge will be enabled by default for browsers in future versions of Anubis.
|
|
||||||
|
|
||||||
### `robots2policy`
|
|
||||||
|
|
||||||
Anubis was created because crawler bots don't respect [`robots.txt` files](https://www.robotstxt.org/). Administrators have been working on refining and crafting their `robots.txt` files for years, and one common comment is that people don't know where to start crafting their own rules.
|
|
||||||
|
|
||||||
Anubis now ships with a [`robots2policy` tool](/docs/admin/robots2policy) that lets you convert your `robots.txt` file to an Anubis policy.
|
|
||||||
|
|
||||||
```text
|
|
||||||
robots2policy -input https://github.com/robots.txt
|
|
||||||
```
|
|
||||||
|
|
||||||
:::note
|
|
||||||
|
|
||||||
If you installed Anubis from [an OS package](/docs/admin/native-install), you may need to run `anubis-robots2policy` instead of `robots2policy`.
|
|
||||||
|
|
||||||
:::
|
|
||||||
|
|
||||||
We hope that this will help you get started with Anubis faster. We are working on a version of this that will run in the documentation via WebAssembly.
|
|
||||||
|
|
||||||
### Open Graph configuration is being moved to the policy file
|
|
||||||
|
|
||||||
Anubis supports reading [Open Graph tags](/docs/admin/configuration/open-graph) from target services and returning them in challenge pages. This makes the right metadata show up when linking services protected by Anubis in chat applications or on social media.
|
|
||||||
|
|
||||||
In order to test the migration of all of the configuration to the policy file, Open Graph configuration has been moved to the policy file. For more information, please read [the Open Graph configuration options](/docs/admin/configuration/open-graph#configuration-options).
|
|
||||||
|
|
||||||
You can also set default Open Graph tags:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
openGraph:
|
|
||||||
enabled: true
|
|
||||||
ttl: 24h
|
|
||||||
# If set, return these opengraph values instead of looking them up with
|
|
||||||
# the target service.
|
|
||||||
#
|
|
||||||
# Correlates to properties in https://ogp.me/
|
|
||||||
override:
|
|
||||||
# og:title is required, it is the title of the website
|
|
||||||
"og:title": "Techaro Anubis"
|
|
||||||
"og:description": >-
|
|
||||||
Anubis is a Web AI Firewall Utility that helps you fight the bots
|
|
||||||
away so that you can maintain uptime at work!
|
|
||||||
"description": >-
|
|
||||||
Anubis is a Web AI Firewall Utility that helps you fight the bots
|
|
||||||
away so that you can maintain uptime at work!
|
|
||||||
```
|
|
||||||
|
|
||||||
## Improvements and optimizations
|
|
||||||
|
|
||||||
One of the biggest improvements we've made in v1.20.0 is replacing [SHA-256 with xxhash](https://github.com/TecharoHQ/anubis/pull/676). Anubis uses hashes all over the place to help with identifying clients, matching against rules when allowing traffic through, in error messages sent to users, and more. Historically these have been done with [SHA-256](https://en.wikipedia.org/wiki/SHA-2), however this has been having a mild performance impact in real-world use. As a result, we now use [xxhash](https://xxhash.com/) when possible. This makes policy matching 3x faster in some scenarios and reduces memory usage across the board.
|
|
||||||
|
|
||||||
Anubis now uses [bart](https://pkg.go.dev/github.com/gaissmai/bart) for doing IP address matching when you specify addresses in a `remote_address` check configuration or when you are matching against [advanced checks](/docs/admin/thoth). This uses the same kind of IP address routing configuration that your OS kernel does, making it very fast to query information about IP addresses. This makes IP address range matches anywhere from 3-14 times faster depending on the number of addresses it needs to match against. For more information and benchmarks, check out [@JasonLovesDoggo](https://github.com/JasonLovesDoggo)'s PR: [perf: replace cidranger with bart for significant performance improvements #675](https://github.com/TecharoHQ/anubis/pull/675).
|
|
||||||
|
|
||||||
## What's up next?
|
|
||||||
|
|
||||||
v1.21.0 is already shaping up to be a massive improvement as Anubis adds [internationalization](https://en.wikipedia.org/wiki/Internationalization) support, allowing your users to see its messages in the language they're most comfortable with.
|
|
||||||
|
|
||||||
So far Anubis supports the following languages:
|
|
||||||
|
|
||||||
- Chinese (Simplified and Traditional)
- English
|
|
||||||
- French
|
|
||||||
- Portuguese (Brazil)
|
|
||||||
- Spanish
|
|
||||||
|
|
||||||
If you want to contribute translations, please [file an issue](https://github.com/TecharoHQ/anubis/issues/new) with your language of choice or submit a pull request to [the `lib/localization/locales` folder](https://github.com/TecharoHQ/anubis/tree/main/lib/localization/locales). We are about to introduce features to the translation stack, so you may want to hold off a hot minute, but we welcome any and all contributions to making Anubis useful to a global audience.
|
|
||||||
|
|
||||||
Other things we plan to do:
|
|
||||||
|
|
||||||
- Move configuration to the policy file
|
|
||||||
- Support reloading the policy file at runtime without having to restart Anubis
|
|
||||||
- Detecting if a client is "brand new"
|
|
||||||
- A [Valkey](https://valkey.io/)-backed store for sharing information between instances of Anubis
|
|
||||||
- Augmenting No-JS support in the paid product
|
|
||||||
- TLS fingerprinting
|
|
||||||
- Automated testing improvements in CI (FreeBSD CI support, better automated integration/functional testing, etc.)
|
|
||||||
|
|
||||||
## Conclusion
|
|
||||||
|
|
||||||
I hope that these features let you get the same Anubis power you've come to know and love and increases the things you can do with it! I've been really excited to ship [thresholds](/docs/admin/configuration/thresholds) and the cloud-based services for Anubis.
|
|
||||||
|
|
||||||
If you run into any problems, please [file an issue](https://github.com/TecharoHQ/anubis/issues/new). Otherwise, have a good day and get back to making your communities great.
|
|
||||||
Binary file not shown.
|
Before Width: | Height: | Size: 9.2 KiB |
|
|
@ -1,105 +0,0 @@
|
||||||
---
|
|
||||||
slug: incident/TI-20250709-0001
|
|
||||||
title: "TI-20250709-0001: IPv4 traffic failures for Techaro services"
|
|
||||||
authors: [xe]
|
|
||||||
tags: [incident]
|
|
||||||
image: ./window-portal.jpg
|
|
||||||
---
|
|
||||||
|
|
||||||

|
|
||||||
|
|
||||||
Techaro services were down for IPv4 traffic on July 9th, 2025. This blogpost is a report of what happened, what actions were taken to resolve the situation, and what actions are being done in the near future to prevent this problem. Enjoy this incident report!
|
|
||||||
|
|
||||||
{/* truncate */}
|
|
||||||
|
|
||||||
:::note
|
|
||||||
|
|
||||||
In other companies, this kind of documentation would be kept internal. At Techaro, we believe that you deserve radical candor and the truth. As such, we are proving our lofty words with actions by publishing details about how things go wrong publicly.
|
|
||||||
|
|
||||||
Everything past this point follows my standard incident root cause meeting template.
|
|
||||||
|
|
||||||
:::
|
|
||||||
|
|
||||||
This incident report will focus on the services affected, timeline of what happened at which stage of the incident, where we got lucky, the root cause analysis, and what action items are being planned or taken to prevent this from happening in the future.
|
|
||||||
|
|
||||||
## Timeline
|
|
||||||
|
|
||||||
All events take place on July 9th, 2025.
|
|
||||||
|
|
||||||
| Time (UTC) | Description |
|
|
||||||
| :--------- | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
|
||||||
| 12:32 | Uptime Kuma reports that another unrelated website on the same cluster was timing out. |
|
|
||||||
| 12:33 | Uptime Kuma reports that Thoth's production endpoint is failing gRPC health checks. |
|
|
||||||
| 12:35 | Investigation begins, [announcement made on Xe's Bluesky](https://bsky.app/profile/xeiaso.net/post/3ltjtdczpwc2x) due to the impact including their personal blog. |
|
|
||||||
| 12:39 | `nginx-ingress` logs on the production cluster show IPv6 traffic but an abrupt cutoff in IPv4 traffic around 12:32 UTC. Ticket is opened with the hosting provider. |
|
|
||||||
| 12:41 | IPv4 traffic resumes long enough for Uptime Kuma to report uptime, but then immediately fails again. |
|
|
||||||
| 12:46 | IPv4 traffic resumes long enough for Uptime Kuma to report uptime, but then immediately fails again. (repeat instances of this have been scrubbed, but it happened about every 5-10 minutes) |
|
|
||||||
| 12:48 | First reply from the hosting provider. |
|
|
||||||
| 12:57 | Reply to hosting provider, ask to reboot the load balancer. |
|
|
||||||
| 13:00      | Incident responder became busy due to a meeting under the belief that the downtime was out of their control and that uptime monitoring software would let them know if it came back up.      |
|
|
||||||
| 13:20 | Incident responder ended meeting and went back to monitoring downtime and preparing this document. |
|
|
||||||
| 13:34 | IPv4 traffic starts to show up in the `ingress-nginx` logs. |
|
|
||||||
| 13:35 | All services start to report healthy. Incident status changes to monitoring. |
|
|
||||||
| 13:48 | Incident closed. |
|
|
||||||
| 14:07 | Incident re-opened. Issues seem to be manifesting as BGP issues in the upstream provider. |
|
|
||||||
| 14:10 | IPv4 traffic resumes and then stops. |
|
|
||||||
| 14:18 | IPv4 traffic resumes again. Incident status changes to monitoring. |
|
|
||||||
| 14:40 | Incident closed. |
|
|
||||||
|
|
||||||
## Services affected
|
|
||||||
|
|
||||||
| Service name | User impact |
|
|
||||||
| :-------------------------------------------------- | :----------------- |
|
|
||||||
| [Anubis Docs](https://anubis.techaro.lol) (IPv4) | Connection timeout |
|
|
||||||
| [Anubis Docs](https://anubis.techaro.lol) (IPv6) | None |
|
|
||||||
| [Thoth](/docs/admin/thoth/) (IPv4) | Connection timeout |
|
|
||||||
| [Thoth](/docs/admin/thoth/) (IPv6) | None |
|
|
||||||
| Other websites colocated on the same cluster (IPv4) | Connection timeout |
|
|
||||||
| Other websites colocated on the same cluster (IPv6) | None |
|
|
||||||
|
|
||||||
## Root cause analysis
|
|
||||||
|
|
||||||
To simplify server management, Techaro runs a [Kubernetes](https://kubernetes.io/) cluster on [Vultr VKE](https://www.vultr.com/kubernetes/) (Vultr Kubernetes Engine). When you do this, Vultr needs to provision a [load balancer](https://docs.vultr.com/how-to-use-a-vultr-load-balancer-with-vke) to bridge the gap between the outside world and the Kubernetes world, kinda like this:
|
|
||||||
|
|
||||||
```mermaid
|
|
||||||
---
|
|
||||||
title: Overall architecture
|
|
||||||
---
|
|
||||||
|
|
||||||
flowchart LR
|
|
||||||
UT(User Traffic)
|
|
||||||
subgraph Provider Infrastructure
|
|
||||||
LB[Load Balancer]
|
|
||||||
end
|
|
||||||
subgraph Kubernetes
|
|
||||||
IN(ingress-nginx)
|
|
||||||
TH(Thoth)
|
|
||||||
AN(Anubis Docs)
|
|
||||||
OS(Other sites)
|
|
||||||
|
|
||||||
IN --> TH
|
|
||||||
IN --> AN
|
|
||||||
IN --> OS
|
|
||||||
end
|
|
||||||
|
|
||||||
UT --> LB --> IN
|
|
||||||
```
|
|
||||||
|
|
||||||
Techaro controls everything inside the Kubernetes side of that diagram. Anything else is out of our control. That load balancer is routed to the public internet via [Border Gateway Protocol (BGP)](https://en.wikipedia.org/wiki/Border_Gateway_Protocol).
|
|
||||||
|
|
||||||
If there is an interruption with the BGP sessions in the upstream provider, this can manifest as things either not working or inconsistently working. This is made more difficult by the fact that the IPv4 and IPv6 internets are technically separate networks. With this in mind, it's very possible to have IPv4 traffic fail but not IPv6 traffic.
|
|
||||||
|
|
||||||
The root cause is that the hosting provider we use for production services had flapping IPv4 BGP sessions in its Toronto region. When this happens all we can do is open a ticket and wait for it to come back up.
|
|
||||||
|
|
||||||
## Where we got lucky
|
|
||||||
|
|
||||||
The Uptime Kuma instance that caught this incident runs on an IPv4-only network. If it was dual stack, this would not have been caught as quickly.
|
|
||||||
|
|
||||||
The `ingress-nginx` logs print IP addresses of remote clients to the log feed. If this was not the case, it would be much more difficult to find this error.
|
|
||||||
|
|
||||||
## Action items
|
|
||||||
|
|
||||||
- A single instance of downtime like this is not enough reason to move providers. Moving providers because of this is thus out of scope.
|
|
||||||
- Techaro needs a status page hosted on a different cloud provider than is used for the production cluster (`TecharoHQ/TODO#6`).
|
|
||||||
- Health checks for IPv4 and IPv6 traffic need to be created (`TecharoHQ/TODO#7`).
|
|
||||||
- Remove the requirement for [Anubis to pass Thoth health checks before it can start if Thoth is enabled](https://github.com/TecharoHQ/anubis/pull/794).
|
|
||||||
Binary file not shown.
|
Before Width: | Height: | Size: 30 KiB |
Binary file not shown.
|
Before Width: | Height: | Size: 62 KiB |
|
|
@ -1,369 +0,0 @@
|
||||||
---
|
|
||||||
slug: release/v1.21.1
|
|
||||||
title: Anubis v1.21.1 is now available!
|
|
||||||
authors: [xe]
|
|
||||||
tags: [release]
|
|
||||||
image: anubis-i18n.webp
|
|
||||||
---
|
|
||||||
|
|
||||||

|
|
||||||
|
|
||||||
Hey all!
|
|
||||||
|
|
||||||
Recently we released [Anubis v1.21.1: Minfilia Warde (Echo 1)](https://github.com/TecharoHQ/anubis/releases/tag/v1.21.1). This is a fairly meaty release and like [last time](../2025-06-27-release-1.20.0/index.mdx) this blogpost will tell you what you need to know before you update. Kick back, get some popcorn and let's dig into this!
|
|
||||||
|
|
||||||
{/* truncate */}
|
|
||||||
|
|
||||||
In this release, Anubis becomes internationalized, gains the ability to use system load as input to issuing challenges, finally fixes the "invalid response" after "success" bug, and more! Please read these notes before upgrading as the changes are big enough that administrators should take action to ensure that the upgrade goes smoothly.
|
|
||||||
|
|
||||||
This release is brought to you by [FreeCAD](https://www.freecad.org/), an open-source computer aided design tool that lets you design things for the real world.
|
|
||||||
|
|
||||||
## What's in this release?
|
|
||||||
|
|
||||||
The biggest change is that the ["invalid response" after "success" bug](https://github.com/TecharoHQ/anubis/issues/564) is now finally fixed for good by totally rewriting how [Anubis' challenge issuance flow works](#challenge-flow-v2).
|
|
||||||
|
|
||||||
This release gives Anubis the following features:
|
|
||||||
|
|
||||||
- [Internationalization support](#internationalization), allowing Anubis to render its messages in the human language you speak.
|
|
||||||
- Anubis now supports the [`missingHeader`](#missingHeader-function) function to assert the absence of headers in requests.
|
|
||||||
- Anubis now has the ability to [store data persistently on the server](#persistent-data-storage).
|
|
||||||
- Anubis can use [the system load average](#load-average-checks) as a factor to determine if it needs to filter traffic or not.
|
|
||||||
- Add `COOKIE_SECURE` option to set the cookie [Secure flag](https://developer.mozilla.org/en-US/docs/Web/HTTP/Guides/Cookies#block_access_to_your_cookies)
|
|
||||||
- Sets cookie defaults to use [SameSite: None](https://web.dev/articles/samesite-cookies-explained)
|
|
||||||
- Allow [Common Crawl](https://commoncrawl.org/) by default so scrapers have less incentive to scrape
|
|
||||||
- Add `/healthz` metrics route for use in platform-based health checks.
|
|
||||||
- Start exposing JA4H fingerprints for later use in CEL expressions.
|
|
||||||
|
|
||||||
And this release also fixes the following bugs:
|
|
||||||
|
|
||||||
- [Challenge issuance has been totally rewritten](#challenge-flow-v2) to finally squash the infamous ["invalid response" after "success" bug](https://github.com/TecharoHQ/anubis/issues/564) for good.
|
|
||||||
- In order to reduce confusion, the "Success" interstitial that shows up when you pass a proof of work challenge has been removed.
|
|
||||||
- Don't block Anubis starting up if [Thoth](/docs/admin/thoth/) health checks fail.
|
|
||||||
- The "Try again" button on the error page has been fixed. Previously it meant "try the solution again" instead of "try the challenge again".
|
|
||||||
- In certain cases, a user could be stuck with a test cookie that is invalid, locking them out of the service for up to half an hour. This has been fixed with better validation of this case and clearing the cookie.
|
|
||||||
- "Proof of work" has been removed from the branding due to some users having extremely negative connotations with it.
|
|
||||||
|
|
||||||
We try to avoid introducing breaking changes as much as possible, but these are the changes that may be relevant for you as an administrator:
|
|
||||||
|
|
||||||
- The [challenge format](#challenge-format-change) has been changed in order to account for [the new challenge issuance flow](#challenge-flow-v2).
|
|
||||||
- The [systemd service `RuntimeDirectory` has been changed](#breaking-change-systemd-runtimedirectory-change).
|
|
||||||
|
|
||||||
### Sponsoring the project
|
|
||||||
|
|
||||||
If you rely on Anubis to keep your website safe, please consider sponsoring the project on [GitHub Sponsors](https://github.com/sponsors/Xe) or [Patreon](https://patreon.com/cadey). Funding helps pay hosting bills and offset the time spent on making this project the best it can be. Every little bit helps and when enough money is raised, [I can make Anubis my full-time job](https://github.com/TecharoHQ/anubis/discussions/278).
|
|
||||||
|
|
||||||
Once this pie chart is at 100%, I can start to reduce my hours at my day job as most of my needs will be met (pre-tax):
|
|
||||||
|
|
||||||
```mermaid
|
|
||||||
pie title Funding update
|
|
||||||
"GitHub Sponsors" : 29
|
|
||||||
"Patreon" : 14
|
|
||||||
"Remaining" : 56
|
|
||||||
```
|
|
||||||
|
|
||||||
I am waiting to hear back from NLNet on if Anubis was selected for funding or not. Let's hope it is!
|
|
||||||
|
|
||||||
## New features
|
|
||||||
|
|
||||||
### Internationalization
|
|
||||||
|
|
||||||
Anubis now supports localized responses. Locales can be added in [lib/localization/locales/](https://github.com/TecharoHQ/anubis/tree/main/lib/localization/locales). This release includes support for the following languages:
|
|
||||||
|
|
||||||
- [Brazilian Portuguese](https://github.com/TecharoHQ/anubis/pull/726)
|
|
||||||
- [Chinese (Simplified)](https://github.com/TecharoHQ/anubis/pull/774)
|
|
||||||
- [Chinese (Traditional)](https://github.com/TecharoHQ/anubis/pull/759)
|
|
||||||
- [Czech](https://github.com/TecharoHQ/anubis/pull/849)
|
|
||||||
- English
|
|
||||||
- [Estonian](https://github.com/TecharoHQ/anubis/pull/783)
|
|
||||||
- [Filipino](https://github.com/TecharoHQ/anubis/pull/775)
|
|
||||||
- [Finnish](https://github.com/TecharoHQ/anubis/pull/863)
|
|
||||||
- [French](https://github.com/TecharoHQ/anubis/pull/716)
|
|
||||||
- [German](https://github.com/TecharoHQ/anubis/pull/741)
|
|
||||||
- [Japanese](https://github.com/TecharoHQ/anubis/pull/772)
|
|
||||||
- [Icelandic](https://github.com/TecharoHQ/anubis/pull/780)
|
|
||||||
- [Italian](https://github.com/TecharoHQ/anubis/pull/778)
|
|
||||||
- [Norwegian](https://github.com/TecharoHQ/anubis/pull/855)
|
|
||||||
- [Russian](https://github.com/TecharoHQ/anubis/pull/882)
|
|
||||||
- [Spanish](https://github.com/TecharoHQ/anubis/pull/716)
|
|
||||||
- [Turkish](https://github.com/TecharoHQ/anubis/pull/751)
|
|
||||||
|
|
||||||
If facts or local regulations demand, you can set Anubis default language with the `FORCED_LANGUAGE` environment variable or the `--forced-language` command line argument:
|
|
||||||
|
|
||||||
```sh
|
|
||||||
FORCED_LANGUAGE=de
|
|
||||||
```
|
|
||||||
|
|
||||||
## Big ticket bug fixes
|
|
||||||
|
|
||||||
These issues affect every user of Anubis. Administrators should upgrade Anubis as soon as possible to mitigate them.
|
|
||||||
|
|
||||||
### Fix event loop thrashing when solving a proof of work challenge
|
|
||||||
|
|
||||||
Anubis has a progress bar so that users can have something moving while it works. This gives users more confidence that something is happening and that the website is not being malicious with CPU usage. However, the way it was implemented way back in [#87](https://github.com/TecharoHQ/anubis/pull/87) had a subtle bug:
|
|
||||||
|
|
||||||
```js
|
|
||||||
if (
|
|
||||||
(nonce > oldNonce) | 1023 && // we've wrapped past 1024
|
|
||||||
(nonce >> 10) % threads === threadId // and it's our turn
|
|
||||||
) {
|
|
||||||
postMessage(nonce);
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
The logic here looks fine but is subtly wrong as was reported in [#877](https://github.com/TecharoHQ/anubis/issues/877) by the main Pale Moon developer.
|
|
||||||
|
|
||||||
For context, `nonce` is a counter that increments by the worker count every loop. This is intended to spread the load between CPU cores as such:
|
|
||||||
|
|
||||||
| Iteration | Worker ID | Nonce |
|
|
||||||
| :-------- | :-------- | :---- |
|
|
||||||
| 1 | 0 | 0 |
|
|
||||||
| 1 | 1 | 1 |
|
|
||||||
| 2 | 0 | 2 |
|
|
||||||
| 2 | 1 | 3 |
|
|
||||||
|
|
||||||
And so on. This makes the proof of work challenge as fast as it can possibly be so that Anubis quickly goes away and you can enjoy the service it is protecting.
|
|
||||||
|
|
||||||
The incorrect part of this is the boolean logic, specifically the part with the bitwise or `|`. I think the intent was to use a logical or (`||`), but this had the effect of making the `postMessage` handler fire on every iteration. The intent of this snippet (as the comment clearly indicates) is to make sure that the main event loop is only updated with the worker status every 1024 iterations per worker. This had the opposite effect, causing a lot of messages to be sent from workers to the parent JavaScript context.
|
|
||||||
|
|
||||||
This is bad for the event loop.
|
|
||||||
|
|
||||||
Instead, I have ripped out that statement and replaced it with a much simpler increment only counter that fires every 1024 iterations. Additionally, only the first thread communicates back to the parent process. This does mean that in theory the other workers could be ahead of the first thread (posting a message out of a worker has a nonzero cost), but in practice I don't think this will be as much of an issue as the current behaviour is.
|
|
||||||
|
|
||||||
The root cause of the stack exhaustion is likely the pressure caused by all of the postMessage futures piling up. Maybe the larger stack size in 64 bit environments is causing this to be fine there, maybe it's some combination of newer hardware in 64 bit systems making this not be as much of a problem due to it being able to handle events fast enough to keep up with the pressure.
|
|
||||||
|
|
||||||
Either way, thanks much to [@wolfbeast](https://github.com/wolfbeast) and the Pale Moon community for finding this. This will make Anubis faster for everyone!
|
|
||||||
|
|
||||||
### Fix potential memory leak when discovering a solution
|
|
||||||
|
|
||||||
In some cases, the parallel solution finder in Anubis could cause all of the worker promises to leak due to the fact the promises were being improperly terminated. A recursion bomb happens in the following scenario:
|
|
||||||
|
|
||||||
1. A worker sends a message indicating it found a solution to the proof of work challenge.
|
|
||||||
2. The `onmessage` handler for that worker calls `terminate()`
|
|
||||||
3. Inside `terminate()`, the parent process loops through all other workers and calls `w.terminate()` on them.
|
|
||||||
4. It's possible that terminating a worker could lead to the `onerror` event handler.
|
|
||||||
5. This would create a recursive loop of `onmessage` -> `terminate` -> `onerror` -> `terminate` -> `onerror` and so on.
|
|
||||||
|
|
||||||
This infinite recursion quickly consumes all available stack space, but this has never been noticed in development because all of my computers have at least 64Gi of ram provisioned to them under the axiom paying for more ram is cheaper than paying in my time spent having to work around not having enough ram. Additionally, ia32 has a smaller base stack size, which means that they will run into this issue much sooner than users on other CPU architectures will.
|
|
||||||
|
|
||||||
The fix adds a boolean `settled` flag to prevent termination from running more than once.
|
|
||||||
|
|
||||||
## Expressions features
|
|
||||||
|
|
||||||
Anubis v1.21.1 adds additional [expressions](/docs/admin/configuration/expressions) features so that you can make your request matching even more granular.
|
|
||||||
|
|
||||||
### `missingHeader` function
|
|
||||||
|
|
||||||
Anubis [expressions](/docs/admin/configuration/expressions) have [a few functions exposed](/docs/admin/configuration/expressions/#functions-exposed-to-anubis-expressions). Anubis v1.21.1 adds the `missingHeader` function, allowing you to assert the _absence_ of a header in requests.
|
|
||||||
|
|
||||||
Let's say you're getting a lot of requests from clients that are pretending to be Google Chrome. Google Chrome sends a few signals to web servers, the main one of them is the [`Sec-Ch-Ua`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Reference/Headers/Sec-CH-UA). Sec-CH-UA is part of Google's [User Agent Client Hints](https://wicg.github.io/ua-client-hints/#sec-ch-ua) proposal, but it being present is a sign that the client is more likely Google Chrome than not. With the `missingHeader` function, you can write a rule to [add weight](/docs/admin/policies/#request-weight) to requests without `Sec-Ch-Ua` that claim to be Google Chrome.
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
# Adds weight clients that claim to be Google Chrome without setting Sec-Ch-Ua
|
|
||||||
- name: old-chrome
|
|
||||||
action: WEIGH
|
|
||||||
weight:
|
|
||||||
adjust: 10
|
|
||||||
expression:
|
|
||||||
all:
|
|
||||||
- userAgent.matches("Chrome/[1-9][0-9]?\\.0\\.0\\.0")
|
|
||||||
- missingHeader(headers, "Sec-Ch-Ua")
|
|
||||||
```
|
|
||||||
|
|
||||||
When combined with [weight thresholds](/docs/admin/configuration/thresholds), this allows you to make requests that don't match the signature of Google Chrome more suspicious, which will make them have a more difficult challenge.
|
|
||||||
|
|
||||||
### Load average checks
|
|
||||||
|
|
||||||
Anubis can dynamically take action [based on the system load average](/docs/admin/configuration/expressions/#using-the-system-load-average), allowing you to write rules like this:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
## System load based checks.
|
|
||||||
# If the system is under high load for the last minute, add weight.
|
|
||||||
- name: high-load-average
|
|
||||||
action: WEIGH
|
|
||||||
expression: load_1m >= 10.0 # make sure to end the load comparison in a .0
|
|
||||||
weight:
|
|
||||||
adjust: 20
|
|
||||||
|
|
||||||
# If it is not for the last 15 minutes, remove weight.
|
|
||||||
- name: low-load-average
|
|
||||||
action: WEIGH
|
|
||||||
expression: load_15m <= 4.0 # make sure to end the load comparison in a .0
|
|
||||||
weight:
|
|
||||||
adjust: -10
|
|
||||||
```
|
|
||||||
|
|
||||||
Something to keep in mind about system load average is that it is not aware of the number of cores the system has. If you have a 16 core system that has 16 processes running but none of them is hogging the CPU, then you will get a load average below 16. If you are in doubt, make your "high load" metric at least two times the number of CPU cores and your "low load" metric at least half of the number of CPU cores. For example:
|
|
||||||
|
|
||||||
| Kind | Core count | Load threshold |
|
|
||||||
| --------: | :--------- | :------------- |
|
|
||||||
| high load | 4 | `8.0` |
|
|
||||||
| low load | 4 | `2.0` |
|
|
||||||
| high load | 16 | `32.0` |
|
|
||||||
| low load  | 16         | `8.0`          |
|
|
||||||
|
|
||||||
Also keep in mind that this does not account for other kinds of latency like I/O latency or downstream API response latency. A system can have its web applications unresponsive due to high latency from a MySQL server but still have that web application server report a load near or at zero.
|
|
||||||
|
|
||||||
:::note
|
|
||||||
|
|
||||||
This does not work if you are using Kubernetes.
|
|
||||||
|
|
||||||
:::
|
|
||||||
|
|
||||||
When combined with [weight thresholds](/docs/admin/configuration/thresholds), this allows you to make incoming sessions "back off" while the server is under high load.
|
|
||||||
|
|
||||||
## Challenge flow v2
|
|
||||||
|
|
||||||
The main goal of Anubis is to weigh the risks of incoming requests in order to protect upstream resources against abusive clients like badly written scrapers. In order to separate "good" clients (like users wanting to learn from a website's content) from "bad" clients, Anubis issues [challenges](/docs/admin/configuration/challenges/).
|
|
||||||
|
|
||||||
Previously the Anubis challenge flow looked like this:
|
|
||||||
|
|
||||||
```mermaid
|
|
||||||
---
|
|
||||||
title: Old Anubis challenge flow
|
|
||||||
---
|
|
||||||
flowchart LR
|
|
||||||
user(User Browser)
|
|
||||||
subgraph Anubis
|
|
||||||
mIC{Challenge?}
|
|
||||||
ic(Issue Challenge)
|
|
||||||
rp(Proxy to service)
|
|
||||||
mIC -->|User needs a challenge| ic
|
|
||||||
mIC -->|User does not need a challenge| rp
|
|
||||||
end
|
|
||||||
target(Target Service)
|
|
||||||
rp --> target
|
|
||||||
user --> mIC
|
|
||||||
ic -->|Pass a challenge| user
|
|
||||||
    target -->|Site data| user
|
|
||||||
```
|
|
||||||
|
|
||||||
In order to issue a challenge, Anubis generated a challenge string based on request metadata that we assumed wouldn't drastically change between requests, including but not limited to:
|
|
||||||
|
|
||||||
- The client's User-Agent string.
|
|
||||||
- The client [`Accept-Language` header](https://developer.mozilla.org/en-US/docs/Web/HTTP/Reference/Headers/Accept-Language) value.
|
|
||||||
- The client's IP address.
|
|
||||||
|
|
||||||
Anubis also didn't store any information about challenges so that it can remain lightweight and handle the onslaught of requests from scrapers. The assumption was that the challenge string function was idempotent per client across time. What actually ended up happening was something like this:
|
|
||||||
|
|
||||||
```mermaid
|
|
||||||
---
|
|
||||||
title: Anubis challenge string idempotency
|
|
||||||
---
|
|
||||||
sequenceDiagram
|
|
||||||
User->>+Anubis: GET /wiki/some-page
|
|
||||||
Anubis->>+Make Challenge: Generate a challenge string
|
|
||||||
Make Challenge->>-Anubis: Challenge string: taco salad
|
|
||||||
Anubis->>-User: HTTP 401 solve a challenge
|
|
||||||
User->>+Anubis: GET internal-api/pass-challenge
|
|
||||||
Anubis->>+Make Challenge: Generate a challenge string
|
|
||||||
Make Challenge->>-Anubis: Challenge string: burrito bar
|
|
||||||
Anubis->>+User: Error: invalid response
|
|
||||||
```
|
|
||||||
|
|
||||||
Various attempts were made to fix this. All of these ended up failing. Many difficulties were discovered including but not limited to:
|
|
||||||
|
|
||||||
- Removing `Accept-Language` from consideration because [Chrome randomizes the contents of `Accept-Language` to reduce fingerprinting](https://github.com/explainers-by-googlers/reduce-accept-language), a behaviour which [causes a lot of confusion](https://www.reddit.com/r/chrome/comments/nhpnez/google_chrome_is_randomly_switching_languages_on/) for users with multiple system languages selected.
|
|
||||||
- [IPv6 privacy extensions](https://www.internetsociety.org/resources/deploy360/2014/privacy-extensions-for-ipv6-slaac/) mean that each request could be coming from a different IP address (at least one legitimate user in the wild has been observed to have a different IP address per TCP session across an entire `/48`).
|
|
||||||
- Some [US mobile phone carriers make it too easy for your IP address to drastically change](https://news.ycombinator.com/item?id=32038215) without user input.
|
|
||||||
- [Happy eyeballs](https://en.wikipedia.org/wiki/Happy_Eyeballs) means that some requests can come in over IPv4 and some requests can come in over IPv6.
|
|
||||||
- To make things worse, you can't even assert that users are from the same [BGP autonomous system](<https://en.wikipedia.org/wiki/Autonomous_system_(Internet)>) because some users could have ISPs that are IPv4 only, forcing them to use a different IP address space to get IPv6 internet access. This sounds like it's rare enough, but I personally have to do this even though I pay for 8 gigabit fiber from my ISP and only get IPv4 service from them.
|
|
||||||
|
|
||||||
Amusingly enough, the only part of this that has survived is the assertion that a user hasn't changed their `User-Agent` string. Maybe [that one guy that sets his Chrome version to `150`](https://github.com/TecharoHQ/anubis/issues/239) would have issues, but so far I've not seen any evidence that a client randomly changing their user agent between challenge issuance and solving can possibly be legitimate.
|
|
||||||
|
|
||||||
As a result, the entire subsystem that generated challenges before had to be ripped out and rewritten from scratch.
|
|
||||||
|
|
||||||
It was replaced with a new flow that stores data on the server side, compares that data against what the client responds with, and then checks pass/fail that way:
|
|
||||||
|
|
||||||
```mermaid
|
|
||||||
---
|
|
||||||
title: New challenge flow
|
|
||||||
---
|
|
||||||
sequenceDiagram
|
|
||||||
User->>+Anubis: GET /wiki/some-page
|
|
||||||
Anubis->>+Make Challenge: Generate a challenge string
|
|
||||||
Make Challenge->>+Store: Store info for challenge 1234
|
|
||||||
Make Challenge->>-Anubis: Challenge string: taco salad, ID 1234
|
|
||||||
Anubis->>-User: HTTP 401 solve a challenge
|
|
||||||
User->>+Anubis: GET internal-api/pass-challenge, challenge 1234
|
|
||||||
Anubis->>+Validate Challenge: verify challenge 1234
|
|
||||||
Validate Challenge->>+Store: Get info for challenge 1234
|
|
||||||
Store->>-Validate Challenge: Here you go!
|
|
||||||
Validate Challenge->>-Anubis: Valid ✅
|
|
||||||
Anubis->>+User: Here's a cookie to get past Anubis
|
|
||||||
```
|
|
||||||
|
|
||||||
As a result, the [challenge format](#challenge-format-change) had to change. Old cookies will still be validated, but the next minor version (v1.22.0) will include validation to ensure that all challenges are accounted for on the server side. This data is stored in the active [storage backend](/docs/admin/policies/#storage-backends) for up to 30 minutes. This also fixes [#746](https://github.com/TecharoHQ/anubis/issues/746) and other similar instances of this issue.
|
|
||||||
|
|
||||||
### Challenge format change
|
|
||||||
|
|
||||||
Previously Anubis did no accounting for challenges that it issued. This means that if Anubis restarted during a client, the client would be able to proceed once Anubis came back online.
|
|
||||||
|
|
||||||
During the upgrade to v1.21.0 and when v1.21.0 (or later) restarts with the [in-memory storage backend](/docs/admin/policies/#memory), you may see a higher rate of failed challenges than normal. If this persists beyond a few minutes, [open an issue](https://github.com/TecharoHQ/anubis/issues/new).
|
|
||||||
|
|
||||||
If you are using the in-memory storage backend, please consider using [a different storage backend](/docs/admin/policies/#storage-backends).
|
|
||||||
|
|
||||||
### Storage
|
|
||||||
|
|
||||||
Anubis offers a few different storage backends depending on your needs:
|
|
||||||
|
|
||||||
| Backend | Description |
|
|
||||||
| :--------------------------------------- | :------------------------------------------------------------------------------------------------------------- |
|
|
||||||
| [`memory`](/docs/admin/policies/#memory) | An in-memory hashmap that is cleared when Anubis is restarted. |
|
|
||||||
| [`bbolt`](/docs/admin/policies/#bbolt) | A memory-mapped key/value store that can persist between Anubis restarts. |
|
|
||||||
| [`valkey`](/docs/admin/policies/#valkey) | A networked key/value store that can persist between Anubis restarts and coordinate across multiple instances. |
|
|
||||||
|
|
||||||
Please review the documentation for each storage method to figure out the one best for your needs. If you aren't sure, consult this diagram:
|
|
||||||
|
|
||||||
```mermaid
|
|
||||||
---
|
|
||||||
title: What storage backend do I need?
|
|
||||||
---
|
|
||||||
flowchart TD
|
|
||||||
OneInstance{Do you only have
|
|
||||||
one instance of
|
|
||||||
Anubis?}
|
|
||||||
Persistence{Do you have
|
|
||||||
persistent disk
|
|
||||||
access in your
|
|
||||||
environment?}
|
|
||||||
bbolt[(bbolt)]
|
|
||||||
memory[(memory)]
|
|
||||||
valkey[(valkey)]
|
|
||||||
OneInstance -->|Yes| Persistence
|
|
||||||
OneInstance -->|No| valkey
|
|
||||||
Persistence -->|Yes| bbolt
|
|
||||||
Persistence -->|No| memory
|
|
||||||
```
|
|
||||||
|
|
||||||
## Breaking change: systemd `RuntimeDirectory` change
|
|
||||||
|
|
||||||
The following potentially breaking change applies to native installs with systemd only:
|
|
||||||
|
|
||||||
Each instance of systemd service template now has a unique `RuntimeDirectory`, as opposed to each instance of the service sharing a `RuntimeDirectory`. This change was made to avoid [the `RuntimeDirectory` getting nuked](https://github.com/TecharoHQ/anubis/issues/748) any time one of the Anubis instances restarts.
|
|
||||||
|
|
||||||
If you configured Anubis' unix sockets to listen on `/run/anubis/foo.sock` for instance `anubis@foo`, you will need to configure Anubis to listen on `/run/anubis/foo/foo.sock` and additionally configure your HTTP load balancer as appropriate.
|
|
||||||
|
|
||||||
If you need the legacy behaviour, install this [systemd unit dropin](https://www.flatcar.org/docs/latest/setup/systemd/drop-in-units/):
|
|
||||||
|
|
||||||
```systemd
|
|
||||||
# /etc/systemd/system/anubis@.service.d/50-runtimedir.conf
|
|
||||||
[Service]
|
|
||||||
RuntimeDirectory=anubis
|
|
||||||
```
|
|
||||||
|
|
||||||
Just keep in mind that this will cause problems when Anubis restarts.
|
|
||||||
|
|
||||||
## What's up next?
|
|
||||||
|
|
||||||
The biggest things we want to do in the next release (in no particular order):
|
|
||||||
|
|
||||||
- A rewrite of bot checking rule configuration syntax to make it less ambiguous.
|
|
||||||
- [JA4](https://blog.foxio.io/ja4+-network-fingerprinting) (and other forms of) fingerprinting and coordination with [Thoth](/docs/admin/thoth/) to allow clients with high aggregate pass rates through without seeing Anubis at all.
|
|
||||||
- Advanced heuristics for [users of the unbranded variant of Anubis](/docs/admin/botstopper/).
|
|
||||||
- Optimize the release flow so that releases can be triggered and executed by continuous integration tools. The ultimate goal is to make it possible to release Anubis in 15 minutes after pressing a single "mint release" button.
|
|
||||||
- Add "hot reloading" support to Anubis, allowing administrators to update the rules without restarting the service.
|
|
||||||
- Fix [multiple slash support](https://github.com/TecharoHQ/anubis/issues/754) for web applications that require optional path variables.
|
|
||||||
- Add weight to "brand new" clients.
|
|
||||||
- Implement a "maze" feature that tries to get crawlers ensnared in a maze of random links so that clients that are more than 20 links in can be reported to the home base.
|
|
||||||
- Open [Thoth-based advanced checks](/docs/admin/thoth/) to more users with an easier onboarding flow.
|
|
||||||
- More smoke tests including for browsers like [Pale Moon](https://www.palemoon.org/).
|
|
||||||
Binary file not shown.
|
Before Width: | Height: | Size: 14 KiB |
|
|
@ -1,60 +0,0 @@
|
||||||
---
|
|
||||||
slug: 2025/funding-update
|
|
||||||
title: Funding update
|
|
||||||
authors: [xe]
|
|
||||||
tags: [funding]
|
|
||||||
image: around-the-bend.webp
|
|
||||||
---
|
|
||||||
|
|
||||||

|
|
||||||
|
|
||||||
As we finish up work on [all of the features in the next release of Anubis](/docs/CHANGELOG#unreleased), I took a moment to add up the financials and here's an update on the recurring revenue of the project. Once I reach the [$5000 per month](https://github.com/TecharoHQ/anubis/discussions/278) mark, I can start reducing hours at my dayjob and start to make working on Anubis my full time job.
|
|
||||||
|
|
||||||
{/* truncate */}
|
|
||||||
|
|
||||||
Note that this only counts _recurring_ revenue (subscriptions to [BotStopper](/docs/admin/botstopper) and monthly repeating donations). Every one of the one-time donations I get is a gift and I am grateful for them, but I cannot make critically important financial decisions off of sporadic one-time donations.
|
|
||||||
|
|
||||||
:::note
|
|
||||||
|
|
||||||
All currency figures in this article are USD (United States Dollars) unless denoted otherwise.
|
|
||||||
|
|
||||||
:::
|
|
||||||
|
|
||||||
Here's the funding breakdown by income stream:
|
|
||||||
|
|
||||||
```mermaid
|
|
||||||
pie title Funding update August 2025
|
|
||||||
"GitHub Sponsors" : 3500
|
|
||||||
"Patreon" : 1500
|
|
||||||
"Liberapay" : 100
|
|
||||||
"Remaining" : 4800
|
|
||||||
```
|
|
||||||
|
|
||||||
Assuming that some of my private support contracts and other sales effort go through, this will slightly change the shapes of this (a new pie chart segment will emerge for "Manual invoices"), but I am halfway there. This is a huge bar to pass and as it stands right now this is just enough income to pay for my monthly rent (not accounting for tax).
|
|
||||||
|
|
||||||
As a reminder, here's the rough plan for the phases I want to hit based on the _recurring_ donation totals:
|
|
||||||
|
|
||||||
| Monthly donations | Details |
|
|
||||||
| :-------------------------- | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
|
||||||
| $0-5,000 per month | Anubis is a nights and weekends project based on how much spare time and energy I have. |
|
|
||||||
| $5,000-10,000 per month | Anubis gets 1-2 days per week of my time put into it consistently and I go part-time at my dayjob. |
|
|
||||||
| $10,000-15,000 per month | Anubis becomes my full time job. Features that are currently exclusive to [BotStopper](/docs/admin/botstopper/) start to trickle down to the open source version of Anubis. |
|
|
||||||
| $15,000 per month and above | I start planning hiring for Techaro. |
|
|
||||||
|
|
||||||
If your organization benefits from Anubis, please consider donating to the project in order to make this sustainable. The fewer financial problems I have means the more that Anubis can become better.
|
|
||||||
|
|
||||||
## New funding platform: Liberapay
|
|
||||||
|
|
||||||
After many comments about the funding options, I have set up [Liberapay](https://liberapay.com/Xe/) as an option to receive donations. Additional funding targets will be added to Liberapay as soon as I hear back from my accountant with more information. All money received via Liberapay goes directly towards supporting the project.
|
|
||||||
|
|
||||||
## Next goals
|
|
||||||
|
|
||||||
Here's my short term goals for the immediate future:
|
|
||||||
|
|
||||||
1. Finish [Thoth](/docs/admin/thoth/) and run a backfill to mass issue API keys.
|
|
||||||
2. Document and publish the writeup for the multi-region Google Cloud spot instance setup that Thoth is built upon.
|
|
||||||
3. Release v1.22.0 of Anubis with Traefik support and other important fixes.
|
|
||||||
4. Continue growing the project into a sustainable business.
|
|
||||||
5. Work through the [blog backlog](https://github.com/TecharoHQ/anubis/issues?q=is%3Aissue%20state%3Aopen%20label%3Ablog) to document the thoughts behind Anubis and how parts of it work.
|
|
||||||
|
|
||||||
Thank you for supporting Anubis! It's only going to get better from here.
|
|
||||||
|
|
@ -1,214 +0,0 @@
|
||||||
import React, { useState, useEffect, useMemo } from 'react';
|
|
||||||
import styles from './styles.module.css';
|
|
||||||
|
|
||||||
// A helper function to perform SHA-256 hashing.
|
|
||||||
// It takes a string, encodes it, hashes it, and returns a hex string.
|
|
||||||
async function sha256(message) {
|
|
||||||
try {
|
|
||||||
const msgBuffer = new TextEncoder().encode(message);
|
|
||||||
const hashBuffer = await crypto.subtle.digest('SHA-256', msgBuffer);
|
|
||||||
const hashArray = Array.from(new Uint8Array(hashBuffer));
|
|
||||||
const hashHex = hashArray.map(b => b.toString(16).padStart(2, '0')).join('');
|
|
||||||
return hashHex;
|
|
||||||
} catch (error) {
|
|
||||||
console.error("Hashing failed:", error);
|
|
||||||
return "Error hashing data";
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Generates a random hex string of a given byte length
|
|
||||||
const generateRandomHex = (bytes = 16) => {
|
|
||||||
const buffer = new Uint8Array(bytes);
|
|
||||||
crypto.getRandomValues(buffer);
|
|
||||||
return Array.from(buffer)
|
|
||||||
.map(byte => byte.toString(16).padStart(2, '0'))
|
|
||||||
.join('');
|
|
||||||
};
|
|
||||||
|
|
||||||
|
|
||||||
// Icon components for better visual feedback
|
|
||||||
const CheckIcon = () => (
|
|
||||||
<svg xmlns="http://www.w3.org/2000/svg" className={styles.iconGreen} fill="none" viewBox="0 0 24 24" stroke="currentColor">
|
|
||||||
<path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M9 12l2 2 4-4m6 2a9 9 0 11-18 0 9 9 0 0118 0z" />
|
|
||||||
</svg>
|
|
||||||
);
|
|
||||||
|
|
||||||
const XCircleIcon = () => (
|
|
||||||
<svg xmlns="http://www.w3.org/2000/svg" className={styles.iconRed} fill="none" viewBox="0 0 24 24" stroke="currentColor">
|
|
||||||
<path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M10 14l2-2m0 0l2-2m-2 2l-2-2m2 2l2 2m7-2a9 9 0 11-18 0 9 9 0 0118 0z" />
|
|
||||||
</svg>
|
|
||||||
);
|
|
||||||
|
|
||||||
// Main Application Component
|
|
||||||
export default function App() {
|
|
||||||
// State for the challenge, initialized with a random 16-byte hex string.
|
|
||||||
const [challenge, setChallenge] = useState(() => generateRandomHex(16));
|
|
||||||
// State for the nonce, which is the variable we can change
|
|
||||||
const [nonce, setNonce] = useState(0);
|
|
||||||
// State to store the resulting hash
|
|
||||||
const [hash, setHash] = useState('');
|
|
||||||
// A flag to indicate if the current hash is the "winning" one
|
|
||||||
const [isMining, setIsMining] = useState(false);
|
|
||||||
const [isFound, setIsFound] = useState(false);
|
|
||||||
|
|
||||||
// The mining difficulty, i.e., the required number of leading zeros
|
|
||||||
const difficulty = "00";
|
|
||||||
|
|
||||||
// Memoize the combined data to avoid recalculating on every render
|
|
||||||
const combinedData = useMemo(() => `${challenge}${nonce}`, [challenge, nonce]);
|
|
||||||
|
|
||||||
// This effect hook recalculates the hash whenever the combinedData changes.
|
|
||||||
useEffect(() => {
|
|
||||||
let isMounted = true;
|
|
||||||
const calculateHash = async () => {
|
|
||||||
const calculatedHash = await sha256(combinedData);
|
|
||||||
if (isMounted) {
|
|
||||||
setHash(calculatedHash);
|
|
||||||
setIsFound(calculatedHash.startsWith(difficulty));
|
|
||||||
}
|
|
||||||
};
|
|
||||||
calculateHash();
|
|
||||||
return () => { isMounted = false; };
|
|
||||||
}, [combinedData, difficulty]);
|
|
||||||
|
|
||||||
// This effect handles the automatic mining process
|
|
||||||
useEffect(() => {
|
|
||||||
if (!isMining) return;
|
|
||||||
|
|
||||||
let miningNonce = nonce;
|
|
||||||
let continueMining = true;
|
|
||||||
|
|
||||||
const mine = async () => {
|
|
||||||
while (continueMining) {
|
|
||||||
const currentData = `${challenge}${miningNonce}`;
|
|
||||||
const currentHash = await sha256(currentData);
|
|
||||||
|
|
||||||
if (currentHash.startsWith(difficulty)) {
|
|
||||||
setNonce(miningNonce);
|
|
||||||
setIsMining(false);
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
|
|
||||||
miningNonce++;
|
|
||||||
// Update the UI periodically to avoid freezing the browser
|
|
||||||
if (miningNonce % 100 === 0) {
|
|
||||||
setNonce(miningNonce);
|
|
||||||
await new Promise(resolve => setTimeout(resolve, 0)); // Yield to the browser
|
|
||||||
}
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
mine();
|
|
||||||
|
|
||||||
return () => {
|
|
||||||
continueMining = false;
|
|
||||||
}
|
|
||||||
}, [isMining, challenge, nonce, difficulty]);
|
|
||||||
|
|
||||||
|
|
||||||
const handleMineClick = () => {
|
|
||||||
setIsMining(true);
|
|
||||||
}
|
|
||||||
|
|
||||||
const handleStopClick = () => {
|
|
||||||
setIsMining(false);
|
|
||||||
}
|
|
||||||
|
|
||||||
const handleResetClick = () => {
|
|
||||||
setIsMining(false);
|
|
||||||
setNonce(0);
|
|
||||||
}
|
|
||||||
|
|
||||||
const handleNewChallengeClick = () => {
|
|
||||||
setIsMining(false);
|
|
||||||
setChallenge(generateRandomHex(16));
|
|
||||||
setNonce(0);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Helper to render the hash with colored leading characters
|
|
||||||
const renderHash = () => {
|
|
||||||
if (!hash) return <span>...</span>;
|
|
||||||
const prefix = hash.substring(0, difficulty.length);
|
|
||||||
const suffix = hash.substring(difficulty.length);
|
|
||||||
const prefixColor = isFound ? styles.hashPrefixGreen : styles.hashPrefixRed;
|
|
||||||
return (
|
|
||||||
<>
|
|
||||||
<span className={`${prefixColor} ${styles.hashPrefix}`}>{prefix}</span>
|
|
||||||
<span className={styles.hashSuffix}>{suffix}</span>
|
|
||||||
</>
|
|
||||||
);
|
|
||||||
};
|
|
||||||
|
|
||||||
return (
|
|
||||||
<div className={styles.container}>
|
|
||||||
<div className={styles.innerContainer}>
|
|
||||||
<div className={styles.grid}>
|
|
||||||
{/* Challenge Block */}
|
|
||||||
<div className={styles.block}>
|
|
||||||
<h2 className={styles.blockTitle}>1. Challenge</h2>
|
|
||||||
<p className={styles.challengeText}>{challenge}</p>
|
|
||||||
</div>
|
|
||||||
|
|
||||||
{/* Nonce Control Block */}
|
|
||||||
<div className={styles.block}>
|
|
||||||
<h2 className={styles.blockTitle}>2. Nonce</h2>
|
|
||||||
<div className={styles.nonceControls}>
|
|
||||||
<button onClick={() => setNonce(n => n - 1)} disabled={isMining} className={styles.nonceButton}>
|
|
||||||
<svg xmlns="http://www.w3.org/2000/svg" className={styles.iconSmall} fill="none" viewBox="0 0 24 24" stroke="currentColor"><path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M20 12H4" /></svg>
|
|
||||||
</button>
|
|
||||||
<span className={styles.nonceValue}>{nonce}</span>
|
|
||||||
<button onClick={() => setNonce(n => n + 1)} disabled={isMining} className={styles.nonceButton}>
|
|
||||||
<svg xmlns="http://www.w3.org/2000/svg" className={styles.iconSmall} fill="none" viewBox="0 0 24 24" stroke="currentColor"><path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M12 4v16m8-8H4" /></svg>
|
|
||||||
</button>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
|
|
||||||
{/* Combined Data Block */}
|
|
||||||
<div className={styles.block}>
|
|
||||||
<h2 className={styles.blockTitle}>3. Combined Data</h2>
|
|
||||||
<p className={styles.combinedDataText}>{combinedData}</p>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
|
|
||||||
{/* Arrow pointing down */}
|
|
||||||
<div className={styles.arrowContainer}>
|
|
||||||
<svg xmlns="http://www.w3.org/2000/svg" className={styles.iconGray} fill="none" viewBox="0 0 24 24" stroke="currentColor">
|
|
||||||
<path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M19 14l-7 7m0 0l-7-7m7 7V3" />
|
|
||||||
</svg>
|
|
||||||
</div>
|
|
||||||
|
|
||||||
{/* Hash Output Block */}
|
|
||||||
<div className={`${styles.hashContainer} ${isFound ? styles.hashContainerSuccess : styles.hashContainerError}`}>
|
|
||||||
<div className={styles.hashContent}>
|
|
||||||
<div className={styles.hashText}>
|
|
||||||
<h2 className={styles.blockTitle}>4. Resulting Hash (SHA-256)</h2>
|
|
||||||
<p className={styles.hashValue}>{renderHash()}</p>
|
|
||||||
</div>
|
|
||||||
<div className={styles.hashIcon}>
|
|
||||||
{isFound ? <CheckIcon /> : <XCircleIcon />}
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
|
|
||||||
{/* Mining Controls */}
|
|
||||||
<div className={styles.buttonContainer}>
|
|
||||||
{!isMining ? (
|
|
||||||
<button onClick={handleMineClick} className={`${styles.button} ${styles.buttonCyan}`}>
|
|
||||||
Auto-Mine
|
|
||||||
</button>
|
|
||||||
) : (
|
|
||||||
<button onClick={handleStopClick} className={`${styles.button} ${styles.buttonYellow}`}>
|
|
||||||
Stop Mining
|
|
||||||
</button>
|
|
||||||
)}
|
|
||||||
<button onClick={handleNewChallengeClick} className={`${styles.button} ${styles.buttonIndigo}`}>
|
|
||||||
New Challenge
|
|
||||||
</button>
|
|
||||||
<button onClick={handleResetClick} className={`${styles.button} ${styles.buttonGray}`}>
|
|
||||||
Reset Nonce
|
|
||||||
</button>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
@ -1,366 +0,0 @@
|
||||||
/* Main container styles */
|
|
||||||
.container {
|
|
||||||
display: flex;
|
|
||||||
flex-direction: column;
|
|
||||||
align-items: center;
|
|
||||||
justify-content: center;
|
|
||||||
color: white;
|
|
||||||
font-family: ui-sans-serif, system-ui, sans-serif;
|
|
||||||
margin-top: 2rem;
|
|
||||||
margin-bottom: 2rem;
|
|
||||||
}
|
|
||||||
|
|
||||||
.innerContainer {
|
|
||||||
width: 100%;
|
|
||||||
max-width: 56rem;
|
|
||||||
margin: 0 auto;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Header styles */
|
|
||||||
.header {
|
|
||||||
text-align: center;
|
|
||||||
margin-bottom: 2.5rem;
|
|
||||||
}
|
|
||||||
|
|
||||||
.title {
|
|
||||||
font-size: 2.25rem;
|
|
||||||
font-weight: 700;
|
|
||||||
color: rgb(34 211 238);
|
|
||||||
}
|
|
||||||
|
|
||||||
.subtitle {
|
|
||||||
font-size: 1.125rem;
|
|
||||||
color: rgb(156 163 175);
|
|
||||||
margin-top: 0.5rem;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Grid layout styles */
|
|
||||||
.grid {
|
|
||||||
display: grid;
|
|
||||||
grid-template-columns: repeat(3, 1fr);
|
|
||||||
gap: 1rem;
|
|
||||||
align-items: center;
|
|
||||||
text-align: center;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Block styles */
|
|
||||||
.block {
|
|
||||||
background-color: rgb(31 41 55);
|
|
||||||
padding: 1.5rem;
|
|
||||||
border-radius: 0.5rem;
|
|
||||||
box-shadow: 0 10px 15px -3px rgb(0 0 0 / 0.1), 0 4px 6px -4px rgb(0 0 0 / 0.1);
|
|
||||||
height: 100%;
|
|
||||||
display: flex;
|
|
||||||
flex-direction: column;
|
|
||||||
justify-content: center;
|
|
||||||
}
|
|
||||||
|
|
||||||
.blockTitle {
|
|
||||||
font-size: 1.125rem;
|
|
||||||
font-weight: 600;
|
|
||||||
color: rgb(34 211 238);
|
|
||||||
margin-bottom: 0.5rem;
|
|
||||||
}
|
|
||||||
|
|
||||||
.challengeText {
|
|
||||||
font-size: 0.875rem;
|
|
||||||
color: rgb(209 213 219);
|
|
||||||
word-break: break-all;
|
|
||||||
font-family: ui-monospace, SFMono-Regular, monospace;
|
|
||||||
}
|
|
||||||
|
|
||||||
.combinedDataText {
|
|
||||||
font-size: 0.875rem;
|
|
||||||
color: rgb(156 163 175);
|
|
||||||
word-break: break-all;
|
|
||||||
font-family: ui-monospace, SFMono-Regular, monospace;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Nonce control styles */
|
|
||||||
.nonceControls {
|
|
||||||
display: flex;
|
|
||||||
align-items: center;
|
|
||||||
justify-content: center;
|
|
||||||
gap: 1rem;
|
|
||||||
}
|
|
||||||
|
|
||||||
.nonceButton {
|
|
||||||
background-color: rgb(55 65 81);
|
|
||||||
border-radius: 9999px;
|
|
||||||
padding: 0.5rem;
|
|
||||||
transition: background-color 200ms;
|
|
||||||
}
|
|
||||||
|
|
||||||
.nonceButton:hover:not(:disabled) {
|
|
||||||
background-color: rgb(34 211 238);
|
|
||||||
}
|
|
||||||
|
|
||||||
.nonceButton:disabled {
|
|
||||||
opacity: 0.5;
|
|
||||||
cursor: not-allowed;
|
|
||||||
}
|
|
||||||
|
|
||||||
.nonceValue {
|
|
||||||
font-size: 1.5rem;
|
|
||||||
font-family: ui-monospace, SFMono-Regular, monospace;
|
|
||||||
width: 6rem;
|
|
||||||
text-align: center;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Icon styles */
|
|
||||||
.icon {
|
|
||||||
height: 2rem;
|
|
||||||
width: 2rem;
|
|
||||||
}
|
|
||||||
|
|
||||||
.iconGreen {
|
|
||||||
height: 2rem;
|
|
||||||
width: 2rem;
|
|
||||||
color: rgb(74 222 128);
|
|
||||||
}
|
|
||||||
|
|
||||||
.iconRed {
|
|
||||||
height: 2rem;
|
|
||||||
width: 2rem;
|
|
||||||
color: rgb(248 113 113);
|
|
||||||
}
|
|
||||||
|
|
||||||
.iconSmall {
|
|
||||||
height: 1.5rem;
|
|
||||||
width: 1.5rem;
|
|
||||||
}
|
|
||||||
|
|
||||||
.iconGray {
|
|
||||||
height: 2.5rem;
|
|
||||||
width: 2.5rem;
|
|
||||||
color: rgb(75 85 99);
|
|
||||||
animation: pulse 2s cubic-bezier(0.4, 0, 0.6, 1) infinite;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Arrow animation */
|
|
||||||
@keyframes pulse {
|
|
||||||
0%,
|
|
||||||
100% {
|
|
||||||
opacity: 1;
|
|
||||||
}
|
|
||||||
50% {
|
|
||||||
opacity: 0.5;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
.arrowContainer {
|
|
||||||
display: flex;
|
|
||||||
justify-content: center;
|
|
||||||
margin: 1.5rem 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Hash output styles */
|
|
||||||
.hashContainer {
|
|
||||||
padding: 1.5rem;
|
|
||||||
border-radius: 0.5rem;
|
|
||||||
box-shadow: 0 10px 15px -3px rgb(0 0 0 / 0.1), 0 4px 6px -4px rgb(0 0 0 / 0.1);
|
|
||||||
transition: all 300ms;
|
|
||||||
border: 2px solid;
|
|
||||||
}
|
|
||||||
|
|
||||||
.hashContainerSuccess {
|
|
||||||
background-color: rgb(20 83 45 / 0.5);
|
|
||||||
border-color: rgb(74 222 128);
|
|
||||||
}
|
|
||||||
|
|
||||||
.hashContainerError {
|
|
||||||
background-color: rgb(127 29 29 / 0.5);
|
|
||||||
border-color: rgb(248 113 113);
|
|
||||||
}
|
|
||||||
|
|
||||||
.hashContent {
|
|
||||||
display: flex;
|
|
||||||
flex-direction: column;
|
|
||||||
align-items: center;
|
|
||||||
justify-content: space-between;
|
|
||||||
}
|
|
||||||
|
|
||||||
.hashText {
|
|
||||||
text-align: center;
|
|
||||||
}
|
|
||||||
|
|
||||||
.hashTextLg {
|
|
||||||
text-align: left;
|
|
||||||
}
|
|
||||||
|
|
||||||
.hashValue {
|
|
||||||
font-size: 0.875rem;
|
|
||||||
word-break: break-all;
|
|
||||||
}
|
|
||||||
|
|
||||||
.hashValueLg {
|
|
||||||
font-size: 1rem;
|
|
||||||
word-break: break-all;
|
|
||||||
}
|
|
||||||
|
|
||||||
.hashIcon {
|
|
||||||
margin-top: 1rem;
|
|
||||||
}
|
|
||||||
|
|
||||||
.hashIconLg {
|
|
||||||
margin-top: 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Hash highlighting */
|
|
||||||
.hashPrefix {
|
|
||||||
font-family: ui-monospace, SFMono-Regular, monospace;
|
|
||||||
}
|
|
||||||
|
|
||||||
.hashPrefixGreen {
|
|
||||||
color: rgb(74 222 128);
|
|
||||||
}
|
|
||||||
|
|
||||||
.hashPrefixRed {
|
|
||||||
color: rgb(248 113 113);
|
|
||||||
}
|
|
||||||
|
|
||||||
.hashSuffix {
|
|
||||||
font-family: ui-monospace, SFMono-Regular, monospace;
|
|
||||||
color: rgb(156 163 175);
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Button styles */
|
|
||||||
.buttonContainer {
|
|
||||||
margin-top: 2rem;
|
|
||||||
display: flex;
|
|
||||||
align-items: center;
|
|
||||||
justify-content: center;
|
|
||||||
gap: 1rem;
|
|
||||||
}
|
|
||||||
|
|
||||||
.button {
|
|
||||||
font-weight: 700;
|
|
||||||
padding: 0.75rem 1.5rem;
|
|
||||||
border-radius: 0.5rem;
|
|
||||||
transition: transform 150ms;
|
|
||||||
}
|
|
||||||
|
|
||||||
.button:hover {
|
|
||||||
transform: scale(1.05);
|
|
||||||
}
|
|
||||||
|
|
||||||
.buttonCyan {
|
|
||||||
background-color: rgb(8 145 178);
|
|
||||||
color: white;
|
|
||||||
}
|
|
||||||
|
|
||||||
.buttonCyan:hover {
|
|
||||||
background-color: rgb(6 182 212);
|
|
||||||
}
|
|
||||||
|
|
||||||
.buttonYellow {
|
|
||||||
background-color: rgb(202 138 4);
|
|
||||||
color: white;
|
|
||||||
}
|
|
||||||
|
|
||||||
.buttonYellow:hover {
|
|
||||||
background-color: rgb(245 158 11);
|
|
||||||
}
|
|
||||||
|
|
||||||
.buttonIndigo {
|
|
||||||
background-color: rgb(79 70 229);
|
|
||||||
color: white;
|
|
||||||
}
|
|
||||||
|
|
||||||
.buttonIndigo:hover {
|
|
||||||
background-color: rgb(99 102 241);
|
|
||||||
}
|
|
||||||
|
|
||||||
.buttonGray {
|
|
||||||
background-color: rgb(55 65 81);
|
|
||||||
color: white;
|
|
||||||
}
|
|
||||||
|
|
||||||
.buttonGray:hover {
|
|
||||||
background-color: rgb(75 85 99);
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Responsive styles */
|
|
||||||
@media (min-width: 768px) {
|
|
||||||
.title {
|
|
||||||
font-size: 3rem;
|
|
||||||
}
|
|
||||||
|
|
||||||
.grid {
|
|
||||||
grid-template-columns: repeat(3, 1fr);
|
|
||||||
gap: 1rem;
|
|
||||||
}
|
|
||||||
|
|
||||||
.hashContent {
|
|
||||||
flex-direction: row;
|
|
||||||
}
|
|
||||||
|
|
||||||
.hashText {
|
|
||||||
text-align: left;
|
|
||||||
}
|
|
||||||
|
|
||||||
.hashValue {
|
|
||||||
font-size: 1rem;
|
|
||||||
}
|
|
||||||
|
|
||||||
.hashIcon {
|
|
||||||
margin-top: 0;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
@media (max-width: 767px) {
|
|
||||||
.grid {
|
|
||||||
display: flex;
|
|
||||||
flex-direction: column;
|
|
||||||
gap: 1rem;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
@media (prefers-color-scheme: light) {
|
|
||||||
.block {
|
|
||||||
background-color: oklch(93% 0.034 272.788);
|
|
||||||
}
|
|
||||||
|
|
||||||
.challengeText {
|
|
||||||
color: oklch(12.9% 0.042 264.695);
|
|
||||||
}
|
|
||||||
|
|
||||||
.combinedDataText {
|
|
||||||
color: oklch(12.9% 0.042 264.695);
|
|
||||||
}
|
|
||||||
|
|
||||||
.nonceButton {
|
|
||||||
background-color: oklch(88.2% 0.059 254.128);
|
|
||||||
}
|
|
||||||
|
|
||||||
.nonceValue {
|
|
||||||
color: oklch(12.9% 0.042 264.695);
|
|
||||||
}
|
|
||||||
|
|
||||||
.blockTitle {
|
|
||||||
color: oklch(45% 0.085 224.283);
|
|
||||||
}
|
|
||||||
|
|
||||||
.hashContainerSuccess {
|
|
||||||
background-color: oklch(95% 0.052 163.051);
|
|
||||||
border-color: rgb(74 222 128);
|
|
||||||
}
|
|
||||||
|
|
||||||
.hashContainerError {
|
|
||||||
background-color: oklch(94.1% 0.03 12.58);
|
|
||||||
border-color: rgb(248 113 113);
|
|
||||||
}
|
|
||||||
|
|
||||||
.hashPrefixGreen {
|
|
||||||
color: oklch(53.2% 0.157 131.589);
|
|
||||||
font-weight: 600;
|
|
||||||
}
|
|
||||||
|
|
||||||
.hashPrefixRed {
|
|
||||||
color: oklch(45.5% 0.188 13.697);
|
|
||||||
}
|
|
||||||
|
|
||||||
.hashSuffix {
|
|
||||||
color: oklch(27.9% 0.041 260.031);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
@ -1,129 +0,0 @@
|
||||||
---
|
|
||||||
slug: 2025/cpu-core-odd
|
|
||||||
title: Sometimes CPU cores are odd
|
|
||||||
description: "TL;DR: all the assumptions you have about processor design are wrong and if you are unlucky you will never run into problems that users do through sheer chance."
|
|
||||||
authors: [xe]
|
|
||||||
tags:
|
|
||||||
- bugfix
|
|
||||||
- implementation
|
|
||||||
image: parc-dsilence.webp
|
|
||||||
---
|
|
||||||
|
|
||||||
import ProofOfWorkDiagram from "./ProofOfWorkDiagram";
|
|
||||||
|
|
||||||

|
|
||||||
|
|
||||||
One of the biggest lessons that I've learned in my career is that all software has bugs, and the more complicated your software gets the more complicated your bugs get. A lot of the time those bugs will be fairly obvious and easy to spot, validate, and replicate. Sometimes, the process of fixing it will uncover your core assumptions about how things work in ways that will leave you feeling like you just got trolled.
|
|
||||||
|
|
||||||
Today I'm going to talk about a single line fix that prevents people on a large number of devices from having weird irreproducible issues with Anubis rejecting people when it frankly shouldn't. Stick around, it's gonna be a wild ride.
|
|
||||||
|
|
||||||
{/* truncate */}
|
|
||||||
|
|
||||||
## How this happened
|
|
||||||
|
|
||||||
Anubis is a web application firewall that tries to make sure that the client is a browser. It uses a few [challenge methods](/docs/admin/configuration/challenges/) to do this determination, but the main method is the [proof of work](/docs/admin/configuration/challenges/proof-of-work/) challenge which makes clients grind away at cryptographic checksums in order to rate limit clients from connecting too eagerly.
|
|
||||||
|
|
||||||
:::note
|
|
||||||
|
|
||||||
In retrospect implementing the proof of work challenge may have been a mistake and it's likely to be supplanted by things like [Proof of React](https://github.com/TecharoHQ/anubis/pull/1038) or other methods that have yet to be developed. Your patience and polite behaviour in the bug tracker is appreciated.
|
|
||||||
|
|
||||||
:::
|
|
||||||
|
|
||||||
In order to make sure the proof of work challenge screen _goes away as fast as possible_, the [worker code](https://github.com/TecharoHQ/anubis/tree/main/web/js/worker) is optimized within an inch of its digital life. One of the main ways that this code is optimized is with how it's run. Over the last 10-20 years, the main way that CPUs have gotten fast is via increasing multicore performance. Anubis tries to make sure that it can use as many cores as possible in order to take advantage of your device's CPU as much as it can.
|
|
||||||
|
|
||||||
This strategy sometimes has some issues though, for one Firefox seems to get _much slower_ if you have Anubis try to absolutely saturate all of the cores on the system. It also has a fairly high overhead between JavaScript JIT code and [WebCrypto](https://developer.mozilla.org/en-US/docs/Web/API/Web_Crypto_API). I did some testing and found out that Firefox's point of diminishing returns was about half of the CPU cores.
|
|
||||||
|
|
||||||
## Another "invalid response" bug
|
|
||||||
|
|
||||||
One of the complaints I've been getting from users and administrators using Anubis is that they've been running into issues where users get randomly rejected with an error message only saying "invalid response". This happens when the challenge validating process fails. This issue has been blocking the release of the next version of Anubis.
|
|
||||||
|
|
||||||
In order to demonstrate this better, I've made a little interactive diagram for the proof of work process:
|
|
||||||
|
|
||||||
<ProofOfWorkDiagram />
|
|
||||||
|
|
||||||
I've fixed a lot of the easy bugs in Anubis by this point. A lot of what's left is the hard bugs, but also specifically the kinds of hard bugs that involve weird hardware configurations. In order to try and catch these issues before software hits prod, I test Anubis against a bunch of hardware I have locally. Any issues I find and fix before software ships are issues that you don't hit in production.
|
|
||||||
|
|
||||||
Let's consider [the line of code](https://github.com/TecharoHQ/anubis/blob/main/web/js/algorithms/fast.mjs) that was causing this issue:
|
|
||||||
|
|
||||||
```js
|
|
||||||
threads = Math.max(navigator.hardwareConcurrency / 2, 1),
|
|
||||||
```
|
|
||||||
|
|
||||||
This is intended to make your browser spawn a proof of work worker for _half_ of your available CPU cores. If you only have one CPU core, you should only have one worker. Each worker is told the total thread count and uses it as the nonce increment, so that no two workers ever test the same nonce.
|
|
||||||
|
|
||||||
One of the subtle problems here is that all of the parts of this assume that the thread ID and nonce are integers without a decimal portion. Famously, [all JavaScript numbers are IEEE 754 floating point numbers](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Number). Surely there wouldn't be a case where the thread count could be a _decimal_ number, right?
|
|
||||||
|
|
||||||
Here's all the devices I use to test Anubis _and their core counts_:
|
|
||||||
|
|
||||||
| Device Name | Core Count |
|
|
||||||
| :--------------------------- | :--------- |
|
|
||||||
| MacBook Pro M3 Max | 16 |
|
|
||||||
| MacBook Pro M4 Max | 16 |
|
|
||||||
| AMD Ryzen 9 7950x3D | 32 |
|
|
||||||
| Google Pixel 9a (GrapheneOS) | 8 |
|
|
||||||
| iPhone 15 Pro Max | 6 |
|
|
||||||
| iPad Pro (M1) | 8 |
|
|
||||||
| iPad mini | 6 |
|
|
||||||
| Steam Deck | 8 |
|
|
||||||
| Core i5 10600 (homelab) | 12 |
|
|
||||||
| ROG Ally | 16 |
|
|
||||||
|
|
||||||
Notice something? All of those devices have an _even_ number of cores. Some devices such as the [Pixel 8 Pro](https://www.gsmarena.com/google_pixel_8_pro-12545.php) have an _odd_ number of cores. So what happens with that line of code as the JavaScript engine evaluates it?
|
|
||||||
|
|
||||||
Let's replace the [`navigator.hardwareConcurrency`](https://developer.mozilla.org/en-US/docs/Web/API/Navigator/hardwareConcurrency) with the Pixel 8 Pro's 9 cores:
|
|
||||||
|
|
||||||
```js
|
|
||||||
threads = Math.max(9 / 2, 1),
|
|
||||||
```
|
|
||||||
|
|
||||||
Then divide it by two:
|
|
||||||
|
|
||||||
```js
|
|
||||||
threads = Math.max(4.5, 1),
|
|
||||||
```
|
|
||||||
|
|
||||||
Oops, that's not ideal. However `4.5` is bigger than `1`, so [`Math.max`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Math/max) returns that:
|
|
||||||
|
|
||||||
```js
|
|
||||||
threads = 4.5,
|
|
||||||
```
|
|
||||||
|
|
||||||
This means that each time the proof of work equation is calculated, there is a 50% chance that a valid solution would include a nonce with a decimal portion in it. If the client finds a solution with such a nonce, then it would think the client was successful and submit the solution to the server, but the server only expects whole numbers back so it rejects that as an invalid response.
|
|
||||||
|
|
||||||
I keep telling more junior people that when you have the weirdest, most inconsistent bugs in software that it's going to boil down to the dumbest possible thing you can possibly imagine. People don't believe me, then they encounter bugs like this. Then they suddenly believe me.
|
|
||||||
|
|
||||||
Here is the fix:
|
|
||||||
|
|
||||||
```js
|
|
||||||
threads = Math.trunc(Math.max(navigator.hardwareConcurrency / 2, 1)),
|
|
||||||
```
|
|
||||||
|
|
||||||
This uses [`Math.trunc`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Math/trunc) to truncate away the decimal portion so that the Pixel 8 Pro has `4` workers instead of `4.5` workers.
|
|
||||||
|
|
||||||
## Today I learned this was possible
|
|
||||||
|
|
||||||
This was a total "today I learned" moment. I didn't actually think that hardware vendors shipped processors with an odd number of cores, however if you look at the core geometry of the Pixel 8 Pro, it has _three_ tiers of processor cores:
|
|
||||||
|
|
||||||
| Core type | Core model | Number |
|
|
||||||
| :----------------- | :------------------- | :----- |
|
|
||||||
| High performance   | 3 GHz Cortex X3      | 1      |
|
|
||||||
| Medium performance | 2.45 GHz Cortex A715 | 4      |
|
|
||||||
| High efficiency    | 2.15 GHz Cortex A510 | 4      |
|
|
||||||
| Total | | 9 |
|
|
||||||
|
|
||||||
I guess every assumption that developers have about CPU design is probably wrong.
|
|
||||||
|
|
||||||
This probably isn't helped by the fact that for most of my career, the core count in phones has been largely irrelevant and most of the desktop / laptop CPUs I've had (where core count does matter) uses [simultaneous multithreading](https://en.wikipedia.org/wiki/Simultaneous_multithreading) to "multiply" the core count by two.
|
|
||||||
|
|
||||||
The client side fix is a bit of an "emergency stop" button to try and mitigate the badness as early as possible. In general I'm quite aware of the terrible UX involved with this flow failing and I'm still noodling through ways to make that UX better and easier for users / administrators to debug.
|
|
||||||
|
|
||||||
I'm looking into the following:
|
|
||||||
|
|
||||||
1. This could have been prevented on the server side by doing less strict input validation in compliance with [Postel's Law](https://en.wikipedia.org/wiki/Robustness_principle). I feel nervous about making such a security-sensitive endpoint _more liberal_ with the inputs it can accept, but it may be fine? I need to consult with a security expert.
|
|
||||||
2. Showing an encrypted error message on the "invalid response" page so that the user and administrator can work together to fix or report the issue. I remember Google doing this at least once, but I can't recall where I've seen it in the past. Either way, this is probably the most robust method even though it would require developing some additional tooling. I think it would be worth it.
|
|
||||||
|
|
||||||
I'm likely going to go with the second option. I will need to figure out a good flow for this. It's likely going to involve [age](https://github.com/FiloSottile/age). I'll say more about this when I have more to say.
|
|
||||||
|
|
||||||
In the meantime though, looks like I need to expense a used Pixel 8 Pro to add to the testing jungle for Anubis. If anyone has a deal out there, please let me know!
|
|
||||||
|
|
||||||
Thank you to the people that have been polite and helpful when trying to root cause and fix this issue.
|
|
||||||
Binary file not shown.
|
Before Width: | Height: | Size: 18 KiB |
Binary file not shown.
|
Before Width: | Height: | Size: 24 KiB |
|
|
@ -1,75 +0,0 @@
|
||||||
---
|
|
||||||
slug: 2025/file-abuse-reports
|
|
||||||
title: Taking steps to end abusive traffic from cloud providers
|
|
||||||
description: "Learn how to effectively file abuse reports with cloud providers to stop malicious traffic at its source and protect your services from automated abuse."
|
|
||||||
authors: [xe]
|
|
||||||
tags: [abuse, cloud, security, networking]
|
|
||||||
image: goose-pond.webp
|
|
||||||
---
|
|
||||||
|
|
||||||

|
|
||||||
|
|
||||||
As part of Anubis's ongoing development, I've been working to reduce friction for legitimate users by minimizing unnecessary challenge pages. While this improves the user experience, it can potentially expose services to increased abuse from public cloud infrastructure. To help administrators better protect their services, I want to share my strategies for filing abuse reports with IP space owners, enabling us to address malicious scraping at its source.
|
|
||||||
|
|
||||||
{/* truncate */}
|
|
||||||
|
|
||||||
In general, there are two kinds of IP addresses:
|
|
||||||
|
|
||||||
- Residential IP addresses: IP addresses that are allocated to residential customers such as home internet connections and cellular data plans. These IP addresses are increasingly shared between customers due to technologies like [CGNAT](https://en.wikipedia.org/wiki/Carrier-grade_NAT).
|
|
||||||
- Commercial IP addresses: IP addresses that are allocated to commercial customers such as cloud providers, VPS providers, root server providers, and other such business to business companies. These IP addresses are almost always statically allocated to one customer for a very long period of time (typically the lifetime of the server unless they are using things like dedicated IP addresses).
|
|
||||||
|
|
||||||
In general, filing abuse reports to residential IP addresses is a waste of time. The administrators do appreciate knowing what kinds of abusive traffic is causing grief, but many times the users of those IP addresses don't know that their computer is sending abusive traffic to your services. A lot of malware botnets that used to be used with DDOS for hire services are now being used as residential proxies. Those "free VPN apps" are almost certainly making you pay for your usage by making your computer a zombie in a botnet. At some level I really respect the hustle as they manage to sell other people's bandwidth for rates as ludicrous as $1.00 per gigabyte ingressed and egressed.
|
|
||||||
|
|
||||||
:::note
|
|
||||||
|
|
||||||
Keep in mind, I'm talking about the things you can find by searching "free VPN", not infrastructure for the public good like the Tor browser or I2P.
|
|
||||||
|
|
||||||
:::
|
|
||||||
|
|
||||||
What you should really focus on is traffic from commercial IP addresses, such as cloud providers. That's a case where the cloud customer is in direct violation of the acceptable use policy of the provider. Filing abuse reports gets the abuse team of the cloud provider to reach out to that customer and demand corrective action under threat of contractual violence.
|
|
||||||
|
|
||||||
## How to make an abuse report
|
|
||||||
|
|
||||||
In general, the best abuse reports contain the following information:
|
|
||||||
|
|
||||||
- Time of abusive requests.
|
|
||||||
- IP address, User-Agent header, or other unique identifiers that can help the abuse team educate the customer about their misbehaving infrastructure.
|
|
||||||
- Does the abusive IP address request robots.txt? If not, be sure to include that information.
|
|
||||||
- A brief description of the impact to your system such as high system load, pages not rendering, or database system crashes. This helps the provider establish the fact that their customer is causing you measurable harm.
|
|
||||||
- Context as to what your service is, what it does, and why they should care.
|
|
||||||
|
|
||||||
For example, let's say that someone was giving the Anubis docs a series of requests that caused the server to fall over and experience extended downtime. Here's what I would write to the abuse contact:
|
|
||||||
|
|
||||||
> Hello,
|
|
||||||
>
|
|
||||||
> I have received abusive traffic from one of your customers that has resulted in a denial of service to the users of the Anubis documentation website. Anubis is a web application firewall that administrators use to protect their websites against mass scraping and this documentation website helps administrators get started.
|
|
||||||
>
|
|
||||||
> On or about Thursday, October 30th at 04:00 UTC, a flurry of requests from the IP range `127.34.0.0/24` started to hit the `/admin/` routes, which caused unreasonable database load and ended up crashing PostgreSQL. This caused the documentation website to go down for three hours as it happened while the administrators were asleep. Based on logs, this caused 353 distinct users to not be able to load the documentation and the users filed bugs about it.
|
|
||||||
>
|
|
||||||
> I have attached the HTTP frontend logs for the abusive requests from your IP range. To protect our systems in the meantime while we perform additional hardening, I have blocked that IP address range in both our IP firewall and web application firewall configuration. Based on these logs, your customer seems to not have requested the standard `robots.txt` file, which includes instructions to deny access to those routes.
|
|
||||||
>
|
|
||||||
> Please let me know what other information you need on your end.
|
|
||||||
>
|
|
||||||
> Sincerely,
|
|
||||||
>
|
|
||||||
> [normal email signature]
|
|
||||||
|
|
||||||
Then in order to figure out where to send it, look the IP addresses up in the `whois` database. For example, if you want to find the abuse contact for the IP address `1.1.1.1`, use the [whois command](https://packages.debian.org/sid/whois) to find the abuse contact:
|
|
||||||
|
|
||||||
```
|
|
||||||
$ whois 1.1.1.1 | grep -i abuse
|
|
||||||
% Abuse contact for '1.1.1.0 - 1.1.1.255' is 'helpdesk@apnic.net'
|
|
||||||
abuse-c: AA1412-AP
|
|
||||||
remarks: All Cloudflare abuse reporting can be done via
|
|
||||||
remarks: resolver-abuse@cloudflare.com
|
|
||||||
abuse-mailbox: helpdesk@apnic.net
|
|
||||||
role: ABUSE APNICRANDNETAU
|
|
||||||
abuse-mailbox: helpdesk@apnic.net
|
|
||||||
mnt-by: APNIC-ABUSE
|
|
||||||
```
|
|
||||||
|
|
||||||
The abuse contact will be named either `abuse-c` or `abuse-mailbox`. For greatest effect, I suggest including all listed email addresses in your email to the abuse contact.
|
|
||||||
|
|
||||||
Once you send your email, you should expect a response within 2 business days at most. If they don't get back to you, please feel free to [contact me](https://xeiaso.net/contact/) so that the default set of Anubis rules can be edited according to patterns I'm seeing across the ecosystem.
|
|
||||||
|
|
||||||
Just remember that many cloud providers do not know how bad the scraping problem is. Filing abuse complaints makes it their problem. They don't want it to be their problem.
|
|
||||||
|
|
@ -1,9 +0,0 @@
|
||||||
xe:
|
|
||||||
name: Xe Iaso
|
|
||||||
title: CEO @ Techaro
|
|
||||||
url: https://github.com/Xe
|
|
||||||
image_url: https://github.com/Xe.png
|
|
||||||
email: xe@techaro.lol
|
|
||||||
page: true
|
|
||||||
socials:
|
|
||||||
github: Xe
|
|
||||||
|
|
@ -1,929 +0,0 @@
|
||||||
---
|
|
||||||
sidebar_position: 999
|
|
||||||
---
|
|
||||||
|
|
||||||
# Changelog
|
|
||||||
|
|
||||||
All notable changes to this project will be documented in this file.
|
|
||||||
|
|
||||||
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
|
|
||||||
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
|
|
||||||
|
|
||||||
## [Unreleased]
|
|
||||||
|
|
||||||
- Add iplist2rule tool that lets admins turn an IP address blocklist into an Anubis ruleset.
|
|
||||||
- Add Polish locale ([#1292](https://github.com/TecharoHQ/anubis/pull/1309))
|
|
||||||
- Fix honeypot and imprint links missing `BASE_PREFIX` when deployed behind a path prefix ([#1402](https://github.com/TecharoHQ/anubis/issues/1402))
|
|
||||||
- Improve idle performance in memory storage
|
|
||||||
|
|
||||||
<!-- This changes the project to: -->
|
|
||||||
|
|
||||||
## v1.24.0: Y'shtola Rhul
|
|
||||||
|
|
||||||
Anubis is back and better than ever! Lots of minor fixes with some big ones interspersed.
|
|
||||||
|
|
||||||
- Fix panic when validating challenges after privacy-mode browsers strip headers and the follow-up request matches an `ALLOW` threshold.
|
|
||||||
- Expose WEIGHT rule matches as Prometheus metrics.
|
|
||||||
- Allow more OCI registry clients [based on feedback](https://github.com/TecharoHQ/anubis/pull/1253#issuecomment-3506744184).
|
|
||||||
- Expose services directory in the embedded `(data)` filesystem.
|
|
||||||
- Add Ukrainian locale ([#1044](https://github.com/TecharoHQ/anubis/pull/1044)).
|
|
||||||
- Allow Renovate as an OCI registry client.
|
|
||||||
- Properly handle 4in6 addresses so that IP matching works with those addresses.
|
|
||||||
- Add support to simple Valkey/Redis cluster mode
|
|
||||||
- Open Graph passthrough now reuses the configured target Host/SNI/TLS settings, so metadata fetches succeed when the upstream certificate differs from the public domain. ([1283](https://github.com/TecharoHQ/anubis/pull/1283))
|
|
||||||
- Stabilize the CVE-2025-24369 regression test by always submitting an invalid proof instead of relying on random POW failures.
|
|
||||||
- Refine the check that ensures the presence of the Accept header to avoid breaking docker clients.
|
|
||||||
- Removed rules intended to reward actual browsers due to abuse in the wild.
|
|
||||||
|
|
||||||
### Dataset poisoning
|
|
||||||
|
|
||||||
Anubis has the ability to engage in [dataset poisoning attacks](https://www.anthropic.com/research/small-samples-poison) using the [dataset poisoning subsystem](./admin/honeypot/overview.mdx). This allows every Anubis instance to be a honeypot to attract and flag abusive scrapers so that no administrator action is required to ban them.
|
|
||||||
|
|
||||||
There is much more information about this feature in [the dataset poisoning subsystem documentation](./admin/honeypot/overview.mdx). Administrators that are interested in learning how this feature works should consult that documentation.
|
|
||||||
|
|
||||||
### Deprecate `report_as` in challenge configuration
|
|
||||||
|
|
||||||
Previously Anubis let you lie to users about the difficulty of a challenge to interfere with operators of malicious scrapers as a psychological attack:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
bots:
|
|
||||||
# Punish any bot with "bot" in the user-agent string
|
|
||||||
# This is known to have a high false-positive rate, use at your own risk
|
|
||||||
- name: generic-bot-catchall
|
|
||||||
user_agent_regex: (?i:bot|crawler)
|
|
||||||
action: CHALLENGE
|
|
||||||
challenge:
|
|
||||||
difficulty: 16 # impossible
|
|
||||||
report_as: 4 # lie to the operator
|
|
||||||
algorithm: slow # intentionally waste CPU cycles and time
|
|
||||||
```
|
|
||||||
|
|
||||||
This has turned out to be a bad idea because it has caused massive user experience problems and has been removed. If you are using this setting, you will get a warning in your logs like this:
|
|
||||||
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"time": "2025-11-25T23:10:31.092201549-05:00",
|
|
||||||
"level": "WARN",
|
|
||||||
"source": {
|
|
||||||
"function": "github.com/TecharoHQ/anubis/lib/policy.ParseConfig",
|
|
||||||
"file": "/home/xe/code/TecharoHQ/anubis/lib/policy/policy.go",
|
|
||||||
"line": 201
|
|
||||||
},
|
|
||||||
"msg": "use of deprecated report_as setting detected, please remove this from your policy file when possible",
|
|
||||||
"at": "config-validate",
|
|
||||||
"name": "mild-suspicion"
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
To remove this warning, remove this setting from your policy file.
|
|
||||||
|
|
||||||
### Logging customization
|
|
||||||
|
|
||||||
Anubis now supports the ability to log to multiple backends ("sinks"). This allows you to have Anubis [log to a file](./admin/policies.mdx#file-sink) instead of just logging to standard out. You can also customize the [logging level](./admin/policies.mdx#log-levels) in the policy file:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
logging:
|
|
||||||
level: "warn" # much less verbose logging
|
|
||||||
sink: file # log to a file
|
|
||||||
parameters:
|
|
||||||
file: "./var/anubis.log"
|
|
||||||
maxBackups: 3 # keep at least 3 old copies
|
|
||||||
maxBytes: 67108864 # each file can have up to 64 Mi of logs
|
|
||||||
maxAge: 7 # rotate files out every n days
|
|
||||||
oldFileTimeFormat: 2006-01-02T15-04-05 # RFC 3339-ish
|
|
||||||
compress: true # gzip-compress old log files
|
|
||||||
useLocalTime: false # timezone for rotated files is UTC
|
|
||||||
```
|
|
||||||
|
|
||||||
Additionally, information about [how Anubis uses each logging level](./admin/policies.mdx#log-levels) has been added to the documentation.
|
|
||||||
|
|
||||||
### DNS Features
|
|
||||||
|
|
||||||
- CEL expressions for:
|
|
||||||
- FCrDNS checks
|
|
||||||
- Forward DNS queries
|
|
||||||
- Reverse DNS queries
|
|
||||||
- `arpaReverseIP` to transform IPv4/6 addresses into ARPA reverse IP notation.
|
|
||||||
- `regexSafe` to escape regex special characters (useful for including `remoteAddress` or headers in regular expressions).
|
|
||||||
- DNS cache and other optimizations to minimize unnecessary DNS queries.
|
|
||||||
|
|
||||||
The DNS cache TTL can be changed in the bots config like this:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
dns_ttl:
|
|
||||||
forward: 600
|
|
||||||
reverse: 600
|
|
||||||
```
|
|
||||||
|
|
||||||
The default value for both forward and reverse queries is 300 seconds.
|
|
||||||
|
|
||||||
The `verifyFCrDNS` CEL function has two overloads:
|
|
||||||
|
|
||||||
- `(addr)`
|
|
||||||
Simply verifies that the remote side has PTR records pointing to the target address.
|
|
||||||
- `(addr, ptrPattern)`
|
|
||||||
Verifies that the remote side refers to a specific domain and that this domain points to the target IP.
|
|
||||||
|
|
||||||
## v1.23.1: Lyse Hext - Echo 1
|
|
||||||
|
|
||||||
- Fix `SERVE_ROBOTS_TXT` setting after the double slash fix broke it.
|
|
||||||
|
|
||||||
### Potentially breaking changes
|
|
||||||
|
|
||||||
#### Remove default Tencent Cloud block rule
|
|
||||||
|
|
||||||
v1.23.0 added a default rule to block Tencent Cloud. After an email from their abuse team where they promised to take action to clean up their reputation, I have removed the default block rule. If this network causes you problems, please contact [abuse@tencent.com](mailto:abuse@tencent.com) and supply the following information:
|
|
||||||
|
|
||||||
- Time of abusive requests.
|
|
||||||
- IP address, User-Agent header, or other unique identifiers that can help the abuse team educate the customer about their misbehaving infrastructure.
|
|
||||||
- Does the abusive IP address request robots.txt? If not, be sure to include that information.
|
|
||||||
- A brief description of the impact to your system such as high system load, pages not rendering, or database system crashes. This helps the provider establish the fact that their customer is causing you measurable harm.
|
|
||||||
- Context as to what your service is, what it does, and why they should care.
|
|
||||||
|
|
||||||
Mention that you are using Anubis or BotStopper to protect your services. If they do not respond to you, please [contact me](https://xeiaso.net/contact) as soon as possible.
|
|
||||||
|
|
||||||
#### Docker / OCI registry clients
|
|
||||||
|
|
||||||
Anubis v1.23.0 accidentally blocked Docker / OCI registry clients. In order to explicitly allow them, add an import for `(data)/clients/docker-client.yaml`:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
bots:
|
|
||||||
- import: (data)/meta/default-config.yaml
|
|
||||||
- import: (data)/clients/docker-client.yaml
|
|
||||||
```
|
|
||||||
|
|
||||||
This is technically a regression as these clients used to work in Anubis v1.22.0, however it is allowable to make this opt-in as most websites do not expect to be serving Docker / OCI registry client traffic.
|
|
||||||
|
|
||||||
## v1.23.0: Lyse Hext
|
|
||||||
|
|
||||||
- Add default Tencent Cloud DENY rule.
|
|
||||||
- Added `(data)/meta/default-config.yaml` for importing the entire default configuration at once.
|
|
||||||
- Add `-custom-real-ip-header` flag to get the original request IP from a different header than `x-real-ip`.
|
|
||||||
- Add `contentLength` variable to bot expressions.
|
|
||||||
- Add `COOKIE_SAME_SITE_MODE` to force anubis cookies SameSite value, and downgrade automatically from `None` to `Lax` if cookie is insecure.
|
|
||||||
- Fix lock convoy problem in decaymap ([#1103](https://github.com/TecharoHQ/anubis/issues/1103)).
|
|
||||||
- Fix lock convoy problem in bbolt by implementing the actor pattern ([#1103](https://github.com/TecharoHQ/anubis/issues/1103)).
|
|
||||||
- Remove bbolt actorify implementation due to causing production issues.
|
|
||||||
- Document missing environment variables in installation guide: `SLOG_LEVEL`, `COOKIE_PREFIX`, `FORCED_LANGUAGE`, and `TARGET_DISABLE_KEEPALIVE` ([#1086](https://github.com/TecharoHQ/anubis/pull/1086)).
|
|
||||||
- Add validation warning when persistent storage is used without setting signing keys.
|
|
||||||
- Fixed `robots2policy` to properly group consecutive user agents into `any:` instead of only processing the last one ([#925](https://github.com/TecharoHQ/anubis/pull/925)).
|
|
||||||
- Make the `fast` algorithm prefer purejs when running in an insecure context.
|
|
||||||
- Add the [`s3api` storage backend](./admin/policies.mdx#s3api) to allow Anubis to use S3 API compatible object storage as its storage backend.
|
|
||||||
- Fix a "stutter" in the cookie name prefix so the auth cookie is named `techaro.lol-anubis-auth` instead of `techaro.lol-anubis-auth-auth`.
|
|
||||||
- Make `cmd/containerbuild` support commas for separating elements of the `--docker-tags` argument as well as newlines.
|
|
||||||
- Add the `DIFFICULTY_IN_JWT` option, which allows one to add the `difficulty` field in the JWT claims which indicates the difficulty of the token ([#1063](https://github.com/TecharoHQ/anubis/pull/1063)).
|
|
||||||
- Ported the client-side JS to TypeScript to avoid egregious errors in the future.
|
|
||||||
- Fixes concurrency problems with very old browsers ([#1082](https://github.com/TecharoHQ/anubis/issues/1082)).
|
|
||||||
- Randomly use the Refresh header instead of the meta refresh tag in the metarefresh challenge.
|
|
||||||
- Update OpenRC service to truncate the runtime directory before starting Anubis.
|
|
||||||
- Make the git client profile more strictly match how the git client behaves.
|
|
||||||
- Make the default configuration reward users using normal browsers.
|
|
||||||
- Allow multiple consecutive slashes in a row in application paths ([#754](https://github.com/TecharoHQ/anubis/issues/754)).
|
|
||||||
- Add option to set `targetSNI` to special keyword 'auto' to indicate that it should be automatically set to the request Host name ([424](https://github.com/TecharoHQ/anubis/issues/424)).
|
|
||||||
- The Preact challenge has been removed from the default configuration. It will be deprecated in the future.
|
|
||||||
- An open redirect when in subrequest mode has been fixed.
|
|
||||||
|
|
||||||
### Potentially breaking changes
|
|
||||||
|
|
||||||
#### Multiple checks at once has and-like semantics instead of or-like semantics
|
|
||||||
|
|
||||||
Anubis lets you stack multiple checks at once with blocks like this:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
name: allow-prometheus
|
|
||||||
action: ALLOW
|
|
||||||
user_agent_regex: ^prometheus-probe$
|
|
||||||
remote_addresses:
|
|
||||||
- 192.168.2.0/24
|
|
||||||
```
|
|
||||||
|
|
||||||
Previously, this only returned ALLOW if _any one_ of the conditions matched. This behaviour has changed to only return ALLOW if _all_ of the conditions match. I expect this to have some issues with user configs, however this fix is grave enough that it's worth the risk of breaking configs. If this bites you, please let me know so we can make an escape hatch.
|
|
||||||
|
|
||||||
### Better error messages
|
|
||||||
|
|
||||||
In order to make it easier for legitimate clients to debug issues with their browser configuration and Anubis, Anubis will emit internal error detail in base 64 so that administrators can chase down issues. Future versions of this may also include a variant that encrypts the error detail messages.
|
|
||||||
|
|
||||||
### Bug Fixes
|
|
||||||
|
|
||||||
Sometimes the enhanced temporal assurance in [#1038](https://github.com/TecharoHQ/anubis/pull/1038) and [#1068](https://github.com/TecharoHQ/anubis/pull/1068) could backfire because Chromium and its ilk randomize the amount of time they wait in order to avoid a timing side channel attack. This has been fixed by both increasing the amount of time a client has to wait for the metarefresh and preact challenges as well as making the server side logic more permissive.
|
|
||||||
|
|
||||||
## v1.22.0: Yda Hext
|
|
||||||
|
|
||||||
> Someone has to make an effort at reconciliation if these conflicts are ever going to end.
|
|
||||||
|
|
||||||
In this release, we finally fix the odd number of CPU cores bug, pave the way for lighter weight challenges, make Anubis more adaptable, and more.
|
|
||||||
|
|
||||||
### Big ticket items
|
|
||||||
|
|
||||||
#### Proof of React challenge
|
|
||||||
|
|
||||||
A new ["proof of React"](./admin/configuration/challenges/preact.mdx) has been added. It runs a simple app in React that has several chained hooks. It is much more lightweight than the proof of work check.
|
|
||||||
|
|
||||||
#### Smaller features
|
|
||||||
|
|
||||||
- The [`segments`](./admin/configuration/expressions.mdx#segments) function was added for splitting a path into its slash-separated segments.
|
|
||||||
- Added possibility to disable HTTP keep-alive to support backends not properly handling it.
|
|
||||||
- When issuing a challenge, Anubis stores information about that challenge into the store. That stored information is later used to validate challenge responses. This works around nondeterminism in bot rules. ([#917](https://github.com/TecharoHQ/anubis/issues/917))
|
|
||||||
- One of the biggest sources of lag in Firefox has been eliminated: the use of WebCrypto. Now whenever Anubis detects the client is using Firefox (or Pale Moon), it will swap over to a pure-JS implementation of SHA-256 for speed.
|
|
||||||
- Proof of work solving has had a complete overhaul and rethink based on feedback from browser engine developers, frontend experts, and overall performance profiling.
|
|
||||||
- Optimize the performance of the pure-JS Anubis solver.
|
|
||||||
- Web Workers are stored as dedicated JavaScript files in `static/js/workers/*.mjs`.
|
|
||||||
- Pave the way for non-SHA256 solver methods and eventually one that uses WebAssembly (or WebAssembly code compiled to JS for those that disable WebAssembly).
|
|
||||||
- Legacy JavaScript code has been eliminated.
|
|
||||||
- When parsing [Open Graph tags](./admin/configuration/open-graph.mdx), add any URLs found in the responses to a temporary "allow cache" so that social preview images work.
|
|
||||||
- The hard dependency on WebCrypto has been removed, allowing a proof of work challenge to work over plain (unencrypted) HTTP.
|
|
||||||
- The Anubis version number is put in the footer of every page.
|
|
||||||
- Add a default block rule for Huawei Cloud.
|
|
||||||
- Add a default block rule for Alibaba Cloud.
|
|
||||||
- Added support to use Traefik forwardAuth middleware.
|
|
||||||
- Add X-Request-URI support so that Subrequest Authentication has path support.
|
|
||||||
- Added glob matching for `REDIRECT_DOMAINS`. You can pass `*.bugs.techaro.lol` to allow redirecting to anything ending with `.bugs.techaro.lol`. There is a limit of 4 wildcards.
|
|
||||||
|
|
||||||
### Fixes
|
|
||||||
|
|
||||||
#### Odd numbers of CPU cores are properly supported
|
|
||||||
|
|
||||||
Some phones have an odd number of CPU cores. This caused [interesting issues](https://anubis.techaro.lol/blog/2025/cpu-core-odd). This was fixed by [using `Math.trunc` to convert the number of CPU cores back into an integer](https://github.com/TecharoHQ/anubis/issues/1043).
|
|
||||||
|
|
||||||
#### Smaller fixes
|
|
||||||
|
|
||||||
- A standard library HTTP server log message about HTTP pipelining not working has been filtered out of Anubis' logs. There is no action that can be taken about it.
|
|
||||||
- Added a missing link to the Caddy installation environment in the installation documentation.
|
|
||||||
- Downstream consumers can change the default [log/slog#Logger](https://pkg.go.dev/log/slog#Logger) instance that Anubis uses by setting `opts.Logger` to your slog instance of choice ([#864](https://github.com/TecharoHQ/anubis/issues/864)).
|
|
||||||
- The [Thoth client](https://anubis.techaro.lol/docs/admin/thoth) is now public in the repo instead of being an internal package.
|
|
||||||
- [Custom-AsyncHttpClient](https://github.com/AsyncHttpClient/async-http-client)'s default User-Agent has an increased weight by default ([#852](https://github.com/TecharoHQ/anubis/issues/852)).
|
|
||||||
- Add option for replacing the default explanation text with a custom one ([#747](https://github.com/TecharoHQ/anubis/pull/747))
|
|
||||||
- The contact email in the LibreJS header has been changed.
|
|
||||||
- Firefox for Android support has been fixed by embedding the challenge ID into the pass-challenge route. This also fixes some inconsistent issues with other mobile browsers.
|
|
||||||
- The default `favicon` pattern in `data/common/keep-internet-working.yaml` has been updated to permit requests for png/gif/jpg/svg files as well as ico.
|
|
||||||
- The `--cookie-prefix` flag has been fixed so that it is fully respected.
|
|
||||||
- The default patterns in `data/common/keep-internet-working.yaml` have been updated to appropriately escape the '.' character in the regular expression patterns.
|
|
||||||
- Add optional restrictions for JWT based on the value of a header ([#697](https://github.com/TecharoHQ/anubis/pull/697))
|
|
||||||
- The word "hack" has been removed from the translation strings for Anubis due to incidents involving people misunderstanding that word and sending particularly horrible things to the project lead over email.
|
|
||||||
- Bump AI-robots.txt to version 1.39
|
|
||||||
- Inject adversarial input to break AI coding assistants.
|
|
||||||
- Add better logging when using Subrequest Authentication.
|
|
||||||
|
|
||||||
### Security-relevant changes
|
|
||||||
|
|
||||||
- Add a server-side check for the meta-refresh challenge that makes sure clients have waited for at least 95% of the time that they should.
|
|
||||||
|
|
||||||
#### Fix potential double-spend for challenges
|
|
||||||
|
|
||||||
Anubis operates by issuing a challenge and having the client present a solution for that challenge. Challenges are identified by a unique UUID, which is stored in the database.
|
|
||||||
|
|
||||||
The problem is that a challenge could potentially be used twice by a dedicated attacker making a targeted attack against Anubis. Challenge records did not have a "spent" or "used" field. In total, a dedicated attacker could solve a challenge once and reuse that solution across multiple sessions in order to mint additional tokens.
|
|
||||||
|
|
||||||
This was fixed by adding a "spent" field to challenges in the data store. When a challenge is solved, that "spent" field gets set to `true`. If a future attempt to solve this challenge is observed, it gets rejected.
|
|
||||||
|
|
||||||
With the advent of store based challenge issuance in [#749](https://github.com/TecharoHQ/anubis/pull/749), this means that these challenge IDs are [only good for 30 minutes](https://github.com/TecharoHQ/anubis/blob/e8dfff635015d6c906dddd49cb0eaf591326092a/lib/anubis.go#L130-L135). Websites using the most recent version of Anubis have limited exposure to this problem.
|
|
||||||
|
|
||||||
Websites using older versions of Anubis have a much more increased exposure to this problem and are encouraged to keep this software updated as often and as frequently as possible.
|
|
||||||
|
|
||||||
Thanks to [@taviso](https://github.com/taviso) for reporting this issue.
|
|
||||||
|
|
||||||
### Breaking changes
|
|
||||||
|
|
||||||
- The "slow" frontend solver has been removed in order to reduce maintenance burden. Any existing uses of it will still work, but issue a warning upon startup asking administrators to upgrade to the "fast" frontend solver.
|
|
||||||
- The legacy JSON based policy file example has been removed and all documentation for how to write a policy file in JSON has been deleted. JSON based policy files will still work, but YAML is the superior option for Anubis configuration.
|
|
||||||
|
|
||||||
### New Locales
|
|
||||||
|
|
||||||
- Lithuanian [#972](https://github.com/TecharoHQ/anubis/pull/972)
|
|
||||||
- Vietnamese [#926](https://github.com/TecharoHQ/anubis/pull/926)
|
|
||||||
|
|
||||||
## v1.21.3: Minfilia Warde - Echo 3
|
|
||||||
|
|
||||||
### Added
|
|
||||||
|
|
||||||
#### New locales
|
|
||||||
|
|
||||||
Anubis now supports these new languages:
|
|
||||||
|
|
||||||
- [Swedish](https://github.com/TecharoHQ/anubis/pull/913)
|
|
||||||
|
|
||||||
### Fixes
|
|
||||||
|
|
||||||
#### Fixes a problem with nonstandard URLs and redirects
|
|
||||||
|
|
||||||
Fixes [GHSA-jhjj-2g64-px7c](https://github.com/TecharoHQ/anubis/security/advisories/GHSA-jhjj-2g64-px7c).
|
|
||||||
|
|
||||||
This could allow an attacker to craft an Anubis pass-challenge URL that forces a redirect to nonstandard URLs, such as the `javascript:` scheme which executes arbitrary JavaScript code in a browser context when the user clicks the "Try again" button.
|
|
||||||
|
|
||||||
This has been fixed by disallowing any URLs without the scheme `http` or `https`.
|
|
||||||
|
|
||||||
Additionally, the "Try again" button has been fixed to completely ignore the user-supplied redirect location. It now redirects to the home page (`/`).
|
|
||||||
|
|
||||||
## v1.21.2: Minfilia Warde - Echo 2
|
|
||||||
|
|
||||||
This contained an incomplete fix for [GHSA-jhjj-2g64-px7c](https://github.com/TecharoHQ/anubis/security/advisories/GHSA-jhjj-2g64-px7c). Do not use this version.
|
|
||||||
|
|
||||||
## v1.21.1: Minfilia Warde - Echo 1
|
|
||||||
|
|
||||||
- Expired records are now properly removed from bbolt databases ([#848](https://github.com/TecharoHQ/anubis/pull/848)).
|
|
||||||
- Fix hanging on service restart ([#853](https://github.com/TecharoHQ/anubis/issues/853))
|
|
||||||
|
|
||||||
### Added
|
|
||||||
|
|
||||||
Anubis now supports the [`missingHeader`](./admin/configuration/expressions.mdx#missingHeader) function to assert the absence of headers in requests.
|
|
||||||
|
|
||||||
#### New locales
|
|
||||||
|
|
||||||
Anubis now supports these new languages:
|
|
||||||
|
|
||||||
- [Czech](https://github.com/TecharoHQ/anubis/pull/849)
|
|
||||||
- [Finnish](https://github.com/TecharoHQ/anubis/pull/863)
|
|
||||||
- [Norwegian Bokmål](https://github.com/TecharoHQ/anubis/pull/855)
|
|
||||||
- [Norwegian Nynorsk](https://github.com/TecharoHQ/anubis/pull/855)
|
|
||||||
- [Russian](https://github.com/TecharoHQ/anubis/pull/882)
|
|
||||||
|
|
||||||
### Fixes
|
|
||||||
|
|
||||||
#### Fix ["error: can't get challenge"](https://github.com/TecharoHQ/anubis/issues/869) when details about a challenge can't be found in the server side state
|
|
||||||
|
|
||||||
v1.21.0 changed the core challenge flow to maintain information about challenges on the server side instead of only doing them via stateless idempotent generation functions and relying on details to not change. There was a subtle bug introduced in this change: if a client has an unknown challenge ID set in its test cookie, Anubis will clear that cookie and then throw an HTTP 500 error.
|
|
||||||
|
|
||||||
This has been fixed by making Anubis throw a new challenge page instead.
|
|
||||||
|
|
||||||
#### Fix event loop thrashing when solving a proof of work challenge
|
|
||||||
|
|
||||||
Previously the "fast" proof of work solver had a fragment of JavaScript that attempted to only post an update about proof of work progress to the main browser window every 1024 iterations. This fragment of JavaScript was subtly incorrect in a way that passed review but actually made the workers send an update back to the main thread every iteration. This caused a pileup of unhandled async calls (similar to a socket accept() backlog pileup in Unix) that caused stack space exhaustion.
|
|
||||||
|
|
||||||
This has been fixed in the following ways:
|
|
||||||
|
|
||||||
1. The complicated boolean logic has been totally removed in favour of a worker-local iteration counter.
|
|
||||||
2. The progress bar is updated by worker `0` instead of all workers.
|
|
||||||
|
|
||||||
Hopefully this should limit the event loop thrashing and let ia32 browsers (as well as any environment with a smaller stack size than amd64 and aarch64 seem to have) function normally when processing Anubis proof of work challenges.
|
|
||||||
|
|
||||||
#### Fix potential memory leak when discovering a solution
|
|
||||||
|
|
||||||
In some cases, the parallel solution finder in Anubis could cause all of the worker promises to leak due to the fact the promises were being improperly terminated. This was fixed by having Anubis debounce worker termination instead of allowing it to potentially recurse infinitely.
|
|
||||||
|
|
||||||
## v1.21.0: Minfilia Warde
|
|
||||||
|
|
||||||
> Please, be at ease. You are among friends here.
|
|
||||||
|
|
||||||
In this release, Anubis becomes internationalized, gains the ability to use system load as input to issuing challenges, finally fixes the "invalid response" after "success" bug, and more! Please read these notes before upgrading as the changes are big enough that administrators should take action to ensure that the upgrade goes smoothly.
|
|
||||||
|
|
||||||
### Big ticket changes
|
|
||||||
|
|
||||||
The biggest change is that the ["invalid response" after "success" bug](https://github.com/TecharoHQ/anubis/issues/564) is now finally fixed for good by totally rewriting how Anubis' challenge issuance flow works. Instead of generating challenge strings from request metadata (under the assumption that the values being compared against are stable), Anubis now generates random data for each challenge. This data is stored in the active [storage backend](./admin/policies.mdx#storage-backends) for up to 30 minutes. This also fixes [#746](https://github.com/TecharoHQ/anubis/issues/746) and other similar instances of this issue.
|
|
||||||
|
|
||||||
In order to reduce confusion, the "Success" interstitial that shows up when you pass a proof of work challenge has been removed.
|
|
||||||
|
|
||||||
#### Storage
|
|
||||||
|
|
||||||
Anubis now is able to store things persistently [in memory](./admin/policies.mdx#memory), [on the disk](./admin/policies.mdx#bbolt), or [in Valkey](./admin/policies.mdx#valkey) (this includes other compatible software). By default Anubis uses the in-memory backend. If you have an environment with mutable storage (even if it is temporary), be sure to configure the [`bbolt`](./admin/policies.mdx#bbolt) storage backend.
|
|
||||||
|
|
||||||
#### Localization
|
|
||||||
|
|
||||||
Anubis now supports localized responses. Locales can be added in [lib/localization/locales/](https://github.com/TecharoHQ/anubis/tree/main/lib/localization/locales). This release includes support for the following languages:
|
|
||||||
|
|
||||||
- [Brazilian Portuguese](https://github.com/TecharoHQ/anubis/pull/726)
|
|
||||||
- [Chinese (Simplified)](https://github.com/TecharoHQ/anubis/pull/774)
|
|
||||||
- [Chinese (Traditional)](https://github.com/TecharoHQ/anubis/pull/759)
|
|
||||||
- English
|
|
||||||
- [Estonian](https://github.com/TecharoHQ/anubis/pull/783)
|
|
||||||
- [Filipino](https://github.com/TecharoHQ/anubis/pull/775)
|
|
||||||
- [French](https://github.com/TecharoHQ/anubis/pull/716)
|
|
||||||
- [German](https://github.com/TecharoHQ/anubis/pull/741)
|
|
||||||
- [Icelandic](https://github.com/TecharoHQ/anubis/pull/780)
|
|
||||||
- [Italian](https://github.com/TecharoHQ/anubis/pull/778)
|
|
||||||
- [Japanese](https://github.com/TecharoHQ/anubis/pull/772)
|
|
||||||
- [Spanish](https://github.com/TecharoHQ/anubis/pull/716)
|
|
||||||
- [Turkish](https://github.com/TecharoHQ/anubis/pull/751)
|
|
||||||
|
|
||||||
If facts or local regulations demand, you can set Anubis default language with the `FORCED_LANGUAGE` environment variable or the `--forced-language` command line argument:
|
|
||||||
|
|
||||||
```sh
|
|
||||||
FORCED_LANGUAGE=de
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Load average
|
|
||||||
|
|
||||||
Anubis can dynamically take action [based on the system load average](./admin/configuration/expressions.mdx#using-the-system-load-average), allowing you to write rules like this:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
## System load based checks.
|
|
||||||
# If the system is under high load for the last minute, add weight.
|
|
||||||
- name: high-load-average
|
|
||||||
action: WEIGH
|
|
||||||
expression: load_1m >= 10.0 # make sure to end the load comparison in a .0
|
|
||||||
weight:
|
|
||||||
adjust: 20
|
|
||||||
|
|
||||||
# If it is not for the last 15 minutes, remove weight.
|
|
||||||
- name: low-load-average
|
|
||||||
action: WEIGH
|
|
||||||
expression: load_15m <= 4.0 # make sure to end the load comparison in a .0
|
|
||||||
weight:
|
|
||||||
adjust: -10
|
|
||||||
```
|
|
||||||
|
|
||||||
Something to keep in mind about system load average is that it is not aware of the number of cores the system has. If you have a 16 core system that has 16 processes running but none of them is hogging the CPU, then you will get a load average below 16. If you are in doubt, make your "high load" metric at least two times the number of CPU cores and your "low load" metric at least half of the number of CPU cores. For example:
|
|
||||||
|
|
||||||
| Kind | Core count | Load threshold |
|
|
||||||
| --------: | :--------- | :------------- |
|
|
||||||
| high load | 4 | `8.0` |
|
|
||||||
| low load | 4 | `2.0` |
|
|
||||||
| high load | 16 | `32.0` |
|
|
||||||
| low load  | 16         | `8.0`          |
|
|
||||||
|
|
||||||
Also keep in mind that this does not account for other kinds of latency like I/O latency. A system can have its web applications unresponsive due to high latency from a MySQL server but still have that web application server report a load near or at zero.
|
|
||||||
|
|
||||||
### Other features and fixes
|
|
||||||
|
|
||||||
There are a bunch of other assorted features and fixes too:
|
|
||||||
|
|
||||||
- Add `COOKIE_SECURE` option to set the cookie [Secure flag](https://developer.mozilla.org/en-US/docs/Web/HTTP/Guides/Cookies#block_access_to_your_cookies)
|
|
||||||
- Sets cookie defaults to use [SameSite: None](https://web.dev/articles/samesite-cookies-explained)
|
|
||||||
- Determine the `BIND_NETWORK`/`--bind-network` value from the bind address ([#677](https://github.com/TecharoHQ/anubis/issues/677)).
|
|
||||||
- Implement a [development container](https://containers.dev/) manifest to make contributions easier.
|
|
||||||
- Fix dynamic cookie domains functionality ([#731](https://github.com/TecharoHQ/anubis/pull/731))
|
|
||||||
- Add option for custom cookie prefix ([#732](https://github.com/TecharoHQ/anubis/pull/732))
|
|
||||||
- Make the [Open Graph](./admin/configuration/open-graph.mdx) subsystem and DNSBL subsystem use [storage backends](./admin/policies.mdx#storage-backends) instead of storing everything in memory by default.
|
|
||||||
- Allow [Common Crawl](https://commoncrawl.org/) by default so scrapers have less incentive to scrape
|
|
||||||
- The [bbolt storage backend](./admin/policies.mdx#bbolt) now runs its cleanup every hour instead of every five minutes.
|
|
||||||
- Don't block Anubis starting up if [Thoth](./admin/thoth.mdx) health checks fail.
|
|
||||||
- A race condition involving [opening two challenge pages at once in different tabs](https://github.com/TecharoHQ/anubis/issues/832) causing one of them to fail has been fixed.
|
|
||||||
- The "Try again" button on the error page has been fixed. Previously it meant "try the solution again" instead of "try the challenge again".
|
|
||||||
- In certain cases, a user could be stuck with a test cookie that is invalid, locking them out of the service for up to half an hour. This has been fixed with better validation of this case and clearing the cookie.
|
|
||||||
- Start exposing JA4H fingerprints for later use in CEL expressions.
|
|
||||||
- Add `/healthz` route for use in platform-based health checks.
|
|
||||||
|
|
||||||
### Potentially breaking changes
|
|
||||||
|
|
||||||
We try to avoid introducing breaking changes as much as possible, but these are the changes that may be relevant for you as an administrator:
|
|
||||||
|
|
||||||
#### Challenge format change
|
|
||||||
|
|
||||||
Previously Anubis did no accounting for challenges that it issued. This means that if Anubis restarted while a client was in the middle of solving a challenge, the client would be able to proceed once Anubis came back online.
|
|
||||||
|
|
||||||
During the upgrade to v1.21.0 and when v1.21.0 (or later) restarts with the [in-memory storage backend](./admin/policies.mdx#memory), you may see a higher rate of failed challenges than normal. If this persists beyond a few minutes, [open an issue](https://github.com/TecharoHQ/anubis/issues/new).
|
|
||||||
|
|
||||||
If you are using the in-memory storage backend, please consider using [a different storage backend](./admin/policies.mdx#storage-backends).
|
|
||||||
|
|
||||||
#### Systemd service changes
|
|
||||||
|
|
||||||
The following potentially breaking change applies to native installs with systemd only:
|
|
||||||
|
|
||||||
Each instance of systemd service template now has a unique `RuntimeDirectory`, as opposed to each instance of the service sharing a `RuntimeDirectory`. This change was made to avoid [the `RuntimeDirectory` getting nuked any time one of the Anubis instances restarts](https://github.com/TecharoHQ/anubis/issues/748).
|
|
||||||
|
|
||||||
If you configured Anubis' unix sockets to listen on `/run/anubis/foo.sock` for instance `anubis@foo`, you will need to configure Anubis to listen on `/run/anubis/foo/foo.sock` and additionally configure your HTTP load balancer as appropriate.
|
|
||||||
|
|
||||||
If you need the legacy behaviour, install this [systemd unit dropin](https://www.flatcar.org/docs/latest/setup/systemd/drop-in-units/):
|
|
||||||
|
|
||||||
```systemd
|
|
||||||
# /etc/systemd/system/anubis@.service.d/50-runtimedir.conf
|
|
||||||
[Service]
|
|
||||||
RuntimeDirectory=anubis
|
|
||||||
```
|
|
||||||
|
|
||||||
Just keep in mind that this will cause problems when Anubis restarts.
|
|
||||||
|
|
||||||
## v1.20.0: Thancred Waters
|
|
||||||
|
|
||||||
The big ticket items are as follows:
|
|
||||||
|
|
||||||
- Implement a no-JS challenge method: [`metarefresh`](./admin/configuration/challenges/metarefresh.mdx) ([#95](https://github.com/TecharoHQ/anubis/issues/95))
|
|
||||||
- Implement request "weight", allowing administrators to customize the behaviour of Anubis based on specific criteria
|
|
||||||
- Implement GeoIP and ASN based checks via [Thoth](https://anubis.techaro.lol/docs/admin/thoth) ([#206](https://github.com/TecharoHQ/anubis/issues/206))
|
|
||||||
- Add [custom weight thresholds](./admin/configuration/thresholds.mdx) via CEL ([#688](https://github.com/TecharoHQ/anubis/pull/688))
|
|
||||||
- Move Open Graph configuration [to the policy file](./admin/configuration/open-graph.mdx)
|
|
||||||
- Enable support for Open Graph metadata to be returned by default instead of doing lookups against the target
|
|
||||||
- Add `robots2policy` CLI utility to convert robots.txt files to Anubis challenge policies using CEL expressions ([#409](https://github.com/TecharoHQ/anubis/issues/409))
|
|
||||||
- Refactor challenge presentation logic to use a challenge registry
|
|
||||||
- Allow challenge implementations to register HTTP routes
|
|
||||||
- [Imprint/Impressum support](./admin/configuration/impressum.mdx) ([#362](https://github.com/TecharoHQ/anubis/issues/362))
|
|
||||||
- Fix "invalid response" after "Success!" in Chromium ([#564](https://github.com/TecharoHQ/anubis/issues/564))
|
|
||||||
|
|
||||||
A lot of performance improvements have been made:
|
|
||||||
|
|
||||||
- Replace internal SHA256 hashing with xxhash for 4-6x performance improvement in policy evaluation and cache operations
|
|
||||||
- Optimized the OGTags subsystem with reduced allocations and runtime per request by up to 66%
|
|
||||||
- Replace cidranger with bart for IP range checking, improving IP matching performance by 3-20x with zero heap
|
|
||||||
allocations
|
|
||||||
|
|
||||||
And some cleanups/refactors were added:
|
|
||||||
|
|
||||||
- Fix OpenGraph passthrough ([#717](https://github.com/TecharoHQ/anubis/issues/717))
|
|
||||||
- Remove the unused `/test-error` endpoint and update the testing endpoint `/make-challenge` to only be enabled in
|
|
||||||
development
|
|
||||||
- Add `--xff-strip-private` flag/envvar to toggle skipping X-Forwarded-For private addresses or not
|
|
||||||
- Bump AI-robots.txt to version 1.37
|
|
||||||
- Make progress bar styling more compatible (UXP, etc)
|
|
||||||
- Add `--strip-base-prefix` flag/envvar to strip the base prefix from request paths when forwarding to target servers
|
|
||||||
- Fix an off-by-one in the default threshold config
|
|
||||||
- Add functionality for HS512 JWT algorithm
|
|
||||||
- Add support for dynamic cookie domains with the `--cookie-dynamic-domain`/`COOKIE_DYNAMIC_DOMAIN` flag/envvar
|
|
||||||
|
|
||||||
Request weight is one of the biggest ticket features in Anubis. This enables Anubis to be much closer to a Web Application Firewall and when combined with custom thresholds allows administrators to have Anubis take advanced reactions. For more information about request weight, see [the request weight section](./admin/policies.mdx#request-weight) of the policy file documentation.
|
|
||||||
|
|
||||||
TL;DR when you have one or more `WEIGH` rules like this:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
bots:
|
|
||||||
- name: gitea-session-token
|
|
||||||
action: WEIGH
|
|
||||||
expression:
|
|
||||||
all:
|
|
||||||
- '"Cookie" in headers'
|
|
||||||
- headers["Cookie"].contains("i_love_gitea=")
|
|
||||||
# Remove 5 weight points
|
|
||||||
weight:
|
|
||||||
adjust: -5
|
|
||||||
```
|
|
||||||
|
|
||||||
You can configure custom thresholds like this:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
thresholds:
|
|
||||||
- name: minimal-suspicion # This client is likely fine, its soul is lighter than a feather
|
|
||||||
expression: weight < 0 # a feather weighs zero units
|
|
||||||
action: ALLOW # Allow the traffic through
|
|
||||||
|
|
||||||
# For clients that had some weight reduced through custom rules, give them a
|
|
||||||
# lightweight challenge.
|
|
||||||
- name: mild-suspicion
|
|
||||||
expression:
|
|
||||||
all:
|
|
||||||
- weight >= 0
|
|
||||||
- weight < 10
|
|
||||||
action: CHALLENGE
|
|
||||||
challenge:
|
|
||||||
# https://anubis.techaro.lol/docs/admin/configuration/challenges/metarefresh
|
|
||||||
algorithm: metarefresh
|
|
||||||
difficulty: 1
|
|
||||||
report_as: 1
|
|
||||||
|
|
||||||
# For clients that are browser-like but have either gained points from custom
|
|
||||||
# rules or report as a standard browser.
|
|
||||||
- name: moderate-suspicion
|
|
||||||
expression:
|
|
||||||
all:
|
|
||||||
- weight >= 10
|
|
||||||
- weight < 20
|
|
||||||
action: CHALLENGE
|
|
||||||
challenge:
|
|
||||||
# https://anubis.techaro.lol/docs/admin/configuration/challenges/proof-of-work
|
|
||||||
algorithm: fast
|
|
||||||
difficulty: 2 # two leading zeros, very fast for most clients
|
|
||||||
report_as: 2
|
|
||||||
|
|
||||||
# For clients that are browser like and have gained many points from custom
|
|
||||||
# rules
|
|
||||||
- name: extreme-suspicion
|
|
||||||
expression: weight >= 20
|
|
||||||
action: CHALLENGE
|
|
||||||
challenge:
|
|
||||||
# https://anubis.techaro.lol/docs/admin/configuration/challenges/proof-of-work
|
|
||||||
algorithm: fast
|
|
||||||
difficulty: 4
|
|
||||||
report_as: 4
|
|
||||||
```
|
|
||||||
|
|
||||||
These thresholds apply when no other `ALLOW`, `DENY`, or `CHALLENGE` rule matches the request. `WEIGH` rules add and remove request weight as needed:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
bots:
|
|
||||||
- name: gitea-session-token
|
|
||||||
action: WEIGH
|
|
||||||
expression:
|
|
||||||
all:
|
|
||||||
- '"Cookie" in headers'
|
|
||||||
- headers["Cookie"].contains("i_love_gitea=")
|
|
||||||
# Remove 5 weight points
|
|
||||||
weight:
|
|
||||||
adjust: -5
|
|
||||||
|
|
||||||
- name: bot-like-user-agent
|
|
||||||
action: WEIGH
|
|
||||||
expression: '"Bot" in userAgent'
|
|
||||||
# Add 5 weight points
|
|
||||||
weight:
|
|
||||||
adjust: 5
|
|
||||||
```
|
|
||||||
|
|
||||||
Of note: the default "generic browser" rule assigns 10 weight points:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
# Generic catchall rule
|
|
||||||
- name: generic-browser
|
|
||||||
user_agent_regex: >-
|
|
||||||
Mozilla|Opera
|
|
||||||
action: WEIGH
|
|
||||||
weight:
|
|
||||||
adjust: 10
|
|
||||||
```
|
|
||||||
|
|
||||||
Adjust this as you see fit.
|
|
||||||
|
|
||||||
## v1.19.1: Jenomis cen Lexentale - Echo 1
|
|
||||||
|
|
||||||
- Return `data/bots/ai-robots-txt.yaml` to avoid breaking configs [#599](https://github.com/TecharoHQ/anubis/issues/599)
|
|
||||||
|
|
||||||
## v1.19.0: Jenomis cen Lexentale
|
|
||||||
|
|
||||||
Mostly a bunch of small features, no big ticket things this time.
|
|
||||||
|
|
||||||
- Record if challenges were issued via the API or via embedded JSON in the challenge page HTML ([#531](https://github.com/TecharoHQ/anubis/issues/531))
|
|
||||||
- Ensure that clients that are shown a challenge support storing cookies
|
|
||||||
- Imprint the version number into challenge pages
|
|
||||||
- Encode challenge pages with gzip level 1
|
|
||||||
- Add PowerPC 64 bit little-endian builds (`GOARCH=ppc64le`)
|
|
||||||
- Add `check-spelling` for spell checking
|
|
||||||
- Add `--target-insecure-skip-verify` flag/envvar to allow Anubis to hit a self-signed HTTPS backend
|
|
||||||
- Minor adjustments to FreeBSD rc.d script to allow for more flexible configuration.
|
|
||||||
- Added Podman and Docker support for running Playwright tests
|
|
||||||
- Add a default rule to throw challenges when a request with the `X-Firefox-Ai` header is set
|
|
||||||
- Updated the nonce value in the challenge JWT cookie to be a string instead of a number
|
|
||||||
- Rename cookies in response to user feedback
|
|
||||||
- Ensure cookie renaming is consistent across configuration options
|
|
||||||
- Add Bookstack app in data
|
|
||||||
- Truncate everything but the first five characters of Accept-Language headers when making challenges
|
|
||||||
- Ensure client JavaScript is served with Content-Type text/javascript.
|
|
||||||
- Add `--target-host` flag/envvar to allow changing the value of the Host header in requests forwarded to the target service
|
|
||||||
- Bump AI-robots.txt to version 1.31
|
|
||||||
- Add `RuntimeDirectory` to systemd unit settings so native packages can listen over unix sockets
|
|
||||||
- Added SearXNG instance tracker whitelist policy
|
|
||||||
- Added Qualys SSL Labs whitelist policy
|
|
||||||
- Fixed cookie deletion logic ([#520](https://github.com/TecharoHQ/anubis/issues/520), [#522](https://github.com/TecharoHQ/anubis/pull/522))
|
|
||||||
- Add `--target-sni` flag/envvar to allow changing the value of the TLS handshake hostname in requests forwarded to the target service
|
|
||||||
- Fixed CEL expression matching validator to now properly error out when it receives empty expressions
|
|
||||||
- Added OpenRC init.d script
|
|
||||||
- Added `--version` flag
|
|
||||||
- Added `anubis_proxied_requests_total` metric to count proxied requests
|
|
||||||
- Add `Applebot` as "good" web crawler
|
|
||||||
- Reorganize AI/LLM crawler blocking into three separate stances, maintaining existing status quo as default
|
|
||||||
- Split out AI/LLM user agent blocking policies, adding documentation for each
|
|
||||||
|
|
||||||
## v1.18.0: Varis zos Galvus
|
|
||||||
|
|
||||||
The big ticket feature in this release is [CEL expression matching support](https://anubis.techaro.lol/docs/admin/configuration/expressions). This allows you to tailor your approach for the individual services you are protecting.
|
|
||||||
|
|
||||||
These can be as simple as:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
- name: allow-api-requests
|
|
||||||
action: ALLOW
|
|
||||||
expression:
|
|
||||||
all:
|
|
||||||
- '"Accept" in headers'
|
|
||||||
- 'headers["Accept"] == "application/json"'
|
|
||||||
- 'path.startsWith("/api/")'
|
|
||||||
```
|
|
||||||
|
|
||||||
Or as complicated as:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
- name: allow-git-clients
|
|
||||||
action: ALLOW
|
|
||||||
expression:
|
|
||||||
all:
|
|
||||||
- >-
|
|
||||||
(
|
|
||||||
userAgent.startsWith("git/") ||
|
|
||||||
userAgent.contains("libgit") ||
|
|
||||||
userAgent.startsWith("go-git") ||
|
|
||||||
userAgent.startsWith("JGit/") ||
|
|
||||||
userAgent.startsWith("JGit-")
|
|
||||||
)
|
|
||||||
- '"Git-Protocol" in headers'
|
|
||||||
- headers["Git-Protocol"] == "version=2"
|
|
||||||
```
|
|
||||||
|
|
||||||
The docs have more information, but here's a tl;dr of the variables you have access to in expressions:
|
|
||||||
|
|
||||||
| Name | Type | Explanation | Example |
|
|
||||||
| :-------------- | :-------------------- | :---------------------------------------------------------------------------------------------------------------------------------------- | :----------------------------------------------------------- |
|
|
||||||
| `headers` | `map[string, string]` | The [headers](https://developer.mozilla.org/en-US/docs/Web/HTTP/Reference/Headers) of the request being processed. | `{"User-Agent": "Mozilla/5.0 Gecko/20100101 Firefox/137.0"}` |
|
|
||||||
| `host` | `string` | The [HTTP hostname](https://web.dev/articles/url-parts#host) the request is targeted to. | `anubis.techaro.lol` |
|
|
||||||
| `method` | `string` | The [HTTP method](https://developer.mozilla.org/en-US/docs/Web/HTTP/Reference/Methods) in the request being processed. | `GET`, `POST`, `DELETE`, etc. |
|
|
||||||
| `path` | `string` | The [path](https://web.dev/articles/url-parts#pathname) of the request being processed. | `/`, `/api/memes/create` |
|
|
||||||
| `query` | `map[string, string]` | The [query parameters](https://web.dev/articles/url-parts#query) of the request being processed. | `?foo=bar` -> `{"foo": "bar"}` |
|
|
||||||
| `remoteAddress` | `string` | The IP address of the client. | `1.1.1.1` |
|
|
||||||
| `userAgent` | `string` | The [`User-Agent`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Reference/Headers/User-Agent) string in the request being processed. | `Mozilla/5.0 Gecko/20100101 Firefox/137.0` |
|
|
||||||
|
|
||||||
This will be made more elaborate in the future. Give me time. This is a [simple, lovable, and complete](https://longform.asmartbear.com/slc/) implementation of this feature so that administrators can get hacking ASAP.
|
|
||||||
|
|
||||||
Other changes:
|
|
||||||
|
|
||||||
- Use CSS variables to deduplicate styles
|
|
||||||
- Fixed native packages not containing the stdlib and botPolicies.yaml
|
|
||||||
- Change import syntax to allow multi-level imports
|
|
||||||
- Changed the startup logging to use JSON formatting as all the other logs do
|
|
||||||
- Added the ability to do [expression matching with CEL](./admin/configuration/expressions.mdx)
|
|
||||||
- Add a warning for clients that don't store cookies
|
|
||||||
- Disable Open Graph passthrough by default ([#435](https://github.com/TecharoHQ/anubis/issues/435))
|
|
||||||
- Clarify the license of the mascot images ([#442](https://github.com/TecharoHQ/anubis/issues/442))
|
|
||||||
- Started Suppressing 'Context canceled' errors from http in the logs ([#446](https://github.com/TecharoHQ/anubis/issues/446))
|
|
||||||
|
|
||||||
## v1.17.1: Asahi sas Brutus: Echo 1
|
|
||||||
|
|
||||||
- Added customization of authorization cookie expiration time with `--cookie-expiration-time` flag or envvar
|
|
||||||
- Updated the `OG_PASSTHROUGH` to be true by default, thereby allowing Open Graph tags to be passed through by default
|
|
||||||
- Added the ability to [customize Anubis' HTTP status codes](./admin/configuration/custom-status-codes.mdx) ([#355](https://github.com/TecharoHQ/anubis/issues/355))
|
|
||||||
|
|
||||||
## v1.17.0: Asahi sas Brutus
|
|
||||||
|
|
||||||
- Ensure regexes can't end in newlines ([#372](https://github.com/TecharoHQ/anubis/issues/372))
|
|
||||||
- Add documentation for default allow behavior (implicit rule)
|
|
||||||
- Enable [importing configuration snippets](./admin/configuration/import.mdx) ([#321](https://github.com/TecharoHQ/anubis/pull/321))
|
|
||||||
- Refactor check logic to be more generic and work on a Checker type
|
|
||||||
- Add more AI user agents based on the [ai.robots.txt](https://github.com/ai-robots-txt/ai.robots.txt) project
|
|
||||||
- Embedded challenge data in initial HTML response to improve performance
|
|
||||||
- Added support to use Nginx' `auth_request` directive with Anubis
|
|
||||||
- Added support for restricting the allowed redirect domains
|
|
||||||
- Whitelisted [DuckDuckBot](https://duckduckgo.com/duckduckgo-help-pages/results/duckduckbot/) in botPolicies
|
|
||||||
- Improvements to build scripts to make them less independent of the build host
|
|
||||||
- Improved the Open Graph error logging
|
|
||||||
- Added `Opera` to the `generic-browser` bot policy rule
|
|
||||||
- Added FreeBSD rc.d script so can be run as a FreeBSD daemon
|
|
||||||
- Allow requests from the Internet Archive
|
|
||||||
- Added example nginx configuration to documentation
|
|
||||||
- Added example Apache configuration to the documentation [#277](https://github.com/TecharoHQ/anubis/issues/277)
|
|
||||||
- Move per-environment configuration details into their own pages
|
|
||||||
- Added support for running anubis behind a prefix (e.g. `/myapp`)
|
|
||||||
- Added headers support to bot policy rules
|
|
||||||
- Moved configuration file from JSON to YAML by default
|
|
||||||
- Added documentation on how to use Anubis with Traefik in Docker
|
|
||||||
- Improved error handling in some edge cases
|
|
||||||
- Disable `generic-bot-catchall` rule because of its high false positive rate in real-world scenarios
|
|
||||||
- Moved all CSS inline to the Xess package, changed colors to be CSS variables
|
|
||||||
- Set or append to `X-Forwarded-For` header unless the remote connects over a loopback address [#328](https://github.com/TecharoHQ/anubis/issues/328)
|
|
||||||
- Fixed mojeekbot user agent regex
|
|
||||||
- Reduce Anubis' paranoia with user cookies ([#365](https://github.com/TecharoHQ/anubis/pull/365))
|
|
||||||
- Added support for Open Graph passthrough while using unix sockets
|
|
||||||
- The Open Graph subsystem now passes the HTTP `HOST` header through to the origin
|
|
||||||
- Updated the `OG_PASSTHROUGH` to be true by default, thereby allowing Open Graph tags to be passed through by default
|
|
||||||
|
|
||||||
## v1.16.0
|
|
||||||
|
|
||||||
Fordola rem Lupis
|
|
||||||
|
|
||||||
> I want to make them pay! All of them! Everyone who ever mocked or looked down on me -- I want the power to make them pay!
|
|
||||||
|
|
||||||
The following features are the "big ticket" items:
|
|
||||||
|
|
||||||
- Added support for native Debian, Red Hat, and tarball packaging strategies including installation and use directions
|
|
||||||
- A prebaked tarball has been added, allowing distros to build Anubis like they could in v1.15.x
|
|
||||||
- The placeholder Anubis mascot has been replaced with a design by [CELPHASE](https://bsky.app/profile/celphase.bsky.social)
|
|
||||||
- Verification page now shows hash rate and a progress bar for completion probability
|
|
||||||
- Added support for [Open Graph tags](https://ogp.me/) when rendering the challenge page. This allows for social previews to be generated when sharing the challenge page on social media platforms ([#195](https://github.com/TecharoHQ/anubis/pull/195))
|
|
||||||
- Added support for passing the ed25519 signing key in a file with `-ed25519-private-key-hex-file` or `ED25519_PRIVATE_KEY_HEX_FILE`
|
|
||||||
|
|
||||||
The other small fixes have been made:
|
|
||||||
|
|
||||||
- Added a periodic cleanup routine for the decaymap that removes expired entries, ensuring stale data is properly pruned
|
|
||||||
- Added a no-store Cache-Control header to the challenge page
|
|
||||||
- Hide the directory listings for Anubis' internal static content
|
|
||||||
- Changed `--debug-x-real-ip-default` to `--use-remote-address`, getting the IP address from the request's socket address instead
|
|
||||||
- DroneBL lookups have been disabled by default
|
|
||||||
- Static asset builds are now done on demand instead of the results being committed to source control
|
|
||||||
- The Dockerfile has been removed as it is no longer in use
|
|
||||||
- Developer documentation has been added to the docs site
|
|
||||||
- Show more errors when some predictable challenge page errors happen ([#150](https://github.com/TecharoHQ/anubis/issues/150))
|
|
||||||
- Added the `--debug-benchmark-js` flag for testing proof-of-work performance during development
|
|
||||||
- Use `TrimSuffix` instead of `TrimRight` on containerbuild
|
|
||||||
- Fix the startup logs to correctly show the address and port the server is listening on
|
|
||||||
- Add [LibreJS](https://www.gnu.org/software/librejs/) banner to Anubis JavaScript to allow LibreJS users to run the challenge
|
|
||||||
- Added a continue button, plus automatic continuation after 30 seconds, when you click "Why am I seeing this?"
|
|
||||||
- Fixed a typo in the challenge page title
|
|
||||||
- Disabled running integration tests on Windows hosts due to its reliance on POSIX features (see [#133](https://github.com/TecharoHQ/anubis/pull/133#issuecomment-2764732309))
|
|
||||||
- Fixed minor typos
|
|
||||||
- Added a Makefile to enable comfortable workflows for downstream packagers
|
|
||||||
- Added `zizmor` for GitHub Actions static analysis
|
|
||||||
- Fixed most `zizmor` findings
|
|
||||||
- Enabled Dependabot
|
|
||||||
- Added an air config for autoreload support in development ([#195](https://github.com/TecharoHQ/anubis/pull/195))
|
|
||||||
- Added an `--extract-resources` flag to extract static resources to a local folder
|
|
||||||
- Add noindex flag to all Anubis pages ([#227](https://github.com/TecharoHQ/anubis/issues/227))
|
|
||||||
- Added `WEBMASTER_EMAIL` variable, if it is present then display that email address on error pages ([#235](https://github.com/TecharoHQ/anubis/pull/235), [#115](https://github.com/TecharoHQ/anubis/issues/115))
|
|
||||||
- Hash pinned all GitHub Actions
|
|
||||||
|
|
||||||
## v1.15.1
|
|
||||||
|
|
||||||
Zenos yae Galvus: Echo 1
|
|
||||||
|
|
||||||
Fixes a recurrence of [CVE-2025-24369](https://github.com/Xe/x/security/advisories/GHSA-56w8-8ppj-2p4f)
|
|
||||||
due to an incorrect logic change in a refactor. This allows an attacker to mint a valid
|
|
||||||
access token by passing any SHA-256 hash instead of one that matches the proof-of-work
|
|
||||||
test.
|
|
||||||
|
|
||||||
This case has been added as a regression test. It was not when CVE-2025-24369 was released
|
|
||||||
due to the project not having the maturity required to enable this kind of regression testing.
|
|
||||||
|
|
||||||
## v1.15.0
|
|
||||||
|
|
||||||
Zenos yae Galvus
|
|
||||||
|
|
||||||
> Yes...the coming days promise to be most interesting. Most interesting.
|
|
||||||
|
|
||||||
Headline changes:
|
|
||||||
|
|
||||||
- ed25519 signing keys for Anubis can be stored in the flag `--ed25519-private-key-hex` or envvar `ED25519_PRIVATE_KEY_HEX`; if one is not provided when Anubis starts, a new one is generated and logged
|
|
||||||
- Add the ability to set the cookie domain with the envvar `COOKIE_DOMAIN=techaro.lol` for all domains under `techaro.lol`
|
|
||||||
- Add the ability to set the cookie partitioned flag with the envvar `COOKIE_PARTITIONED=true`
|
|
||||||
|
|
||||||
Many other small changes were made, including but not limited to:
|
|
||||||
|
|
||||||
- Fixed and clarified installation instructions
|
|
||||||
- Introduced integration tests using Playwright
|
|
||||||
- Refactor & Split up Anubis into cmd and lib.go
|
|
||||||
- Fixed bot check to only apply if address range matches
|
|
||||||
- Fix default difficulty setting that was broken in a refactor
|
|
||||||
- Linting fixes
|
|
||||||
- Make dark mode diff lines readable in the documentation
|
|
||||||
- Fix CI based browser smoke test
|
|
||||||
|
|
||||||
Users running Anubis' test suite may run into issues with the integration tests on Windows hosts. This is a known issue and will be fixed at some point in the future. In the meantime, use the Windows Subsystem for Linux (WSL).
|
|
||||||
|
|
||||||
## v1.14.2
|
|
||||||
|
|
||||||
Livia sas Junius: Echo 2
|
|
||||||
|
|
||||||
- Remove default RSS reader rule as it may allow for a targeted attack against rails apps
|
|
||||||
[#67](https://github.com/TecharoHQ/anubis/pull/67)
|
|
||||||
- Whitelist MojeekBot in botPolicies [#47](https://github.com/TecharoHQ/anubis/issues/47)
|
|
||||||
- botPolicies regex has been cleaned up [#66](https://github.com/TecharoHQ/anubis/pull/66)
|
|
||||||
|
|
||||||
## v1.14.1
|
|
||||||
|
|
||||||
Livia sas Junius: Echo 1
|
|
||||||
|
|
||||||
- Set the `X-Real-Ip` header based on the contents of `X-Forwarded-For`
|
|
||||||
[#62](https://github.com/TecharoHQ/anubis/issues/62)
|
|
||||||
|
|
||||||
## v1.14.0
|
|
||||||
|
|
||||||
Livia sas Junius
|
|
||||||
|
|
||||||
> Fail to do as my lord commands...and I will spare him the trouble of blocking you.
|
|
||||||
|
|
||||||
- Add explanation of what Anubis is doing to the challenge page [#25](https://github.com/TecharoHQ/anubis/issues/25)
|
|
||||||
- Administrators can now define artificially hard challenges using the "slow" algorithm:
|
|
||||||
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"name": "generic-bot-catchall",
|
|
||||||
"user_agent_regex": "(?i:bot|crawler)",
|
|
||||||
"action": "CHALLENGE",
|
|
||||||
"challenge": {
|
|
||||||
"difficulty": 16,
|
|
||||||
"report_as": 4,
|
|
||||||
"algorithm": "slow"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
This allows administrators to cause particularly malicious clients to use unreasonable amounts of CPU. The UI will also lie to the client about the difficulty.
|
|
||||||
|
|
||||||
- Docker images now explicitly call `docker.io/library/<thing>` to increase compatibility with Podman et al.
|
|
||||||
[#21](https://github.com/TecharoHQ/anubis/pull/21)
|
|
||||||
- Don't overflow the image when browser windows are small (eg. on phones)
|
|
||||||
[#27](https://github.com/TecharoHQ/anubis/pull/27)
|
|
||||||
- Lower the default difficulty to 4 from 5
|
|
||||||
- Don't duplicate work across multiple threads [#36](https://github.com/TecharoHQ/anubis/pull/36)
|
|
||||||
- Documentation has been moved to https://anubis.techaro.lol/ with sources in docs/
|
|
||||||
- Removed several visible AI artifacts (e.g., 6 fingers) [#37](https://github.com/TecharoHQ/anubis/pull/37)
|
|
||||||
- [KagiBot](https://kagi.com/bot) is allowed through the filter [#44](https://github.com/TecharoHQ/anubis/pull/44)
|
|
||||||
- Fixed hang when navigator.hardwareConcurrency is undefined
|
|
||||||
- Support Unix domain sockets [#45](https://github.com/TecharoHQ/anubis/pull/45)
|
|
||||||
- Allow filtering by remote addresses:
|
|
||||||
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"name": "qwantbot",
|
|
||||||
"user_agent_regex": "\\+https\\:\\/\\/help\\.qwant\\.com/bot/",
|
|
||||||
"action": "ALLOW",
|
|
||||||
"remote_addresses": ["91.242.162.0/24"]
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
This also works at an IP range level:
|
|
||||||
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"name": "internal-network",
|
|
||||||
"action": "ALLOW",
|
|
||||||
"remote_addresses": ["100.64.0.0/10"]
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
## 1.13.0
|
|
||||||
|
|
||||||
- Proof-of-work challenges are drastically sped up [#19](https://github.com/TecharoHQ/anubis/pull/19)
|
|
||||||
- Docker images are now built with the timestamp set to the commit timestamp
|
|
||||||
- The README now points to TecharoHQ/anubis instead of Xe/x
|
|
||||||
- Images are built using ko instead of `docker buildx build`
|
|
||||||
[#13](https://github.com/TecharoHQ/anubis/pull/13)
|
|
||||||
|
|
||||||
## 1.12.1
|
|
||||||
|
|
||||||
- Phrasing in the `<noscript>` warning was replaced from its original placeholder text to
|
|
||||||
something more suitable for general consumption
|
|
||||||
([fd6903a](https://github.com/TecharoHQ/anubis/commit/fd6903aeed315b8fddee32890d7458a9271e4798)).
|
|
||||||
- Footer links on the check page now point to Techaro's brand
|
|
||||||
([4ebccb1](https://github.com/TecharoHQ/anubis/commit/4ebccb197ec20d024328d7f92cad39bbbe4d6359))
|
|
||||||
- Anubis was imported from [Xe/x](https://github.com/Xe/x)
|
|
||||||
|
|
@ -1,8 +0,0 @@
|
||||||
{
|
|
||||||
"label": "Administrative guides",
|
|
||||||
"position": 40,
|
|
||||||
"link": {
|
|
||||||
"type": "generated-index",
|
|
||||||
"description": "Tradeoffs and considerations you may want to keep in mind when using Anubis."
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
@ -1,340 +0,0 @@
|
||||||
---
|
|
||||||
title: "Commercial support and an unbranded version"
|
|
||||||
---
|
|
||||||
|
|
||||||
If you want to use Anubis but organizational policies prevent you from using the branding that the open source project ships, we offer a commercial version of Anubis named BotStopper. BotStopper builds off of the open source core of Anubis and offers organizations more control over the branding, including but not limited to:
|
|
||||||
|
|
||||||
- Custom images for different states of the challenge process (in process, success, failure)
|
|
||||||
- Custom CSS and fonts
|
|
||||||
- Custom titles for the challenge and error pages
|
|
||||||
- "Anubis" replaced with "BotStopper" across the UI
|
|
||||||
- A private bug tracker for issues
|
|
||||||
|
|
||||||
In the near future this will expand to:
|
|
||||||
|
|
||||||
- A private challenge implementation that does advanced fingerprinting to check if the client is a genuine browser or not
|
|
||||||
- Advanced fingerprinting via [Thoth-based advanced checks](./thoth.mdx)
|
|
||||||
|
|
||||||
In order to sign up for BotStopper, please do one of the following:
|
|
||||||
|
|
||||||
- Sign up [on GitHub Sponsors](https://github.com/sponsors/Xe) at the $50 per month tier or higher
|
|
||||||
- Email [sales@techaro.lol](mailto:sales@techaro.lol) with your requirements for invoicing, please note that custom invoicing will cost more than using GitHub Sponsors for understandable overhead reasons
|
|
||||||
|
|
||||||
## Installation
|
|
||||||
|
|
||||||
Install BotStopper like you would Anubis, but replace the image reference. EG:
|
|
||||||
|
|
||||||
```diff
|
|
||||||
-ghcr.io/techarohq/anubis:latest
|
|
||||||
+ghcr.io/techarohq/botstopper/anubis:latest
|
|
||||||
```
|
|
||||||
|
|
||||||
### Binary packages
|
|
||||||
|
|
||||||
Binary packages are available [in the GitHub Releases page](https://github.com/TecharoHQ/botstopper/releases), the main difference is that the package name is `techaro-botstopper`, the systemd service is `techaro-botstopper@your-instance.service`, the binary is `/usr/bin/botstopper`, and the configuration is in `/etc/techaro-botstopper`. All other instructions in the [native package install guide](./native-install.mdx) apply.
|
|
||||||
|
|
||||||
### Docker / Podman
|
|
||||||
|
|
||||||
In order to pull the BotStopper image, you need to [authenticate with GitHub's Container Registry](https://docs.github.com/en/packages/working-with-a-github-packages-registry/working-with-the-container-registry#authenticating-to-the-container-registry).
|
|
||||||
|
|
||||||
```text
|
|
||||||
docker login ghcr.io -u your-username --password-stdin
|
|
||||||
```
|
|
||||||
|
|
||||||
Then you can use the image as normal.
|
|
||||||
|
|
||||||
### Kubernetes
|
|
||||||
|
|
||||||
If you are using Kubernetes, you will need to create an image pull secret:
|
|
||||||
|
|
||||||
```text
|
|
||||||
kubectl create secret docker-registry \
|
|
||||||
techarohq-botstopper \
|
|
||||||
--docker-server ghcr.io \
|
|
||||||
--docker-username any-username \
|
|
||||||
--docker-password <your-access-token> \
|
|
||||||
```
|
|
||||||
|
|
||||||
Then attach it to your Deployment:
|
|
||||||
|
|
||||||
```diff
|
|
||||||
spec:
|
|
||||||
securityContext:
|
|
||||||
fsGroup: 1000
|
|
||||||
+ imagePullSecrets:
|
|
||||||
+ - name: techarohq-botstopper
|
|
||||||
```
|
|
||||||
|
|
||||||
## Configuration
|
|
||||||
|
|
||||||
### Docker compose
|
|
||||||
|
|
||||||
Follow [the upstream Docker compose directions](https://anubis.techaro.lol/docs/admin/environments/docker-compose) with the following additional options:
|
|
||||||
|
|
||||||
```diff
|
|
||||||
anubis:
|
|
||||||
image: ghcr.io/techarohq/botstopper/anubis:latest
|
|
||||||
environment:
|
|
||||||
BIND: ":8080"
|
|
||||||
DIFFICULTY: "4"
|
|
||||||
METRICS_BIND: ":9090"
|
|
||||||
SERVE_ROBOTS_TXT: "true"
|
|
||||||
TARGET: "http://nginx"
|
|
||||||
OG_PASSTHROUGH: "true"
|
|
||||||
OG_EXPIRY_TIME: "24h"
|
|
||||||
|
|
||||||
+ # botstopper config here
|
|
||||||
+ CHALLENGE_TITLE: "Doing math for your connection!"
|
|
||||||
+ ERROR_TITLE: "Something went wrong!"
|
|
||||||
+ OVERLAY_FOLDER: /assets
|
|
||||||
+ volumes:
|
|
||||||
+ - "./your_folder:/assets"
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Example
|
|
||||||
|
|
||||||
There is an example in [docker-compose.yaml](https://github.com/TecharoHQ/botstopper/blob/main/docker-compose.yaml). Start the example with `docker compose up`:
|
|
||||||
|
|
||||||
```text
|
|
||||||
docker compose up -d
|
|
||||||
```
|
|
||||||
|
|
||||||
And then open [https://botstopper.local.cetacean.club:8443](https://botstopper.local.cetacean.club:8443) in your browser.
|
|
||||||
|
|
||||||
> [!NOTE]
|
|
||||||
> This uses locally signed sacrificial TLS certificates stored in `./demo/pki`. Your browser will rightly reject these. Here is what the example looks like:
|
|
||||||
>
|
|
||||||
> 
|
|
||||||
|
|
||||||
## Custom images and CSS
|
|
||||||
|
|
||||||
Anubis uses an internal filesystem that contains CSS, JavaScript, and images. The BotStopper variant of Anubis lets you specify an overlay folder with the environment variable `OVERLAY_FOLDER`. The contents of this folder will be overlaid on top of Anubis' internal filesystem, allowing you to easily customize the images and CSS.
|
|
||||||
|
|
||||||
Your directory tree should look like this, assuming your data is in `./your_folder`:
|
|
||||||
|
|
||||||
```text
|
|
||||||
./your_folder
|
|
||||||
└── static
|
|
||||||
├── css
|
|
||||||
│ └── custom.css
|
|
||||||
└── img
|
|
||||||
├── happy.webp
|
|
||||||
├── pensive.webp
|
|
||||||
└── reject.webp
|
|
||||||
```
|
|
||||||
|
|
||||||
For an example directory tree using some off-the-shelf images from the Tango icon set, see the [testdata](https://github.com/TecharoHQ/botstopper/tree/main/testdata/static/img) folder.
|
|
||||||
|
|
||||||
### Header-based overlay dispatch
|
|
||||||
|
|
||||||
If you run BotStopper in a multi-tenant environment where each tenant needs its own branding, BotStopper supports the ability to use request header values to direct asset reads to different folders under your `OVERLAY_FOLDER`. One of the most common ways to do this is based on the HTTP Host of the request. For example, if you set `ASSET_LOOKUP_HEADER=Host` in BotStopper's environment:
|
|
||||||
|
|
||||||
```text
|
|
||||||
$OVERLAY_FOLDER
|
|
||||||
├── static
|
|
||||||
│ ├── css
|
|
||||||
│ │ ├── custom.css
|
|
||||||
│ │ └── eyesore.css
|
|
||||||
│ └── img
|
|
||||||
│ ├── happy.webp
|
|
||||||
│ ├── pensive.webp
|
|
||||||
│ └── reject.webp
|
|
||||||
└── test.anubis.techaro.lol
|
|
||||||
└── static
|
|
||||||
├── css
|
|
||||||
│ └── custom.css
|
|
||||||
└── img
|
|
||||||
├── happy.webp
|
|
||||||
├── pensive.webp
|
|
||||||
└── reject.webp
|
|
||||||
```
|
|
||||||
|
|
||||||
Requests to `test.anubis.techaro.lol` will load assets in `$OVERLAY_FOLDER/test.anubis.techaro.lol/static` and all other requests will load them from `$OVERLAY_FOLDER/static`.
|
|
||||||
|
|
||||||
For an example, look at [the testdata folder in the BotStopper repo](https://github.com/TecharoHQ/botstopper/tree/main/testdata).
|
|
||||||
|
|
||||||
### Custom CSS
|
|
||||||
|
|
||||||
CSS customization is done mainly with CSS variables. View [the example custom CSS file](https://github.com/TecharoHQ/botstopper/blob/main/testdata/static/css/custom.css) for more information about what can be customized.
|
|
||||||
|
|
||||||
### Custom fonts
|
|
||||||
|
|
||||||
If you want to add custom fonts, copy the `woff2` files alongside your `custom.css` file and then include them with the [`@font-face` CSS at-rule](https://developer.mozilla.org/en-US/docs/Web/CSS/@font-face):
|
|
||||||
|
|
||||||
```css
|
|
||||||
@font-face {
|
|
||||||
font-family: "Oswald";
|
|
||||||
font-style: normal;
|
|
||||||
font-weight: 200 900;
|
|
||||||
font-display: swap;
|
|
||||||
src: url("./fonts/oswald.woff2") format("woff2");
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
Then adjust your CSS variables accordingly:
|
|
||||||
|
|
||||||
```css
|
|
||||||
:root {
|
|
||||||
--body-sans-font: Oswald, sans-serif;
|
|
||||||
--body-preformatted-font: monospace;
|
|
||||||
--body-title-font: serif;
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
To convert `.ttf` fonts to [Web-optimized woff2 fonts](https://www.w3.org/TR/WOFF2/), use the `woff2_compress` command from the `woff2` or `woff2-tools` package:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ woff2_compress oswald.ttf
|
|
||||||
Processing oswald.ttf => oswald.woff2
|
|
||||||
Compressed 159517 to 70469.
|
|
||||||
```
|
|
||||||
|
|
||||||
Then you can import and use it as normal.
|
|
||||||
|
|
||||||
### Customizing images
|
|
||||||
|
|
||||||
Anubis uses three images to visually communicate the state of the program. These are:
|
|
||||||
|
|
||||||
| Image name | Intended message | Example |
|
|
||||||
| :------------- | :----------------------------------------------- | :-------------------------------- |
|
|
||||||
| `happy.webp` | You have passed validation, all is good |  |
|
|
||||||
| `pensive.webp` | Checking is running, hold steady until it's done |  |
|
|
||||||
| `reject.webp` | Something went wrong, this is a terminal state |  |
|
|
||||||
|
|
||||||
To make your own images at the optimal quality, use the following ffmpeg command:
|
|
||||||
|
|
||||||
```text
|
|
||||||
ffmpeg -i /path/to/image -vf scale=-1:384 happy.webp
|
|
||||||
```
|
|
||||||
|
|
||||||
`ffprobe` should report something like this on the generated images:
|
|
||||||
|
|
||||||
```text
|
|
||||||
Input #0, webp_pipe, from 'happy.webp':
|
|
||||||
Duration: N/A, bitrate: N/A
|
|
||||||
Stream #0:0: Video: webp, none, 25 fps, 25 tbr, 25 tbn
|
|
||||||
```
|
|
||||||
|
|
||||||
In testing 384 by 384 pixels gives the best balance between filesize, quality, and clarity.
|
|
||||||
|
|
||||||
```text
|
|
||||||
$ du -hs *
|
|
||||||
4.0K happy.webp
|
|
||||||
12K pensive.webp
|
|
||||||
8.0K reject.webp
|
|
||||||
```
|
|
||||||
|
|
||||||
## Custom HTML templates
|
|
||||||
|
|
||||||
If you need to completely control the HTML layout of all Anubis pages, you can customize the entire page with `USE_TEMPLATES=true`. This uses Go's standard library [html/template](https://pkg.go.dev/html/template) package to template HTML responses. Your templates can contain whatever HTML you want. The only catch is that you MUST include `{{ .Head }}` in the `<head>` element for challenge pages, and you MUST include `{{ .Body }}` in the `<body>` element for all pages.
|
|
||||||
|
|
||||||
In order to use this, you must define the following templates:
|
|
||||||
|
|
||||||
| Template path | Usage |
|
|
||||||
| :----------------------------------------- | :---------------------------------------------- |
|
|
||||||
| `$OVERLAY_FOLDER/templates/challenge.tmpl` | Challenge pages |
|
|
||||||
| `$OVERLAY_FOLDER/templates/error.tmpl` | Error pages |
|
|
||||||
| `$OVERLAY_FOLDER/templates/impressum.tmpl` | [Impressum](./configuration/impressum.mdx) page |
|
|
||||||
|
|
||||||
:::note
|
|
||||||
|
|
||||||
Currently HTML templates don't work together with [Header-based overlay dispatch](#header-based-overlay-dispatch). This is a known issue that will be fixed soon. If you enable header-based overlay dispatch, BotStopper will use the global `templates` folder instead of using the templates present in the overlay.
|
|
||||||
|
|
||||||
:::
|
|
||||||
|
|
||||||
Here are minimal (but working) examples for each template:
|
|
||||||
|
|
||||||
<details>
|
|
||||||
<summary>`challenge.tmpl`</summary>
|
|
||||||
|
|
||||||
:::note
|
|
||||||
|
|
||||||
You **MUST** include the `{{.Head}}` segment in a `<head>` tag. It contains important information for challenges to execute. If you don't include this, no clients will be able to pass challenges.
|
|
||||||
|
|
||||||
:::
|
|
||||||
|
|
||||||
```html
|
|
||||||
<!DOCTYPE html>
|
|
||||||
<html lang="{{ .Lang }}">
|
|
||||||
<head>
|
|
||||||
{{ .Head }}
|
|
||||||
</head>
|
|
||||||
<body>
|
|
||||||
{{ .Body }}
|
|
||||||
</body>
|
|
||||||
</html>
|
|
||||||
```
|
|
||||||
|
|
||||||
</details>
|
|
||||||
|
|
||||||
<details>
|
|
||||||
<summary>`error.tmpl`</summary>
|
|
||||||
|
|
||||||
```html
|
|
||||||
<!DOCTYPE html>
|
|
||||||
<html lang="{{ .Lang }}">
|
|
||||||
<body>
|
|
||||||
{{ .Body }}
|
|
||||||
</body>
|
|
||||||
</html>
|
|
||||||
```
|
|
||||||
|
|
||||||
</details>
|
|
||||||
|
|
||||||
<details>
|
|
||||||
<summary>`impressum.tmpl`</summary>
|
|
||||||
|
|
||||||
```html
|
|
||||||
<!DOCTYPE html>
|
|
||||||
<html lang="{{ .Lang }}">
|
|
||||||
<body>
|
|
||||||
{{ .Body }}
|
|
||||||
</body>
|
|
||||||
</html>
|
|
||||||
```
|
|
||||||
|
|
||||||
</details>
|
|
||||||
|
|
||||||
### Template functions
|
|
||||||
|
|
||||||
In order to make life easier, the following template functions are defined:
|
|
||||||
|
|
||||||
#### `Asset`
|
|
||||||
|
|
||||||
Constructs the path for a static asset in the [overlay folder](#custom-images-and-css)'s `static` directory.
|
|
||||||
|
|
||||||
```go
|
|
||||||
func Asset(string) string
|
|
||||||
```
|
|
||||||
|
|
||||||
Usage:
|
|
||||||
|
|
||||||
```html
|
|
||||||
<link rel="stylesheet" href="{{ Asset "css/example.css" }}" />
|
|
||||||
```
|
|
||||||
|
|
||||||
Generates:
|
|
||||||
|
|
||||||
```html
|
|
||||||
<link
|
|
||||||
rel="stylesheet"
|
|
||||||
href="/.within.website/x/cmd/anubis/static/css/example.css"
|
|
||||||
/>
|
|
||||||
```
|
|
||||||
|
|
||||||
## Customizing messages
|
|
||||||
|
|
||||||
You can customize messages using the following environment variables:
|
|
||||||
|
|
||||||
| Message | Environment variable | Default |
|
|
||||||
| :------------------- | :------------------- | :----------------------------------------- |
|
|
||||||
| Challenge page title | `CHALLENGE_TITLE` | `Ensuring the security of your connection` |
|
|
||||||
| Error page title | `ERROR_TITLE` | `Error` |
|
|
||||||
|
|
||||||
For example:
|
|
||||||
|
|
||||||
```sh
|
|
||||||
# /etc/techaro-botstopper/gitea.env
|
|
||||||
CHALLENGE_TITLE="Wait a moment please!"
|
|
||||||
ERROR_TITLE="Client error"
|
|
||||||
```
|
|
||||||
|
|
@ -1,34 +0,0 @@
|
||||||
---
|
|
||||||
title: When using Caddy with Gitea/Forgejo
|
|
||||||
---
|
|
||||||
|
|
||||||
Gitea/Forgejo relies on the reverse proxy setting the `X-Real-Ip` header. Caddy does not do this out of the gate. Modify your Caddyfile like this:
|
|
||||||
|
|
||||||
```python
|
|
||||||
ellenjoe.int.within.lgbt {
|
|
||||||
# ...
|
|
||||||
# diff-remove
|
|
||||||
reverse_proxy http://localhost:3000
|
|
||||||
# diff-add
|
|
||||||
reverse_proxy http://localhost:3000 {
|
|
||||||
# diff-add
|
|
||||||
header_up X-Real-Ip {remote_host}
|
|
||||||
# diff-add
|
|
||||||
}
|
|
||||||
# ...
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
Ensure that Gitea/Forgejo have `[security].REVERSE_PROXY_TRUSTED_PROXIES` set to the IP ranges that Anubis will appear from. Typically this is sufficient:
|
|
||||||
|
|
||||||
```ini
|
|
||||||
[security]
|
|
||||||
REVERSE_PROXY_TRUSTED_PROXIES = 127.0.0.0/8,::1/128
|
|
||||||
```
|
|
||||||
|
|
||||||
However if you are running Anubis in a separate Pod/Deployment in Kubernetes, you may have to adjust this to the IP range of the Pod space in your Container Networking Interface plugin:
|
|
||||||
|
|
||||||
```ini
|
|
||||||
[security]
|
|
||||||
REVERSE_PROXY_TRUSTED_PROXIES = 10.192.0.0/12
|
|
||||||
```
|
|
||||||
|
|
@ -1,27 +0,0 @@
|
||||||
# Client IP Headers
|
|
||||||
|
|
||||||
Currently Anubis will always flatten the `X-Forwarded-For` when it contains multiple IP addresses. From right to left, the first IP address that is not in one of the following categories will be set as `X-Forwarded-For` in the request passed to the upstream.
|
|
||||||
|
|
||||||
- Private (`XFF_STRIP_PRIVATE`, enabled by default)
|
|
||||||
- CGNAT (always stripped)
|
|
||||||
- Link-local Unicast (always stripped)
|
|
||||||
|
|
||||||
```
|
|
||||||
Incoming: X-Forwarded-For: 1.2.3.4, 5.6.7.8, 10.0.0.1
|
|
||||||
Upstream: X-Forwarded-For: 5.6.7.8
|
|
||||||
```
|
|
||||||
|
|
||||||
This behavior will cause problems if the proxy in front of Anubis is from a public IP, such as Cloudflare, because Anubis will use the Cloudflare IP instead of your client's real IP. You will likely see all requests from your browser being blocked and/or an infinite challenge loop.
|
|
||||||
|
|
||||||
```
|
|
||||||
Incoming: X-Forwarded-For: REAL_CLIENT_IP, CF_IP
|
|
||||||
Upstream: X-Forwarded-For: CF_IP
|
|
||||||
```
|
|
||||||
|
|
||||||
As a workaround, you should configure your web server to parse an alternative source (such as `CF-Connecting-IP`), or pre-process the incoming `X-Forwarded-For` with your web server to ensure it only contains the real client IP address, then pass it to Anubis as `X-Forwarded-For`.
|
|
||||||
|
|
||||||
If you do not control the web server upstream of Anubis, the `custom-real-ip-header` command line flag accepts a header value that Anubis will read the real client IP address from. Anubis will set the `X-Real-IP` header to the IP address found in the custom header.
|
|
||||||
|
|
||||||
The `X-Real-IP` header will be automatically inferred from `X-Forwarded-For` if not set, setting it explicitly is not necessary as long as `X-Forwarded-For` contains only the real client IP. However setting it explicitly can eliminate spoofed values if your web server doesn't set this.
|
|
||||||
|
|
||||||
See [Cloudflare](environments/cloudflare.mdx) for an example configuration.
|
|
||||||
|
|
@ -1,8 +0,0 @@
|
||||||
{
|
|
||||||
"label": "Configuration",
|
|
||||||
"position": 10,
|
|
||||||
"link": {
|
|
||||||
"type": "generated-index",
|
|
||||||
"description": "Detailed information about configuring parts of Anubis."
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
@ -1,5 +0,0 @@
|
||||||
{
|
|
||||||
"label": "Challenges",
|
|
||||||
"position": 10,
|
|
||||||
"link": null
|
|
||||||
}
|
|
||||||
|
|
@ -1,9 +0,0 @@
|
||||||
# Challenge Methods
|
|
||||||
|
|
||||||
Anubis supports multiple challenge methods:
|
|
||||||
|
|
||||||
- [Meta Refresh](./metarefresh.mdx)
|
|
||||||
- [Preact](./preact.mdx)
|
|
||||||
- [Proof of Work](./proof-of-work.mdx)
|
|
||||||
|
|
||||||
Read the documentation to know which method is best for you.
|
|
||||||
|
|
@ -1,18 +0,0 @@
|
||||||
# Meta Refresh (No JavaScript)
|
|
||||||
|
|
||||||
The `metarefresh` challenge sends a browser a much simpler challenge that makes it refresh the page after a set period of time. This enables clients to pass challenges without executing JavaScript.
|
|
||||||
|
|
||||||
To use it in your Anubis configuration:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
# Generic catchall rule
|
|
||||||
- name: generic-browser
|
|
||||||
user_agent_regex: >-
|
|
||||||
Mozilla|Opera
|
|
||||||
action: CHALLENGE
|
|
||||||
challenge:
|
|
||||||
difficulty: 1 # Number of seconds to wait before refreshing the page
|
|
||||||
algorithm: metarefresh # Specify a non-JS challenge method
|
|
||||||
```
|
|
||||||
|
|
||||||
This is not enabled by default while this method is tested and its false positive rate is ascertained. Many modern scrapers use headless Google Chrome, so this will have a much higher false positive rate.
|
|
||||||
|
|
@ -1,18 +0,0 @@
|
||||||
# Preact
|
|
||||||
|
|
||||||
The `preact` challenge sends the browser a simple challenge that makes it run very lightweight JavaScript that proves the client is able to execute client-side JavaScript. It uses [Preact](https://www.npmjs.com/package/preact) (a lightweight client side web framework in the vein of React) to do this.
|
|
||||||
|
|
||||||
To use it in your Anubis configuration:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
# Generic catchall rule
|
|
||||||
- name: generic-browser
|
|
||||||
user_agent_regex: >-
|
|
||||||
Mozilla|Opera
|
|
||||||
action: CHALLENGE
|
|
||||||
challenge:
|
|
||||||
difficulty: 1 # Number of seconds to wait before refreshing the page
|
|
||||||
algorithm: preact
|
|
||||||
```
|
|
||||||
|
|
||||||
This is the default challenge method for most clients.
|
|
||||||
|
|
@ -1,5 +0,0 @@
|
||||||
# Proof of Work (JavaScript)
|
|
||||||
|
|
||||||
When Anubis is configured to use the `fast` or `slow` challenge methods, clients will be sent a small [proof of work](https://en.wikipedia.org/wiki/Proof_of_work) challenge. In order to get a token used to access the upstream resource, clients must calculate a complicated math puzzle with JavaScript.
|
|
||||||
|
|
||||||
A `fast` challenge uses a heavily optimized multithreaded implementation and a `slow` challenge uses a simplistic single-threaded implementation. The `slow` method is kept around for legacy compatibility.
|
|
||||||
|
|
@ -1,19 +0,0 @@
|
||||||
# Custom status codes for Anubis errors
|
|
||||||
|
|
||||||
Out of the box, Anubis will reply with `HTTP 200` for challenge and denial pages. This is intended to make AI scrapers have a hard time with your website because when they are faced with a non-200 response, they will hammer the page over and over until they get a 200 response. This behavior may not be desirable, as such Anubis lets you customize what HTTP status codes are returned when Anubis throws challenge and denial pages.
|
|
||||||
|
|
||||||
This is configured in the `status_codes` block of your [bot policy file](../policies.mdx):
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
status_codes:
|
|
||||||
CHALLENGE: 200
|
|
||||||
DENY: 200
|
|
||||||
```
|
|
||||||
|
|
||||||
To match Cloudflare's behavior, use a configuration like this:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
status_codes:
|
|
||||||
CHALLENGE: 403
|
|
||||||
DENY: 403
|
|
||||||
```
|
|
||||||
|
|
@ -1,385 +0,0 @@
|
||||||
# Expression-based rule matching
|
|
||||||
|
|
||||||
Most of the Anubis matchers let you match individual parts of a request and only those parts in isolation. In order to defend a service in depth, you often need the ability to match against multiple aspects of a request. Anubis implements [Common Expression Language (CEL)](https://cel.dev) to let administrators define these more advanced rules. This allows you to tailor your approach for the individual services you are protecting.
|
|
||||||
|
|
||||||
As an example, here is a rule that lets you allow JSON API requests through Anubis:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
- name: allow-api-requests
|
|
||||||
action: ALLOW
|
|
||||||
expression:
|
|
||||||
all:
|
|
||||||
- '"Accept" in headers'
|
|
||||||
- 'headers["Accept"] == "application/json"'
|
|
||||||
- 'path.startsWith("/api/")'
|
|
||||||
```
|
|
||||||
|
|
||||||
This is an advanced feature and as such it is easy to get yourself in trouble with it. Use this with care.
|
|
||||||
|
|
||||||
## Common Expression Language (CEL)
|
|
||||||
|
|
||||||
CEL is an expression language made by Google as a part of their access control lists system. As programs grow more complicated and users have the need to express more complicated security requirements, they often want the ability to just run a small bit of code to check things for themselves. CEL expressions are built for this. They are implicitly sandboxed so that they cannot affect the system they are running in and also designed to evaluate as fast as humanly possible.
|
|
||||||
|
|
||||||
Imagine a CEL expression as the contents of an `if` statement in JavaScript or the `WHERE` clause in SQL. Consider this example expression:
|
|
||||||
|
|
||||||
```python
|
|
||||||
userAgent == ""
|
|
||||||
```
|
|
||||||
|
|
||||||
This is roughly equivalent to the following in JavaScript:
|
|
||||||
|
|
||||||
```js
|
|
||||||
if (userAgent == "") {
|
|
||||||
// Do something
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
Using these expressions, you can define more elaborate rules as facts and circumstances demand. For more information about the syntax and grammar of CEL, take a look at [the language specification](https://github.com/google/cel-spec/blob/master/doc/langdef.md).
|
|
||||||
|
|
||||||
## How Anubis uses CEL
|
|
||||||
|
|
||||||
Anubis uses CEL to let administrators create complicated filter rules. Anubis has several modes of using CEL:
|
|
||||||
|
|
||||||
- Validating requests against single expressions
|
|
||||||
- Validating multiple expressions and ensuring at least one of them is true (`any`)
|
|
||||||
- Validating multiple expressions and ensuring all of them are true (`all`)
|
|
||||||
|
|
||||||
The common pattern is that every Anubis expression returns `true`, `false`, or raises an error.
|
|
||||||
|
|
||||||
### Single expressions
|
|
||||||
|
|
||||||
A single expression that returns either `true` or `false`. If the expression returns `true`, then the action specified in the rule will be taken. If it returns `false`, Anubis will move on to the next rule.
|
|
||||||
|
|
||||||
For example, consider this rule:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
- name: no-user-agent-string
|
|
||||||
action: DENY
|
|
||||||
expression: userAgent == ""
|
|
||||||
```
|
|
||||||
|
|
||||||
For this rule, if a request comes in without a [`User-Agent` string](https://developer.mozilla.org/en-US/docs/Web/HTTP/Reference/Headers/User-Agent) set, Anubis will deny the request and return an error page.
|
|
||||||
|
|
||||||
### `any` blocks
|
|
||||||
|
|
||||||
An `any` block that contains a list of expressions. If any expression in the list returns `true`, then the action specified in the rule will be taken. If all expressions in that list return `false`, Anubis will move on to the next rule.
|
|
||||||
|
|
||||||
For example, consider this rule:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
- name: known-banned-user
|
|
||||||
action: DENY
|
|
||||||
expression:
|
|
||||||
any:
|
|
||||||
- remoteAddress == "8.8.8.8"
|
|
||||||
- remoteAddress == "1.1.1.1"
|
|
||||||
```
|
|
||||||
|
|
||||||
For this rule, if a request comes in from `8.8.8.8` or `1.1.1.1`, Anubis will deny the request and return an error page.
|
|
||||||
|
|
||||||
### `all` blocks
|
|
||||||
|
|
||||||
An `all` block that contains a list of expressions. If all expressions in the list return `true`, then the action specified in the rule will be taken. If any of the expressions in the list returns `false`, Anubis will move on to the next rule.
|
|
||||||
|
|
||||||
For example, consider this rule:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
- name: go-get
|
|
||||||
action: ALLOW
|
|
||||||
expression:
|
|
||||||
all:
|
|
||||||
- userAgent.startsWith("Go-http-client/")
|
|
||||||
- '"go-get" in query'
|
|
||||||
- query["go-get"] == "1"
|
|
||||||
```
|
|
||||||
|
|
||||||
For this rule, if a request comes in matching [the signature of the `go get` command](https://pkg.go.dev/cmd/go#hdr-Remote_import_paths), Anubis will allow it through to the target.
|
|
||||||
|
|
||||||
## Variables exposed to Anubis expressions
|
|
||||||
|
|
||||||
Anubis exposes the following variables to expressions:
|
|
||||||
|
|
||||||
| Name | Type | Explanation | Example |
|
|
||||||
| :-------------- | :-------------------- | :-------------------------------------------------------------------------------------------------------------------------------------------- | :----------------------------------------------------------- |
|
|
||||||
| `headers` | `map[string, string]` | The [headers](https://developer.mozilla.org/en-US/docs/Web/HTTP/Reference/Headers) of the request being processed. | `{"User-Agent": "Mozilla/5.0 Gecko/20100101 Firefox/137.0"}` |
|
|
||||||
| `host` | `string` | The [HTTP hostname](https://web.dev/articles/url-parts#host) the request is targeted to. | `anubis.techaro.lol` |
|
|
||||||
| `contentLength` | `int64` | The numerical value of the `Content-Length` header. |
|
|
||||||
| `load_1m` | `double` | The current system load average over the last one minute. This is useful for making [load-based checks](#using-the-system-load-average). |
|
|
||||||
| `load_5m` | `double` | The current system load average over the last five minutes. This is useful for making [load-based checks](#using-the-system-load-average). |
|
|
||||||
| `load_15m` | `double` | The current system load average over the last fifteen minutes. This is useful for making [load-based checks](#using-the-system-load-average). |
|
|
||||||
| `method` | `string` | The [HTTP method](https://developer.mozilla.org/en-US/docs/Web/HTTP/Reference/Methods) in the request being processed. | `GET`, `POST`, `DELETE`, etc. |
|
|
||||||
| `path` | `string` | The [path](https://web.dev/articles/url-parts#pathname) of the request being processed. | `/`, `/api/memes/create` |
|
|
||||||
| `query` | `map[string, string]` | The [query parameters](https://web.dev/articles/url-parts#query) of the request being processed. | `?foo=bar` -> `{"foo": "bar"}` |
|
|
||||||
| `remoteAddress` | `string` | The IP address of the client. | `1.1.1.1` |
|
|
||||||
| `userAgent` | `string` | The [`User-Agent`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Reference/Headers/User-Agent) string in the request being processed. | `Mozilla/5.0 Gecko/20100101 Firefox/137.0` |
|
|
||||||
|
|
||||||
Of note: in many languages when you look up a key in a map and there is nothing there, the language will return some "falsy" value like `undefined` in JavaScript, `None` in Python, or the zero value of the type in Go. In CEL, if you try to look up a value that does not exist, execution of the expression will fail and Anubis will return an error.
|
|
||||||
|
|
||||||
In order to avoid this, make sure the header or query parameter you are testing is present in the request with an `all` block like this:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
- name: challenge-wiki-history-page
|
|
||||||
action: CHALLENGE
|
|
||||||
all:
|
|
||||||
- 'path == "/index.php"'
|
|
||||||
- '"title" in query'
|
|
||||||
- '"action" in query'
|
|
||||||
- 'query["action"] == "history"'
|
|
||||||
```
|
|
||||||
|
|
||||||
This rule throws a challenge if and only if all of the following conditions are true:
|
|
||||||
|
|
||||||
- The URL path is `/index.php`
|
|
||||||
- The URL query string contains a `title` value
|
|
||||||
- The URL query string contains an `action` value
|
|
||||||
- The URL query string's `action` value is `"history"`
|
|
||||||
|
|
||||||
So given an HTTP request like this:
|
|
||||||
|
|
||||||
```text
|
|
||||||
GET /index.php?title=Index&action=history HTTP/1.1
|
|
||||||
User-Agent: Mozilla/5.0 Gecko/20100101 Firefox/137.0
|
|
||||||
Host: wiki.int.techaro.lol
|
|
||||||
X-Real-Ip: 8.8.8.8
|
|
||||||
```
|
|
||||||
|
|
||||||
Anubis would return a challenge because all of those conditions are true.
|
|
||||||
|
|
||||||
### Using the system load average
|
|
||||||
|
|
||||||
In Unix-like systems (such as Linux), every process on the system has to wait its turn to be able to run. This means that as more processes on the system are running, they need to wait longer to be able to execute. The [load average](<https://en.wikipedia.org/wiki/Load_(computing)>) represents the number of processes that want to be able to run but can't run yet. This metric isn't the most reliable to identify a cause, but is great at helping to identify symptoms.
|
|
||||||
|
|
||||||
Anubis lets you use the system load average as an input to expressions so that you can make dynamic rules like "when the system is under a low amount of load, dial back the protection, but when it's under a lot of load, crank it up to the mix". This lets you get all of the blocking features of Anubis in the background but only really expose Anubis to users when the system is actively being attacked.
|
|
||||||
|
|
||||||
This is best combined with the [weight](../policies.mdx#request-weight) and [threshold](./thresholds.mdx) systems so that you can have Anubis dynamically respond to attacks. Consider these rules in the default configuration file:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
## System load based checks.
|
|
||||||
# If the system is under high load for the last minute, add weight.
|
|
||||||
- name: high-load-average
|
|
||||||
action: WEIGH
|
|
||||||
expression: load_1m >= 10.0 # make sure to end the load comparison in a .0
|
|
||||||
weight:
|
|
||||||
adjust: 20
|
|
||||||
|
|
||||||
# If it is not for the last 15 minutes, remove weight.
|
|
||||||
- name: low-load-average
|
|
||||||
action: WEIGH
|
|
||||||
expression: load_15m <= 4.0 # make sure to end the load comparison in a .0
|
|
||||||
weight:
|
|
||||||
adjust: -10
|
|
||||||
```
|
|
||||||
|
|
||||||
This combination of rules makes Anubis dynamically react to the system load and only kick in when the system is under attack.
|
|
||||||
|
|
||||||
Something to keep in mind about system load average is that it is not aware of the number of cores the system has. If you have a 16 core system that has 16 processes running but none of them is hogging the CPU, then you will get a load average below 16. If you are in doubt, make your "high load" metric at least two times the number of CPU cores and your "low load" metric at least half of the number of CPU cores. For example:
|
|
||||||
|
|
||||||
| Kind | Core count | Load threshold |
|
|
||||||
| --------: | :--------- | :------------- |
|
|
||||||
| high load | 4 | `8.0` |
|
|
||||||
| low load | 4 | `2.0` |
|
|
||||||
| high load | 16 | `32.0` |
|
|
||||||
| low load | 16 | `8.0` |
|
|
||||||
|
|
||||||
Also keep in mind that this does not account for other kinds of latency like I/O latency. A system can have its web applications unresponsive due to high latency from a MySQL server but still have that web application server report a load near or at zero.
|
|
||||||
|
|
||||||
## Functions exposed to Anubis expressions
|
|
||||||
|
|
||||||
Anubis expressions can be augmented with the following functions:
|
|
||||||
|
|
||||||
### `missingHeader`
|
|
||||||
|
|
||||||
Available in `bot` expressions.
|
|
||||||
|
|
||||||
```ts
|
|
||||||
function missingHeader(headers: Record<string, string>, key: string) bool
|
|
||||||
```
|
|
||||||
|
|
||||||
`missingHeader` returns `true` if the request does not contain a header. This is useful when you are trying to assert behavior such as:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
# Adds weight to old versions of Chrome
|
|
||||||
- name: old-chrome
|
|
||||||
action: WEIGH
|
|
||||||
weight:
|
|
||||||
adjust: 10
|
|
||||||
expression:
|
|
||||||
all:
|
|
||||||
- userAgent.matches("Chrome/[1-9][0-9]?\\.0\\.0\\.0")
|
|
||||||
- missingHeader(headers, "Sec-Ch-Ua")
|
|
||||||
```
|
|
||||||
|
|
||||||
### `randInt`
|
|
||||||
|
|
||||||
Available in all expressions.
|
|
||||||
|
|
||||||
```ts
|
|
||||||
function randInt(n: int): int;
|
|
||||||
```
|
|
||||||
|
|
||||||
`randInt` returns a randomly selected integer value in the range of `[0,n)`. This is a thin wrapper around [Go's math/rand#Intn](https://pkg.go.dev/math/rand#Intn). Be careful with this as it may cause inconsistent behavior for genuine users.
|
|
||||||
|
|
||||||
This is best applied when doing explicit block rules, eg:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
# Denies LightPanda about 75% of the time on average
|
|
||||||
- name: deny-lightpanda-sometimes
|
|
||||||
action: DENY
|
|
||||||
expression:
|
|
||||||
all:
|
|
||||||
- userAgent.matches("LightPanda")
|
|
||||||
- randInt(16) >= 4
|
|
||||||
```
|
|
||||||
|
|
||||||
It seems counter-intuitive to allow known bad clients through sometimes, but this allows you to confuse attackers by making Anubis' behavior random. Adjust the thresholds and numbers as facts and circumstances demand.
|
|
||||||
|
|
||||||
### `regexSafe`
|
|
||||||
|
|
||||||
Available in `bot` expressions.
|
|
||||||
|
|
||||||
```ts
|
|
||||||
function regexSafe(input: string): string;
|
|
||||||
```
|
|
||||||
|
|
||||||
`regexSafe` takes a string and escapes it for safe use inside of a regular expression. This is useful when you are creating regular expressions from headers or variables such as `remoteAddress`.
|
|
||||||
|
|
||||||
| Input | Output |
|
|
||||||
| :------------------------ | :------------------------------ |
|
|
||||||
| `regexSafe("1.2.3.4")` | `1\\.2\\.3\\.4` |
|
|
||||||
| `regexSafe("techaro.lol")` | `techaro\\.lol` |
|
|
||||||
| `regexSafe("star*")` | `star\\*` |
|
|
||||||
| `regexSafe("plus+")` | `plus\\+` |
|
|
||||||
| `regexSafe("{braces}")` | `\\{braces\\}` |
|
|
||||||
| `regexSafe("start^")` | `start\\^` |
|
|
||||||
| `regexSafe("back\\slash")` | `back\\\\slash` |
|
|
||||||
| `regexSafe("dash-dash")` | `dash\\-dash` |
|
|
||||||
|
|
||||||
### `segments`
|
|
||||||
|
|
||||||
Available in `bot` expressions.
|
|
||||||
|
|
||||||
```ts
|
|
||||||
function segments(path: string): string[];
|
|
||||||
```
|
|
||||||
|
|
||||||
`segments` returns the slash-separated path segments of a path as a list of strings, ignoring the leading slash. Here is what it will return with some common paths:
|
|
||||||
|
|
||||||
| Input | Output |
|
|
||||||
| :----------------------- | :--------------------- |
|
|
||||||
| `segments("/")` | `[""]` |
|
|
||||||
| `segments("/foo/bar")` | `["foo", "bar"] ` |
|
|
||||||
| `segments("/users/xe/")` | `["users", "xe", ""] ` |
|
|
||||||
|
|
||||||
:::note
|
|
||||||
|
|
||||||
If the path ends with a `/`, then the last element of the result will be an empty string. This is because `/users/xe` and `/users/xe/` are semantically different paths.
|
|
||||||
|
|
||||||
:::
|
|
||||||
|
|
||||||
This is useful if you want to write rules that allow requests that have no query parameters only if they have less than two path segments:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
- name: two-path-segments-no-query
|
|
||||||
action: ALLOW
|
|
||||||
expression:
|
|
||||||
all:
|
|
||||||
- size(query) == 0
|
|
||||||
- size(segments(path)) < 2
|
|
||||||
```
|
|
||||||
|
|
||||||
### DNS Functions
|
|
||||||
|
|
||||||
Anubis can also perform DNS lookups as a part of its expression evaluation. This can be useful for doing things like checking for a valid [Forward-confirmed reverse DNS (FCrDNS)](https://en.wikipedia.org/wiki/Forward-confirmed_reverse_DNS) record.
|
|
||||||
|
|
||||||
#### `arpaReverseIP`
|
|
||||||
|
|
||||||
Available in `bot` expressions.
|
|
||||||
|
|
||||||
```ts
|
|
||||||
function arpaReverseIP(ip: string): string;
|
|
||||||
```
|
|
||||||
|
|
||||||
`arpaReverseIP` takes an IP address and returns its value in [ARPA notation](https://www.ietf.org/rfc/rfc2317.html). This can be useful when matching PTR record patterns.
|
|
||||||
|
|
||||||
| Input | Output |
|
|
||||||
| :----------------------------- | :------------------------------------------------------------------- |
|
|
||||||
| `arpaReverseIP("1.2.3.4")` | `4.3.2.1` |
|
|
||||||
| `arpaReverseIP("2001:db8::1")` | `1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2` |
|
|
||||||
|
|
||||||
#### `lookupHost`
|
|
||||||
|
|
||||||
Available in `bot` expressions.
|
|
||||||
|
|
||||||
```ts
|
|
||||||
function lookupHost(host: string): string[];
|
|
||||||
```
|
|
||||||
|
|
||||||
`lookupHost` performs a DNS lookup for the given hostname and returns a list of IP addresses.
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
- name: cloudflare-ip-in-host-header
|
|
||||||
action: DENY
|
|
||||||
expression: '"104.16.0.0" in lookupHost(headers["Host"])'
|
|
||||||
```
|
|
||||||
|
|
||||||
#### `reverseDNS`
|
|
||||||
|
|
||||||
Available in `bot` expressions.
|
|
||||||
|
|
||||||
```ts
|
|
||||||
function reverseDNS(ip: string): string[];
|
|
||||||
```
|
|
||||||
|
|
||||||
`reverseDNS` takes an IP address and returns the DNS names associated with it. This is useful when you want to check PTR records of an IP address.
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
- name: allow-googlebot
|
|
||||||
action: ALLOW
|
|
||||||
expression: 'reverseDNS(remoteAddress).endsWith(".googlebot.com")'
|
|
||||||
```
|
|
||||||
|
|
||||||
:::warning
|
|
||||||
|
|
||||||
Do not use this for validating the legitimacy of an IP address. It is possible for DNS records to be out of date or otherwise manipulated. Use [`verifyFCrDNS`](#verifyfcrdns) instead for a more reliable result.
|
|
||||||
|
|
||||||
:::
|
|
||||||
|
|
||||||
#### `verifyFCrDNS`
|
|
||||||
|
|
||||||
Available in `bot` expressions.
|
|
||||||
|
|
||||||
```ts
|
|
||||||
function verifyFCrDNS(ip: string): bool;
|
|
||||||
function verifyFCrDNS(ip: string, pattern: string): bool;
|
|
||||||
```
|
|
||||||
|
|
||||||
`verifyFCrDNS` checks if the reverse DNS of an IP address matches its forward DNS. This is a common technique to filter out spam and bot traffic. `verifyFCrDNS` comes in two forms:
|
|
||||||
|
|
||||||
- `verifyFCrDNS(remoteAddress)` will check that the reverse DNS of the remote address resolves back to the remote address. If the address has no PTR records, this check returns true.
|
|
||||||
- `verifyFCrDNS(remoteAddress, pattern)` will check that the reverse DNS of the remote address matches the pattern and that the name resolves back to the remote address.
|
|
||||||
|
|
||||||
This is best used in rules like this:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
- name: require-fcrdns-for-post
|
|
||||||
action: DENY
|
|
||||||
expression:
|
|
||||||
all:
|
|
||||||
- method == "POST"
|
|
||||||
- "!verifyFCrDNS(remoteAddress)"
|
|
||||||
```
|
|
||||||
|
|
||||||
Here is another example that allows requests from Telegram:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
- name: telegrambot
|
|
||||||
action: ALLOW
|
|
||||||
expression:
|
|
||||||
all:
|
|
||||||
- userAgent.matches("TelegramBot")
|
|
||||||
- verifyFCrDNS(remoteAddress, "ptr\\.telegram\\.org$")
|
|
||||||
```
|
|
||||||
|
|
||||||
## Life advice
|
|
||||||
|
|
||||||
Expressions are very powerful. This is a benefit and a burden. If you are not careful with your expression targeting, you will be liable to get yourself into trouble. If you are at all in doubt, throw a `CHALLENGE` over a `DENY`. Legitimate users can easily work around a `CHALLENGE` result with a [proof of work challenge](../../design/why-proof-of-work.mdx). Bots are less likely to be able to do this.
|
|
||||||
|
|
@ -1,133 +0,0 @@
|
||||||
# Importing configuration rules
|
|
||||||
|
|
||||||
import Tabs from "@theme/Tabs";
|
|
||||||
import TabItem from "@theme/TabItem";
|
|
||||||
|
|
||||||
Anubis has the ability to let you import snippets of configuration into the main configuration file. This allows you to break up your config into smaller parts that get logically assembled into one big file.
|
|
||||||
|
|
||||||
EG:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
bots:
|
|
||||||
# Pathological bots to deny
|
|
||||||
- # This correlates to data/bots/ai-catchall.yaml in the source tree
|
|
||||||
import: (data)/bots/ai-catchall.yaml
|
|
||||||
- import: (data)/bots/cloudflare-workers.yaml
|
|
||||||
# Import all the rules in the default configuration
|
|
||||||
- import: (data)/meta/default-config.yaml
|
|
||||||
```
|
|
||||||
|
|
||||||
Of note, a bot rule can either have inline bot configuration or import a bot config snippet. You cannot do both in a single bot rule.
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
bots:
|
|
||||||
- import: (data)/bots/ai-catchall.yaml
|
|
||||||
name: generic-browser
|
|
||||||
user_agent_regex: >
|
|
||||||
Mozilla|Opera
|
|
||||||
action: CHALLENGE
|
|
||||||
```
|
|
||||||
|
|
||||||
This will return an error like this:
|
|
||||||
|
|
||||||
```text
|
|
||||||
config is not valid:
|
|
||||||
config.BotOrImport: rule definition is invalid, you must set either bot rules or an import statement, not both
|
|
||||||
```
|
|
||||||
|
|
||||||
Paths can either be prefixed with `(data)` to import from the [the data folder in the Anubis source tree](https://github.com/TecharoHQ/anubis/tree/main/data) or anywhere on the filesystem. If you don't have access to the Anubis source tree, check /usr/share/docs/anubis/data or in the tarball you extracted Anubis from.
|
|
||||||
|
|
||||||
## Importing the default configuration
|
|
||||||
|
|
||||||
If you want to base your configuration off of the default configuration, import `(data)/meta/default-config.yaml`:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
bots:
|
|
||||||
- import: (data)/meta/default-config.yaml
|
|
||||||
# Write your rules here
|
|
||||||
```
|
|
||||||
|
|
||||||
This will keep your configuration up to date as Anubis adapts to emerging threats.
|
|
||||||
|
|
||||||
## How do I exempt most modern browsers from Anubis challenges?
|
|
||||||
|
|
||||||
If you want to exempt most modern browsers from Anubis challenges, import `(data)/common/acts-like-browser.yaml`:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
bots:
|
|
||||||
- import: (data)/meta/default-config.yaml
|
|
||||||
- import: (data)/common/acts-like-browser.yaml
|
|
||||||
# Write your rules here
|
|
||||||
```
|
|
||||||
|
|
||||||
These rules will allow traffic that "looks like" it's from a modern copy of Edge, Safari, Chrome, or Firefox. These rules used to be enabled by default, however user reports have suggested that AI scraper bots have adapted to conform to these rules to scrape without regard for the infrastructure they are attacking.
|
|
||||||
|
|
||||||
Use these rules at your own risk.
|
|
||||||
|
|
||||||
## Importing from imports
|
|
||||||
|
|
||||||
You can also import from an imported file in case you want to import an entire folder of rules at once.
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
bots:
|
|
||||||
- import: (data)/bots/_deny-pathological.yaml
|
|
||||||
```
|
|
||||||
|
|
||||||
This lets you import an entire ruleset at once:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
# (data)/bots/_deny-pathological.yaml
|
|
||||||
- import: (data)/bots/cloudflare-workers.yaml
|
|
||||||
- import: (data)/bots/headless-browsers.yaml
|
|
||||||
- import: (data)/bots/us-ai-scraper.yaml
|
|
||||||
```
|
|
||||||
|
|
||||||
Use this with care, you can easily get yourself into a state where Anubis recursively imports things for eternity if you are not careful. The best way to use this is to make a "root import" named `_everything.yaml` or `_allow-good.yaml` so they sort to the top. Name your meta-imports after the main verb they are enforcing so that you can glance at the configuration file and understand what it's doing.
|
|
||||||
|
|
||||||
## Writing snippets
|
|
||||||
|
|
||||||
Snippets can be written in either JSON or YAML, with a preference for YAML. When writing a snippet, write the bot rules you want directly at the top level of the file in a list.
|
|
||||||
|
|
||||||
Here is an example snippet that allows [IPv6 Unique Local Addresses](https://en.wikipedia.org/wiki/Unique_local_address) through Anubis:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
- name: ipv6-ula
|
|
||||||
action: ALLOW
|
|
||||||
remote_addresses:
|
|
||||||
- fc00::/7
|
|
||||||
```
|
|
||||||
|
|
||||||
## Extracting Anubis' embedded filesystem
|
|
||||||
|
|
||||||
You can always extract the list of rules embedded into the Anubis binary with this command:
|
|
||||||
|
|
||||||
```text
|
|
||||||
anubis --extract-resources=static
|
|
||||||
```
|
|
||||||
|
|
||||||
This will dump the contents of Anubis' embedded data to a new folder named `static`:
|
|
||||||
|
|
||||||
```text
|
|
||||||
static
|
|
||||||
├── apps
|
|
||||||
│ └── gitea-rss-feeds.yaml
|
|
||||||
├── botPolicies.json
|
|
||||||
├── botPolicies.yaml
|
|
||||||
├── bots
|
|
||||||
│ ├── ai-catchall.yaml
|
|
||||||
│ ├── cloudflare-workers.yaml
|
|
||||||
│ ├── headless-browsers.yaml
|
|
||||||
│ └── us-ai-scraper.yaml
|
|
||||||
├── common
|
|
||||||
│ ├── allow-private-addresses.yaml
|
|
||||||
│ └── keep-internet-working.yaml
|
|
||||||
└── crawlers
|
|
||||||
├── bingbot.yaml
|
|
||||||
├── duckduckbot.yaml
|
|
||||||
├── googlebot.yaml
|
|
||||||
├── internet-archive.yaml
|
|
||||||
├── kagibot.yaml
|
|
||||||
├── marginalia.yaml
|
|
||||||
├── mojeekbot.yaml
|
|
||||||
└── qwantbot.yaml
|
|
||||||
```
|
|
||||||
|
|
@ -1,70 +0,0 @@
|
||||||
# Imprint / Impressum configuration
|
|
||||||
|
|
||||||
Some jurisdictions (such as the European Union and specifically Germany) [must have contact information freely available](https://www.privacycompany.eu/blog/the-imprint-requirement-a-must-have-for-companies-from-outside-germany) on an imprint/impressum page. Anubis supports creating an Anubis-specific imprint page for your organization with the `impressum` block in your bot policy file. For example:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
impressum:
|
|
||||||
# Displayed at the bottom of every page rendered by Anubis.
|
|
||||||
footer: >-
|
|
||||||
This website is hosted by Techaro. If you have any complaints or notes
|
|
||||||
about the service, please contact
|
|
||||||
<a href="mailto:contact@techaro.lol">contact@techaro.lol</a> and we
|
|
||||||
will assist you as soon as possible.
|
|
||||||
|
|
||||||
# The imprint page that will be linked to at the footer of every Anubis page.
|
|
||||||
page:
|
|
||||||
# The HTML <title> of the page
|
|
||||||
title: Imprint and Privacy Policy
|
|
||||||
# The HTML contents of the page. The exact contents of this page can
|
|
||||||
# and will vary by locale. Please consult with a lawyer if you are not
|
|
||||||
# sure what to put here
|
|
||||||
body: >-
|
|
||||||
<p>Last updated: June 2025</p>
|
|
||||||
|
|
||||||
<h2>Information that is gathered from visitors</h2>
|
|
||||||
|
|
||||||
<p>In common with other websites, log files are stored on the web server saving details such as the visitor's IP address, browser type, referring page and time of visit.</p>
|
|
||||||
|
|
||||||
<p>Cookies may be used to remember visitor preferences when interacting with the website.</p>
|
|
||||||
|
|
||||||
<p>Where registration is required, the visitor's email and a username will be stored on the server.</p>
|
|
||||||
|
|
||||||
<!-- ... -->
|
|
||||||
```
|
|
||||||
|
|
||||||
If you are subscribed to and using [advanced classification features](../thoth.mdx), be sure to disclose the following:
|
|
||||||
|
|
||||||
```html
|
|
||||||
<h2>Techaro Anubis</h2>
|
|
||||||
|
|
||||||
<p>
|
|
||||||
This website uses a service called
|
|
||||||
<a href="https://anubis.techaro.lol">Anubis</a> by
|
|
||||||
<a href="https://techaro.lol">Techaro</a> to filter malicious traffic. Anubis
|
|
||||||
requires the use of browser cookies to ensure that web clients are running
|
|
||||||
conformant software. Anubis also may report the following data to Techaro to
|
|
||||||
improve service quality:
|
|
||||||
</p>
|
|
||||||
|
|
||||||
<ul>
|
|
||||||
<li>
|
|
||||||
IP address (for purposes of matching against geo-location and BGP autonomous
|
|
||||||
systems numbers), which is stored in-memory and not persisted to disk.
|
|
||||||
</li>
|
|
||||||
<li>
|
|
||||||
Unique browser fingerprints (such as HTTP request fingerprints and
|
|
||||||
encryption system fingerprints), which may be stored on Techaro's side for a
|
|
||||||
period of up to one month.
|
|
||||||
</li>
|
|
||||||
<li>
|
|
||||||
HTTP request metadata that may include things such as the User-Agent header
|
|
||||||
and other identifiers.
|
|
||||||
</li>
|
|
||||||
</ul>
|
|
||||||
|
|
||||||
<p>
|
|
||||||
This data is processed and stored for the legitimate interest of combatting
|
|
||||||
abusive web clients. This data is encrypted at rest as much as possible and is
|
|
||||||
only decrypted in memory for the purposes of fulfilling requests.
|
|
||||||
</p>
|
|
||||||
```
|
|
||||||
|
|
@ -1,95 +0,0 @@
|
||||||
---
|
|
||||||
id: open-graph
|
|
||||||
title: Open Graph Configuration
|
|
||||||
---
|
|
||||||
|
|
||||||
# Open Graph Configuration
|
|
||||||
|
|
||||||
This page provides detailed information on how to configure [Open Graph tag](https://ogp.me/) passthrough in Anubis. This enables social previews of resources protected by Anubis without having to exempt each scraper individually.
|
|
||||||
|
|
||||||
## Configuration Options
|
|
||||||
|
|
||||||
Open Graph settings are configured in the `openGraph` section of the [Policy File](../policies.mdx).
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
openGraph:
|
|
||||||
# Enables Open Graph passthrough
|
|
||||||
enabled: true
|
|
||||||
# Enables the use of the HTTP host in the cache key, this enables
|
|
||||||
# caching metadata for multiple http hosts at once.
|
|
||||||
considerHost: true
|
|
||||||
# How long cached OpenGraph metadata should last in memory
|
|
||||||
ttl: 24h
|
|
||||||
# If set, return these opengraph values instead of looking them up with
|
|
||||||
# the target service.
|
|
||||||
#
|
|
||||||
# Correlates to properties in https://ogp.me/
|
|
||||||
override:
|
|
||||||
# og:title is required, it is the title of the website
|
|
||||||
"og:title": "Techaro Anubis"
|
|
||||||
"og:description": >-
|
|
||||||
Anubis is a Web AI Firewall Utility that helps you fight the bots
|
|
||||||
away so that you can maintain uptime at work!
|
|
||||||
"description": >-
|
|
||||||
Anubis is a Web AI Firewall Utility that helps you fight the bots
|
|
||||||
away so that you can maintain uptime at work!
|
|
||||||
```
|
|
||||||
|
|
||||||
<details>
|
|
||||||
<summary>Configuration flags / envvars (old)</summary>
|
|
||||||
|
|
||||||
Open Graph passthrough used to be configured with configuration flags / environment variables. Reference to these settings are maintained for backwards compatibility's sake.
|
|
||||||
|
|
||||||
| Name | Description | Type | Default | Example |
|
|
||||||
| ------------------------ | --------------------------------------------------------- | -------- | ------- | ----------------------------- |
|
|
||||||
| `OG_PASSTHROUGH` | Enables or disables the Open Graph tag passthrough system | Boolean | `true` | `OG_PASSTHROUGH=true` |
|
|
||||||
| `OG_EXPIRY_TIME` | Configurable cache expiration time for Open Graph tags | Duration | `24h` | `OG_EXPIRY_TIME=1h` |
|
|
||||||
| `OG_CACHE_CONSIDER_HOST` | Enables or disables the use of the host in the cache key | Boolean | `false` | `OG_CACHE_CONSIDER_HOST=true` |
|
|
||||||
|
|
||||||
</details>
|
|
||||||
|
|
||||||
## Usage
|
|
||||||
|
|
||||||
To configure Open Graph tags, set the following environment variables — directly, in an environment file, or as flags in your Anubis configuration:
|
|
||||||
|
|
||||||
```sh
|
|
||||||
export OG_PASSTHROUGH=true
|
|
||||||
export OG_EXPIRY_TIME=1h
|
|
||||||
export OG_CACHE_CONSIDER_HOST=false
|
|
||||||
```
|
|
||||||
|
|
||||||
## Implementation Details
|
|
||||||
|
|
||||||
When `OG_PASSTHROUGH` is enabled, Anubis will:
|
|
||||||
|
|
||||||
1. Check a local cache for the requested URL's Open Graph tags.
|
|
||||||
2. If a cached entry exists and is still valid, return the cached tags.
|
|
||||||
3. If the cached entry is stale or not found, fetch the URL, parse the Open Graph tags, update the cache, and return the new tags.
|
|
||||||
|
|
||||||
The cache expiration time is controlled by `OG_EXPIRY_TIME`.
|
|
||||||
|
|
||||||
When `OG_CACHE_CONSIDER_HOST` is enabled, Anubis will include the host in the cache key for Open Graph tags. This ensures that tags are cached separately for different hosts.
|
|
||||||
|
|
||||||
## Example
|
|
||||||
|
|
||||||
Here is an example of how to configure Open Graph tags in your Anubis setup:
|
|
||||||
|
|
||||||
```sh
|
|
||||||
export OG_PASSTHROUGH=true
|
|
||||||
export OG_EXPIRY_TIME=1h
|
|
||||||
export OG_CACHE_CONSIDER_HOST=false
|
|
||||||
```
|
|
||||||
|
|
||||||
With these settings, Anubis will cache Open Graph tags for 1 hour and pass them through to the challenge page, not considering the host in the cache key.
|
|
||||||
|
|
||||||
## When to Enable `OG_CACHE_CONSIDER_HOST`
|
|
||||||
|
|
||||||
In most cases, you would want to keep `OG_CACHE_CONSIDER_HOST` set to `false` to avoid unnecessary cache fragmentation. However, there are some scenarios where enabling this option can be beneficial:
|
|
||||||
|
|
||||||
1. **Multi-Tenant Applications**: If you are running a multi-tenant application where different tenants are hosted on different subdomains, enabling `OG_CACHE_CONSIDER_HOST` ensures that the Open Graph tags are cached separately for each tenant. This prevents one tenant's Open Graph tags from being served to another tenant's users.
|
|
||||||
|
|
||||||
2. **Different Content for Different Hosts**: If your application serves different content based on the host, enabling `OG_CACHE_CONSIDER_HOST` ensures that the correct Open Graph tags are cached and served for each host. This is useful for applications that have different branding or content for different domains or subdomains.
|
|
||||||
|
|
||||||
3. **Security and Privacy Concerns**: In some cases, you may want to ensure that Open Graph tags are not shared between different hosts for security or privacy reasons. Enabling `OG_CACHE_CONSIDER_HOST` ensures that the tags are cached separately for each host, preventing any potential leakage of information between hosts.
|
|
||||||
|
|
||||||
For more information, refer to the [installation guide](../installation).
|
|
||||||
|
|
@ -1,97 +0,0 @@
|
||||||
---
|
|
||||||
title: Redirect Domain Configuration
|
|
||||||
---
|
|
||||||
|
|
||||||
import Tabs from "@theme/Tabs";
|
|
||||||
import TabItem from "@theme/TabItem";
|
|
||||||
|
|
||||||
Anubis has an HTTP redirect in the middle of its check validation logic. This redirect allows Anubis to set a cookie on validated requests so that users don't need to pass challenges on every page load.
|
|
||||||
|
|
||||||
This flow looks something like this:
|
|
||||||
|
|
||||||
```mermaid
|
|
||||||
sequenceDiagram
|
|
||||||
participant User
|
|
||||||
participant Challenge
|
|
||||||
participant Validation
|
|
||||||
participant Backend
|
|
||||||
|
|
||||||
User->>+Challenge: GET /
|
|
||||||
Challenge->>+User: Solve this challenge
|
|
||||||
User->>+Validation: Here's the solution, send me to /
|
|
||||||
Validation->>+User: Here's a cookie, go to /
|
|
||||||
User->>+Backend: GET /
|
|
||||||
```
|
|
||||||
|
|
||||||
However, in some cases a sufficiently dedicated attacker could trick a user into clicking on a validation link with a solution pre-filled out. For example:
|
|
||||||
|
|
||||||
```mermaid
|
|
||||||
sequenceDiagram
|
|
||||||
participant Hacker
|
|
||||||
participant User
|
|
||||||
participant Validation
|
|
||||||
participant Evil Site
|
|
||||||
|
|
||||||
Hacker->>+User: Click on example.org with this solution
|
|
||||||
User->>+Validation: Here's a solution, send me to evilsite.com
|
|
||||||
Validation->>+User: Here's a cookie, go to evilsite.com
|
|
||||||
User->>+Evil Site: GET evilsite.com
|
|
||||||
```
|
|
||||||
|
|
||||||
If this happens, Anubis will throw an error like this:
|
|
||||||
|
|
||||||
```text
|
|
||||||
Redirect domain not allowed
|
|
||||||
```
|
|
||||||
|
|
||||||
## Configuring allowed redirect domains
|
|
||||||
|
|
||||||
By default, Anubis may redirect to any domain, which could cause security issues in the unlikely case that an attacker passes a challenge for your browser and then tricks you into clicking a link to your domain.
|
|
||||||
One can restrict the domains that Anubis can redirect to when passing a challenge by setting up `REDIRECT_DOMAINS` environment variable.
|
|
||||||
If you need to set more than one domain, fill the environment variable with a comma-separated list of domain names.
|
|
||||||
There is also glob matching support. You can pass `*.bugs.techaro.lol` to allow redirecting to anything ending with `.bugs.techaro.lol`. There is a limit of 4 wildcards.
|
|
||||||
|
|
||||||
:::note
|
|
||||||
|
|
||||||
If you are hosting Anubis on a non-standard port (`https://example.com:8443`, `http://www.example.net:8080`, etc.), you must also include the port number here.
|
|
||||||
|
|
||||||
:::
|
|
||||||
|
|
||||||
<Tabs>
|
|
||||||
<TabItem value="env-file" label="Environment file" default>
|
|
||||||
|
|
||||||
```shell
|
|
||||||
# anubis.env
|
|
||||||
|
|
||||||
REDIRECT_DOMAINS="example.org,secretplans.example.org,*.test.example.org"
|
|
||||||
# ...
|
|
||||||
```
|
|
||||||
|
|
||||||
</TabItem>
|
|
||||||
<TabItem value="docker-compose" label="Docker Compose">
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
services:
|
|
||||||
anubis-nginx:
|
|
||||||
image: ghcr.io/techarohq/anubis:latest
|
|
||||||
environment:
|
|
||||||
REDIRECT_DOMAINS: "example.org,secretplans.example.org,*.test.example.org"
|
|
||||||
# ...
|
|
||||||
```
|
|
||||||
|
|
||||||
</TabItem>
|
|
||||||
<TabItem value="k8s" label="Kubernetes">
|
|
||||||
|
|
||||||
Inside your Deployment, StatefulSet, or Pod:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
- name: anubis
|
|
||||||
image: ghcr.io/techarohq/anubis:latest
|
|
||||||
env:
|
|
||||||
- name: REDIRECT_DOMAINS
|
|
||||||
value: "example.org,secretplans.example.org,*.test.example.org"
|
|
||||||
# ...
|
|
||||||
```
|
|
||||||
|
|
||||||
</TabItem>
|
|
||||||
</Tabs>
|
|
||||||
|
|
@ -1,223 +0,0 @@
|
||||||
---
|
|
||||||
title: Subrequest Authentication
|
|
||||||
---
|
|
||||||
|
|
||||||
import Tabs from "@theme/Tabs";
|
|
||||||
import TabItem from "@theme/TabItem";
|
|
||||||
|
|
||||||
Anubis can act in one of two modes:
|
|
||||||
|
|
||||||
1. Reverse proxy (the default): Anubis sits in the middle of all traffic and then will reverse proxy it to its destination. This is the moral equivalent of a middleware in your favorite web framework.
|
|
||||||
2. Subrequest authentication mode: your reverse proxy asks Anubis whether each incoming request should be allowed, and requests that don't pass muster are forwarded to Anubis for challenge processing. This is the equivalent of Anubis being a sidecar service.
|
|
||||||
|
|
||||||
:::note
|
|
||||||
|
|
||||||
Subrequest authentication requires changing the default policy because nginx interprets the default `DENY` status code `200` as successful authentication and allows the request.
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
status_codes:
|
|
||||||
CHALLENGE: 200
|
|
||||||
DENY: 403
|
|
||||||
```
|
|
||||||
|
|
||||||
[See policy definitions](../policies.mdx).
|
|
||||||
|
|
||||||
:::
|
|
||||||
|
|
||||||
## Nginx
|
|
||||||
|
|
||||||
Anubis can perform [subrequest authentication](https://docs.nginx.com/nginx/admin-guide/security-controls/configuring-subrequest-authentication/) with the `auth_request` module in Nginx. In order to set this up, keep the following things in mind:
|
|
||||||
|
|
||||||
The `TARGET` environment variable in Anubis must be set to a space, eg:
|
|
||||||
|
|
||||||
<Tabs>
|
|
||||||
<TabItem value="env-file" label="Environment file" default>
|
|
||||||
|
|
||||||
```shell
|
|
||||||
# anubis.env
|
|
||||||
|
|
||||||
TARGET=" "
|
|
||||||
# ...
|
|
||||||
```
|
|
||||||
|
|
||||||
</TabItem>
|
|
||||||
<TabItem value="docker-compose" label="Docker Compose">
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
services:
|
|
||||||
anubis-nginx:
|
|
||||||
image: ghcr.io/techarohq/anubis:latest
|
|
||||||
environment:
|
|
||||||
TARGET: " "
|
|
||||||
# ...
|
|
||||||
```
|
|
||||||
|
|
||||||
</TabItem>
|
|
||||||
<TabItem value="k8s" label="Kubernetes">
|
|
||||||
|
|
||||||
Inside your Deployment, StatefulSet, or Pod:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
- name: anubis
|
|
||||||
image: ghcr.io/techarohq/anubis:latest
|
|
||||||
env:
|
|
||||||
- name: TARGET
|
|
||||||
value: " "
|
|
||||||
# ...
|
|
||||||
```
|
|
||||||
|
|
||||||
</TabItem>
|
|
||||||
</Tabs>
|
|
||||||
|
|
||||||
In order to configure this, you need to add the following location blocks to each server pointing to the service you want to protect:
|
|
||||||
|
|
||||||
```nginx
|
|
||||||
location /.within.website/ {
|
|
||||||
# Assumption: Anubis is running in the same network namespace as
|
|
||||||
# nginx on localhost TCP port 8923
|
|
||||||
proxy_pass http://127.0.0.1:8923;
|
|
||||||
proxy_set_header X-Real-IP $remote_addr;
|
|
||||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
|
||||||
proxy_set_header Host $http_host;
|
|
||||||
proxy_pass_request_body off;
|
|
||||||
proxy_set_header content-length "";
|
|
||||||
auth_request off;
|
|
||||||
}
|
|
||||||
|
|
||||||
location @redirectToAnubis {
|
|
||||||
return 307 /.within.website/?redir=$scheme://$host$request_uri;
|
|
||||||
auth_request off;
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
This sets up `/.within.website` to point to Anubis. Any requests that Anubis rejects or throws a challenge to will be sent here. This also sets up a named location `@redirectToAnubis` that will redirect any requests to Anubis for advanced processing.
|
|
||||||
|
|
||||||
Finally, add this to your root location block:
|
|
||||||
|
|
||||||
```nginx
|
|
||||||
location / {
|
|
||||||
# diff-add
|
|
||||||
auth_request /.within.website/x/cmd/anubis/api/check;
|
|
||||||
# diff-add
|
|
||||||
error_page 401 = @redirectToAnubis;
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
This will check all requests that don't match other locations with Anubis to ensure the client is genuine.
|
|
||||||
|
|
||||||
This will make every request get checked by Anubis before it hits your backend. If you have other locations that don't need Anubis to do validation, add the `auth_request off` directive to their blocks:
|
|
||||||
|
|
||||||
```nginx
|
|
||||||
location /secret {
|
|
||||||
# diff-add
|
|
||||||
auth_request off;
|
|
||||||
|
|
||||||
# ...
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
Here is a complete example of an Nginx server listening over TLS and pointing to Anubis:
|
|
||||||
|
|
||||||
<details>
|
|
||||||
<summary>Complete example</summary>
|
|
||||||
|
|
||||||
```nginx
|
|
||||||
# /etc/nginx/conf.d/nginx.local.cetacean.club.conf
|
|
||||||
|
|
||||||
server {
|
|
||||||
listen 443 ssl;
|
|
||||||
listen [::]:443 ssl;
|
|
||||||
server_name nginx.local.cetacean.club;
|
|
||||||
ssl_certificate /etc/techaro/pki/nginx.local.cetacean.club/tls.crt;
|
|
||||||
ssl_certificate_key /etc/techaro/pki/nginx.local.cetacean.club/tls.key;
|
|
||||||
ssl_protocols TLSv1.2 TLSv1.3;
|
|
||||||
ssl_ciphers HIGH:!aNULL:!MD5;
|
|
||||||
|
|
||||||
proxy_set_header X-Real-IP $remote_addr;
|
|
||||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
|
||||||
|
|
||||||
location /.within.website/ {
|
|
||||||
proxy_pass http://localhost:8923;
|
|
||||||
auth_request off;
|
|
||||||
}
|
|
||||||
|
|
||||||
location @redirectToAnubis {
|
|
||||||
return 307 /.within.website/?redir=$scheme://$host$request_uri;
|
|
||||||
auth_request off;
|
|
||||||
}
|
|
||||||
|
|
||||||
location / {
|
|
||||||
auth_request /.within.website/x/cmd/anubis/api/check;
|
|
||||||
error_page 401 = @redirectToAnubis;
|
|
||||||
root /usr/share/nginx/html;
|
|
||||||
index index.html index.htm;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
</details>
|
|
||||||
|
|
||||||
## Caddy
|
|
||||||
|
|
||||||
Anubis can be used with the [`forward_auth`](https://caddyserver.com/docs/caddyfile/directives/forward_auth) directive in Caddy.
|
|
||||||
|
|
||||||
First, the `TARGET` environment variable in Anubis must be set to a space, eg:
|
|
||||||
|
|
||||||
<Tabs>
|
|
||||||
<TabItem value="env-file" label="Environment file" default>
|
|
||||||
|
|
||||||
```shell
|
|
||||||
# anubis.env
|
|
||||||
|
|
||||||
TARGET=" "
|
|
||||||
# ...
|
|
||||||
```
|
|
||||||
|
|
||||||
</TabItem>
|
|
||||||
<TabItem value="docker-compose" label="Docker Compose">
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
services:
|
|
||||||
anubis-caddy:
|
|
||||||
image: ghcr.io/techarohq/anubis:latest
|
|
||||||
environment:
|
|
||||||
TARGET: " "
|
|
||||||
# ...
|
|
||||||
```
|
|
||||||
|
|
||||||
</TabItem>
|
|
||||||
<TabItem value="k8s" label="Kubernetes">
|
|
||||||
|
|
||||||
Inside your Deployment, StatefulSet, or Pod:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
- name: anubis
|
|
||||||
image: ghcr.io/techarohq/anubis:latest
|
|
||||||
env:
|
|
||||||
- name: TARGET
|
|
||||||
value: " "
|
|
||||||
# ...
|
|
||||||
```
|
|
||||||
|
|
||||||
</TabItem>
|
|
||||||
</Tabs>
|
|
||||||
|
|
||||||
Then configure the necessary directives in your site block:
|
|
||||||
|
|
||||||
```caddy
|
|
||||||
route {
|
|
||||||
# Assumption: Anubis is running in the same network namespace as
|
|
||||||
# caddy on localhost TCP port 8923
|
|
||||||
reverse_proxy /.within.website/* 127.0.0.1:8923
|
|
||||||
forward_auth 127.0.0.1:8923 {
|
|
||||||
uri /.within.website/x/cmd/anubis/api/check
|
|
||||||
trusted_proxies private_ranges
|
|
||||||
@unauthorized status 401
|
|
||||||
handle_response @unauthorized {
|
|
||||||
redir * /.within.website/?redir={uri} 307
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
If you want to use this for multiple sites, you can create a [snippet](https://caddyserver.com/docs/caddyfile/concepts#snippets) and import it in multiple site blocks.
|
|
||||||
|
|
@ -1,136 +0,0 @@
|
||||||
# Weight Threshold Configuration
|
|
||||||
|
|
||||||
Anubis offers the ability to assign "weight" to requests. This is a custom level of suspicion that rules can add to or remove from. For example, here's how you assign 10 weight points to anything that might be a browser:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
# botPolicies.yaml
|
|
||||||
|
|
||||||
bots:
|
|
||||||
- name: generic-browser
|
|
||||||
user_agent_regex: >-
|
|
||||||
Mozilla|Opera
|
|
||||||
action: WEIGH
|
|
||||||
weight:
|
|
||||||
adjust: 10
|
|
||||||
```
|
|
||||||
|
|
||||||
Thresholds let you take this per-request weight value and take actions in response to it. Thresholds are defined alongside your bot configuration in `botPolicies.yaml`.
|
|
||||||
|
|
||||||
:::note
|
|
||||||
|
|
||||||
Thresholds DO NOT apply when a request matches a bot rule with the CHALLENGE action. Thresholds only apply when requests don't match any terminal bot rules.
|
|
||||||
|
|
||||||
:::
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
# botPolicies.yaml
|
|
||||||
|
|
||||||
bots: ...
|
|
||||||
|
|
||||||
thresholds:
|
|
||||||
- name: minimal-suspicion
|
|
||||||
expression: weight < 0
|
|
||||||
action: ALLOW
|
|
||||||
|
|
||||||
- name: mild-suspicion
|
|
||||||
expression:
|
|
||||||
all:
|
|
||||||
- weight >= 0
|
|
||||||
- weight < 10
|
|
||||||
action: CHALLENGE
|
|
||||||
challenge:
|
|
||||||
algorithm: metarefresh
|
|
||||||
difficulty: 1
|
|
||||||
|
|
||||||
- name: moderate-suspicion
|
|
||||||
expression:
|
|
||||||
all:
|
|
||||||
- weight >= 10
|
|
||||||
- weight < 20
|
|
||||||
action: CHALLENGE
|
|
||||||
challenge:
|
|
||||||
algorithm: fast
|
|
||||||
difficulty: 2
|
|
||||||
|
|
||||||
- name: extreme-suspicion
|
|
||||||
expression: weight >= 20
|
|
||||||
action: CHALLENGE
|
|
||||||
challenge:
|
|
||||||
algorithm: fast
|
|
||||||
difficulty: 4
|
|
||||||
```
|
|
||||||
|
|
||||||
This defines a suite of 4 thresholds:
|
|
||||||
|
|
||||||
1. If the request weight is less than zero, allow it through.
|
|
||||||
2. If the request weight is greater than or equal to zero, but less than ten: give it [a very lightweight challenge](./challenges/metarefresh.mdx).
|
|
||||||
3. If the request weight is greater than or equal to ten, but less than twenty: give it [a slightly heavier challenge](./challenges/proof-of-work.mdx).
|
|
||||||
4. Otherwise, give it [the heaviest challenge](./challenges/proof-of-work.mdx).
|
|
||||||
|
|
||||||
Thresholds can be configured with the following options:
|
|
||||||
|
|
||||||
<table>
|
|
||||||
<thead>
|
|
||||||
<tr>
|
|
||||||
<th>Name</th>
|
|
||||||
<th>Description</th>
|
|
||||||
<th>Example</th>
|
|
||||||
</tr>
|
|
||||||
</thead>
|
|
||||||
<tbody>
|
|
||||||
<tr>
|
|
||||||
<td>`name`</td>
|
|
||||||
<td>The human-readable name for this threshold.</td>
|
|
||||||
<td>
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
name: extreme-suspicion
|
|
||||||
```
|
|
||||||
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>`expression`</td>
|
|
||||||
<td>A [CEL](https://cel.dev/) expression taking the request weight and returning true or false</td>
|
|
||||||
<td>
|
|
||||||
|
|
||||||
To check if the request weight is less than zero:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
expression: weight < 0
|
|
||||||
```
|
|
||||||
|
|
||||||
To check if it's between 0 (inclusive) and 10 (exclusive):
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
expression:
|
|
||||||
all:
|
|
||||||
- weight >= 0
|
|
||||||
- weight < 10
|
|
||||||
```
|
|
||||||
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>`action`</td>
|
|
||||||
<td>The Anubis action to apply: `ALLOW`, `CHALLENGE`, or `DENY`</td>
|
|
||||||
<td>
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
action: ALLOW
|
|
||||||
```
|
|
||||||
|
|
||||||
If you set the CHALLENGE action, you must set challenge details:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
action: CHALLENGE
|
|
||||||
challenge:
|
|
||||||
algorithm: metarefresh
|
|
||||||
difficulty: 1
|
|
||||||
```
|
|
||||||
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
|
|
||||||
</tbody>
|
|
||||||
</table>
|
|
||||||
|
|
@ -1,92 +0,0 @@
|
||||||
---
|
|
||||||
title: Default allow behavior
|
|
||||||
---
|
|
||||||
|
|
||||||
import Tabs from "@theme/Tabs";
|
|
||||||
import TabItem from "@theme/TabItem";
|
|
||||||
|
|
||||||
# Default allow behavior
|
|
||||||
|
|
||||||
Anubis is designed to be as unintrusive as possible to your existing infrastructure.
|
|
||||||
|
|
||||||
By default, it allows all traffic unless a request matches a rule that explicitly denies or challenges it.
|
|
||||||
|
|
||||||
Only requests matching a DENY or CHALLENGE rule are blocked or challenged. All other requests are allowed. This is called "the implicit rule".
|
|
||||||
|
|
||||||
## Example: Minimal policy
|
|
||||||
|
|
||||||
If your policy only blocks a specific bot, all other requests will be allowed:
|
|
||||||
|
|
||||||
<Tabs>
|
|
||||||
<TabItem value="json" label="JSON" default>
|
|
||||||
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"bots": [
|
|
||||||
{
|
|
||||||
"name": "block-amazonbot",
|
|
||||||
"user_agent_regex": "Amazonbot",
|
|
||||||
"action": "DENY"
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
</TabItem>
|
|
||||||
<TabItem value="yaml" label="YAML">
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
- name: block-amazonbot
|
|
||||||
user_agent_regex: Amazonbot
|
|
||||||
action: DENY
|
|
||||||
```
|
|
||||||
|
|
||||||
</TabItem>
|
|
||||||
</Tabs>
|
|
||||||
|
|
||||||
## How to deny by default
|
|
||||||
|
|
||||||
If you want to deny all traffic except what you explicitly allow, add a catch-all deny rule at the end of your policy list. Make sure to add ALLOW rules for any traffic you want to permit before this rule.
|
|
||||||
|
|
||||||
<Tabs>
|
|
||||||
<TabItem value="json" label="JSON" default>
|
|
||||||
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"bots": [
|
|
||||||
{
|
|
||||||
"name": "allow-goodbot",
|
|
||||||
"user_agent_regex": "GoodBot",
|
|
||||||
"action": "ALLOW"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "catch-all-deny",
|
|
||||||
"path_regex": ".*",
|
|
||||||
"action": "DENY"
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
</TabItem>
|
|
||||||
<TabItem value="yaml" label="YAML">
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
- name: allow-goodbot
|
|
||||||
user_agent_regex: GoodBot
|
|
||||||
action: ALLOW
|
|
||||||
- name: catch-all-deny
|
|
||||||
path_regex: .*
|
|
||||||
action: DENY
|
|
||||||
```
|
|
||||||
|
|
||||||
</TabItem>
|
|
||||||
</Tabs>
|
|
||||||
|
|
||||||
## Final remarks
|
|
||||||
|
|
||||||
- Rules are evaluated in order; the first match wins.
|
|
||||||
- The implicit allow rule is always last and cannot be removed.
|
|
||||||
- Use your logs to monitor what traffic is being allowed by default.
|
|
||||||
|
|
||||||
See [Policy Definitions](./policies) for more details on writing rules.
|
|
||||||
|
|
@ -1,8 +0,0 @@
|
||||||
{
|
|
||||||
"label": "Environments",
|
|
||||||
"position": 20,
|
|
||||||
"link": {
|
|
||||||
"type": "generated-index",
|
|
||||||
"description": "Detailed information about individual environments (such as HTTP servers, platforms, etc.) Anubis is known to work with."
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
@ -1,145 +0,0 @@
|
||||||
# Apache
|
|
||||||
|
|
||||||
import Tabs from "@theme/Tabs";
|
|
||||||
import TabItem from "@theme/TabItem";
|
|
||||||
|
|
||||||
Anubis is intended to be a filter proxy. The way to integrate this is to break your configuration up into two parts: TLS termination and then HTTP routing. Consider this diagram:
|
|
||||||
|
|
||||||
```mermaid
|
|
||||||
---
|
|
||||||
title: Apache as tls terminator and HTTP router
|
|
||||||
---
|
|
||||||
|
|
||||||
flowchart LR
|
|
||||||
T(User Traffic)
|
|
||||||
subgraph Apache 2
|
|
||||||
TCP(TCP 80/443)
|
|
||||||
US(TCP 3001)
|
|
||||||
end
|
|
||||||
|
|
||||||
An(Anubis)
|
|
||||||
B(Backend)
|
|
||||||
|
|
||||||
T --> |TLS termination| TCP
|
|
||||||
TCP --> |Traffic filtering| An
|
|
||||||
An --> |Happy traffic| US
|
|
||||||
US --> |whatever you're doing| B
|
|
||||||
```
|
|
||||||
|
|
||||||
Effectively you have one trip through Apache to do TLS termination, a detour through Anubis for traffic scrubbing, and then a final hop directly to the backend. This final socket is what will do HTTP routing.
|
|
||||||
|
|
||||||
:::note
|
|
||||||
|
|
||||||
These examples assume that you are using a setup where your Apache configuration is made up of a bunch of files in `/etc/httpd/conf.d/*.conf`. This is not true for all deployments of Apache. If you are not in such an environment, append these snippets to your `/etc/httpd/conf/httpd.conf` file.
|
|
||||||
|
|
||||||
:::
|
|
||||||
|
|
||||||
## Configuration
|
|
||||||
|
|
||||||
Assuming you are protecting `anubistest.techaro.lol`, you need the following server configuration blocks:
|
|
||||||
|
|
||||||
1. A block on port 80 that forwards HTTP to HTTPS
|
|
||||||
2. A block on port 443 that terminates TLS and forwards to Anubis
|
|
||||||
3. A block on port 3001 that actually serves your websites
|
|
||||||
|
|
||||||
```text
|
|
||||||
# Plain HTTP redirect to HTTPS
|
|
||||||
<VirtualHost *:80>
|
|
||||||
ServerAdmin your@email.here
|
|
||||||
ServerName anubistest.techaro.lol
|
|
||||||
DocumentRoot /var/www/anubistest.techaro.lol
|
|
||||||
ErrorLog /var/log/httpd/anubistest.techaro.lol_error.log
|
|
||||||
CustomLog /var/log/httpd/anubistest.techaro.lol_access.log combined
|
|
||||||
RewriteEngine on
|
|
||||||
RewriteCond %{SERVER_NAME} =anubistest.techaro.lol
|
|
||||||
RewriteRule ^ https://%{SERVER_NAME}%{REQUEST_URI} [END,NE,R=permanent]
|
|
||||||
</VirtualHost>
|
|
||||||
|
|
||||||
# HTTPS listener that forwards to Anubis
|
|
||||||
<IfModule mod_proxy.c>
|
|
||||||
<VirtualHost *:443>
|
|
||||||
ServerAdmin your@email.here
|
|
||||||
ServerName anubistest.techaro.lol
|
|
||||||
DocumentRoot /var/www/anubistest.techaro.lol
|
|
||||||
ErrorLog /var/log/httpd/anubistest.techaro.lol_error.log
|
|
||||||
CustomLog /var/log/httpd/anubistest.techaro.lol_access.log combined
|
|
||||||
|
|
||||||
SSLCertificateFile /etc/letsencrypt/live/anubistest.techaro.lol/fullchain.pem
|
|
||||||
SSLCertificateKeyFile /etc/letsencrypt/live/anubistest.techaro.lol/privkey.pem
|
|
||||||
Include /etc/letsencrypt/options-ssl-apache.conf
|
|
||||||
|
|
||||||
# These headers need to be set or else Anubis will
|
|
||||||
# throw an "admin misconfiguration" error.
|
|
||||||
RequestHeader set "X-Real-Ip" expr=%{REMOTE_ADDR}
|
|
||||||
RequestHeader set X-Forwarded-Proto "https"
|
|
||||||
RequestHeader set "X-Http-Version" "%{SERVER_PROTOCOL}s"
|
|
||||||
|
|
||||||
ProxyPreserveHost On
|
|
||||||
|
|
||||||
ProxyRequests Off
|
|
||||||
ProxyVia Off
|
|
||||||
|
|
||||||
# Replace 9000 with the port Anubis listens on
|
|
||||||
ProxyPass / http://[::1]:9000/
|
|
||||||
ProxyPassReverse / http://[::1]:9000/
|
|
||||||
</VirtualHost>
|
|
||||||
</IfModule>
|
|
||||||
|
|
||||||
# Actual website config
|
|
||||||
<VirtualHost *:3001>
|
|
||||||
ServerAdmin your@email.here
|
|
||||||
ServerName anubistest.techaro.lol
|
|
||||||
DocumentRoot /var/www/anubistest.techaro.lol
|
|
||||||
ErrorLog /var/log/httpd/anubistest.techaro.lol_error.log
|
|
||||||
CustomLog /var/log/httpd/anubistest.techaro.lol_access.log combined
|
|
||||||
|
|
||||||
# Pass the remote IP to the proxied application instead of 127.0.0.1
|
|
||||||
# This requires mod_remoteip
|
|
||||||
RemoteIPHeader X-Real-IP
|
|
||||||
RemoteIPTrustedProxy 127.0.0.1/32
|
|
||||||
</VirtualHost>
|
|
||||||
```
|
|
||||||
|
|
||||||
Make sure to add a separate configuration file for the listener on port 3001:
|
|
||||||
|
|
||||||
```text
|
|
||||||
# /etc/httpd/conf.d/listener-3001.conf
|
|
||||||
|
|
||||||
Listen [::1]:3001
|
|
||||||
```
|
|
||||||
|
|
||||||
In case you are running an IPv4-only system, use the following configuration instead:
|
|
||||||
|
|
||||||
```text
|
|
||||||
# /etc/httpd/conf.d/listener-3001.conf
|
|
||||||
|
|
||||||
Listen 127.0.0.1:3001
|
|
||||||
```
|
|
||||||
|
|
||||||
This can be repeated for multiple sites. Anubis does not care about the HTTP `Host` header and will happily cope with multiple websites via the same instance.
|
|
||||||
|
|
||||||
Then reload your Apache config and load your website. You should see Anubis protecting your apps!
|
|
||||||
|
|
||||||
```text
|
|
||||||
sudo systemctl reload httpd.service
|
|
||||||
```
|
|
||||||
|
|
||||||
## Troubleshooting
|
|
||||||
|
|
||||||
Here are some answers to questions that came up during testing:
|
|
||||||
|
|
||||||
### I'm running on a Red Hat distribution and Apache is saying "service unavailable" for every page load
|
|
||||||
|
|
||||||
If you see a "Service unavailable" error on every page load and run a Red Hat derived distribution, you are missing a `selinux` setting. The exact command will be in a journalctl log message like this:
|
|
||||||
|
|
||||||
```text
|
|
||||||
***** Plugin catchall_boolean (89.3 confidence) suggests ******************
|
|
||||||
|
|
||||||
If you want to allow HTTPD scripts and modules to connect to the network using TCP.
|
|
||||||
Then you must tell SELinux about this by enabling the 'httpd_can_network_connect' boolean.
|
|
||||||
|
|
||||||
Do
|
|
||||||
setsebool -P httpd_can_network_connect 1
|
|
||||||
```
|
|
||||||
|
|
||||||
This will fix the error immediately.
|
|
||||||
|
|
@ -1,71 +0,0 @@
|
||||||
# Caddy
|
|
||||||
|
|
||||||
To use Anubis with Caddy, stick Anubis between Caddy and your backend. For example, consider this application setup:
|
|
||||||
|
|
||||||
```mermaid
|
|
||||||
---
|
|
||||||
title: Caddy with Anubis in the middle
|
|
||||||
---
|
|
||||||
|
|
||||||
flowchart LR
|
|
||||||
T(User Traffic)
|
|
||||||
TCP(TCP 80/443)
|
|
||||||
An(Anubis)
|
|
||||||
B(Backend)
|
|
||||||
Blocked
|
|
||||||
|
|
||||||
T --> TCP
|
|
||||||
TCP --> |Traffic filtering| An
|
|
||||||
An --> |Happy traffic| B
|
|
||||||
An --> |Malicious traffic| Blocked
|
|
||||||
```
|
|
||||||
|
|
||||||
Instead of your traffic going directly to your backend, it takes a detour through Anubis. Anubis filters out the "bad" traffic and passes the "good" traffic to the backend.
|
|
||||||
|
|
||||||
To set up Anubis with Docker compose and Caddy, start with a `docker-compose` configuration like this:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
services:
|
|
||||||
caddy:
|
|
||||||
image: caddy:2
|
|
||||||
ports:
|
|
||||||
- 80:80
|
|
||||||
- 443:443
|
|
||||||
- 443:443/udp
|
|
||||||
volumes:
|
|
||||||
- ./conf:/etc/caddy
|
|
||||||
- caddy_config:/config
|
|
||||||
- caddy_data:/data
|
|
||||||
|
|
||||||
anubis:
|
|
||||||
image: ghcr.io/techarohq/anubis:latest
|
|
||||||
pull_policy: always
|
|
||||||
environment:
|
|
||||||
BIND: ":3000"
|
|
||||||
TARGET: http://httpdebug:3000
|
|
||||||
|
|
||||||
httpdebug:
|
|
||||||
image: ghcr.io/xe/x/httpdebug
|
|
||||||
pull_policy: always
|
|
||||||
|
|
||||||
volumes:
|
|
||||||
caddy_data:
|
|
||||||
caddy_config:
|
|
||||||
```
|
|
||||||
|
|
||||||
And then put the following in `conf/Caddyfile`:
|
|
||||||
|
|
||||||
```Caddyfile
|
|
||||||
# conf/Caddyfile
|
|
||||||
|
|
||||||
yourdomain.example.com {
|
|
||||||
tls your@email.address
|
|
||||||
|
|
||||||
reverse_proxy http://anubis:3000 {
|
|
||||||
header_up X-Real-Ip {remote_host}
|
|
||||||
header_up X-Http-Version {http.request.proto}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
If you want to protect multiple services with Anubis, you will need to either start multiple instances of Anubis (Anubis requires less than 32 MB of ram on average) or set up a two-tier routing setup where TLS termination is done with one instance of Caddy and the actual routing to services is done with another instance of Caddy. See the [nginx](./nginx.mdx) or [Apache](./apache.mdx) documentation to get ideas on how you would do this.
|
|
||||||
|
|
@ -1,26 +0,0 @@
|
||||||
# Cloudflare
|
|
||||||
|
|
||||||
If you are using Cloudflare, you should configure your server to use `CF-Connecting-IP` as the source of the real client IP, and pass that address to Anubis as `X-Forwarded-For`. Read [Client IP Headers](../caveats-xff.mdx) for details.
|
|
||||||
|
|
||||||
Example configuration with Caddy:
|
|
||||||
|
|
||||||
```Caddyfile
|
|
||||||
{
|
|
||||||
servers {
|
|
||||||
# Cloudflare IP ranges from https://www.cloudflare.com/en-gb/ips/
|
|
||||||
trusted_proxies static 173.245.48.0/20 103.21.244.0/22 103.22.200.0/22 103.31.4.0/22 141.101.64.0/18 108.162.192.0/18 190.93.240.0/20 188.114.96.0/20 197.234.240.0/22 198.41.128.0/17 162.158.0.0/15 104.16.0.0/13 104.24.0.0/14 172.64.0.0/13 131.0.72.0/22 2400:cb00::/32 2606:4700::/32 2803:f800::/32 2405:b500::/32 2405:8100::/32 2a06:98c0::/29 2c0f:f248::/32
|
|
||||||
# Use CF-Connecting-IP to determine the client IP instead of XFF
|
|
||||||
# https://caddyserver.com/docs/caddyfile/options#client-ip-headers
|
|
||||||
client_ip_headers CF-Connecting-IP
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
example.com {
|
|
||||||
reverse_proxy http://anubis:3000 {
|
|
||||||
# Pass the client IP read from CF-Connecting-IP
|
|
||||||
header_up X-Forwarded-For {client_ip}
|
|
||||||
header_up X-Real-IP {client_ip}
|
|
||||||
header_up X-Http-Version {http.request.proto}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
@ -1,33 +0,0 @@
|
||||||
# Docker compose
|
|
||||||
|
|
||||||
Docker compose is typically used in concert with other load balancers such as [Apache](./apache.mdx) or [Nginx](./nginx.mdx). Below is a minimal example showing you how to set up an instance of Anubis listening on host port 8080 that points to a static website containing data in `./www`:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
services:
|
|
||||||
anubis:
|
|
||||||
image: ghcr.io/techarohq/anubis:latest
|
|
||||||
environment:
|
|
||||||
BIND: ":8080"
|
|
||||||
DIFFICULTY: "4"
|
|
||||||
METRICS_BIND: ":9090"
|
|
||||||
SERVE_ROBOTS_TXT: "true"
|
|
||||||
TARGET: "http://nginx"
|
|
||||||
POLICY_FNAME: "/data/cfg/botPolicy.yaml"
|
|
||||||
OG_PASSTHROUGH: "true"
|
|
||||||
OG_EXPIRY_TIME: "24h"
|
|
||||||
healthcheck:
|
|
||||||
test: ["CMD", "anubis", "--healthcheck"]
|
|
||||||
interval: 5s
|
|
||||||
timeout: 30s
|
|
||||||
retries: 5
|
|
||||||
start_period: 500ms
|
|
||||||
ports:
|
|
||||||
- 8080:8080
|
|
||||||
volumes:
|
|
||||||
- "./botPolicy.yaml:/data/cfg/botPolicy.yaml:ro"
|
|
||||||
|
|
||||||
nginx:
|
|
||||||
image: nginx
|
|
||||||
volumes:
|
|
||||||
- "./www:/usr/share/nginx/html"
|
|
||||||
```
|
|
||||||
|
|
@ -1,134 +0,0 @@
|
||||||
# Kubernetes
|
|
||||||
|
|
||||||
:::note
|
|
||||||
Leave the `PUBLIC_URL` environment variable unset in this sidecar/standalone setup. Setting it here makes redirect construction fail (`redir=null`).
|
|
||||||
:::
|
|
||||||
|
|
||||||
When setting up Anubis in Kubernetes, you want to make sure that you thread requests through Anubis kinda like this:
|
|
||||||
|
|
||||||
```mermaid
|
|
||||||
---
|
|
||||||
title: Anubis embedded into workload pods
|
|
||||||
---
|
|
||||||
|
|
||||||
flowchart LR
|
|
||||||
T(User Traffic)
|
|
||||||
|
|
||||||
IngressController(IngressController)
|
|
||||||
|
|
||||||
subgraph Service
|
|
||||||
AnPort(Anubis Port)
|
|
||||||
BPort(Backend Port)
|
|
||||||
end
|
|
||||||
|
|
||||||
subgraph Pod
|
|
||||||
An(Anubis)
|
|
||||||
B(Backend)
|
|
||||||
end
|
|
||||||
|
|
||||||
T --> IngressController
|
|
||||||
IngressController --> AnPort
|
|
||||||
AnPort --> An
|
|
||||||
An --> B
|
|
||||||
```
|
|
||||||
|
|
||||||
Anubis is lightweight enough that you should be able to have many instances of it running without many problems. If this is a concern for you, please check out [ingress-anubis](https://github.com/jaredallard/ingress-anubis?ref=anubis.techaro.lol).
|
|
||||||
|
|
||||||
This example makes the following assumptions:
|
|
||||||
|
|
||||||
- Your target service is listening on TCP port `5000`.
|
|
||||||
- Anubis will be listening on port `8080`.
|
|
||||||
|
|
||||||
Adjust these values as facts and circumstances demand.
|
|
||||||
|
|
||||||
Create a secret with the signing key Anubis should use for its responses:
|
|
||||||
|
|
||||||
```
|
|
||||||
kubectl create secret generic anubis-key \
|
|
||||||
--namespace default \
|
|
||||||
--from-literal=ED25519_PRIVATE_KEY_HEX=$(openssl rand -hex 32)
|
|
||||||
```
|
|
||||||
|
|
||||||
Attach Anubis to your Deployment:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
containers:
|
|
||||||
# ...
|
|
||||||
- name: anubis
|
|
||||||
image: ghcr.io/techarohq/anubis:latest
|
|
||||||
imagePullPolicy: Always
|
|
||||||
env:
|
|
||||||
- name: "BIND"
|
|
||||||
value: ":8080"
|
|
||||||
- name: "DIFFICULTY"
|
|
||||||
value: "4"
|
|
||||||
- name: ED25519_PRIVATE_KEY_HEX
|
|
||||||
valueFrom:
|
|
||||||
secretKeyRef:
|
|
||||||
name: anubis-key
|
|
||||||
key: ED25519_PRIVATE_KEY_HEX
|
|
||||||
- name: "METRICS_BIND"
|
|
||||||
value: ":9090"
|
|
||||||
- name: "SERVE_ROBOTS_TXT"
|
|
||||||
value: "true"
|
|
||||||
- name: "TARGET"
|
|
||||||
value: "http://localhost:5000"
|
|
||||||
- name: "OG_PASSTHROUGH"
|
|
||||||
value: "true"
|
|
||||||
- name: "OG_EXPIRY_TIME"
|
|
||||||
value: "24h"
|
|
||||||
resources:
|
|
||||||
limits:
|
|
||||||
cpu: 750m
|
|
||||||
memory: 256Mi
|
|
||||||
requests:
|
|
||||||
cpu: 250m
|
|
||||||
memory: 256Mi
|
|
||||||
securityContext:
|
|
||||||
runAsUser: 1000
|
|
||||||
runAsGroup: 1000
|
|
||||||
runAsNonRoot: true
|
|
||||||
allowPrivilegeEscalation: false
|
|
||||||
capabilities:
|
|
||||||
drop:
|
|
||||||
- ALL
|
|
||||||
seccompProfile:
|
|
||||||
type: RuntimeDefault
|
|
||||||
|
|
||||||
```
|
|
||||||
|
|
||||||
|
|
||||||
Then add a Service entry for Anubis:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
# ...
|
|
||||||
spec:
|
|
||||||
ports:
|
|
||||||
# diff-add
|
|
||||||
- protocol: TCP
|
|
||||||
# diff-add
|
|
||||||
port: 8080
|
|
||||||
# diff-add
|
|
||||||
targetPort: 8080
|
|
||||||
# diff-add
|
|
||||||
name: anubis
|
|
||||||
```
|
|
||||||
|
|
||||||
Then point your Ingress to the Anubis port:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
rules:
|
|
||||||
- host: git.xeserv.us
|
|
||||||
http:
|
|
||||||
paths:
|
|
||||||
- pathType: Prefix
|
|
||||||
path: "/"
|
|
||||||
backend:
|
|
||||||
service:
|
|
||||||
name: git
|
|
||||||
port:
|
|
||||||
# diff-remove
|
|
||||||
name: http
|
|
||||||
# diff-add
|
|
||||||
name: anubis
|
|
||||||
```
|
|
||||||
|
|
@ -1,78 +0,0 @@
|
||||||
# Nginx
|
|
||||||
|
|
||||||
import CodeBlock from "@theme/CodeBlock";
|
|
||||||
|
|
||||||
Anubis is intended to be a filter proxy. The way to integrate this with nginx is to break your configuration up into two parts: TLS termination and then HTTP routing. Consider this diagram:
|
|
||||||
|
|
||||||
```mermaid
|
|
||||||
---
|
|
||||||
title: Nginx as tls terminator and HTTP router
|
|
||||||
---
|
|
||||||
|
|
||||||
flowchart LR
|
|
||||||
T(User Traffic)
|
|
||||||
subgraph Nginx
|
|
||||||
TCP(TCP 80/443)
|
|
||||||
US(Unix Socket or
|
|
||||||
another TCP port)
|
|
||||||
end
|
|
||||||
|
|
||||||
An(Anubis)
|
|
||||||
B(Backend)
|
|
||||||
|
|
||||||
T --> |TLS termination| TCP
|
|
||||||
TCP --> |Traffic filtering| An
|
|
||||||
An --> |Happy traffic| US
|
|
||||||
US --> |whatever you're doing| B
|
|
||||||
```
|
|
||||||
|
|
||||||
Instead of your traffic going right from TLS termination into the backend, it takes a detour through Anubis. Anubis filters out the "bad" traffic and then passes the "good" traffic to another socket that Nginx has open. This final socket is what you will use to do HTTP routing.
|
|
||||||
|
|
||||||
Effectively, you have two roles for nginx: TLS termination (converting HTTPS to HTTP) and HTTP routing (distributing requests to the individual vhosts). This can stack with something like Apache in case you have a legacy deployment. Make sure you have the right [TLS certificates configured](https://code.kuederle.com/letsencrypt/) at the TLS termination level.
|
|
||||||
|
|
||||||
:::note
|
|
||||||
|
|
||||||
These examples assume that you are using a setup where your nginx configuration is made up of a bunch of files in `/etc/nginx/conf.d/*.conf`. This is not true for all deployments of nginx. If you are not in such an environment, append these snippets to your `/etc/nginx/nginx.conf` file.
|
|
||||||
|
|
||||||
:::
|
|
||||||
|
|
||||||
Assuming that we are protecting `anubistest.techaro.lol`, here's what the server configuration file would look like:
|
|
||||||
|
|
||||||
import anubisTest from "!!raw-loader!./nginx/server-anubistest-techaro-lol.conf";
|
|
||||||
|
|
||||||
<CodeBlock language="nginx">{anubisTest}</CodeBlock>
|
|
||||||
|
|
||||||
:::tip
|
|
||||||
|
|
||||||
You can copy the `location /` block into a separate file named something like `conf-anubis.inc` and then include it inline to other `server` blocks:
|
|
||||||
|
|
||||||
import anubisInclude from "!!raw-loader!./nginx/conf-anubis.inc";
|
|
||||||
|
|
||||||
<CodeBlock language="nginx">{anubisInclude}</CodeBlock>
|
|
||||||
|
|
||||||
Then in a server block:
|
|
||||||
|
|
||||||
<details>
|
|
||||||
<summary>Full nginx config</summary>
|
|
||||||
|
|
||||||
import mimiTecharoLol from "!!raw-loader!./nginx/server-mimi-techaro-lol.conf";
|
|
||||||
|
|
||||||
<CodeBlock language="nginx">{mimiTecharoLol}</CodeBlock>
|
|
||||||
|
|
||||||
</details>
|
|
||||||
|
|
||||||
:::
|
|
||||||
|
|
||||||
Create an upstream for Anubis.
|
|
||||||
|
|
||||||
import anubisUpstream from "!!raw-loader!./nginx/upstream-anubis.conf";
|
|
||||||
|
|
||||||
<CodeBlock language="nginx">{anubisUpstream}</CodeBlock>
|
|
||||||
|
|
||||||
This can be repeated for multiple sites. Anubis does not care about the HTTP `Host` header and will happily cope with multiple websites via the same instance.
|
|
||||||
|
|
||||||
Then reload your nginx config and load your website. You should see Anubis protecting your apps!
|
|
||||||
|
|
||||||
```text
|
|
||||||
sudo systemctl reload nginx.service
|
|
||||||
```
|
|
||||||
|
|
@ -1,8 +0,0 @@
|
||||||
# /etc/nginx/conf-anubis.inc
|
|
||||||
|
|
||||||
# Forward to anubis
|
|
||||||
location / {
|
|
||||||
proxy_set_header Host $host;
|
|
||||||
proxy_set_header X-Real-IP $remote_addr;
|
|
||||||
proxy_pass http://anubis;
|
|
||||||
}
|
|
||||||
|
|
@ -1,50 +0,0 @@
|
||||||
# /etc/nginx/conf.d/server-anubistest-techaro-lol.conf
|
|
||||||
|
|
||||||
# HTTP - Redirect all HTTP traffic to HTTPS
|
|
||||||
server {
|
|
||||||
listen 80;
|
|
||||||
listen [::]:80;
|
|
||||||
|
|
||||||
server_name anubistest.techaro.lol;
|
|
||||||
|
|
||||||
location / {
|
|
||||||
return 301 https://$host$request_uri;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
# TLS termination server, this will listen over TLS (https) and then
|
|
||||||
# proxy all traffic to the target via Anubis.
|
|
||||||
server {
|
|
||||||
# Listen on TCP port 443 with TLS (https) and HTTP/2
|
|
||||||
listen 443 ssl;
|
|
||||||
listen [::]:443 ssl;
|
|
||||||
http2 on;
|
|
||||||
|
|
||||||
location / {
|
|
||||||
proxy_set_header Host $host;
|
|
||||||
proxy_set_header X-Real-IP $remote_addr;
|
|
||||||
proxy_set_header X-Http-Version $server_protocol;
|
|
||||||
proxy_pass http://anubis;
|
|
||||||
}
|
|
||||||
|
|
||||||
server_name anubistest.techaro.lol;
|
|
||||||
|
|
||||||
ssl_certificate /path/to/your/certs/anubistest.techaro.lol.crt;
|
|
||||||
ssl_certificate_key /path/to/your/certs/anubistest.techaro.lol.key;
|
|
||||||
}
|
|
||||||
|
|
||||||
# Backend server, this is where your webapp should actually live.
|
|
||||||
server {
|
|
||||||
listen unix:/run/nginx/nginx.sock;
|
|
||||||
|
|
||||||
server_name anubistest.techaro.lol;
|
|
||||||
root "/srv/http/anubistest.techaro.lol";
|
|
||||||
index index.html;
|
|
||||||
|
|
||||||
# Get the visiting IP from the TLS termination server
|
|
||||||
set_real_ip_from unix:;
|
|
||||||
real_ip_header X-Real-IP;
|
|
||||||
|
|
||||||
# Your normal configuration can go here
|
|
||||||
# location .php { fastcgi...} etc.
|
|
||||||
}
|
|
||||||
|
|
@ -1,29 +0,0 @@
|
||||||
# /etc/nginx/conf.d/server-mimi-techaro-lol.conf
|
|
||||||
|
|
||||||
server {
|
|
||||||
# Listen on 443 with SSL
|
|
||||||
listen 443 ssl;
|
|
||||||
listen [::]:443 ssl;
|
|
||||||
http2 on;
|
|
||||||
|
|
||||||
# Slipstream via Anubis
|
|
||||||
include "conf-anubis.inc";
|
|
||||||
|
|
||||||
server_name mimi.techaro.lol;
|
|
||||||
|
|
||||||
ssl_certificate /path/to/your/certs/mimi.techaro.lol.crt;
|
|
||||||
ssl_certificate_key /path/to/your/certs/mimi.techaro.lol.key;
|
|
||||||
}
|
|
||||||
|
|
||||||
server {
|
|
||||||
listen unix:/run/nginx/nginx.sock;
|
|
||||||
|
|
||||||
server_name mimi.techaro.lol;
|
|
||||||
|
|
||||||
port_in_redirect off;
|
|
||||||
root "/srv/http/mimi.techaro.lol";
|
|
||||||
index index.html;
|
|
||||||
|
|
||||||
# Your normal configuration can go here
|
|
||||||
# location .php { fastcgi...} etc.
|
|
||||||
}
|
|
||||||
|
|
@ -1,152 +0,0 @@
|
||||||
---
|
|
||||||
id: traefik
|
|
||||||
title: Traefik
|
|
||||||
---
|
|
||||||
|
|
||||||
:::note
|
|
||||||
|
|
||||||
This only talks about integration through Compose,
|
|
||||||
but it also applies to docker cli options.
|
|
||||||
|
|
||||||
:::
|
|
||||||
|
|
||||||
In this example, we will use 4 Containers:
|
|
||||||
|
|
||||||
- `traefik` - the Traefik instance
|
|
||||||
- `anubis` - the Anubis instance
|
|
||||||
- `target` - our service to protect (`traefik/whoami` in this case)
|
|
||||||
- `target2` - a second service that isn't supposed to be protected (`traefik/whoami` in this case)
|
|
||||||
|
|
||||||
## Diagram of Flow
|
|
||||||
|
|
||||||
This is a small diagram depicting the flow.
|
|
||||||
Keep in mind that `8080` or `80` can be anything depending on your containers.
|
|
||||||
|
|
||||||
```mermaid
|
|
||||||
flowchart LR
|
|
||||||
user[User]
|
|
||||||
traefik[Traefik]
|
|
||||||
anubis[Anubis]
|
|
||||||
target[Target]
|
|
||||||
|
|
||||||
user-->|:443 - Requesting Service|traefik
|
|
||||||
traefik-->|:8080 - Check authorization to Anubis|anubis
|
|
||||||
anubis-->|redirect if failed|traefik
|
|
||||||
user-->|:8080 - make the challenge|traefik
|
|
||||||
anubis-->|redirect back to target|traefik
|
|
||||||
traefik-->|:80 - Passing to the target|target
|
|
||||||
```
|
|
||||||
|
|
||||||
## Full Example Config
|
|
||||||
|
|
||||||
This example contains three services: Anubis, one service that is protected by it, and one that is not.
|
|
||||||
|
|
||||||
**compose.yml**
|
|
||||||
|
|
||||||
```yml
|
|
||||||
services:
|
|
||||||
traefik:
|
|
||||||
image: traefik:v3.3
|
|
||||||
ports:
|
|
||||||
- 80:80
|
|
||||||
- 443:443
|
|
||||||
volumes:
|
|
||||||
- /var/run/docker.sock:/var/run/docker.sock
|
|
||||||
- ./letsencrypt:/letsencrypt
|
|
||||||
- ./traefik.yml:/traefik.yml:ro
|
|
||||||
networks:
|
|
||||||
- traefik
|
|
||||||
labels:
|
|
||||||
# Enable Traefik
|
|
||||||
- traefik.enable=true
|
|
||||||
- traefik.docker.network=traefik
|
|
||||||
# Anubis middleware
|
|
||||||
- traefik.http.middlewares.anubis.forwardauth.address=http://anubis:8080/.within.website/x/cmd/anubis/api/check
|
|
||||||
# Redirect any HTTP to HTTPS
|
|
||||||
- traefik.http.middlewares.redirect-to-https.redirectscheme.scheme=https
|
|
||||||
- traefik.http.routers.web.rule=PathPrefix(`/`)
|
|
||||||
- traefik.http.routers.web.entrypoints=web
|
|
||||||
- traefik.http.routers.web.middlewares=redirect-to-https
|
|
||||||
- traefik.http.routers.web.tls=false
|
|
||||||
|
|
||||||
anubis:
|
|
||||||
image: ghcr.io/techarohq/anubis:main
|
|
||||||
environment:
|
|
||||||
# Telling Anubis, where to listen for Traefik
|
|
||||||
- BIND=:8080
|
|
||||||
# Telling Anubis to do redirect — ensure there is a space after '='
|
|
||||||
- 'TARGET= '
|
|
||||||
# Specifies which domains Anubis is allowed to redirect to.
|
|
||||||
- REDIRECT_DOMAINS=example.com
|
|
||||||
# Should be the full external URL for Anubis (including scheme)
|
|
||||||
- PUBLIC_URL=https://anubis.example.com
|
|
||||||
# Should match your domain for proper cookie scoping
|
|
||||||
- COOKIE_DOMAIN=example.com
|
|
||||||
networks:
|
|
||||||
- traefik
|
|
||||||
labels:
|
|
||||||
- traefik.enable=true # Enabling Traefik
|
|
||||||
- traefik.docker.network=traefik # Telling Traefik which network to use
|
|
||||||
- traefik.http.routers.anubis.rule=Host(`anubis.example.com`) # Only Matching Requests for example.com
|
|
||||||
- traefik.http.routers.anubis.entrypoints=websecure # Listen on HTTPS
|
|
||||||
- traefik.http.services.anubis.loadbalancer.server.port=8080 # Telling Traefik where to receive requests
|
|
||||||
- traefik.http.routers.anubis.service=anubis # Telling Traefik to use the above specified port
|
|
||||||
- traefik.http.routers.anubis.tls.certresolver=le # Telling Traefik to resolve a Cert for Anubis
|
|
||||||
|
|
||||||
# Protected by Anubis
|
|
||||||
target:
|
|
||||||
image: traefik/whoami:latest
|
|
||||||
networks:
|
|
||||||
- traefik
|
|
||||||
labels:
|
|
||||||
- traefik.enable=true # Enabling Traefik
|
|
||||||
- traefik.docker.network=traefik # Telling Traefik which network to use
|
|
||||||
- traefik.http.routers.target.rule=Host(`example.com`) # Only Matching Requests for example.com
|
|
||||||
- traefik.http.routers.target.entrypoints=websecure # Listening on the exclusive Anubis Network
|
|
||||||
- traefik.http.services.target.loadbalancer.server.port=80 # Telling Traefik where to receive requests
|
|
||||||
- traefik.http.routers.target.service=target # Telling Traefik to use the above specified port
|
|
||||||
- traefik.http.routers.target.tls.certresolver=le # Telling Traefik to resolve a Cert for Anubis
|
|
||||||
- traefik.http.routers.target.middlewares=anubis@docker # Use the Anubis middleware
|
|
||||||
|
|
||||||
# Not Protected by Anubis
|
|
||||||
target2:
|
|
||||||
image: traefik/whoami:latest
|
|
||||||
networks:
|
|
||||||
- traefik
|
|
||||||
labels:
|
|
||||||
- traefik.enable=true # Enabling Traefik
|
|
||||||
- traefik.docker.network=traefik # Telling Traefik which network to use
|
|
||||||
- traefik.http.routers.target2.rule=Host(`another.example.com`) # Only Matching Requests for example.com
|
|
||||||
- traefik.http.routers.target2.entrypoints=websecure # Listening on the exclusive Anubis Network
|
|
||||||
- traefik.http.services.target2.loadbalancer.server.port=80 # Telling Traefik where to receive requests
|
|
||||||
- traefik.http.routers.target2.service=target2 # Telling Traefik to use the above specified port
|
|
||||||
- traefik.http.routers.target2.tls.certresolver=le # Telling Traefik to resolve a Cert for this Target
|
|
||||||
|
|
||||||
networks:
|
|
||||||
traefik:
|
|
||||||
name: traefik
|
|
||||||
```
|
|
||||||
|
|
||||||
**traefik.yml**
|
|
||||||
|
|
||||||
```yml
|
|
||||||
api:
|
|
||||||
insecure: false # shouldn't be enabled in prod
|
|
||||||
|
|
||||||
entryPoints:
|
|
||||||
# Web
|
|
||||||
web:
|
|
||||||
address: ":80"
|
|
||||||
websecure:
|
|
||||||
address: ":443"
|
|
||||||
|
|
||||||
certificatesResolvers:
|
|
||||||
le:
|
|
||||||
acme:
|
|
||||||
tlsChallenge: {}
|
|
||||||
email: "admin@example.com"
|
|
||||||
storage: "/letsencrypt/acme.json"
|
|
||||||
|
|
||||||
providers:
|
|
||||||
docker: {}
|
|
||||||
```
|
|
||||||
|
|
@ -1,8 +0,0 @@
|
||||||
{
|
|
||||||
"label": "Frameworks",
|
|
||||||
"position": 30,
|
|
||||||
"link": {
|
|
||||||
"type": "generated-index",
|
|
||||||
"description": "Information about getting specific frameworks or tools working with Anubis."
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
@ -1,21 +0,0 @@
|
||||||
# HTMX
|
|
||||||
|
|
||||||
import Tabs from "@theme/Tabs";
|
|
||||||
import TabItem from "@theme/TabItem";
|
|
||||||
|
|
||||||
[HTMX](https://htmx.org) is a framework that enables you to write applications using hypertext as the engine of application state. This enables you to simplify your server-side code by having it return HTML instead of JSON. This can interfere with Anubis because Anubis challenge pages also return HTML.
|
|
||||||
|
|
||||||
To work around this, you can make a custom [expression](../configuration/expressions.mdx) rule that allows HTMX requests if the user has passed a challenge in the past:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
- name: allow-htmx-iff-already-passed-challenge
|
|
||||||
action: ALLOW
|
|
||||||
expression:
|
|
||||||
all:
|
|
||||||
- '"Cookie" in headers'
|
|
||||||
- 'headers["Cookie"].contains("anubis-auth")'
|
|
||||||
- '"Hx-Request" in headers'
|
|
||||||
- 'headers["Hx-Request"] == "true"'
|
|
||||||
```
|
|
||||||
|
|
||||||
This will reduce some security because it does not assert the validity of the Anubis auth cookie, however in trade it improves the experience for existing users.
|
|
||||||
|
|
@ -1,39 +0,0 @@
|
||||||
# WordPress
|
|
||||||
|
|
||||||
WordPress is the most popular blog engine on the planet.
|
|
||||||
|
|
||||||
## Using a multi-site setup with Anubis
|
|
||||||
|
|
||||||
If you have a multi-site setup where traffic goes through Anubis like this:
|
|
||||||
|
|
||||||
```mermaid
|
|
||||||
---
|
|
||||||
title: Apache as tls terminator and HTTP router
|
|
||||||
---
|
|
||||||
|
|
||||||
flowchart LR
|
|
||||||
T(User Traffic)
|
|
||||||
subgraph Apache 2
|
|
||||||
TCP(TCP 80/443)
|
|
||||||
US(TCP 3001)
|
|
||||||
end
|
|
||||||
|
|
||||||
An(Anubis)
|
|
||||||
B(Backend)
|
|
||||||
|
|
||||||
T --> |TLS termination| TCP
|
|
||||||
TCP --> |Traffic filtering| An
|
|
||||||
An --> |Happy traffic| US
|
|
||||||
US --> |whatever you're doing| B
|
|
||||||
```
|
|
||||||
|
|
||||||
WordPress may not realize that the underlying connection is being done over HTTPS. This could lead to a redirect loop in the `/wp-admin/` routes. In order to fix this, add the following to your `wp-config.php` file:
|
|
||||||
|
|
||||||
```php
|
|
||||||
if (isset($_SERVER['HTTP_X_FORWARDED_PROTO']) && $_SERVER['HTTP_X_FORWARDED_PROTO'] === 'https') {
|
|
||||||
$_SERVER['HTTPS'] = 'on';
|
|
||||||
$_SERVER['SERVER_PORT'] = 443;
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
This will make WordPress think that your connection is over HTTPS instead of plain HTTP.
|
|
||||||
|
|
@ -1,8 +0,0 @@
|
||||||
{
|
|
||||||
"label": "Honeypot",
|
|
||||||
"position": 40,
|
|
||||||
"link": {
|
|
||||||
"type": "generated-index",
|
|
||||||
"description": "Honeypot features in Anubis, allowing Anubis to passively detect malicious crawlers."
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
@ -1,40 +0,0 @@
|
||||||
---
|
|
||||||
title: Dataset poisoning
|
|
||||||
---
|
|
||||||
|
|
||||||
Anubis offers the ability to participate in [dataset poisoning](https://www.anthropic.com/research/small-samples-poison) attacks similar to what [iocaine](https://iocaine.madhouse-project.org/) and other similar tools offer. Currently this is in a preview state where a lot of details are hard-coded in order to test the viability of this approach.
|
|
||||||
|
|
||||||
In essence, when Anubis challenge and error pages are rendered they include a small bit of HTML code that browsers will ignore but scrapers will interpret as a link to ingest. This will then create a small forest of recursive nothing pages that are designed according to the following principles:
|
|
||||||
|
|
||||||
- These pages are _cheap_ to render, rendering in at most ten milliseconds on decently specced hardware.
|
|
||||||
- These pages are _vacuous_, meaning that they essentially are devoid of content such that a human would find it odd and click away, but a scraper would not be able to know that and would continue through the forest.
|
|
||||||
- These pages are _fairly large_ so that scrapers don't think that the pages are error pages or are otherwise devoid of content.
|
|
||||||
- These pages are _fully self-contained_ so that they load fast without incurring additional load from resource fetches.
|
|
||||||
|
|
||||||
In this limited preview state, Anubis generates pages using [spintax](https://outboundly.ai/blogs/what-is-spintax-and-how-to-use-it/). Spintax is a syntax that is used to create different variants of utterances for use in marketing messages and email spam that evades word filtering. In its current form, Anubis' dataset poisoning has AI generated spintax that generates vapid LinkedIn posts with some western occultism thrown in for good measure. This results in utterances like the following:
|
|
||||||
|
|
||||||
> There's a moment when visionaries are being called to realize that the work can't be reduced to optimization, but about resonance. We don't transform products by grinding endlessly, we do it by holding the vision. Because meaning can't be forced, it unfolds over time when culture are in integrity. This moment represents a fundamental reimagining in how we think about work. This isn't a framework, it's a lived truth that requires courage. When we get honest, we activate nonlinear growth that don't show up in dashboards, but redefine success anyway.
|
|
||||||
|
|
||||||
This should be fairly transparent to humans that this is pseudoprofound anti-content and is a signal to click away.
|
|
||||||
|
|
||||||
## Plans
|
|
||||||
|
|
||||||
Future versions of this feature will allow for more customization. In the near future this will be configurable via the following mechanisms:
|
|
||||||
|
|
||||||
- WebAssembly logic for customizing how the poisoning data is generated (with examples including the existing spintax method).
|
|
||||||
- Weight thresholds and logic for how they are interpreted by Anubis.
|
|
||||||
- Other configuration settings as facts and circumstances dictate.
|
|
||||||
|
|
||||||
## Implementation notes
|
|
||||||
|
|
||||||
In its current implementation, the Anubis dataset poisoning feature has the following flaws that may hinder production deployments:
|
|
||||||
|
|
||||||
- All Anubis instances use the same method for generating dataset poisoning information. This may be easy for malicious actors to detect and ignore.
|
|
||||||
- Anubis dataset poisoning routes are under the `/.within.website/x/cmd/anubis` URL hierarchy. This may be easy for malicious actors to detect and ignore.
|
|
||||||
|
|
||||||
Right now Anubis assigns 30 weight points if the following criteria are met:
|
|
||||||
|
|
||||||
- A client's User-Agent has been observed in the dataset poisoning maze at least 25 times.
|
|
||||||
- The network-clamped IP address (/24 for IPv4 and /48 for IPv6) has been observed in the dataset poisoning maze at least 25 times.
|
|
||||||
|
|
||||||
Additionally, when any given client by both User-Agent and network-clamped IP address has been observed, Anubis will emit log lines warning about it so that administrative action can be taken up to and including [filing abuse reports with the network owner](/blog/2025/file-abuse-reports).
|
|
||||||
|
|
@ -1,211 +0,0 @@
|
||||||
---
|
|
||||||
title: Setting up Anubis
|
|
||||||
---
|
|
||||||
|
|
||||||
import EnterpriseOnly from "@site/src/components/EnterpriseOnly";
|
|
||||||
import RandomKey from "@site/src/components/RandomKey";
|
|
||||||
|
|
||||||
export const EO = () => (
|
|
||||||
<>
|
|
||||||
<EnterpriseOnly link="./botstopper/" />
|
|
||||||
<div style={{ marginBottom: "0.5rem" }} />
|
|
||||||
</>
|
|
||||||
);
|
|
||||||
|
|
||||||
Anubis is meant to sit between your reverse proxy (such as Nginx or Caddy) and your target service. One instance of Anubis must be used per service you are protecting.
|
|
||||||
|
|
||||||
<center>
|
|
||||||
|
|
||||||
```mermaid
|
|
||||||
---
|
|
||||||
title: With Anubis installed
|
|
||||||
---
|
|
||||||
|
|
||||||
flowchart LR
|
|
||||||
LB(Load balancer /
|
|
||||||
TLS terminator)
|
|
||||||
Anubis(Anubis)
|
|
||||||
App(App)
|
|
||||||
|
|
||||||
LB --> Anubis --> App
|
|
||||||
```
|
|
||||||
|
|
||||||
</center>
|
|
||||||
|
|
||||||
## Docker image conventions
|
|
||||||
|
|
||||||
Anubis is shipped in the Docker repo [`ghcr.io/techarohq/anubis`](https://github.com/TecharoHQ/anubis/pkgs/container/anubis). The following tags exist for your convenience:
|
|
||||||
|
|
||||||
| Tag | Meaning |
|
|
||||||
| :------------------ | :--------------------------------------------------------------------------------------------------------------------------------- |
|
|
||||||
| `latest` | The latest [tagged release](https://github.com/TecharoHQ/anubis/releases), if you are in doubt, start here. |
|
|
||||||
| `v<version number>` | The Anubis image for [any given tagged release](https://github.com/TecharoHQ/anubis/tags) |
|
|
||||||
| `main` | The current build on the `main` branch. Only use this if you need the latest and greatest features as they are merged into `main`. |
|
|
||||||
|
|
||||||
The Docker image runs Anubis as user ID 1000 and group ID 1000. If you are mounting external volumes into Anubis' container, please be sure they are owned by or writable to this user/group.
|
|
||||||
|
|
||||||
Anubis has very minimal system requirements. I suspect that 128Mi of RAM may be sufficient for a large number of concurrent clients. Anubis may be a poor fit for apps that use WebSockets and maintain open connections, but I don't have enough real-world experience to know one way or another.
|
|
||||||
|
|
||||||
## Native packages
|
|
||||||
|
|
||||||
For more detailed information on installing Anubis with native packages, please read [the native install directions](./native-install.mdx).
|
|
||||||
|
|
||||||
## Configuration
|
|
||||||
|
|
||||||
Anubis is configurable via environment variables and [the policy file](./policies.mdx). Most settings are currently exposed with environment variables but they are being slowly moved over to the policy file.
|
|
||||||
|
|
||||||
### Configuration via the policy file
|
|
||||||
|
|
||||||
Currently the following settings are configurable via the policy file:
|
|
||||||
|
|
||||||
- [Bot policies](./policies.mdx)
|
|
||||||
- [Open Graph passthrough](./configuration/open-graph.mdx)
|
|
||||||
- [Weight thresholds](./configuration/thresholds.mdx)
|
|
||||||
|
|
||||||
### Environment variables
|
|
||||||
|
|
||||||
Anubis uses these environment variables for configuration:
|
|
||||||
|
|
||||||
| Environment Variable | Default value | Explanation |
|
|
||||||
|:-------------------------------|:------------------------|:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
|
||||||
| `ASSET_LOOKUP_HEADER` | unset | <EO /> If set, use the contents of this header in requests when looking up custom assets in `OVERLAY_FOLDER`. See [Header-based overlay dispatch](./botstopper.mdx#header-based-overlay-dispatch) for more details. |
|
|
||||||
| `BASE_PREFIX` | unset | If set, adds a global prefix to all Anubis endpoints (everything starting with `/.within.website/x/anubis/`). For example, setting this to `/myapp` would make Anubis accessible at `/myapp/` instead of `/`. This is useful when running Anubis behind a reverse proxy that routes based on path prefixes. |
|
|
||||||
| `BIND` | `:8923` | The network address that Anubis listens on. For `unix`, set this to a path: `/run/anubis/instance.sock` |
|
|
||||||
| `BIND_NETWORK` | `tcp` | The address family that Anubis listens on. Accepts `tcp`, `unix` and anything Go's [`net.Listen`](https://pkg.go.dev/net#Listen) supports. |
|
|
||||||
| `CHALLENGE_TITLE` | unset | <EO /> If set, override the translation stack to show a custom title for challenge pages such as "Making sure your connection is secure!". See [Customizing messages](./botstopper.mdx#customizing-messages) for more details. |
|
|
||||||
| `COOKIE_DOMAIN` | unset | The domain the Anubis challenge pass cookie should be set to. This should be set to the domain you bought from your registrar (EG: `techaro.lol` if your webapp is running on `anubis.techaro.lol`). See this [stackoverflow explanation of cookies](https://stackoverflow.com/a/1063760) for more information.<br/><br/>Note that unlike `REDIRECT_DOMAINS`, you should never include a port number in this variable. |
|
|
||||||
| `COOKIE_DYNAMIC_DOMAIN` | false | If set to true, automatically set cookie domain fields based on the hostname of the request. EG: if you are making a request to `anubis.techaro.lol`, the Anubis cookie will be valid for any subdomain of `techaro.lol`. |
|
|
||||||
| `COOKIE_EXPIRATION_TIME` | `168h` | The amount of time the authorization cookie is valid for. |
|
|
||||||
| `CUSTOM_REAL_IP_HEADER` | unset | If set, Anubis will read the client's real IP address from this header, and set it in `X-Real-IP` header. |
|
|
||||||
| `COOKIE_PARTITIONED` | `false` | If set to `true`, enables the [partitioned (CHIPS) flag](https://developers.google.com/privacy-sandbox/cookies/chips), meaning that Anubis inside an iframe has a different set of cookies than the domain hosting the iframe. |
|
|
||||||
| `COOKIE_PREFIX` | `anubis-cookie` | The prefix used for browser cookies created by Anubis. Useful for customization or avoiding conflicts with other applications. |
|
|
||||||
| `COOKIE_SECURE`                | `true`                  | If set to `true`, enables the [Secure flag](https://developer.mozilla.org/en-US/docs/Web/HTTP/Guides/Cookies#block_access_to_your_cookies), meaning that the cookies will only be transmitted over HTTPS. If Anubis is used in an insecure context (plain HTTP), this will need to be set to `false`. |
|
|
||||||
| `COOKIE_SAME_SITE` | `None` | Controls the cookie’s [`SameSite` attribute](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Set-Cookie#samesitesamesite-value). Allowed: `None`, `Lax`, `Strict`, `Default`. `None` permits cross-site use but modern browsers require it to be **Secure**—so if `COOKIE_SECURE=false` or you serve over plain HTTP, use `Lax` (recommended) or `Strict` or the cookie will be rejected. `Default` uses the Go runtime’s `SameSiteDefaultMode`. `None` will be downgraded to `Lax` automatically if cookie is set NOT to be secure. |
|
|
||||||
| `DIFFICULTY` | `4` | The difficulty of the challenge, or the number of leading zeroes that must be in successful responses. |
|
|
||||||
| `DIFFICULTY_IN_JWT` | `false` | If set to `true`, adds the `difficulty` field into JWT claims, which indicates the difficulty the token has been generated. This may be useful for statistics and debugging. |
|
|
||||||
| `ED25519_PRIVATE_KEY_HEX` | unset | The hex-encoded ed25519 private key used to sign Anubis responses. If this is not set, Anubis will generate one for you. This should be exactly 64 characters long. **Required when using persistent storage backends** (like bbolt) to ensure challenges survive service restarts. When running multiple instances on the same base domain, the key must be the same across all instances. See below for details. |
|
|
||||||
| `ED25519_PRIVATE_KEY_HEX_FILE` | unset | Path to a file containing the hex-encoded ed25519 private key. Only one of this or its sister option may be set. **Required when using persistent storage backends** (like bbolt) to ensure challenges survive service restarts. When running multiple instances on the same base domain, the key must be the same across all instances. |
|
|
||||||
| `ERROR_TITLE` | unset | <EO /> If set, override the translation stack to show a custom title for error pages such as "Something went wrong!". See [Customizing messages](./botstopper.mdx#customizing-messages) for more details. |
|
|
||||||
| `JWT_RESTRICTION_HEADER` | `X-Real-IP` | If set, the JWT is only valid if the current value of this header matches the value when the JWT was created. You can use it e.g. to restrict a JWT to the source IP of the user using `X-Real-IP`. |
|
|
||||||
| `METRICS_BIND` | `:9090` | The network address that Anubis serves Prometheus metrics on. See `BIND` for more information. |
|
|
||||||
| `METRICS_BIND_NETWORK` | `tcp` | The address family that the Anubis metrics server listens on. See `BIND_NETWORK` for more information. |
|
|
||||||
| `OG_EXPIRY_TIME` | `24h` | The expiration time for the Open Graph tag cache. Prefer using [the policy file](./configuration/open-graph.mdx) to configure the Open Graph subsystem. |
|
|
||||||
| `OG_PASSTHROUGH` | `false` | If set to `true`, Anubis will enable Open Graph tag passthrough. Prefer using [the policy file](./configuration/open-graph.mdx) to configure the Open Graph subsystem. |
|
|
||||||
| `OG_CACHE_CONSIDER_HOST` | `false` | If set to `true`, Anubis will consider the host in the Open Graph tag cache key. Prefer using [the policy file](./configuration/open-graph.mdx) to configure the Open Graph subsystem. |
|
|
||||||
| `OVERLAY_FOLDER` | unset | <EO /> If set, treat the given path as an [overlay folder](./botstopper.mdx#custom-images-and-css), allowing you to customize CSS, fonts, images, and add other assets to BotStopper deployments. |
|
|
||||||
| `POLICY_FNAME` | unset | The file containing [bot policy configuration](./policies.mdx). See the bot policy documentation for more details. If unset, the default bot policy configuration is used. |
|
|
||||||
| `PUBLIC_URL` | unset | The externally accessible URL for this Anubis instance, used for constructing redirect URLs (e.g., for Traefik forwardAuth). Leave it unset when Anubis terminates traffic directly (sidecar/standalone deployments) or redirect building will fail with `redir=null`. |
|
|
||||||
| `REDIRECT_DOMAINS` | unset | Comma-separated list of domain names that Anubis should allow redirects to when passing a challenge. See [Redirect Domain Configuration](./configuration/redirect-domains) for more details. |
|
|
||||||
| `SERVE_ROBOTS_TXT` | `false` | If set `true`, Anubis will serve a default `robots.txt` file that disallows all known AI scrapers by name and then additionally disallows every scraper. This is useful if facts and circumstances make it difficult to change the underlying service to serve such a `robots.txt` file. |
|
|
||||||
| `SLOG_LEVEL` | `INFO` | The log level for structured logging. Valid values are `DEBUG`, `INFO`, `WARN`, and `ERROR`. Set to `DEBUG` to see all requests, evaluations, and detailed diagnostic information. |
|
|
||||||
| `SOCKET_MODE` | `0770` | _Only used when at least one of the `*_BIND_NETWORK` variables are set to `unix`._ The socket mode (permissions) for Unix domain sockets. |
|
|
||||||
| `STRIP_BASE_PREFIX` | `false` | If set to `true`, strips the base prefix from request paths when forwarding to the target server. This is useful when your target service expects to receive requests without the base prefix. For example, with `BASE_PREFIX=/foo` and `STRIP_BASE_PREFIX=true`, a request to `/foo/bar` would be forwarded to the target as `/bar`. |
|
|
||||||
| `TARGET` | `http://localhost:3923` | The URL of the service that Anubis should forward valid requests to. Supports Unix domain sockets, set this to a URI like so: `unix:///path/to/socket.sock`. |
|
|
||||||
| `USE_REMOTE_ADDRESS`           | unset                   | If set to `true`, Anubis will take the client's IP from the network socket. For production deployments, it is expected that a reverse proxy is used in front of Anubis, which passes the client's IP using headers instead. |
|
|
||||||
| `USE_SIMPLIFIED_EXPLANATION` | false | If set to `true`, replaces the text when clicking "Why am I seeing this?" with a more simplified text for a non-tech-savvy audience. |
|
|
||||||
| `USE_TEMPLATES` | false | <EO /> If set to `true`, enable [custom HTML template support](./botstopper.mdx#custom-html-templates), allowing you to completely rewrite how BotStopper renders its HTML pages. |
|
|
||||||
| `WEBMASTER_EMAIL` | unset | If set, shows a contact email address when rendering error pages. This email address will be how users can get in contact with administrators. |
|
|
||||||
| `XFF_STRIP_PRIVATE` | `true` | If set, strip private addresses from `X-Forwarded-For` headers. To unset this, you must set `XFF_STRIP_PRIVATE=false` or `--xff-strip-private=false`. |
|
|
||||||
|
|
||||||
<details>
|
|
||||||
<summary>Advanced configuration settings</summary>
|
|
||||||
|
|
||||||
:::note
|
|
||||||
|
|
||||||
If you don't know or understand what these settings mean, ignore them. These are intended to work around very specific issues.
|
|
||||||
|
|
||||||
:::
|
|
||||||
|
|
||||||
| Environment Variable | Default value | Explanation |
|
|
||||||
| :---------------------------- | :------------ | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
|
||||||
| `FORCED_LANGUAGE` | unset | If set, forces Anubis to display challenge pages in the specified language instead of using the browser's Accept-Language header. Use ISO 639-1 language codes (e.g., `de` for German, `fr` for French). |
|
|
||||||
| `HS512_SECRET` | unset | Secret string for JWT HS512 algorithm. If this is not set, Anubis will use ED25519 as defined via the variables above. The longer the better; 128 chars should suffice. **Required when using persistent storage backends** (like bbolt) to ensure challenges survive service restarts. When running multiple instances on the same base domain, the key must be the same across all instances. |
|
|
||||||
| `TARGET_DISABLE_KEEPALIVE` | `false` | If `true`, disables HTTP keep-alive for connections to the target backend. Useful for backends that don't handle keep-alive properly. |
|
|
||||||
| `TARGET_HOST` | unset | If set, overrides the Host header in requests forwarded to `TARGET`. |
|
|
||||||
| `TARGET_INSECURE_SKIP_VERIFY` | `false` | If `true`, skip TLS certificate validation for targets that listen over `https`. If your backend does not listen over `https`, ignore this setting. |
|
|
||||||
| `TARGET_SNI` | unset | If set, TLS handshake hostname when forwarding requests to the `TARGET`. If set to auto, use Host header. |
|
|
||||||
|
|
||||||
</details>
|
|
||||||
|
|
||||||
For more detailed information on configuring Open Graph tags, please refer to the [Open Graph Configuration](./configuration/open-graph.mdx) page.
|
|
||||||
|
|
||||||
### Using Base Prefix
|
|
||||||
|
|
||||||
The `BASE_PREFIX` environment variable allows you to run Anubis behind a path prefix. This is useful when:
|
|
||||||
|
|
||||||
- You want to host multiple services on the same domain
|
|
||||||
- You're using a reverse proxy that routes based on path prefixes
|
|
||||||
- You need to integrate Anubis with an existing application structure
|
|
||||||
|
|
||||||
For example, if you set `BASE_PREFIX=/myapp`, Anubis will:
|
|
||||||
|
|
||||||
- Serve its challenge page at `/myapp/` instead of `/`
|
|
||||||
- Serve its API endpoints at `/myapp/.within.website/x/cmd/anubis/api/` instead of `/.within.website/x/cmd/anubis/api/`
|
|
||||||
- Serve its static assets at `/myapp/.within.website/x/cmd/anubis/` instead of `/.within.website/x/cmd/anubis/`
|
|
||||||
|
|
||||||
When using this feature with a reverse proxy:
|
|
||||||
|
|
||||||
1. Configure your reverse proxy to route requests for the specified path prefix to Anubis
|
|
||||||
2. Set the `BASE_PREFIX` environment variable to match the path prefix in your reverse proxy configuration
|
|
||||||
3. Ensure that your reverse proxy preserves the path when forwarding requests to Anubis
|
|
||||||
|
|
||||||
Example with Nginx:
|
|
||||||
|
|
||||||
```nginx
|
|
||||||
location /myapp/ {
|
|
||||||
proxy_pass http://anubis:8923/myapp;
|
|
||||||
proxy_set_header Host $host;
|
|
||||||
proxy_set_header X-Real-IP $remote_addr;
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
With corresponding Anubis configuration:
|
|
||||||
|
|
||||||
```
|
|
||||||
BASE_PREFIX=/myapp
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Stripping Base Prefix
|
|
||||||
|
|
||||||
If your target service doesn't expect to receive the base prefix in request paths, you can use the `STRIP_BASE_PREFIX` option:
|
|
||||||
|
|
||||||
```
|
|
||||||
BASE_PREFIX=/myapp
|
|
||||||
STRIP_BASE_PREFIX=true
|
|
||||||
```
|
|
||||||
|
|
||||||
With this configuration:
|
|
||||||
|
|
||||||
- A request to `/myapp/api/users` would be forwarded to your target service as `/api/users`
|
|
||||||
- A request to `/myapp/` would be forwarded as `/`
|
|
||||||
|
|
||||||
This is particularly useful when working with applications that weren't designed to handle path prefixes. However, note that if your target application generates absolute redirects or links (like `/login` instead of `./login`), these may break the subpath routing since they won't include the base prefix.
|
|
||||||
|
|
||||||
### Key generation
|
|
||||||
|
|
||||||
To generate an ed25519 private key, you can use this command:
|
|
||||||
|
|
||||||
```text
|
|
||||||
openssl rand -hex 32
|
|
||||||
```
|
|
||||||
|
|
||||||
Alternatively here is a key generated by your browser:
|
|
||||||
|
|
||||||
<RandomKey />
|
|
||||||
|
|
||||||
## Next steps
|
|
||||||
|
|
||||||
To get Anubis filtering your traffic, you need to make sure it's added to your HTTP load balancer or platform configuration. See the [environments category](/docs/category/environments) for detailed information on individual environments.
|
|
||||||
|
|
||||||
- [Apache](./environments/apache.mdx)
|
|
||||||
- [Caddy](./environments/caddy.mdx)
|
|
||||||
- [Docker compose](./environments/docker-compose.mdx)
|
|
||||||
- [Kubernetes](./environments/kubernetes.mdx)
|
|
||||||
- [Nginx](./environments/nginx.mdx)
|
|
||||||
- [Traefik](./environments/traefik.mdx)
|
|
||||||
|
|
||||||
:::note
|
|
||||||
|
|
||||||
Anubis loads its assets from `/.within.website/x/xess/` and `/.within.website/x/cmd/anubis`. If you do not reverse proxy these in your server config, Anubis won't work.
|
|
||||||
|
|
||||||
:::
|
|
||||||
|
|
@ -1,50 +0,0 @@
|
||||||
---
|
|
||||||
title: iplist2rule CLI tool
|
|
||||||
---
|
|
||||||
|
|
||||||
The `iplist2rule` tool converts IP blocklists into Anubis challenge policies. It reads common IP block list formats and generates the appropriate Anubis policy file for IP address filtering.
|
|
||||||
|
|
||||||
## Installation
|
|
||||||
|
|
||||||
Install directly with Go
|
|
||||||
|
|
||||||
```bash
|
|
||||||
go install github.com/TecharoHQ/anubis/utils/cmd/iplist2rule@latest
|
|
||||||
```
|
|
||||||
|
|
||||||
## Usage
|
|
||||||
|
|
||||||
Basic conversion from URL:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
iplist2rule https://raw.githubusercontent.com/7c/torfilter/refs/heads/main/lists/txt/torfilter-1m-flat.txt filter-tor.yaml
|
|
||||||
```
|
|
||||||
|
|
||||||
Explicitly allow every IP address on a list:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
iplist2rule --action ALLOW https://raw.githubusercontent.com/7c/torfilter/refs/heads/main/lists/txt/torfilter-1m-flat.txt filter-tor.yaml
|
|
||||||
```
|
|
||||||
|
|
||||||
Add weight to requests matching IP addresses on a list:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
iplist2rule --action WEIGH --weight 20 https://raw.githubusercontent.com/7c/torfilter/refs/heads/main/lists/txt/torfilter-1m-flat.txt filter-tor.yaml
|
|
||||||
```
|
|
||||||
|
|
||||||
## Options
|
|
||||||
|
|
||||||
| Flag | Description | Default |
|
|
||||||
| :------------ | :----------------------------------------------------------------------------------------------- | :-------------------------------- |
|
|
||||||
| `--action` | The Anubis action to take for the IP address in question, must be in ALL CAPS. | `DENY` (forbids traffic) |
|
|
||||||
| `--rule-name` | The name for the generated Anubis rule, should be in kebab-case. | (not set, inferred from filename) |
|
|
||||||
| `--weight` | When `--action=WEIGH`, how many weight points should be added or removed from matching requests? | 0 (not set) |
|
|
||||||
|
|
||||||
## Using the Generated Policy
|
|
||||||
|
|
||||||
Save the output and import it in your main policy file:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
bots:
|
|
||||||
- import: "./filter-tor.yaml"
|
|
||||||
```
|
|
||||||
|
|
@ -1,145 +0,0 @@
|
||||||
---
|
|
||||||
title: Installing Anubis with a native package
|
|
||||||
---
|
|
||||||
|
|
||||||
import Tabs from "@theme/Tabs";
|
|
||||||
import TabItem from "@theme/TabItem";
|
|
||||||
|
|
||||||
Download the package for your system from [the most recent release on GitHub](https://github.com/TecharoHQ/anubis/releases).
|
|
||||||
|
|
||||||
Install the Anubis package using your package manager of choice:
|
|
||||||
|
|
||||||
<Tabs>
|
|
||||||
<TabItem value="deb" label="Debian-based (apt)" default>
|
|
||||||
|
|
||||||
Install Anubis with `apt`:
|
|
||||||
|
|
||||||
```text
|
|
||||||
sudo apt install ./anubis-$VERSION-$ARCH.deb
|
|
||||||
```
|
|
||||||
|
|
||||||
</TabItem>
|
|
||||||
<TabItem value="tarball" label="Tarball">
|
|
||||||
|
|
||||||
Extract the tarball to a folder:
|
|
||||||
|
|
||||||
```text
|
|
||||||
tar zxf ./anubis-$VERSION-$OS-$ARCH.tar.gz
|
|
||||||
cd anubis-$VERSION-$OS-$ARCH
|
|
||||||
```
|
|
||||||
|
|
||||||
Install the binary to your system:
|
|
||||||
|
|
||||||
```text
|
|
||||||
sudo install -D ./bin/anubis /usr/local/bin
|
|
||||||
```
|
|
||||||
|
|
||||||
Edit the systemd unit to point to `/usr/local/bin/anubis` instead of `/usr/bin/anubis`:
|
|
||||||
|
|
||||||
```text
|
|
||||||
perl -pi -e 's$/usr/bin/anubis$/usr/local/bin/anubis$g' ./run/anubis@.service
|
|
||||||
```
|
|
||||||
|
|
||||||
Install the systemd unit to your system:
|
|
||||||
|
|
||||||
```text
|
|
||||||
sudo install -D ./run/anubis@.service /etc/systemd/system
|
|
||||||
```
|
|
||||||
|
|
||||||
Install the default configuration file to your system:
|
|
||||||
|
|
||||||
```text
|
|
||||||
sudo install -D ./run/default.env /etc/anubis/default.env
|
|
||||||
```
|
|
||||||
|
|
||||||
</TabItem>
|
|
||||||
<TabItem value="rpm" label="Red Hat-based (rpm)">
|
|
||||||
|
|
||||||
Install Anubis with `dnf`:
|
|
||||||
|
|
||||||
```text
|
|
||||||
sudo dnf -y install ./anubis-$VERSION.$ARCH.rpm
|
|
||||||
```
|
|
||||||
|
|
||||||
OR
|
|
||||||
|
|
||||||
Install Anubis with `yum`:
|
|
||||||
|
|
||||||
```text
|
|
||||||
sudo yum -y install ./anubis-$VERSION.$ARCH.rpm
|
|
||||||
```
|
|
||||||
|
|
||||||
OR
|
|
||||||
|
|
||||||
Install Anubis with `rpm`:
|
|
||||||
|
|
||||||
```
|
|
||||||
sudo rpm -ivh ./anubis-$VERSION.$ARCH.rpm
|
|
||||||
```
|
|
||||||
|
|
||||||
</TabItem>
|
|
||||||
<TabItem value="distro" label="Package managers">
|
|
||||||
|
|
||||||
Some Linux distributions offer Anubis [as a native package](https://repology.org/project/anubis-anti-crawler/versions). If you want to install Anubis from your distribution's package manager, consult any upstream documentation for how to install the package. It will either be named `anubis`, `www-apps/anubis` or `www/anubis`.
|
|
||||||
|
|
||||||
If you use a systemd-flavoured distribution, then follow the setup instructions for Debian or Red Hat Linux.
|
|
||||||
|
|
||||||
</TabItem>
|
|
||||||
</Tabs>
|
|
||||||
|
|
||||||
Once it's installed, make a copy of the default configuration file `/etc/anubis/default.env` based on which service you want to protect. For example, to protect a `gitea` server:
|
|
||||||
|
|
||||||
```text
|
|
||||||
sudo cp /etc/anubis/default.env /etc/anubis/gitea.env
|
|
||||||
```
|
|
||||||
|
|
||||||
Copy the default bot policies file to `/etc/anubis/gitea.botPolicies.yaml`:
|
|
||||||
|
|
||||||
<Tabs>
|
|
||||||
<TabItem value="debrpm" label="Debian or Red Hat" default>
|
|
||||||
|
|
||||||
```text
|
|
||||||
sudo cp /usr/share/doc/anubis/botPolicies.yaml /etc/anubis/gitea.botPolicies.yaml
|
|
||||||
```
|
|
||||||
|
|
||||||
</TabItem>
|
|
||||||
<TabItem value="tarball" label="Tarball">
|
|
||||||
|
|
||||||
```text
|
|
||||||
sudo cp ./doc/botPolicies.yaml /etc/anubis/gitea.botPolicies.yaml
|
|
||||||
```
|
|
||||||
|
|
||||||
</TabItem>
|
|
||||||
|
|
||||||
</Tabs>
|
|
||||||
|
|
||||||
Then open `gitea.env` in your favorite text editor and customize [the environment variables](./installation.mdx#environment-variables) as needed. Here's an example configuration for a Gitea server:
|
|
||||||
|
|
||||||
```sh
|
|
||||||
BIND=[::1]:8239
|
|
||||||
BIND_NETWORK=tcp
|
|
||||||
DIFFICULTY=4
|
|
||||||
METRICS_BIND=[::1]:8240
|
|
||||||
METRICS_BIND_NETWORK=tcp
|
|
||||||
POLICY_FNAME=/etc/anubis/gitea.botPolicies.yaml
|
|
||||||
TARGET=http://localhost:3000
|
|
||||||
```
|
|
||||||
|
|
||||||
Then start Anubis with `systemctl enable --now`:
|
|
||||||
|
|
||||||
```text
|
|
||||||
sudo systemctl enable --now anubis@gitea.service
|
|
||||||
```
|
|
||||||
|
|
||||||
Test to make sure it's running with `curl`:
|
|
||||||
|
|
||||||
```text
|
|
||||||
curl http://localhost:8240/metrics
|
|
||||||
```
|
|
||||||
|
|
||||||
Then set up your reverse proxy (Nginx, Caddy, etc.) to point to the Anubis port. Anubis will then reverse proxy all requests that meet the policies in `/etc/anubis/gitea.botPolicies.yaml` to the target service.
|
|
||||||
|
|
||||||
For more details on particular reverse proxies, see here:
|
|
||||||
|
|
||||||
- [Apache](./environments/apache.mdx)
|
|
||||||
- [Nginx](./environments/nginx.mdx)
|
|
||||||
|
|
@ -1,447 +0,0 @@
|
||||||
---
|
|
||||||
title: Policy Definitions
|
|
||||||
---
|
|
||||||
|
|
||||||
import Tabs from "@theme/Tabs";
|
|
||||||
import TabItem from "@theme/TabItem";
|
|
||||||
|
|
||||||
Out of the box, Anubis is pretty heavy-handed. It will aggressively challenge everything that might be a browser (usually indicated by having `Mozilla` in its user agent). However, some bots are smart enough to get past the challenge. Some things that look like bots may actually be fine (e.g., RSS readers). Some resources need to be visible no matter what. Some resources and remotes are fine to begin with.
|
|
||||||
|
|
||||||
Anubis lets you customize its configuration with a Policy File. This is a YAML document that spells out what actions Anubis should take when evaluating requests. The [default configuration](https://github.com/TecharoHQ/anubis/blob/main/data/botPolicies.yaml) explains everything, but this page contains an overview of everything you can do with it.
|
|
||||||
|
|
||||||
## Bot Policies
|
|
||||||
|
|
||||||
Bot policies let you customize the rules that Anubis uses to allow, deny, or challenge incoming requests. Currently you can set policies by the following matches:
|
|
||||||
|
|
||||||
- Request path
|
|
||||||
- User agent string
|
|
||||||
- HTTP request header values
|
|
||||||
- [Importing other configuration snippets](./configuration/import.mdx)
|
|
||||||
|
|
||||||
As of version v1.17.0 or later, configuration can be written in either JSON or YAML.
|
|
||||||
|
|
||||||
Here's an example rule that denies [Amazonbot](https://developer.amazon.com/en/amazonbot):
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
- name: amazonbot
|
|
||||||
user_agent_regex: Amazonbot
|
|
||||||
action: DENY
|
|
||||||
```
|
|
||||||
|
|
||||||
When this rule is evaluated, Anubis will check the `User-Agent` string of the request. If it contains `Amazonbot`, Anubis will send an error page to the user saying that access is denied, but in such a way that makes scrapers think they have correctly loaded the webpage.
|
|
||||||
|
|
||||||
Right now the only kinds of policies you can write are bot policies. Other forms of policies will be added in the future.
|
|
||||||
|
|
||||||
Here is a minimal policy file that will protect against most scraper bots:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
bots:
|
|
||||||
- name: cloudflare-workers
|
|
||||||
headers_regex:
|
|
||||||
CF-Worker: .*
|
|
||||||
action: DENY
|
|
||||||
- name: well-known
|
|
||||||
    path_regex: ^/\.well-known/.*$
|
|
||||||
action: ALLOW
|
|
||||||
- name: favicon
|
|
||||||
    path_regex: ^/favicon\.ico$
|
|
||||||
action: ALLOW
|
|
||||||
- name: robots-txt
|
|
||||||
    path_regex: ^/robots\.txt$
|
|
||||||
action: ALLOW
|
|
||||||
- name: generic-browser
|
|
||||||
user_agent_regex: Mozilla
|
|
||||||
action: CHALLENGE
|
|
||||||
```
|
|
||||||
|
|
||||||
This allows requests to [`/.well-known`](https://en.wikipedia.org/wiki/Well-known_URI), `/favicon.ico`, `/robots.txt`, and challenges any request that has the word `Mozilla` in its User-Agent string. The [default policy file](https://github.com/TecharoHQ/anubis/blob/main/data/botPolicies.yaml) is a bit more cohesive, but this should be more than enough for most users.
|
|
||||||
|
|
||||||
If no rules match the request, it is allowed through. For more details on this default behavior and its implications, see [Default allow behavior](./default-allow-behavior.mdx).
|
|
||||||
|
|
||||||
### Writing your own rules
|
|
||||||
|
|
||||||
There are four actions that can be returned from a rule:
|
|
||||||
|
|
||||||
| Action | Effects |
|
|
||||||
| :---------- | :---------------------------------------------------------------------------------------------------------------------------------- |
|
|
||||||
| `ALLOW` | Bypass all further checks and send the request to the backend. |
|
|
||||||
| `DENY` | Deny the request and send back an error message that scrapers think is a success. |
|
|
||||||
| `CHALLENGE` | Show a challenge page and/or validate that clients have passed a challenge. |
|
|
||||||
| `WEIGH` | Change the [request weight](#request-weight) for this request. See the [request weight](#request-weight) docs for more information. |
|
|
||||||
|
|
||||||
Name your rules in lower case using kebab-case. Rule names will be exposed in Prometheus metrics.
|
|
||||||
|
|
||||||
### Challenge configuration
|
|
||||||
|
|
||||||
Rules can also have their own challenge settings. These are customized using the `"challenge"` key. For example, here is a rule that makes challenges artificially hard for connections with the substring "bot" in their user agent:
|
|
||||||
|
|
||||||
This rule has been known to have a high false positive rate in testing. Please use this with care.
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
# Punish any bot with "bot" in the user-agent string
|
|
||||||
- name: generic-bot-catchall
|
|
||||||
user_agent_regex: (?i:bot|crawler)
|
|
||||||
action: CHALLENGE
|
|
||||||
challenge:
|
|
||||||
difficulty: 16 # impossible
|
|
||||||
algorithm: slow # intentionally waste CPU cycles and time
|
|
||||||
```
|
|
||||||
|
|
||||||
Challenges can be configured with these settings:
|
|
||||||
|
|
||||||
| Key | Example | Description |
|
|
||||||
| :----------- | :------- | :--------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
|
||||||
| `difficulty` | `4` | The challenge difficulty (number of leading zeros) for proof-of-work. See [Why does Anubis use Proof-of-Work?](/docs/design/why-proof-of-work) for more details. |
|
|
||||||
| `algorithm` | `"fast"` | The challenge method to use. See [the list of challenge methods](./configuration/challenges/) for more information. |
|
|
||||||
|
|
||||||
### Remote IP based filtering
|
|
||||||
|
|
||||||
The `remote_addresses` field of a Bot rule allows you to set the IP range that this ruleset applies to.
|
|
||||||
|
|
||||||
For example, you can allow a search engine to connect if and only if its IP address matches the ones they published:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
- name: qwantbot
|
|
||||||
user_agent_regex: \+https\://help\.qwant\.com/bot/
|
|
||||||
action: ALLOW
|
|
||||||
# https://help.qwant.com/wp-content/uploads/sites/2/2025/01/qwantbot.json
|
|
||||||
remote_addresses: ["91.242.162.0/24"]
|
|
||||||
```
|
|
||||||
|
|
||||||
This also works at an IP range level without any other checks:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
name: internal-network
|
|
||||||
action: ALLOW
|
|
||||||
remote_addresses:
|
|
||||||
- 100.64.0.0/10
|
|
||||||
```
|
|
||||||
|
|
||||||
## Imprint / Impressum support
|
|
||||||
|
|
||||||
Anubis has support for showing imprint / impressum information. This is defined in the `impressum` block of your configuration. See [Imprint / Impressum configuration](./configuration/impressum.mdx) for more information.
|
|
||||||
|
|
||||||
## Storage backends
|
|
||||||
|
|
||||||
Anubis needs to store temporary data in order to determine if a user is legitimate or not. Administrators should choose a storage backend based on their infrastructure needs. Each backend has its own advantages and disadvantages.
|
|
||||||
|
|
||||||
Anubis offers the following storage backends:
|
|
||||||
|
|
||||||
- [`memory`](#memory) -- A simple in-memory hashmap
|
|
||||||
- [`bbolt`](#bbolt) -- An on-disk key/value store backed by [bbolt](https://github.com/etcd-io/bbolt), an embedded key/value database for Go programs
|
|
||||||
- [`s3api`](#s3api) -- An object storage backend using the [S3 API](https://docs.aws.amazon.com/AmazonS3/latest/API/Type_API_Reference.html), compatible with any S3-compatible object storage service
- [`valkey`](#valkey) -- A remote in-memory key/value database backed by [Valkey](https://valkey.io/) (or another database compatible with the [RESP](https://redis.io/docs/latest/develop/reference/protocol-spec/) protocol)
|
|
||||||
|
|
||||||
If no storage backend is set in the policy file, Anubis will use the [`memory`](#memory) backend by default. This is equivalent to the following in the policy file:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
store:
|
|
||||||
backend: memory
|
|
||||||
parameters: {}
|
|
||||||
```
|
|
||||||
|
|
||||||
### `memory`
|
|
||||||
|
|
||||||
The memory backend is an in-memory cache. This backend works best if you don't use multiple instances of Anubis or don't have mutable storage in the environment you're running Anubis in.
|
|
||||||
|
|
||||||
| Should I use this backend? | Yes/no |
|
|
||||||
| :------------------------------------------------------------ | :----- |
|
|
||||||
| Are you running only one instance of Anubis for this service? | ✅ Yes |
|
|
||||||
| Does your service get a lot of traffic? | 🚫 No |
|
|
||||||
| Do you want to store data persistently when Anubis restarts? | 🚫 No |
|
|
||||||
| Do you run Anubis without mutable filesystem storage? | ✅ Yes |
|
|
||||||
|
|
||||||
The biggest downside is that there is not currently a limit to how much data can be stored in memory. This will be addressed at a later time.
|
|
||||||
|
|
||||||
:::warning
|
|
||||||
|
|
||||||
The in-memory backend exists mostly for validation, testing, and to ensure that the default configuration of Anubis works as expected. Do not use this persistently in production.
|
|
||||||
|
|
||||||
:::
|
|
||||||
|
|
||||||
#### Configuration
|
|
||||||
|
|
||||||
The memory backend does not require any configuration to use.
|
|
||||||
|
|
||||||
### `bbolt`
|
|
||||||
|
|
||||||
An on-disk storage layer powered by [bbolt](https://github.com/etcd-io/bbolt), a high performance embedded key/value database used by containerd, etcd, Kubernetes, and NATS. This backend works best if you're running Anubis on a single host and get a lot of traffic.
|
|
||||||
|
|
||||||
| Should I use this backend? | Yes/no |
|
|
||||||
| :------------------------------------------------------------ | :----- |
|
|
||||||
| Are you running only one instance of Anubis for this service? | ✅ Yes |
|
|
||||||
| Does your service get a lot of traffic? | ✅ Yes |
|
|
||||||
| Do you want to store data persistently when Anubis restarts? | ✅ Yes |
|
|
||||||
| Do you run Anubis without mutable filesystem storage? | 🚫 No |
|
|
||||||
|
|
||||||
When Anubis opens a bbolt database, it takes an exclusive lock on that database. Other instances of Anubis or other tools cannot view the bbolt database while it is locked by another instance of Anubis. If you run multiple instances of Anubis for different services, give each its own `bbolt` configuration.
|
|
||||||
|
|
||||||
#### Configuration
|
|
||||||
|
|
||||||
The `bbolt` backend takes the following configuration options:
|
|
||||||
|
|
||||||
| Name | Type | Example | Description |
|
|
||||||
| :----- | :--- | :----------------- | :--------------------------------------------------------------------------------------------------------------------------- |
|
|
||||||
| `path` | path | `/data/anubis.bdb` | The filesystem path for the Anubis bbolt database. Anubis requires write access to the folder containing the bbolt database. |
|
|
||||||
|
|
||||||
Example:
|
|
||||||
|
|
||||||
If you have persistent storage mounted to `/data`, then your store configuration could look like this:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
store:
|
|
||||||
backend: bbolt
|
|
||||||
parameters:
|
|
||||||
path: /data/anubis.bdb
|
|
||||||
```
|
|
||||||
|
|
||||||
### `s3api`
|
|
||||||
|
|
||||||
A network-backed storage layer backed by [object storage](https://en.wikipedia.org/wiki/Object_storage), specifically using the [S3 API](https://docs.aws.amazon.com/AmazonS3/latest/API/Type_API_Reference.html). This can be backed by any S3-compatible object storage service such as:
|
|
||||||
|
|
||||||
- [AWS S3](https://aws.amazon.com/s3/)
|
|
||||||
- [Cloudflare R2](https://www.cloudflare.com/developer-platform/products/r2/)
|
|
||||||
- [Hetzner Object Storage](https://www.hetzner.com/storage/object-storage/)
|
|
||||||
- [Minio](https://www.min.io/)
|
|
||||||
- [Tigris](https://www.tigrisdata.com/)
|
|
||||||
|
|
||||||
If you are using a cloud platform, they likely provide an S3 compatible object storage service. If not, you may want to choose [one of the fastest options](https://www.tigrisdata.com/blog/benchmark-small-objects/).
|
|
||||||
|
|
||||||
| Should I use this backend? | Yes/no |
|
|
||||||
| :------------------------------------------------------------ | :----- |
|
|
||||||
| Are you running only one instance of Anubis for this service? | 🚫 No |
|
|
||||||
| Does your service get a lot of traffic? | ✅ Yes |
|
|
||||||
| Do you want to store data persistently when Anubis restarts? | ✅ Yes |
|
|
||||||
| Do you run Anubis without mutable filesystem storage? | ✅ Yes |
|
|
||||||
|
|
||||||
:::note
|
|
||||||
|
|
||||||
Using this backend will cause a lot of S3 operations, at least one for creating challenges, one for invalidating challenges, one for updating challenges to prevent double-spends, and one for removing challenges.
|
|
||||||
|
|
||||||
:::
|
|
||||||
|
|
||||||
#### Configuration
|
|
||||||
|
|
||||||
The `s3api` backend takes the following configuration options:
|
|
||||||
|
|
||||||
| Name | Type | Example | Description |
|
|
||||||
| :----------- | :------ | :------------ | :------------------------------------------------------------------------------------------------------------------------------------------ |
|
|
||||||
| `bucketName` | string | `anubis-data` | (Required) The name of the dedicated bucket for Anubis to store information in. |
|
|
||||||
| `pathStyle` | boolean | `false` | If true, use path-style S3 API operations. Please consult your storage provider's documentation if you don't know what you should put here. |
|
|
||||||
|
|
||||||
:::note
|
|
||||||
|
|
||||||
You should probably enable a lifecycle expiration rule for buckets containing Anubis data. Here is an example policy:
|
|
||||||
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"Rules": [
|
|
||||||
{
|
|
||||||
"Status": "Enabled",
|
|
||||||
"Expiration": {
|
|
||||||
"Days": 7
|
|
||||||
}
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
Adjust this as facts and circumstances demand, but 7 days should be enough for anyone.
|
|
||||||
|
|
||||||
:::
|
|
||||||
|
|
||||||
Example:
|
|
||||||
|
|
||||||
Assuming your environment looks like this:
|
|
||||||
|
|
||||||
```sh
|
|
||||||
# All of the following are fake credentials that look like real ones.
|
|
||||||
AWS_ACCESS_KEY_ID=accordingToAllKnownRulesOfAviation
|
|
||||||
AWS_SECRET_ACCESS_KEY=thereIsNoWayABeeShouldBeAbleToFly
|
|
||||||
AWS_REGION=yow
|
|
||||||
AWS_ENDPOINT_URL_S3=https://yow.s3.probably-not-malware.lol
|
|
||||||
```
|
|
||||||
|
|
||||||
Then your configuration would look like this:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
store:
|
|
||||||
backend: s3api
|
|
||||||
parameters:
|
|
||||||
bucketName: techaro-prod-anubis
|
|
||||||
pathStyle: false
|
|
||||||
```
|
|
||||||
|
|
||||||
### `valkey`
|
|
||||||
|
|
||||||
[Valkey](https://valkey.io/) is an in-memory key/value store that clients access over the network. This allows multiple instances of Anubis to share information and does not require each instance of Anubis to have persistent filesystem storage.
|
|
||||||
|
|
||||||
:::note
|
|
||||||
|
|
||||||
You can also use [Redis™](http://redis.io/) with Anubis.
|
|
||||||
|
|
||||||
:::
|
|
||||||
|
|
||||||
This backend is ideal if you are running multiple instances of Anubis in a worker pool (e.g., Kubernetes Deployments with a copy of Anubis in each Pod).
|
|
||||||
|
|
||||||
| Should I use this backend? | Yes/no |
|
|
||||||
| :------------------------------------------------------------ | :----- |
|
|
||||||
| Are you running only one instance of Anubis for this service? | 🚫 No |
|
|
||||||
| Does your service get a lot of traffic? | ✅ Yes |
|
|
||||||
| Do you want to store data persistently when Anubis restarts? | ✅ Yes |
|
|
||||||
| Do you run Anubis without mutable filesystem storage? | ✅ Yes |
|
|
||||||
| Do you have Redis™ or Valkey installed? | ✅ Yes |
|
|
||||||
|
|
||||||
#### Configuration
|
|
||||||
|
|
||||||
The `valkey` backend takes the following configuration options:
|
|
||||||
|
|
||||||
| Name | Type | Example | Description |
|
|
||||||
| :--------- | :----- | :---------------------- | :------------------------------------------------------------------------------------------------------------------------------------------------ |
|
|
||||||
| `cluster` | bool | `false` | If true, use [Redis™ Clustering](https://redis.io/topics/cluster-spec) for storing Anubis data. |
|
|
||||||
| `sentinel` | object | `{}` | See [Redis™ Sentinel docs](#redis-sentinel) for more detail and examples |
|
|
||||||
| `url` | string | `redis://valkey:6379/0` | The URL for the instance of Redis™ or Valkey that Anubis should store data in. This is in the same format as `REDIS_URL` in many cloud providers. |
|
|
||||||
|
|
||||||
Example:
|
|
||||||
|
|
||||||
If you have an instance of Valkey running with the hostname `valkey.int.techaro.lol`, then your store configuration could look like this:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
store:
|
|
||||||
backend: valkey
|
|
||||||
parameters:
|
|
||||||
url: "redis://valkey.int.techaro.lol:6379/0"
|
|
||||||
```
|
|
||||||
|
|
||||||
This would have the Valkey client connect to host `valkey.int.techaro.lol` on port `6379` with database `0` (the default database).
|
|
||||||
|
|
||||||
#### Redis™ Sentinel
|
|
||||||
|
|
||||||
If you are using [Redis™ Sentinel](https://redis.io/docs/latest/operate/oss_and_stack/management/sentinel/) for a high availability setup, you need to configure the `sentinel` object. This object takes the following configuration options:
|
|
||||||
|
|
||||||
| Name | Type | Example | Description |
|
|
||||||
| :----------- | :----------------------- | :-------------------- | :-------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
|
||||||
| `addr` | string or list of string | `10.43.208.130:26379` | (Required) The host and port of the Redis™ Sentinel server. When possible, use DNS names for this. If you have multiple addresses, supply a list of them. |
|
|
||||||
| `clientName` | string | `Anubis` | The client name reported to Redis™ Sentinel. Set this if you want to track Anubis connections to your Redis™ Sentinel. |
|
|
||||||
| `masterName` | string | `mymaster` | (Required) The name of the master in the Redis™ Sentinel configuration. This is used to discover where to find client connection hosts/ports. |
|
|
||||||
| `username` | string | `azurediamond` | The username used to authenticate against the Redis™ Sentinel and Redis™ servers. |
|
|
||||||
| `password` | string | `hunter2` | The password used to authenticate against the Redis™ Sentinel and Redis™ servers. |
|
|
||||||
|
|
||||||
## Logging management
|
|
||||||
|
|
||||||
Anubis has very verbose logging out of the box. This is intentional and allows administrators to be sure that it is working merely by watching it work in real time. Some administrators may not appreciate this level of logging out of the box. As such, Anubis lets you customize details about how it logs data.
|
|
||||||
|
|
||||||
Anubis uses a practice called [structured logging](https://stackify.com/what-is-structured-logging-and-why-developers-need-it/) to emit log messages with key-value pair context. In order to make analyzing large amounts of log messages easier, Anubis encodes all logs in JSON. This allows you to use any tool that can parse JSON to perform analytics or monitor for issues.
|
|
||||||
|
|
||||||
Anubis exposes the following logging settings in the policy file:
|
|
||||||
|
|
||||||
| Name | Type | Example | Description |
|
|
||||||
| :----------- | :----------------------- | :-------------- | :--------------------------------------------------------------------------------------------------------------------------------------- |
|
|
||||||
| `level` | [log level](#log-levels) | `info` | The logging level threshold. Any logs that are at or above this threshold will be drained to the sink. Any other logs will be discarded. |
|
|
||||||
| `sink` | string | `stdio`, `file` | The sink where the logs drain to as they are being recorded in Anubis. |
|
|
||||||
| `parameters` | object | | Parameters for the given logging sink. This will vary based on the logging sink of choice. See below for more information. |
|
|
||||||
|
|
||||||
Anubis supports the following logging sinks:
|
|
||||||
|
|
||||||
1. `file`: logs are emitted to a file that is rotated based on size and age. Old log files are compressed with gzip to save space. This allows for better integration with users that decide to use legacy service managers (OpenRC, FreeBSD's init, etc).
|
|
||||||
2. `stdio`: logs are emitted to the standard error stream of the Anubis process. This allows runtimes such as Docker, Podman, Systemd, and Kubernetes to capture logs with their native logging subsystems without any additional configuration.
|
|
||||||
|
|
||||||
### Log levels
|
|
||||||
|
|
||||||
Anubis uses Go's [standard library `log/slog` package](https://pkg.go.dev/log/slog) to emit structured logs. By default, Anubis logs at the [Info level](https://pkg.go.dev/log/slog#Level), which is fairly verbose out of the box. Here are the possible logging levels in Anubis:
|
|
||||||
|
|
||||||
| Log level | Use in Anubis |
|
|
||||||
| :-------- | :-------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
|
||||||
| `DEBUG` | The raw unfiltered torrent of doom. Only use this if you are actively working on Anubis or have very good reasons to use it. |
|
|
||||||
| `INFO` | The default logging level, fairly verbose in order to make it easier for automation to parse. |
|
|
||||||
| `WARN` | A "more silent" logging level. Much less verbose. Some things that are now at the `info` level need to be moved up to the `warn` level in future patches. |
|
|
||||||
| `ERROR` | Only log error messages. |
|
|
||||||
|
|
||||||
Additionally, you can set a "slightly higher" log level if you need to, such as:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
logging:
|
|
||||||
sink: stdio
|
|
||||||
level: "INFO+1"
|
|
||||||
```
|
|
||||||
|
|
||||||
This isn't currently used by Anubis, but will be in the future for "slightly important" information.
|
|
||||||
|
|
||||||
### `file` sink
|
|
||||||
|
|
||||||
The `file` sink makes Anubis write its logs to the filesystem and rotate them out when the log file meets certain thresholds. This logging sink takes the following parameters:
|
|
||||||
|
|
||||||
| Name | Type | Example | Description |
|
|
||||||
| :------------- | :-------------- | :-------------------- | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
|
||||||
| `file` | string | `/var/log/anubis.log` | The file where Anubis logs should be written to. Make sure the user Anubis is running as has write and file creation permissions to this directory. |
|
|
||||||
| `maxBackups` | number | `3` | The number of old log files that should be maintained when log files are rotated out. |
|
|
||||||
| `maxBytes` | number of bytes | `67108864` (64Mi) | The maximum size of each log file before it is rotated out. |
|
|
||||||
| `maxAge` | number of days | `7` | If a log file is more than this many days old, rotate it out. |
|
|
||||||
| `compress` | boolean | `true` | If true, compress old log files with gzip. This should be set to `true` and is only exposed as an option for dealing with legacy workflows where there is magical thinking about log files at play. |
|
|
||||||
| `useLocalTime` | boolean | `false` | If true, use the system local time zone to create log filenames instead of UTC. This should almost always be set to `false` and is only exposed for legacy workflows where there is magical thinking about time zones at play. |
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
logging:
|
|
||||||
sink: file
|
|
||||||
parameters:
|
|
||||||
file: "./var/anubis.log"
|
|
||||||
maxBackups: 3 # keep at least 3 old copies
|
|
||||||
maxBytes: 67108864 # each file can have up to 64 Mi of logs
|
|
||||||
maxAge: 7 # rotate files out every n days
|
|
||||||
compress: true # gzip-compress old log files
|
|
||||||
useLocalTime: false # timezone for rotated files is UTC
|
|
||||||
```
|
|
||||||
|
|
||||||
When files are rotated out, the old files will be named after the rotation timestamp in [RFC 3339 format](https://www.rfc-editor.org/rfc/rfc3339).
|
|
||||||
|
|
||||||
### `stdio` sink
|
|
||||||
|
|
||||||
By default, Anubis logs everything to the standard error stream of its process. This requires no configuration:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
logging:
|
|
||||||
sink: stdio
|
|
||||||
```
|
|
||||||
|
|
||||||
If you use a service orchestration platform that does not capture the standard error stream of processes, you need to use a different logging sink.
|
|
||||||
|
|
||||||
## Risk calculation for downstream services
|
|
||||||
|
|
||||||
In case your service needs it for risk calculation reasons, Anubis exposes information about the rules that any requests match using a few headers:
|
|
||||||
|
|
||||||
| Header | Explanation | Example |
|
|
||||||
| :---------------- | :--------------------------------------------------- | :--------------- |
|
|
||||||
| `X-Anubis-Rule` | The name of the rule that was matched | `bot/lightpanda` |
|
|
||||||
| `X-Anubis-Action` | The action that Anubis took in response to that rule | `CHALLENGE` |
|
|
||||||
| `X-Anubis-Status` | The status and how strict Anubis was in its checks | `PASS` |
|
|
||||||
|
|
||||||
Policy rules are matched using [Go's standard library regular expressions package](https://pkg.go.dev/regexp). You can experiment with the syntax at [regex101.com](https://regex101.com); make sure to select the Golang option.
|
|
||||||
|
|
||||||
## Request Weight
|
|
||||||
|
|
||||||
Anubis rules can also add or remove "weight" from requests, allowing administrators to configure custom levels of suspicion. For example, if your application uses session tokens named `i_love_gitea`:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
- name: gitea-session-token
|
|
||||||
action: WEIGH
|
|
||||||
expression:
|
|
||||||
all:
|
|
||||||
- '"Cookie" in headers'
|
|
||||||
- headers["Cookie"].contains("i_love_gitea=")
|
|
||||||
# Remove 5 weight points
|
|
||||||
weight:
|
|
||||||
adjust: -5
|
|
||||||
```
|
|
||||||
|
|
||||||
This would remove five weight points from the request, which would make Anubis present the [Meta Refresh challenge](./configuration/challenges/metarefresh.mdx) in the default configuration.
|
|
||||||
|
|
||||||
### Weight Thresholds
|
|
||||||
|
|
||||||
For more information on configuring weight thresholds, see [Weight Threshold Configuration](./configuration/thresholds.mdx)
|
|
||||||
|
|
||||||
### Advice
|
|
||||||
|
|
||||||
Weight is still very new and needs work. This is an experimental feature and should be treated as such. Here's some advice to help you better tune requests:
|
|
||||||
|
|
||||||
- The default weight for browser-like clients is 10. This triggers an aggressive challenge.
|
|
||||||
- Remove and add weight in multiples of five.
|
|
||||||
- Be careful with how you configure weight.
|
|
||||||
|
|
@ -1,87 +0,0 @@
|
||||||
---
|
|
||||||
title: robots2policy CLI Tool
|
|
||||||
sidebar_position: 50
|
|
||||||
---
|
|
||||||
|
|
||||||
The `robots2policy` tool converts robots.txt files into Anubis challenge policies. It reads robots.txt rules and generates equivalent CEL expressions for path matching and user-agent filtering.
|
|
||||||
|
|
||||||
## Installation
|
|
||||||
|
|
||||||
Install directly with Go:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
go install github.com/TecharoHQ/anubis/cmd/robots2policy@latest
|
|
||||||
```
|
|
||||||
|
|
||||||
## Usage
|
|
||||||
|
|
||||||
Basic conversion from URL:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
robots2policy -input https://www.example.com/robots.txt
|
|
||||||
```
|
|
||||||
|
|
||||||
Convert local file to YAML:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
robots2policy -input robots.txt -output policy.yaml
|
|
||||||
```
|
|
||||||
|
|
||||||
Convert with custom settings:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
robots2policy -input robots.txt -action DENY -format json
|
|
||||||
```
|
|
||||||
|
|
||||||
## Options
|
|
||||||
|
|
||||||
| Flag | Description | Default |
|
|
||||||
| --------------------- | ------------------------------------------------------------------ | ------------------- |
|
|
||||||
| `-input` | robots.txt file path or URL (use `-` for stdin) | _required_ |
|
|
||||||
| `-output` | Output file (use `-` for stdout) | stdout |
|
|
||||||
| `-format` | Output format: `yaml` or `json` | `yaml` |
|
|
||||||
| `-action` | Action for disallowed paths: `ALLOW`, `DENY`, `CHALLENGE`, `WEIGH` | `CHALLENGE` |
|
|
||||||
| `-name` | Policy name prefix | `robots-txt-policy` |
|
|
||||||
| `-crawl-delay-weight` | Weight adjustment for crawl-delay rules | `3` |
|
|
||||||
| `-deny-user-agents` | Action for blacklisted user agents | `DENY` |
|
|
||||||
|
|
||||||
## Example
|
|
||||||
|
|
||||||
Input robots.txt:
|
|
||||||
|
|
||||||
```txt
|
|
||||||
User-agent: *
|
|
||||||
Disallow: /admin/
|
|
||||||
Disallow: /private
|
|
||||||
|
|
||||||
User-agent: BadBot
|
|
||||||
Disallow: /
|
|
||||||
```
|
|
||||||
|
|
||||||
Generated policy:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
- name: robots-txt-policy-disallow-1
|
|
||||||
action: CHALLENGE
|
|
||||||
expression:
|
|
||||||
single: path.startsWith("/admin/")
|
|
||||||
- name: robots-txt-policy-disallow-2
|
|
||||||
action: CHALLENGE
|
|
||||||
expression:
|
|
||||||
single: path.startsWith("/private")
|
|
||||||
- name: robots-txt-policy-blacklist-3
|
|
||||||
action: DENY
|
|
||||||
expression:
|
|
||||||
single: userAgent.contains("BadBot")
|
|
||||||
```
|
|
||||||
|
|
||||||
## Using the Generated Policy
|
|
||||||
|
|
||||||
Save the output and import it in your main policy file:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
bots:
|
|
||||||
- import: "./robots-policy.yaml"
|
|
||||||
```
|
|
||||||
|
|
||||||
The tool handles wildcard patterns, user-agent specific rules, and blacklisted bots automatically.
|
|
||||||
|
|
@ -1,8 +0,0 @@
|
||||||
{
|
|
||||||
"label": "Server Roles",
|
|
||||||
"position": 40,
|
|
||||||
"link": {
|
|
||||||
"type": "generated-index",
|
|
||||||
"description": "Various server roles you will need to keep in mind with Anubis."
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
@ -1,10 +0,0 @@
|
||||||
# OCI Registries
|
|
||||||
|
|
||||||
If you are serving an OCI registry behind Anubis, you will need to import the `(data)/clients/docker-client.yaml` file in order to make sure that OCI registry clients can download images:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
bots:
|
|
||||||
- import: (data)/meta/default-config.yaml
|
|
||||||
- import: (data)/clients/docker-client.yaml
|
|
||||||
# ... the rest of your config
|
|
||||||
```
|
|
||||||
Some files were not shown because too many files have changed in this diff Show more
Loading…
Add table
Add a link
Reference in a new issue