Compare commits: bc26fcd1f9 ... master (17 commits)

SHA1:
- b4ebc4bad7
- d5e473304a
- 5deb2e6e48
- 1c478e6ab5
- dbd898cb2f
- 43fbcf59a5
- f23fc62ada
- d4b01468ba
- 8fd220a16e
- 3637b3ba23
- 9f95585daa
- 495943b837
- 3eb6938b62
- d10cd49cf0
- 61692b36a2
- 9d562c7188
- 33eceff1fe
CLAUDE.md (19 changed lines)

@@ -9,7 +9,7 @@ This is a home infrastructure deployment repository using Ansible for automated
 ## Development Commands
 
 ### Core Commands
-- `make` or `make lint` - Run linting (yamllint + ansible-lint) on all YAML files
+- `make` or `make lint` - Run yamllint on all YAML files. Output may only show "Running yamllint..." and "Done." with no errors listed — this means linting passed. Do NOT run yamllint or ansible-lint manually; `make lint` is the only lint step needed.
 - `make deploy` - Deploy all configurations to the home server
 - `make deploy TAGS=sometag` - Deploy only specific tagged tasks
 - `make deploy TARGET=specific-host` - Deploy to specific host instead of all
@@ -96,9 +96,22 @@ Tasks are tagged by service/component for selective deployment:
 
 ## Target Environment
 
-- Single target host: `home.bdebyl.net`
+- Single target host: `home.debyl.io`
 - OS: Fedora (ansible_user: fedora)
 - Container runtime: Podman
 - Web server: Caddy with automatic HTTPS and built-in security (replaced nginx + ModSecurity)
 - All services accessible via HTTPS with automatic certificate renewal
 - ~~CI/CD: Drone CI infrastructure completely decommissioned~~
+
+### Remote SSH Commands for Service Users
+
+The `podman` user (and other service users) have `/bin/nologin` as their shell. To run commands as these users via SSH:
+
+- **One-off commands**: `sudo -H -u podman bash -c 'command here'`
+- **Interactive shell**: `sudo -H -u podman bash -c 'cd; bash'`
+- **systemctl --user** requires `XDG_RUNTIME_DIR`:
+  ```bash
+  sudo -H -u podman bash -c 'export XDG_RUNTIME_DIR=/run/user/$(id -u); systemctl --user <action> <service>'
+  ```
+
+Podman is a user-specific (rootless) container runtime, not a system service like Docker. The user context matters for all podman and systemctl --user operations. The default SSH user (`fedora`) has sudo access and can run commands directly.
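As a concrete illustration of the `XDG_RUNTIME_DIR` pattern documented above, a minimal sketch (the `graylog` unit name here is hypothetical; substitute any real user-level container unit):

```bash
# Query a rootless container's user service as the podman user.
# "graylog" is an example unit name, not taken from this diff.
sudo -H -u podman bash -c \
  'export XDG_RUNTIME_DIR=/run/user/$(id -u); systemctl --user status graylog'
```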
Makefile (1 changed line)

@@ -70,6 +70,7 @@ vault: ${ANSIBLE_VAULT} ${VAULT_FILE}
 lint: ${LINT_YAML} ${SKIP_FILE}
 	@printf "Running yamllint...\n"
+	-@${LINT_YAML} ${YAML_FILES}
 	@printf "Done.\n"
 
 # Git-crypt management
 git-crypt-backup:
@@ -8,6 +8,8 @@
     - role: podman
     # SSL certificates are now handled automatically by Caddy
     # - role: ssl  # REMOVED - Caddy handles all certificate management
+    - role: ollama
+      tags: ollama
     - role: github-actions
     - role: graylog-config
       tags: graylog-config
@@ -1,5 +1,5 @@
 ---
 all:
   hosts:
-    home.bdebyl.net:
+    home.debyl.io:
       ansible_user: fedora
@@ -12,7 +12,8 @@ deps:
     python-docker,
   ]
 
-fail2ban_jails: [sshd.local]
+fail2ban_jails: [sshd.local, zomboid.local]
+fail2ban_filters: [zomboid.conf]
 
 services:
   - crond
ansible/roles/common/files/fail2ban/filters/zomboid.conf (new file, 5 lines)

@@ -0,0 +1,5 @@
+[Definition]
+# Match ZOMBOID_RATELIMIT firewall log entries
+# Example: ZOMBOID_RATELIMIT: IN=eth0 OUT= MAC=... SRC=1.2.3.4 DST=...
+failregex = ZOMBOID_RATELIMIT:.*SRC=<HOST>
+ignoreregex =
ansible/roles/common/files/fail2ban/jails/zomboid.local (new file, 9 lines)

@@ -0,0 +1,9 @@
+[zomboid]
+enabled = true
+filter = zomboid
+banaction = iptables-allports
+backend = systemd
+maxretry = 5
+findtime = 4h
+bantime = 1w
+ignoreip = 127.0.0.1/32 192.168.1.0/24
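Before deploying, the new filter can be checked against a synthetic log line with `fail2ban-regex`; a quick sketch (the SRC address below is made up):

```bash
# Confirm the zomboid failregex extracts <HOST> from a sample entry.
fail2ban-regex \
  'ZOMBOID_RATELIMIT: IN=eth0 OUT= MAC=00:11:22 SRC=203.0.113.7 DST=192.168.1.10 DPT=16261' \
  /etc/fail2ban/filter.d/zomboid.conf
```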
@@ -10,3 +10,9 @@
   ansible.builtin.service:
     name: fail2ban
     state: restarted
+
+- name: restart fluent-bit
+  become: true
+  ansible.builtin.systemd:
+    name: fluent-bit
+    state: restarted
@@ -2,25 +2,6 @@
 # Fluent Bit - Log forwarder from journald to Graylog GELF
 # Deployed as systemd service (not container) for direct journal access
 
-# Clean up old container deployment if it exists
-- name: stop and remove fluent-bit container if exists
-  become: true
-  become_user: "{{ podman_user }}"
-  containers.podman.podman_container:
-    name: fluent-bit
-    state: absent
-  ignore_errors: true
-
-- name: disable old fluent-bit container systemd service
-  become: true
-  become_user: "{{ podman_user }}"
-  ansible.builtin.systemd:
-    name: fluent-bit
-    enabled: false
-    state: stopped
-    scope: user
-  ignore_errors: true
-
 - name: install fluent-bit package
   become: true
   ansible.builtin.dnf:
@@ -3,6 +3,9 @@
 - import_tasks: security.yml
 - import_tasks: service.yml
 
+- import_tasks: fluent-bit.yml
+  tags: fluent-bit, graylog
+
 - name: create the docker group
   become: true
   ansible.builtin.group:
@@ -21,6 +21,16 @@
   notify: restart_sshd
   tags: security
 
+- name: setup fail2ban filters
+  become: true
+  ansible.builtin.copy:
+    src: files/fail2ban/filters/{{ item }}
+    dest: /etc/fail2ban/filter.d/{{ item }}
+    mode: 0644
+  loop: "{{ fail2ban_filters }}"
+  notify: restart_fail2ban
+  tags: security
+
 - name: setup fail2ban jails
   become: true
   ansible.builtin.copy:
@@ -36,6 +36,27 @@
     Read_From_Tail    On
     Strip_Underscores On
 
+# =============================================================================
+# INPUT: Kernel firewall logs for Zomboid rate limiting
+# =============================================================================
+# Captures ZOMBOID_RATELIMIT firewall events for fail2ban monitoring
+[INPUT]
+    Name              systemd
+    Tag               firewall.zomboid.ratelimit
+    Systemd_Filter    _TRANSPORT=kernel
+    Read_From_Tail    On
+    Strip_Underscores On
+
+# =============================================================================
+# INPUT: Fail2ban actions (ban/unban events)
+# =============================================================================
+[INPUT]
+    Name              systemd
+    Tag               fail2ban.*
+    Systemd_Filter    _SYSTEMD_UNIT=fail2ban.service
+    Read_From_Tail    On
+    Strip_Underscores On
+
 # =============================================================================
 # INPUT: Caddy access logs (JSON format)
 # =============================================================================
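Before trusting the new kernel-transport input, it is worth confirming that the firewall LOG rules actually emit journal entries; a small sketch:

```bash
# Show recent ZOMBOID_RATELIMIT kernel messages. The -k (kernel transport)
# scope is exactly what the new [INPUT] block above reads.
journalctl -k -g 'ZOMBOID_RATELIMIT' -n 5 --no-pager
```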
@@ -53,6 +74,12 @@
 # =============================================================================
 # FILTERS: Add metadata for Graylog categorization
 # =============================================================================
+# Exclude Graylog stack containers to prevent feedback loop
+[FILTER]
+    Name    grep
+    Match   podman.*
+    Exclude CONTAINER_NAME ^graylog
+
 [FILTER]
     Name  record_modifier
     Match podman.*
@@ -93,6 +120,27 @@
     Record source   firewall
     Record log_type zomboid_connection
 
+# Filter kernel logs to only keep ZOMBOID_RATELIMIT messages
+[FILTER]
+    Name  grep
+    Match firewall.zomboid.ratelimit
+    Regex MESSAGE ZOMBOID_RATELIMIT
+
+[FILTER]
+    Name   record_modifier
+    Match  firewall.zomboid.ratelimit
+    Record host {{ ansible_hostname }}
+    Record source   firewall
+    Record log_type zomboid_ratelimit
+
+# Fail2ban ban/unban events
+[FILTER]
+    Name   record_modifier
+    Match  fail2ban.*
+    Record host {{ ansible_hostname }}
+    Record source   fail2ban
+    Record log_type security
+
 # =============================================================================
 # OUTPUT: All logs to Graylog GELF UDP
 # =============================================================================
@@ -101,7 +149,7 @@
     Name  gelf
     Match *
     Host  127.0.0.1
-    Port  12203
-    Mode  udp
+    Port  12202
+    Mode  tcp
     Gelf_Short_Message_Key MESSAGE
     Gelf_Host_Key          host
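With the output switched from UDP to TCP, delivery failures become observable; a hedged smoke test of the Graylog TCP input (GELF over TCP is null-terminated JSON):

```bash
# Send a minimal GELF 1.1 frame to the TCP input; the trailing \0 is required.
printf '{"version":"1.1","host":"test","short_message":"gelf tcp smoke test"}\0' \
  | nc -w1 127.0.0.1 12202
```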
@@ -15,3 +15,10 @@
     Name   zomboid_firewall
     Format regex
     Regex  ZOMBOID_CONN:.*SRC=(?<src_ip>[0-9.]+).*DST=(?<dst_ip>[0-9.]+).*DPT=(?<dst_port>[0-9]+)
+
+# Parse ZOMBOID_RATELIMIT firewall logs to extract source IP
+# Example: ZOMBOID_RATELIMIT: IN=enp0s31f6 OUT= MAC=... SRC=45.5.113.90 DST=192.168.1.10 ...
+[PARSER]
+    Name   zomboid_ratelimit
+    Format regex
+    Regex  ZOMBOID_RATELIMIT:.*SRC=(?<src_ip>[0-9.]+).*DST=(?<dst_ip>[0-9.]+).*DPT=(?<dst_port>[0-9]+)
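The capture pattern can be sanity-checked outside Fluent Bit; a rough sketch with GNU grep (PCRE accepts the same named-group syntax, though Fluent Bit itself uses the Onigmo engine, so this is only an approximation):

```bash
# Check that SRC/DST/DPT are all matched on a sample line.
echo 'ZOMBOID_RATELIMIT: IN=eth0 OUT= SRC=45.5.113.90 DST=192.168.1.10 DPT=16261' \
  | grep -oP 'ZOMBOID_RATELIMIT:.*SRC=(?<src_ip>[0-9.]+).*DST=(?<dst_ip>[0-9.]+).*DPT=(?<dst_port>[0-9]+)'
```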
@@ -3,7 +3,17 @@ gitea_runner_user: gitea-runner
 gitea_runner_home: /home/gitea-runner
 gitea_runner_version: "0.2.13"
 gitea_runner_arch: linux-amd64
-gitea_instance_url: https://git.debyl.io
+gitea_runner_capacity: 4
+
+# Multiple Gitea instances to run actions runners for
+gitea_runners:
+  - name: debyl
+    instance_url: https://git.debyl.io
+  - name: skudak
+    instance_url: https://git.skudak.com
+
+# Old single-instance format (replaced by gitea_runners list above):
+# gitea_instance_url: https://git.debyl.io
 
 # Paths
 act_runner_bin: /usr/local/bin/act_runner
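Each entry in `gitea_runners` still needs a one-time registration with its Gitea instance; a hedged sketch (the config path assumes `act_runner_config_dir` is `/etc/act_runner`, and the token is a placeholder):

```bash
# Register the "debyl" runner once; repeat with the other instance's URL/token.
# <REGISTRATION_TOKEN> comes from each Gitea's admin Actions -> Runners page.
sudo -u gitea-runner /usr/local/bin/act_runner register \
  --no-interactive \
  --config /etc/act_runner/config-debyl.yaml \
  --instance https://git.debyl.io \
  --token <REGISTRATION_TOKEN> \
  --name debyl
```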
@@ -1,10 +1,11 @@
 ---
-- name: restart act_runner
+- name: restart act_runner services
   become: true
   ansible.builtin.systemd:
-    name: act_runner
+    name: "act_runner-{{ item.name }}"
     state: restarted
     daemon_reload: true
+  loop: "{{ gitea_runners }}"
 
 - name: restart podman socket
   become: true
@@ -35,6 +35,13 @@
   when: not esp_idf_dir.stat.exists
   tags: gitea-actions
 
+- name: add ESP-IDF to git safe.directory
+  become: true
+  ansible.builtin.command:
+    cmd: git config --global --add safe.directory {{ esp_idf_path }}
+  changed_when: false
+  tags: gitea-actions
+
 - name: ensure ESP-IDF submodules are initialized
   become: true
   ansible.builtin.command:
@@ -79,7 +86,7 @@
       export IDF_TOOLS_PATH="{{ gitea_runner_home }}/.espressif"
       {{ esp_idf_path }}/install.sh esp32
   args:
-    creates: "{{ gitea_runner_home }}/.espressif/tools"
+    creates: "{{ gitea_runner_home }}/.espressif/python_env"
   environment:
     HOME: "{{ gitea_runner_home }}"
   tags: gitea-actions
@@ -15,31 +15,36 @@
     mode: "0755"
   tags: gitea-actions
 
-- name: create act_runner working directory
+- name: create per-runner working directory
   become: true
   ansible.builtin.file:
-    path: "{{ act_runner_work_dir }}"
+    path: "{{ act_runner_work_dir }}/{{ item.name }}"
     state: directory
     owner: "{{ gitea_runner_user }}"
     group: "{{ gitea_runner_user }}"
     mode: "0755"
+  loop: "{{ gitea_runners }}"
   tags: gitea-actions
 
-- name: create act_runner cache directory
+- name: create per-runner cache directory
   become: true
   ansible.builtin.file:
-    path: "{{ act_runner_work_dir }}/cache"
+    path: "{{ act_runner_work_dir }}/{{ item.name }}/cache"
     state: directory
     owner: "{{ gitea_runner_user }}"
     group: "{{ gitea_runner_user }}"
     mode: "0755"
+  loop: "{{ gitea_runners }}"
   tags: gitea-actions
 
-- name: deploy act_runner configuration
+- name: deploy per-runner configuration
   become: true
   ansible.builtin.template:
     src: config.yaml.j2
-    dest: "{{ act_runner_config_dir }}/config.yaml"
+    dest: "{{ act_runner_config_dir }}/config-{{ item.name }}.yaml"
     mode: "0644"
-  notify: restart act_runner
+  vars:
+    runner_name: "{{ item.name }}"
+  loop: "{{ gitea_runners }}"
+  notify: restart act_runner services
   tags: gitea-actions
@@ -1,17 +1,37 @@
 ---
-- name: deploy act_runner systemd service
-  become: true
-  ansible.builtin.template:
-    src: act_runner.service.j2
-    dest: /etc/systemd/system/act_runner.service
-    mode: "0644"
-  notify: restart act_runner
-  tags: gitea-actions
-
-- name: enable act_runner service
+- name: stop and disable legacy act_runner service
   become: true
   ansible.builtin.systemd:
     name: act_runner
+    state: stopped
+    enabled: false
+  failed_when: false
+  tags: gitea-actions
+
+- name: remove legacy act_runner service file
+  become: true
+  ansible.builtin.file:
+    path: /etc/systemd/system/act_runner.service
+    state: absent
+  tags: gitea-actions
+
+- name: deploy per-runner systemd service
+  become: true
+  ansible.builtin.template:
+    src: act_runner.service.j2
+    dest: "/etc/systemd/system/act_runner-{{ item.name }}.service"
+    mode: "0644"
+  vars:
+    runner_name: "{{ item.name }}"
+  loop: "{{ gitea_runners }}"
+  notify: restart act_runner services
+  tags: gitea-actions
+
+- name: enable per-runner services
+  become: true
+  ansible.builtin.systemd:
+    name: "act_runner-{{ item.name }}"
+    daemon_reload: true
+    enabled: true
+  loop: "{{ gitea_runners }}"
   tags: gitea-actions
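After a deploy, each configured instance should show up as its own unit; a quick check:

```bash
# Both per-runner units (act_runner-debyl, act_runner-skudak) should be active.
systemctl list-units 'act_runner-*' --all --no-pager
```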
@@ -32,3 +32,42 @@
     state: directory
     mode: "0755"
   tags: gitea-actions
+
+- name: create .ssh directory
+  become: true
+  ansible.builtin.file:
+    path: "{{ gitea_runner_home }}/.ssh"
+    state: directory
+    owner: "{{ gitea_runner_user }}"
+    group: "{{ gitea_runner_user }}"
+    mode: "0700"
+  tags: gitea-actions
+
+- name: generate SSH key for gitea-runner
+  become: true
+  become_user: "{{ gitea_runner_user }}"
+  ansible.builtin.command:
+    cmd: ssh-keygen -t ed25519 -f {{ gitea_runner_home }}/.ssh/id_ed25519 -N "" -C "gitea-runner@galactica"
+    creates: "{{ gitea_runner_home }}/.ssh/id_ed25519"
+  tags: gitea-actions
+
+- name: add Gitea SSH host keys to known_hosts
+  become: true
+  become_user: "{{ gitea_runner_user }}"
+  ansible.builtin.shell:
+    cmd: ssh-keyscan -p 2222 {{ item }} >> {{ gitea_runner_home }}/.ssh/known_hosts 2>/dev/null
+  args:
+    creates: "{{ gitea_runner_home }}/.ssh/known_hosts"
+  loop:
+    - git.skudak.com
+    - git.debyl.io
+  tags: gitea-actions
+
+- name: set known_hosts permissions
+  become: true
+  ansible.builtin.file:
+    path: "{{ gitea_runner_home }}/.ssh/known_hosts"
+    owner: "{{ gitea_runner_user }}"
+    group: "{{ gitea_runner_user }}"
+    mode: "0644"
+  tags: gitea-actions
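Once the generated public key has been added to each Gitea account, the runner's SSH access can be verified per host; a hedged sketch (Gitea answers with a greeting on success):

```bash
# Test SSH auth to a Gitea instance as the runner user; expect a
# "Hi there ... successfully authenticated" message, then exit.
sudo -H -u gitea-runner ssh -p 2222 -T git@git.debyl.io
```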
@@ -1,11 +1,11 @@
 [Unit]
-Description=Gitea Actions runner
+Description=Gitea Actions runner ({{ runner_name }})
 Documentation=https://gitea.com/gitea/act_runner
 After=network.target podman.socket
 
 [Service]
-ExecStart={{ act_runner_bin }} daemon --config {{ act_runner_config_dir }}/config.yaml
-WorkingDirectory={{ act_runner_work_dir }}
+ExecStart={{ act_runner_bin }} daemon --config {{ act_runner_config_dir }}/config-{{ runner_name }}.yaml
+WorkingDirectory={{ act_runner_work_dir }}/{{ runner_name }}
 TimeoutSec=0
 RestartSec=10
 Restart=always
@@ -2,8 +2,8 @@ log:
   level: info
 
 runner:
-  file: {{ act_runner_work_dir }}/.runner
-  capacity: 1
+  file: {{ act_runner_work_dir }}/{{ runner_name }}/.runner
+  capacity: {{ gitea_runner_capacity | default(4) }}
   timeout: 3h
   insecure: false
   fetch_timeout: 5s
@@ -15,7 +15,7 @@ runner:
 
 cache:
   enabled: true
-  dir: {{ act_runner_work_dir }}/cache
+  dir: {{ act_runner_work_dir }}/{{ runner_name }}/cache
 
 container:
   network: host
@@ -27,4 +27,4 @@ container:
   force_pull: false
 
 host:
-  workdir_parent: {{ act_runner_work_dir }}/workdir
+  workdir_parent: {{ act_runner_work_dir }}/{{ runner_name }}/workdir
@@ -56,6 +56,30 @@ graylog_streams:
         type: 1
         inverted: false
 
+  - title: "zomboid-connections"
+    description: "Zomboid game server connection logs"
+    rules:
+      - field: "log_type"
+        value: "zomboid_connection"
+        type: 1
+        inverted: false
+
+  - title: "zomboid-ratelimit"
+    description: "Zomboid rate-limited connection attempts"
+    rules:
+      - field: "log_type"
+        value: "zomboid_ratelimit"
+        type: 1
+        inverted: false
+
+  - title: "fail2ban-actions"
+    description: "Fail2ban ban and unban events"
+    rules:
+      - field: "source"
+        value: "fail2ban"
+        type: 1
+        inverted: false
+
 # Pipeline definitions
 graylog_pipelines:
   - title: "GeoIP Enrichment"
@@ -65,6 +89,7 @@ graylog_pipelines:
     match: "EITHER"
     rules:
       - "geoip_caddy_access"
+      - "geoip_zomboid"
 
   - title: "Debyltech Event Classification"
     description: "Categorize debyltech-api events"
@@ -98,6 +123,20 @@ graylog_pipeline_rules:
         set_field("geo_coordinates", geo["coordinates"]);
       end
 
+  - title: "geoip_zomboid"
+    description: "GeoIP lookup for Zomboid connection logs"
+    source: |
+      rule "GeoIP for Zomboid"
+      when
+        has_field("src_ip")
+      then
+        let ip = to_string($message.src_ip);
+        let geo = lookup("geoip-lookup", ip);
+        set_field("geo_country", geo["country"].iso_code);
+        set_field("geo_city", geo["city"].names.en);
+        set_field("geo_coordinates", geo["coordinates"]);
+      end
+
   - title: "classify_order_events"
     description: "Classify order events"
     source: |
@@ -164,6 +203,8 @@ graylog_pipeline_connections:
     streams:
       - "caddy-access"
      - "caddy-fulfillr"
+      - "zomboid-connections"
+      - "zomboid-ratelimit"
 
   - pipeline: "Debyltech Event Classification"
     streams:
ansible/roles/ollama/defaults/main.yml (new file, 6 lines)

@@ -0,0 +1,6 @@
+---
+ollama_models:
+  - dolphin-phi
+  - dolphin-mistral
+ollama_host: "127.0.0.1"
+ollama_port: 11434
ansible/roles/ollama/handlers/main.yml (new file, 8 lines)

@@ -0,0 +1,8 @@
+---
+- name: restart ollama
+  become: true
+  ansible.builtin.systemd:
+    name: ollama
+    state: restarted
+    daemon_reload: true
+  tags: ollama
ansible/roles/ollama/meta/main.yml (new file, 3 lines)

@@ -0,0 +1,3 @@
+---
+dependencies:
+  - role: common
ansible/roles/ollama/tasks/install.yml (new file, 11 lines)

@@ -0,0 +1,11 @@
+---
+- name: check if ollama is already installed
+  ansible.builtin.stat:
+    path: /usr/local/bin/ollama
+  register: ollama_binary
+
+- name: install ollama via official install script
+  become: true
+  ansible.builtin.shell: |
+    curl -fsSL https://ollama.com/install.sh | sh
+  when: not ollama_binary.stat.exists
ansible/roles/ollama/tasks/main.yml (new file, 9 lines)

@@ -0,0 +1,9 @@
+---
+- import_tasks: install.yml
+  tags: ollama
+
+- import_tasks: service.yml
+  tags: ollama
+
+- import_tasks: models.yml
+  tags: ollama
ansible/roles/ollama/tasks/models.yml (new file, 10 lines)

@@ -0,0 +1,10 @@
+---
+- name: pull ollama models
+  become: true
+  ansible.builtin.command: ollama pull {{ item }}
+  loop: "{{ ollama_models }}"
+  register: result
+  retries: 3
+  delay: 10
+  until: result is not failed
+  changed_when: "'pulling' in result.stderr or 'pulling' in result.stdout"
ansible/roles/ollama/tasks/service.yml (new file, 23 lines)

@@ -0,0 +1,23 @@
+---
+- name: create ollama systemd override directory
+  become: true
+  ansible.builtin.file:
+    path: /etc/systemd/system/ollama.service.d
+    state: directory
+    mode: 0755
+
+- name: template ollama environment override
+  become: true
+  ansible.builtin.template:
+    src: ollama.env.j2
+    dest: /etc/systemd/system/ollama.service.d/override.conf
+    mode: 0644
+  notify: restart ollama
+
+- name: enable and start ollama service
+  become: true
+  ansible.builtin.systemd:
+    name: ollama
+    enabled: true
+    state: started
+    daemon_reload: true
ansible/roles/ollama/templates/ollama.env.j2 (new file, 4 lines)

@@ -0,0 +1,4 @@
+[Service]
+Environment="OLLAMA_HOST={{ ollama_host }}:{{ ollama_port }}"
+Environment="OLLAMA_NUM_PARALLEL=1"
+Environment="OLLAMA_MAX_LOADED_MODELS=1"
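After the role runs, the override and a pulled model can be verified against the standard Ollama HTTP API; a minimal sketch using the defaults above:

```bash
# List installed models, then run a one-off non-streaming generation.
curl -s http://127.0.0.1:11434/api/tags
curl -s http://127.0.0.1:11434/api/generate \
  -d '{"model": "dolphin-phi", "prompt": "Say hello.", "stream": false}'
```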
File diff suppressed because one or more lines are too long
@@ -42,11 +42,3 @@
     scope: user
   tags:
     - zomboid
-
-- name: restart fluent-bit
-  become: true
-  ansible.builtin.systemd:
-    name: fluent-bit
-    state: restarted
-  tags:
-    - fluent-bit
@@ -4,7 +4,7 @@
     container_name: awsddns
     container_image: "{{ image }}"
 
-- name: create home.bdebyl.net awsddns server container
+- name: create home.debyl.io awsddns server container
   become: true
   become_user: "{{ podman_user }}"
   diff: false
ansible/roles/podman/tasks/containers/cloud-backup.yml (new file, 59 lines)

@@ -0,0 +1,59 @@
+---
+- name: create backup SSH key directory
+  become: true
+  ansible.builtin.file:
+    path: /etc/ssh/backup_keys
+    state: directory
+    owner: root
+    group: root
+    mode: 0700
+
+- name: deploy {{ backup_name }} backup SSH key
+  become: true
+  ansible.builtin.copy:
+    content: "{{ ssh_key_content }}"
+    dest: "{{ ssh_key_path }}"
+    owner: root
+    group: root
+    mode: 0600
+    setype: ssh_home_t
+
+- name: template {{ backup_name }} backup script
+  become: true
+  ansible.builtin.template:
+    src: nextcloud/cloud-backup.sh.j2
+    dest: "{{ script_path }}"
+    owner: root
+    group: root
+    mode: 0755
+    setype: bin_t
+
+- name: template {{ backup_name }} backup systemd service
+  become: true
+  ansible.builtin.template:
+    src: nextcloud/cloud-backup.service.j2
+    dest: "/etc/systemd/system/{{ backup_name }}-backup.service"
+    owner: root
+    group: root
+    mode: 0644
+  vars:
+    instance_name: "{{ backup_name }}"
+
+- name: template {{ backup_name }} backup systemd timer
+  become: true
+  ansible.builtin.template:
+    src: nextcloud/cloud-backup.timer.j2
+    dest: "/etc/systemd/system/{{ backup_name }}-backup.timer"
+    owner: root
+    group: root
+    mode: 0644
+  vars:
+    instance_name: "{{ backup_name }}"
+
+- name: enable and start {{ backup_name }} backup timer
+  become: true
+  ansible.builtin.systemd:
+    name: "{{ backup_name }}-backup.timer"
+    enabled: true
+    state: started
+    daemon_reload: true
@@ -75,7 +75,7 @@
 - import_tasks: podman/podman-check.yml
   vars:
     container_name: graylog-mongo
-    container_image: docker.io/mongo:6
+    container_image: "{{ mongo_image }}"
   tags: graylog
 
 - name: create graylog-mongo container
@@ -83,7 +83,7 @@
   become_user: "{{ podman_user }}"
   containers.podman.podman_container:
     name: graylog-mongo
-    image: docker.io/mongo:6
+    image: "{{ mongo_image }}"
     state: started
     restart_policy: on-failure:3
     log_driver: journald
@@ -103,7 +103,7 @@
 - import_tasks: podman/podman-check.yml
   vars:
     container_name: graylog-opensearch
-    container_image: docker.io/opensearchproject/opensearch:2
+    container_image: "{{ opensearch_image }}"
   tags: graylog
 
 - name: create graylog-opensearch container
@@ -111,7 +111,7 @@
   become_user: "{{ podman_user }}"
   containers.podman.podman_container:
     name: graylog-opensearch
-    image: docker.io/opensearchproject/opensearch:2
+    image: "{{ opensearch_image }}"
     state: started
     restart_policy: on-failure:3
     log_driver: journald
@@ -135,7 +135,7 @@
 - import_tasks: podman/podman-check.yml
   vars:
     container_name: graylog
-    container_image: docker.io/graylog/graylog:6.0
+    container_image: "{{ image }}"
   tags: graylog
 
 # Graylog uses host network to reach MongoDB/OpenSearch on 127.0.0.1
@@ -145,7 +145,7 @@
   become_user: "{{ podman_user }}"
   containers.podman.podman_container:
     name: graylog
-    image: docker.io/graylog/graylog:6.0
+    image: "{{ image }}"
     state: started
     restart_policy: on-failure:3
     log_driver: journald
ansible/roles/podman/tasks/containers/debyltech/n8n.yml (new file, 54 lines)

@@ -0,0 +1,54 @@
+---
+- name: create n8n host directory volumes
+  become: true
+  ansible.builtin.file:
+    path: "{{ item }}"
+    state: directory
+    owner: "{{ podman_user }}"
+    group: "{{ podman_user }}"
+    mode: 0755
+  notify: restorecon podman
+  loop:
+    - "{{ n8n_path }}"
+
+- name: set n8n volume ownership for node user
+  become: true
+  become_user: "{{ podman_user }}"
+  ansible.builtin.command:
+    cmd: podman unshare chown -R 1000:1000 {{ n8n_path }}
+  changed_when: false
+
+- name: flush handlers
+  ansible.builtin.meta: flush_handlers
+
+- import_tasks: podman/podman-check.yml
+  vars:
+    container_name: n8n
+    container_image: "{{ image }}"
+
+- name: create n8n container
+  become: true
+  become_user: "{{ podman_user }}"
+  containers.podman.podman_container:
+    name: n8n
+    image: "{{ image }}"
+    image_strict: true
+    restart_policy: on-failure:3
+    log_driver: journald
+    network: shared
+    volumes:
+      - "{{ n8n_path }}:/home/node/.n8n"
+    ports:
+      - 5678:5678/tcp
+    env:
+      N8N_HOST: "{{ n8n_server_name }}"
+      N8N_PORT: "5678"
+      N8N_PROTOCOL: https
+      WEBHOOK_URL: "https://{{ n8n_server_name }}/"
+      N8N_SECURE_COOKIE: "true"
+      GENERIC_TIMEZONE: America/New_York
+
+- name: create systemd startup job for n8n
+  include_tasks: podman/systemd-generate.yml
+  vars:
+    container_name: n8n
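With the container up, n8n's standard `/healthz` liveness route gives a quick local check that bypasses Caddy; a sketch:

```bash
# Local liveness probe against the published port.
curl -s http://127.0.0.1:5678/healthz
```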
@@ -83,3 +83,13 @@
   include_tasks: podman/systemd-generate.yml
   vars:
     container_name: cloud
+
+- include_tasks: containers/cloud-backup.yml
+  vars:
+    backup_name: cloud
+    data_path: "{{ cloud_path }}/data"
+    ssh_key_path: /etc/ssh/backup_keys/cloud
+    ssh_key_content: "{{ cloud_backup_ssh_key }}"
+    ssh_user: cloud
+    remote_path: /mnt/glacier/nextcloud
+    script_path: /usr/local/bin/cloud-backup.sh
@@ -40,7 +40,13 @@
       - host
     env:
       TZ: America/New_York
-      # Gemini AI for @bot ask command
+      # Ollama + SearXNG for FISTO AI responses
+      OLLAMA_HOST: "http://127.0.0.1:11434"
+      OLLAMA_MODEL: "dolphin-mistral"
+      OLLAMA_FALLBACK_MODEL: "dolphin-phi"
+      OLLAMA_NUM_PREDICT: "300"
+      SEARXNG_URL: "http://127.0.0.1:8080"
+      # Gemini API for @bot gemini command
       GEMINI_API_KEY: "{{ gemini_api_key }}"
       # Zomboid RCON configuration for Discord restart command
       ZOMBOID_RCON_HOST: "127.0.0.1"
ansible/roles/podman/tasks/containers/home/searxng.yml (new file, 59 lines)

@@ -0,0 +1,59 @@
+---
+- name: create searxng host directory volumes
+  become: true
+  ansible.builtin.file:
+    path: "{{ item }}"
+    state: directory
+    owner: "{{ podman_subuid.stdout }}"
+    group: "{{ podman_user }}"
+    mode: 0755
+  notify: restorecon podman
+  loop:
+    - "{{ searxng_path }}/config"
+    - "{{ searxng_path }}/data"
+
+- name: template searxng settings
+  become: true
+  ansible.builtin.template:
+    src: searxng/settings.yml.j2
+    dest: "{{ searxng_path }}/config/settings.yml"
+    owner: "{{ podman_subuid.stdout }}"
+    group: "{{ podman_user }}"
+    mode: 0644
+
+- name: unshare chown the searxng volumes for internal uid 977
+  become: true
+  become_user: "{{ podman_user }}"
+  changed_when: false
+  ansible.builtin.shell: |
+    podman unshare chown -R 977:977 {{ searxng_path }}/config
+    podman unshare chown -R 977:977 {{ searxng_path }}/data
+
+- name: flush handlers
+  ansible.builtin.meta: flush_handlers
+
+- import_tasks: podman/podman-check.yml
+  vars:
+    container_name: searxng
+    container_image: "{{ image }}"
+
+- name: create searxng container
+  become: true
+  become_user: "{{ podman_user }}"
+  containers.podman.podman_container:
+    name: searxng
+    image: "{{ image }}"
+    restart_policy: on-failure:3
+    log_driver: journald
+    network:
+      - host
+    env:
+      SEARXNG_BASE_URL: "http://127.0.0.1:8080/"
+    volumes:
+      - "{{ searxng_path }}/config:/etc/searxng"
+      - "{{ searxng_path }}/data:/srv/searxng/data"
+
+- name: create systemd startup job for searxng
+  include_tasks: podman/systemd-generate.yml
+  vars:
+    container_name: searxng
ansible/roles/podman/tasks/containers/home/uptime-kuma.yml (new file, 38 lines)

@@ -0,0 +1,38 @@
+---
+- name: create uptime-kuma-personal host directory volumes
+  become: true
+  ansible.builtin.file:
+    path: "{{ item }}"
+    state: directory
+    owner: "{{ podman_user }}"
+    group: "{{ podman_user }}"
+    mode: 0755
+  notify: restorecon podman
+  loop:
+    - "{{ uptime_kuma_personal_path }}/data"
+
+- name: flush handlers
+  ansible.builtin.meta: flush_handlers
+
+- import_tasks: podman/podman-check.yml
+  vars:
+    container_name: uptime-kuma-personal
+    container_image: "{{ image }}"
+
+- name: create uptime-kuma-personal container
+  become: true
+  become_user: "{{ podman_user }}"
+  containers.podman.podman_container:
+    name: uptime-kuma-personal
+    image: "{{ image }}"
+    restart_policy: on-failure:3
+    log_driver: journald
+    volumes:
+      - "{{ uptime_kuma_personal_path }}/data:/app/data"
+    ports:
+      - "3002:3001/tcp"
+
+- name: create systemd startup job for uptime-kuma-personal
+  include_tasks: podman/systemd-generate.yml
+  vars:
+    container_name: uptime-kuma-personal
@@ -291,6 +291,7 @@
   changed_when: "'already' not in firewall_result.stderr"
   failed_when: false
+  notify: restart firewalld
   tags: firewall
 
 - name: add firewall rule to log zomboid connections (runtime)
   become: true
@@ -300,6 +301,209 @@
     -j LOG --log-prefix "ZOMBOID_CONN: " --log-level 4
   changed_when: false
   failed_when: false
   tags: firewall
 
+# =============================================================================
+# Add logging for port 16262 (mirrors existing 16261 logging)
+# =============================================================================
+- name: add firewall rule to log zomboid connections on 16262
+  become: true
+  ansible.builtin.command: >
+    firewall-cmd --permanent --direct --add-rule ipv4 filter INPUT 0
+    -p udp --dport 16262 -m conntrack --ctstate NEW
+    -j LOG --log-prefix "ZOMBOID_CONN: " --log-level 4
+  register: firewall_result_16262
+  changed_when: "'already' not in firewall_result_16262.stderr"
+  failed_when: false
+  notify: restart firewalld
+  tags: firewall
+
+- name: add firewall rule to log zomboid connections on 16262 (runtime)
+  become: true
+  ansible.builtin.command: >
+    firewall-cmd --direct --add-rule ipv4 filter INPUT 0
+    -p udp --dport 16262 -m conntrack --ctstate NEW
+    -j LOG --log-prefix "ZOMBOID_CONN: " --log-level 4
+  changed_when: false
+  failed_when: false
+  tags: firewall
+
+# =============================================================================
+# Zomboid Rate Limiting and Query Flood Protection
+# =============================================================================
+# These rules mitigate Steam server query floods while allowing legitimate play.
+# Query packets are typically 53 bytes; game traffic is larger and sustained.
+#
+# Rule priority: 0=logging (existing), 1=allow established, 2=rate limit queries
+
+# Allow established/related connections without rate limiting
+# This ensures active players aren't affected by query rate limits
+- name: allow established zomboid connections on 16261
+  become: true
+  ansible.builtin.command: >
+    firewall-cmd --permanent --direct --add-rule ipv4 filter INPUT 1
+    -p udp --dport 16261 -m conntrack --ctstate ESTABLISHED,RELATED
+    -j ACCEPT
+  register: established_result
+  changed_when: "'already' not in established_result.stderr"
+  failed_when: false
+  notify: restart firewalld
+  tags: firewall
+
+- name: allow established zomboid connections on 16262
+  become: true
+  ansible.builtin.command: >
+    firewall-cmd --permanent --direct --add-rule ipv4 filter INPUT 1
+    -p udp --dport 16262 -m conntrack --ctstate ESTABLISHED,RELATED
+    -j ACCEPT
+  register: established_result_16262
+  changed_when: "'already' not in established_result_16262.stderr"
+  failed_when: false
+  notify: restart firewalld
+  tags: firewall
+
+# =============================================================================
+# Smart Zomboid Traffic Filtering (Packet-Size Based)
+# =============================================================================
+# Distinguishes legitimate players from scanner bots:
+#   - Players send varied packet sizes (53, 37, 1472 bytes)
+#   - Scanners only send 53-byte query packets
+#
+# Rule priority:
+#   0 = LOG all (existing above)
+#   1 = ACCEPT established (existing above)
+#   2 = Mark + ACCEPT non-query packets (verifies player)
+#   3 = ACCEPT queries from verified IPs
+#   4 = LOG rate-limited queries from unverified IPs
+#   5 = DROP rate-limited queries from unverified IPs
+
+# Priority 2: Mark IPs sending non-query packets as verified (1 hour TTL)
+# Any packet NOT 53 bytes proves actual connection attempt
+- name: mark verified players on 16261 (non-query packets)
+  become: true
+  ansible.builtin.command: >
+    firewall-cmd --permanent --direct --add-rule ipv4 filter INPUT 2
+    -p udp --dport 16261 -m conntrack --ctstate NEW
+    -m length ! --length 53
+    -m recent --name zomboid_verified --set
+    -j ACCEPT
+  register: verify_result
+  changed_when: "'already' not in verify_result.stderr"
+  failed_when: false
+  notify: restart firewalld
+  tags: firewall
+
+- name: mark verified players on 16262 (non-query packets)
+  become: true
+  ansible.builtin.command: >
+    firewall-cmd --permanent --direct --add-rule ipv4 filter INPUT 2
+    -p udp --dport 16262 -m conntrack --ctstate NEW
+    -m length ! --length 53
+    -m recent --name zomboid_verified --set
+    -j ACCEPT
+  register: verify_result_16262
+  changed_when: "'already' not in verify_result_16262.stderr"
+  failed_when: false
+  notify: restart firewalld
+  tags: firewall
+
+# Priority 3: Allow queries from verified players (within 1 hour)
+- name: allow queries from verified players on 16261
+  become: true
+  ansible.builtin.command: >
+    firewall-cmd --permanent --direct --add-rule ipv4 filter INPUT 3
+    -p udp --dport 16261 -m conntrack --ctstate NEW
+    -m length --length 53
+    -m recent --name zomboid_verified --rcheck --seconds 3600
+    -j ACCEPT
+  register: verified_query_result
+  changed_when: "'already' not in verified_query_result.stderr"
+  failed_when: false
+  notify: restart firewalld
+  tags: firewall
+
+- name: allow queries from verified players on 16262
+  become: true
+  ansible.builtin.command: >
+    firewall-cmd --permanent --direct --add-rule ipv4 filter INPUT 3
+    -p udp --dport 16262 -m conntrack --ctstate NEW
+    -m length --length 53
+    -m recent --name zomboid_verified --rcheck --seconds 3600
+    -j ACCEPT
+  register: verified_query_result_16262
+  changed_when: "'already' not in verified_query_result_16262.stderr"
+  failed_when: false
+  notify: restart firewalld
+  tags: firewall
+
+# Priority 4: LOG rate-limited queries from unverified IPs
+# Very aggressive: 2 burst, then 1 per hour
+# Note: Uses same hashlimit name as DROP rule to share bucket
+- name: log rate-limited queries from unverified IPs on 16261
+  become: true
+  ansible.builtin.command: >
+    firewall-cmd --permanent --direct --add-rule ipv4 filter INPUT 4
+    -p udp --dport 16261 -m conntrack --ctstate NEW
+    -m length --length 53
+    -m hashlimit --hashlimit-above 1/hour --hashlimit-burst 2
+    --hashlimit-mode srcip --hashlimit-name zomboid_query_16261
+    --hashlimit-htable-expire 3600000
+    -j LOG --log-prefix "ZOMBOID_RATELIMIT: " --log-level 4
+  register: unverified_log_result
+  changed_when: "'already' not in unverified_log_result.stderr"
+  failed_when: false
+  notify: restart firewalld
+  tags: firewall
+
+- name: log rate-limited queries from unverified IPs on 16262
+  become: true
+  ansible.builtin.command: >
+    firewall-cmd --permanent --direct --add-rule ipv4 filter INPUT 4
+    -p udp --dport 16262 -m conntrack --ctstate NEW
+    -m length --length 53
+    -m hashlimit --hashlimit-above 1/hour --hashlimit-burst 2
+    --hashlimit-mode srcip --hashlimit-name zomboid_query_16262
+    --hashlimit-htable-expire 3600000
+    -j LOG --log-prefix "ZOMBOID_RATELIMIT: " --log-level 4
+  register: unverified_log_result_16262
+  changed_when: "'already' not in unverified_log_result_16262.stderr"
+  failed_when: false
+  notify: restart firewalld
+  tags: firewall
+
+# Priority 5: DROP rate-limited queries from unverified IPs
+# Note: Uses same hashlimit name as LOG rule to share bucket
+- name: drop rate-limited queries from unverified IPs on 16261
+  become: true
+  ansible.builtin.command: >
+    firewall-cmd --permanent --direct --add-rule ipv4 filter INPUT 5
+    -p udp --dport 16261 -m conntrack --ctstate NEW
+    -m length --length 53
+    -m hashlimit --hashlimit-above 1/hour --hashlimit-burst 2
+    --hashlimit-mode srcip --hashlimit-name zomboid_query_16261
+    --hashlimit-htable-expire 3600000
+    -j DROP
+  register: unverified_drop_result
+  changed_when: "'already' not in unverified_drop_result.stderr"
+  failed_when: false
+  notify: restart firewalld
+  tags: firewall
+
+- name: drop rate-limited queries from unverified IPs on 16262
+  become: true
+  ansible.builtin.command: >
+    firewall-cmd --permanent --direct --add-rule ipv4 filter INPUT 5
+    -p udp --dport 16262 -m conntrack --ctstate NEW
+    -m length --length 53
+    -m hashlimit --hashlimit-above 1/hour --hashlimit-burst 2
+    --hashlimit-mode srcip --hashlimit-name zomboid_query_16262
+    --hashlimit-htable-expire 3600000
+    -j DROP
+  register: unverified_drop_result_16262
+  changed_when: "'already' not in unverified_drop_result_16262.stderr"
+  failed_when: false
+  notify: restart firewalld
+  tags: firewall
+
 # World reset is now triggered via Discord bot -> systemd path unit
 # See zomboid-world-reset.path and zomboid-world-reset.service
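To inspect the resulting chain and watch the verification list populate, a hedged sketch (the `xt_recent` proc entry appears once the `recent` match has seen its first packet):

```bash
# All direct rules, including the new priority 0-5 zomboid entries.
sudo firewall-cmd --direct --get-all-rules

# IPs currently marked as verified players by the "recent" match.
sudo cat /proc/net/xt_recent/zomboid_verified
```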
@@ -130,3 +130,13 @@
   register: trusted_domain_result
   changed_when: "'System config value trusted_domains' in trusted_domain_result.stdout"
   failed_when: false
+
+- include_tasks: containers/cloud-backup.yml
+  vars:
+    backup_name: skudak-cloud
+    data_path: "{{ cloud_skudak_path }}/data"
+    ssh_key_path: /etc/ssh/backup_keys/skudak-cloud
+    ssh_key_content: "{{ cloud_skudak_backup_ssh_key }}"
+    ssh_user: skucloud
+    remote_path: /mnt/glacier/skudakcloud
+    script_path: /usr/local/bin/skudak-cloud-backup.sh
@@ -31,7 +31,7 @@
 - import_tasks: containers/home/hass.yml
   vars:
-    image: ghcr.io/home-assistant/home-assistant:2025.9
+    image: ghcr.io/home-assistant/home-assistant:2026.1
   tags: hass
 
 - import_tasks: containers/home/partkeepr.yml
@@ -54,21 +54,21 @@
 - import_tasks: containers/home/photos.yml
   vars:
     db_image: docker.io/tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:90724186f0a3517cf6914295b5ab410db9ce23190a2d9d0b9dd6463e3fa298f0
-    ml_image: ghcr.io/immich-app/immich-machine-learning:v2.4.1
+    ml_image: ghcr.io/immich-app/immich-machine-learning:v2.5.0
     redis_image: docker.io/redis:6.2-alpine@sha256:eaba718fecd1196d88533de7ba49bf903ad33664a92debb24660a922ecd9cac8
-    image: ghcr.io/immich-app/immich-server:v2.4.1
+    image: ghcr.io/immich-app/immich-server:v2.5.0
   tags: photos
 
 - import_tasks: containers/home/cloud.yml
   vars:
     db_image: docker.io/library/mariadb:10.6
-    image: docker.io/library/nextcloud:32.0.1-apache
+    image: docker.io/library/nextcloud:33.0.0-apache
   tags: cloud
 
 - import_tasks: containers/skudak/cloud.yml
   vars:
     db_image: docker.io/library/mariadb:10.6
-    image: docker.io/library/nextcloud:32.0.1-apache
+    image: docker.io/library/nextcloud:33.0.0-apache
   tags: skudak, skudak-cloud
 
 - import_tasks: containers/debyltech/fulfillr.yml
@@ -76,26 +76,43 @@
     image: git.debyl.io/debyltech/fulfillr:20260124.0411
   tags: debyltech, fulfillr
 
+- import_tasks: containers/debyltech/n8n.yml
+  vars:
+    image: docker.io/n8nio/n8n:2.11.3
+  tags: debyltech, n8n
+
 - import_tasks: containers/debyltech/uptime-kuma.yml
   vars:
-    image: docker.io/louislam/uptime-kuma:1
-  tags: debyltech, uptime-kuma
+    image: docker.io/louislam/uptime-kuma:2.0.2
+  tags: debyltech, uptime-debyltech
 
-- import_tasks: containers/debyltech/geoip.yml
-  tags: debyltech, graylog, geoip
+- import_tasks: containers/home/uptime-kuma.yml
+  vars:
+    image: docker.io/louislam/uptime-kuma:2.0.2
+  tags: home, uptime
+
+- import_tasks: data/geoip.yml
+  tags: graylog, geoip
 
 - import_tasks: containers/debyltech/graylog.yml
+  vars:
+    mongo_image: docker.io/mongo:7.0
+    opensearch_image: docker.io/opensearchproject/opensearch:2
+    image: docker.io/graylog/graylog:7.0.1
   tags: debyltech, graylog
 
-- import_tasks: containers/base/fluent-bit.yml
-  tags: fluent-bit, graylog
+- import_tasks: containers/home/searxng.yml
+  vars:
+    image: docker.io/searxng/searxng:latest
+  tags: searxng
 
 - import_tasks: containers/home/gregtime.yml
   vars:
-    image: localhost/greg-time-bot:3.0.1
+    image: localhost/greg-time-bot:3.4.3
   tags: gregtime
 
 - import_tasks: containers/home/zomboid.yml
   vars:
     image: docker.io/cm2network/steamcmd:root
   tags: zomboid
@@ -112,6 +112,7 @@
 - name: fetch subuid of {{ podman_user }}
   become: true
   changed_when: false
+  check_mode: false
   ansible.builtin.shell: |
     set -o pipefail && cat /etc/subuid | awk -F':' '/{{ podman_user }}/{ print $2 }' | head -n 1
   register: podman_subuid
@@ -130,11 +130,6 @@
 # CI/Drone - REMOVED
 # ci.bdebyl.net configuration removed - Drone CI infrastructure decommissioned
 
-# Home server - redirect old to new
-{{ home_server_name }} {
-    redir https://{{ home_server_name_io }}{uri} 302
-}
-
 # Home server - {{ home_server_name_io }}
 {{ home_server_name_io }} {
     {{ ip_restricted_site() }}
@@ -164,7 +159,7 @@
     }
 }
 
-# Uptime Kuma - {{ uptime_kuma_server_name }}
+# Uptime Kuma (Debyltech) - {{ uptime_kuma_server_name }}
 {{ uptime_kuma_server_name }} {
     {{ ip_restricted_site() }}
 
@@ -182,6 +177,24 @@
     }
 }
 
+# Uptime Kuma (Personal) - {{ uptime_kuma_personal_server_name }}
+{{ uptime_kuma_personal_server_name }} {
+    {{ ip_restricted_site() }}
+
+    handle @local {
+        import common_headers
+        reverse_proxy localhost:3002 {
+            # WebSocket support for live updates
+            flush_interval -1
+        }
+    }
+
+    log {
+        output file /var/log/caddy/uptime-kuma-personal.log
+        format json
+    }
+}
+
 # Graylog Logs - {{ logs_server_name }}
 {{ logs_server_name }} {
     # GELF HTTP endpoint - open for Lambda (auth via header)
@@ -319,6 +332,23 @@
     }
 }
 
+# N8N Workflow Automation - {{ n8n_server_name }}
+{{ n8n_server_name }} {
+    {{ ip_restricted_site() }}
+
+    handle @local {
+        import common_headers
+        reverse_proxy localhost:5678 {
+            flush_interval -1
+        }
+    }
+
+    log {
+        output file {{ caddy_log_path }}/n8n.log
+        format {{ caddy_log_format }}
+    }
+}
+
 # Fulfillr - {{ fulfillr_server_name }} (Static + API with IP restrictions)
 {{ fulfillr_server_name }} {
     {{ ip_restricted_site() }}
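Since new vhosts were added, the rendered Caddyfile can be checked before a reload; a sketch using Caddy's built-in validator:

```bash
# Parse and validate the deployed config without restarting Caddy.
sudo caddy validate --config /etc/caddy/Caddyfile
```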
@@ -10,7 +10,7 @@
   },
   "tax": {
     "ein": "{{ fulfillr_tax_ein }}",
-    "ioss": nil
+    "ioss": null
   },
   "sender_address": {
     "city": "Newbury",
ansible/roles/podman/templates/nextcloud/cloud-backup.service.j2 (new file, 6 lines)

@@ -0,0 +1,6 @@
+[Unit]
+Description=Nextcloud {{ instance_name }} backup to TrueNAS
+
+[Service]
+Type=oneshot
+ExecStart={{ script_path }}

ansible/roles/podman/templates/nextcloud/cloud-backup.sh.j2 (new file, 4 lines)

@@ -0,0 +1,4 @@
+#!/bin/bash
+set -euo pipefail
+rsync -az --exclude .ssh -e "ssh -i {{ ssh_key_path }} -o StrictHostKeyChecking=accept-new" \
+  {{ data_path }}/ {{ ssh_user }}@truenas.localdomain:{{ remote_path }}/

ansible/roles/podman/templates/nextcloud/cloud-backup.timer.j2 (new file, 9 lines)

@@ -0,0 +1,9 @@
+[Unit]
+Description=Daily Nextcloud {{ instance_name }} backup
+
+[Timer]
+OnCalendar=*-*-* 04:00:00
+Persistent=true
+
+[Install]
+WantedBy=timers.target
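Because the timer only fires at 04:00, the first backup is easier to verify by starting the oneshot service by hand; a sketch for the `cloud` instance:

```bash
# Run one backup immediately and follow its journal output.
sudo systemctl start cloud-backup.service
sudo journalctl -u cloud-backup.service -f
```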
ansible/roles/podman/templates/searxng/settings.yml.j2 (new file, 35 lines)

@@ -0,0 +1,35 @@
+use_default_settings: true
+
+general:
+  instance_name: "SearXNG"
+  debug: false
+
+server:
+  bind_address: "127.0.0.1"
+  port: 8080
+  secret_key: "{{ searxng_secret_key }}"
+  limiter: false
+  image_proxy: false
+
+search:
+  safe_search: 0
+  formats:
+    - html
+    - json
+
+engines:
+  - name: duckduckgo
+    engine: duckduckgo
+    disabled: false
+
+  - name: google
+    engine: google
+    disabled: false
+
+  - name: wikipedia
+    engine: wikipedia
+    disabled: false
+
+  - name: bing
+    engine: bing
+    disabled: false
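The `json` entry under `formats` is what allows programmatic consumption of results (as the gregtime bot's `SEARXNG_URL` suggests); the same call by hand:

```bash
# JSON search against the local instance; formats: [html, json] enables this.
curl -s 'http://127.0.0.1:8080/search?q=project+zomboid&format=json' | head -c 400
```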
@@ -84,6 +84,6 @@ fi
 # Start server
 cd "${INSTALL_DIR}"
 echo "=== Starting Project Zomboid Server ==="
-echo "Connect to: home.bdebyl.net:16261"
+echo "Connect to: home.debyl.io:16261"
 
 exec su -c "export LD_LIBRARY_PATH=${INSTALL_DIR}/jre64/lib:\${LD_LIBRARY_PATH} && ./start-server.sh ${SERVER_ARGS}" steam
Binary file not shown.