Compare commits

..

10 Commits

Author SHA1 Message Date
Bastian de Byl
3f84ecaf5b feat: migrate fulfillr container from ECR to Gitea Packages
- Change image source from AWS ECR to git.debyl.io/debyltech/fulfillr
- Update login task from ECR to Gitea registry authentication
- Add Gitea registry credentials to vault

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-03 20:48:56 -05:00
Bastian de Byl
cf200d82d6 chore: gitea-actions improvements, graylog/fluent-bit logging, zomboid mod
- Gitea actions: add handlers, improve deps and service template
- Graylog: simplify container config, add Caddy reverse proxy
- Add fluent-bit container for log forwarding
- Add ClimbDownRope mod (Workshop ID: 3000725405) to zomboid

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-03 17:20:18 -05:00
Bastian de Byl
5832497bbd feat: add gitea-actions role for Gitea act-runner
- Create gitea-runner user with podman access
- Install podman-docker for docker CLI compatibility
- Download and configure act_runner binary
- Systemd service for act_runner daemon
- Host-mode runner labels for Fedora

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-01 13:13:42 -05:00
Bastian de Byl
2fd44fd450 feat: deploy gelf-proxy as container via Gitea registry
- Add Gitea container registry login task
- Add graylog.yml with full stack (MongoDB, OpenSearch, Graylog, gelf-proxy)
- Use container image instead of binary for gelf-proxy
- Image tagged from git.debyl.io/debyltech/gelf-proxy

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
2025-12-31 18:53:36 -05:00
Bastian de Byl
4d835e86a0 chore: zomboid improvements, gregtime improvements with rcon 2025-12-22 12:31:43 -05:00
Bastian de Byl
f9507f4685 chore: zomboid mod updates 2025-12-19 19:45:38 -05:00
Bastian de Byl
38561cb968 gitea, zomboid updates, ssh key fixes 2025-12-19 10:39:56 -05:00
Bastian de Byl
adce3e2dd4 chore: zomboid improvements, immich and other updates 2025-12-14 22:07:49 -05:00
Bastian de Byl
216a486db5 fix: home automations with configurations fixes 2025-12-14 19:23:35 -05:00
Bastian de Byl
68803214d4 fix: home automations part 2 2025-12-14 18:50:38 -05:00
44 changed files with 1126 additions and 98 deletions

View File

@@ -1,4 +1,13 @@
---
# Install the hybrid post-quantum KEX drop-in ahead of the distro
# crypto-policy include (30- sorts before 40-redhat-crypto-policies.conf).
- name: enable post-quantum key exchange for sshd
  become: true
  ansible.builtin.template:
    src: sshd-pq-kex.conf.j2
    dest: /etc/ssh/sshd_config.d/30-pq-kex.conf
    # Quoted: an unquoted 0600 is a YAML-1.1 octal int and is flagged by
    # ansible-lint (risky-octal); quoting matches the rest of this changeset.
    mode: "0600"
  # NOTE(review): the handler added elsewhere in this changeset is named
  # "restart sshd" (with a space) — confirm a handler literally named
  # restart_sshd exists in this role, otherwise this notify fails at runtime.
  notify: restart_sshd
  tags: security, sshd
- name: ensure sshd disallows passwords
  become: true
  ansible.builtin.lineinfile:

View File

@@ -0,0 +1,9 @@
# Post-Quantum Key Exchange Algorithm
# Managed by Ansible - do not edit directly
#
# Enables sntrup761x25519-sha512 (hybrid post-quantum + classical)
# to protect against "store now, decrypt later" attacks
#
# This must be included BEFORE crypto-policies (40-redhat-crypto-policies.conf)
KexAlgorithms sntrup761x25519-sha512@openssh.com,curve25519-sha256,curve25519-sha256@libssh.org,ecdh-sha2-nistp256,ecdh-sha2-nistp384,ecdh-sha2-nistp521,diffie-hellman-group-exchange-sha256,diffie-hellman-group14-sha256,diffie-hellman-group16-sha512,diffie-hellman-group18-sha512

View File

@@ -1,3 +1,8 @@
---
git_user: git
git_home: "/srv/{{ git_user }}"
# Gitea configuration
gitea_debyl_server_name: git.debyl.io
gitea_image: docker.gitea.com/gitea:1.25.2
gitea_db_image: docker.io/library/postgres:14-alpine

View File

@@ -0,0 +1,13 @@
# SELinux policy module: let sshd invoke podman for Gitea key lookup.
module gitea-ssh-podman 1.0;

require {
    type sshd_t;
    type container_runtime_exec_t;
    type user_home_t;
    class file { execute execute_no_trans open read };
    class dir { search };
}

# Allow sshd to execute podman for AuthorizedKeysCommand
# (execute_no_trans: podman keeps running in sshd_t, no domain transition).
allow sshd_t container_runtime_exec_t:file { execute execute_no_trans open read };
# sshd must traverse the git user's home to reach the rootless podman socket.
allow sshd_t user_home_t:dir search;

View File

@@ -15,3 +15,10 @@
tags: tags:
- git - git
- selinux - selinux
# Handler: restart sshd after its config drop-ins change.
# NOTE(review): a task in this changeset notifies "restart_sshd" (underscore)
# — confirm both spellings resolve to a defined handler.
- name: restart sshd
  become: true
  ansible.builtin.systemd:
    name: sshd.service
    state: restarted
  tags: git

View File

@@ -0,0 +1,28 @@
---
# Deploy gitea shim and shell for SSH passthrough
# The shim is called by SSH when authorized_keys command runs
# It forwards gitea commands to the container
- name: create gitea shim script
become: true
ansible.builtin.template:
src: gitea-shim.j2
dest: /usr/local/bin/gitea
mode: 0755
tags: git, gitea
# The shell is used if someone tries to SSH interactively
- name: create gitea-shell script
become: true
ansible.builtin.template:
src: gitea-shell.j2
dest: /usr/local/bin/gitea-shell
mode: 0755
tags: git, gitea
- name: update git user shell to gitea-shell
become: true
ansible.builtin.user:
name: "{{ git_user }}"
shell: /usr/local/bin/gitea-shell
tags: git, gitea

View File

@@ -0,0 +1,90 @@
---
# Deploy Gitea containers using Podman pod
# Create pod for Gitea services
- name: create gitea-debyl pod
become: true
become_user: "{{ git_user }}"
containers.podman.podman_pod:
name: gitea-debyl-pod
state: started
ports:
- "3100:3000"
tags: gitea
# PostgreSQL container in pod
- name: create gitea-debyl-postgres container
become: true
become_user: "{{ git_user }}"
containers.podman.podman_container:
name: gitea-debyl-postgres
image: "{{ gitea_db_image }}"
pod: gitea-debyl-pod
restart_policy: on-failure:3
log_driver: journald
env:
POSTGRES_DB: gitea
POSTGRES_USER: gitea
POSTGRES_PASSWORD: "{{ gitea_debyl_db_pass }}"
volumes:
- "{{ git_home }}/volumes/gitea/psql:/var/lib/postgresql/data"
tags: gitea
# Gitea container in pod
- name: create gitea-debyl container
become: true
become_user: "{{ git_user }}"
containers.podman.podman_container:
name: gitea-debyl
image: "{{ gitea_image }}"
pod: gitea-debyl-pod
restart_policy: on-failure:3
log_driver: journald
env:
USER_UID: "1000"
USER_GID: "1000"
GITEA__database__DB_TYPE: postgres
GITEA__database__HOST: "127.0.0.1:5432"
GITEA__database__NAME: gitea
GITEA__database__USER: gitea
GITEA__database__PASSWD: "{{ gitea_debyl_db_pass }}"
GITEA__server__DOMAIN: "{{ gitea_debyl_server_name }}"
GITEA__server__ROOT_URL: "https://{{ gitea_debyl_server_name }}/"
GITEA__server__SSH_DOMAIN: "{{ gitea_debyl_server_name }}"
GITEA__server__START_SSH_SERVER: "false"
GITEA__server__DISABLE_SSH: "false"
GITEA__server__SSH_PORT: "22"
GITEA__security__SECRET_KEY: "{{ gitea_debyl_secret_key }}"
GITEA__security__INTERNAL_TOKEN: "{{ gitea_debyl_internal_token }}"
GITEA__security__INSTALL_LOCK: "true"
GITEA__service__DISABLE_REGISTRATION: "true"
GITEA__service__REQUIRE_SIGNIN_VIEW: "false"
volumes:
- "{{ git_home }}/volumes/gitea/data:/data"
- /etc/localtime:/etc/localtime:ro
tags: gitea
# Generate systemd service for the pod
- name: create systemd job for gitea-debyl-pod
become: true
become_user: "{{ git_user }}"
ansible.builtin.shell: |
podman generate systemd --name gitea-debyl-pod --files --new
mv pod-gitea-debyl-pod.service {{ git_home }}/.config/systemd/user/
mv container-gitea-debyl-postgres.service {{ git_home }}/.config/systemd/user/
mv container-gitea-debyl.service {{ git_home }}/.config/systemd/user/
args:
chdir: "{{ git_home }}"
changed_when: false
tags: gitea
- name: enable gitea-debyl-pod service
become: true
become_user: "{{ git_user }}"
ansible.builtin.systemd:
name: pod-gitea-debyl-pod.service
daemon_reload: true
enabled: true
state: started
scope: user
tags: gitea

View File

@@ -1,4 +1,15 @@
--- ---
- import_tasks: user.yml - import_tasks: user.yml
- import_tasks: systemd.yml - import_tasks: podman.yml
- import_tasks: gitea-shell.yml
- import_tasks: sshd.yml
- import_tasks: selinux.yml - import_tasks: selinux.yml
- import_tasks: selinux-podman.yml
- import_tasks: gitea.yml
# git-daemon no longer needed - commented out
# - import_tasks: systemd.yml
# Gitea Actions runner
- include_role:
name: gitea-actions
tags: gitea-actions

View File

@@ -0,0 +1,80 @@
---
# Rootless Podman setup for git user
# Enables running Gitea containers under the git user
# Enable lingering for systemd user services
- name: check if git user lingering enabled
become: true
ansible.builtin.stat:
path: "/var/lib/systemd/linger/{{ git_user }}"
register: git_user_lingering
tags: git, gitea
- name: enable git user lingering
become: true
ansible.builtin.command: |
loginctl enable-linger {{ git_user }}
when: not git_user_lingering.stat.exists
tags: git, gitea
# Set ulimits for container operations
- name: set ulimits for git user
become: true
community.general.pam_limits:
domain: "{{ git_user }}"
limit_type: "{{ item.type }}"
limit_item: "{{ item.name }}"
value: "{{ item.value }}"
loop:
- { name: memlock, type: soft, value: "unlimited" }
- { name: memlock, type: hard, value: "unlimited" }
- { name: nofile, type: soft, value: 39693561 }
- { name: nofile, type: hard, value: 39693561 }
tags: git, gitea
# Create container directories
- name: create git podman directories
become: true
become_user: "{{ git_user }}"
ansible.builtin.file:
path: "{{ item }}"
state: directory
mode: 0755
loop:
- "{{ git_home }}/.config/systemd/user"
- "{{ git_home }}/volumes"
- "{{ git_home }}/volumes/gitea"
- "{{ git_home }}/volumes/gitea/data"
# NOTE: psql directory is created by PostgreSQL container with container user ownership
notify: restorecon git
tags: git, gitea
# SELinux context for container volumes
- name: selinux context for git container volumes
become: true
community.general.sefcontext:
target: "{{ git_home }}/volumes(/.*)?"
setype: container_file_t
state: present
notify: restorecon git
tags: git, gitea, selinux
# Enable podman socket for SSH key lookup via AuthorizedKeysCommand
- name: enable podman socket for git user
become: true
become_user: "{{ git_user }}"
ansible.builtin.systemd:
name: podman.socket
enabled: true
state: started
scope: user
tags: git, gitea
# Fetch subuid for volume permissions
- name: fetch subuid of {{ git_user }}
become: true
changed_when: false
ansible.builtin.shell: |
set -o pipefail && cat /etc/subuid | awk -F':' '/{{ git_user }}/{ print $2 }' | head -n 1
register: git_subuid
tags: always

View File

@@ -0,0 +1,21 @@
---
# SELinux policy for SSH + Podman integration
- name: copy gitea SELinux policy module
become: true
ansible.builtin.copy:
src: gitea-ssh-podman.te
dest: /tmp/gitea-ssh-podman.te
mode: 0644
register: selinux_policy
tags: git, gitea, selinux
- name: compile and install gitea SELinux policy
become: true
ansible.builtin.shell: |
cd /tmp
checkmodule -M -m -o gitea-ssh-podman.mod gitea-ssh-podman.te
semodule_package -o gitea-ssh-podman.pp -m gitea-ssh-podman.mod
semodule -i gitea-ssh-podman.pp
when: selinux_policy.changed
tags: git, gitea, selinux

View File

@@ -0,0 +1,19 @@
---
# Configure SSH AuthorizedKeysCommand for Gitea
- name: create gitea-authorized-keys script
become: true
ansible.builtin.template:
src: gitea-authorized-keys.j2
dest: /usr/local/bin/gitea-authorized-keys
mode: 0755
tags: git, gitea
- name: deploy sshd gitea configuration
become: true
ansible.builtin.template:
src: sshd-gitea.conf.j2
dest: /etc/ssh/sshd_config.d/50-gitea.conf
mode: 0644
notify: restart sshd
tags: git, gitea

View File

@@ -0,0 +1,12 @@
#!/bin/sh
# Query Gitea for SSH authorized keys
# Managed by Ansible - do not edit directly
# Arguments: %u (username) %t (key type) %k (key blob)
# Use podman remote to connect via socket (avoids rootless pause process issues)
# NOTE(review): UID 1001 is hardcoded — assumes the git user's UID is 1001 on
# every host; confirm, or template the UID from a fact/variable.
export CONTAINER_HOST=unix:///run/user/1001/podman/podman.sock
# stderr suppressed so sshd never sees noise on the keys channel; an empty
# response simply means "no matching key" to sshd.
/usr/bin/podman --remote exec -i --user 1000 gitea-debyl \
  /usr/local/bin/gitea keys \
  -c /data/gitea/conf/app.ini \
  -e git -u "$1" -t "$2" -k "$3" 2>/dev/null

View File

@@ -0,0 +1,27 @@
#!/bin/sh
# Gitea SSH shell - forwards commands to Gitea container
# Managed by Ansible - do not edit directly
#
# When sshd runs a forced command from authorized_keys, it invokes:
#   <user-shell> -c "<forced-command>"
# The forced command is: /usr/local/bin/gitea --config=... serv key-<id>
# SSH_ORIGINAL_COMMAND contains the client's requested command (e.g., git-upload-pack)
# Use podman remote to connect via socket (avoids rootless pause process issues)
# NOTE(review): UID 1001 is hardcoded — assumes the git user's UID is 1001;
# confirm, or template it.
export CONTAINER_HOST=unix:///run/user/1001/podman/podman.sock
if [ "$1" = "-c" ] && [ -n "$2" ]; then
    # sshd invoked us with -c "command" - execute the command
    # The command is: /usr/local/bin/gitea --config=... serv key-<id>
    # NOTE(review): $2 is deliberately unquoted so the command string is
    # word-split into argv; this breaks if any argument contains whitespace.
    exec $2
elif [ -n "$SSH_ORIGINAL_COMMAND" ]; then
    # Direct invocation with SSH_ORIGINAL_COMMAND (shouldn't happen normally)
    echo "Interactive shell is disabled."
    echo "Use: git clone git@{{ gitea_debyl_server_name }}:<owner>/<repo>.git"
    exit 1
else
    # Interactive login attempt
    echo "Interactive shell is disabled."
    echo "Use: git clone git@{{ gitea_debyl_server_name }}:<owner>/<repo>.git"
    exit 1
fi

View File

@@ -0,0 +1,15 @@
#!/bin/sh
# Gitea shim - forwards gitea commands to the container
# Managed by Ansible - do not edit directly
#
# This script is called when sshd executes the forced command from authorized_keys:
#   /usr/local/bin/gitea --config=/data/gitea/conf/app.ini serv key-<id>
#
# SSH_ORIGINAL_COMMAND contains the client's git command (e.g., git-upload-pack <repo>)
# Use podman remote to connect via socket (avoids rootless pause process issues)
# NOTE(review): UID 1001 is hardcoded — assumes the git user's UID is 1001;
# confirm, or template it.
export CONTAINER_HOST=unix:///run/user/1001/podman/podman.sock
# exec replaces this process; SSH_ORIGINAL_COMMAND is passed through so the
# in-container gitea binary can run the client's actual git command.
exec /usr/bin/podman --remote exec -i --user 1000 \
  --env SSH_ORIGINAL_COMMAND="$SSH_ORIGINAL_COMMAND" \
  gitea-debyl /usr/local/bin/gitea "$@"

View File

@@ -0,0 +1,7 @@
# Gitea SSH Key Authentication
# Managed by Ansible - do not edit directly
# Only the git user is affected; keys are resolved live from Gitea via the
# AuthorizedKeysCommand instead of a static authorized_keys file.
Match User {{ git_user }}
    AuthorizedKeysFile none
    AuthorizedKeysCommandUser {{ git_user }}
    AuthorizedKeysCommand /usr/local/bin/gitea-authorized-keys %u %t %k

View File

@@ -0,0 +1,11 @@
---
# Defaults for the gitea-actions role (act_runner under a dedicated user).
gitea_runner_user: gitea-runner
gitea_runner_home: /home/gitea-runner
# act_runner release to download from dl.gitea.com (pinned).
gitea_runner_version: "0.2.13"
gitea_runner_arch: linux-amd64
gitea_instance_url: https://git.debyl.io
# Paths
act_runner_bin: /usr/local/bin/act_runner
act_runner_config_dir: /etc/act_runner
act_runner_work_dir: /var/lib/act_runner

View File

@@ -0,0 +1,14 @@
---
# Restart the runner daemon after binary/config/unit changes.
- name: restart act_runner
  become: true
  ansible.builtin.systemd:
    name: act_runner
    state: restarted
    daemon_reload: true

# Restart the system podman socket after its override.conf changes so the
# new SocketMode/SocketGroup take effect.
- name: restart podman socket
  become: true
  ansible.builtin.systemd:
    name: podman.socket
    state: restarted
    daemon_reload: true

View File

@@ -0,0 +1,38 @@
---
- name: install podman-docker for docker CLI compatibility
become: true
ansible.builtin.dnf:
name:
- podman-docker
- golang
state: present
tags: gitea-actions
- name: create podman socket override directory
become: true
ansible.builtin.file:
path: /etc/systemd/system/podman.socket.d
state: directory
mode: "0755"
tags: gitea-actions
- name: configure podman socket for gitea-runner access
become: true
ansible.builtin.copy:
dest: /etc/systemd/system/podman.socket.d/override.conf
content: |
[Socket]
SocketMode=0660
SocketGroup={{ gitea_runner_user }}
mode: "0644"
notify: restart podman socket
tags: gitea-actions
- name: enable system podman socket
become: true
ansible.builtin.systemd:
name: podman.socket
daemon_reload: true
enabled: true
state: started
tags: gitea-actions

View File

@@ -0,0 +1,9 @@
---
- import_tasks: user.yml
tags: gitea-actions
- import_tasks: deps.yml
tags: gitea-actions
- import_tasks: runner.yml
tags: gitea-actions
- import_tasks: systemd.yml
tags: gitea-actions

View File

@@ -0,0 +1,45 @@
---
# Download the pinned act_runner release binary.
# NOTE(review): no checksum= given — the download is trusted solely via TLS;
# consider pinning the published sha256 for this version.
- name: download act_runner binary
  become: true
  ansible.builtin.get_url:
    url: "https://dl.gitea.com/act_runner/{{ gitea_runner_version }}/act_runner-{{ gitea_runner_version }}-{{ gitea_runner_arch }}"
    dest: "{{ act_runner_bin }}"
    mode: "0755"
  tags: gitea-actions

- name: create act_runner config directory
  become: true
  ansible.builtin.file:
    path: "{{ act_runner_config_dir }}"
    state: directory
    mode: "0755"
  tags: gitea-actions

# Work dir is owned by the runner user because the daemon runs unprivileged.
- name: create act_runner working directory
  become: true
  ansible.builtin.file:
    path: "{{ act_runner_work_dir }}"
    state: directory
    owner: "{{ gitea_runner_user }}"
    group: "{{ gitea_runner_user }}"
    mode: "0755"
  tags: gitea-actions

- name: create act_runner cache directory
  become: true
  ansible.builtin.file:
    path: "{{ act_runner_work_dir }}/cache"
    state: directory
    owner: "{{ gitea_runner_user }}"
    group: "{{ gitea_runner_user }}"
    mode: "0755"
  tags: gitea-actions

- name: deploy act_runner configuration
  become: true
  ansible.builtin.template:
    src: config.yaml.j2
    dest: "{{ act_runner_config_dir }}/config.yaml"
    mode: "0644"
  notify: restart act_runner
  tags: gitea-actions

View File

@@ -0,0 +1,17 @@
---
- name: deploy act_runner systemd service
become: true
ansible.builtin.template:
src: act_runner.service.j2
dest: /etc/systemd/system/act_runner.service
mode: "0644"
notify: restart act_runner
tags: gitea-actions
- name: enable act_runner service
become: true
ansible.builtin.systemd:
name: act_runner
daemon_reload: true
enabled: true
tags: gitea-actions

View File

@@ -0,0 +1,34 @@
---
- name: create gitea-runner user
become: true
ansible.builtin.user:
name: "{{ gitea_runner_user }}"
comment: Gitea Actions runner
shell: /bin/bash
createhome: true
home: "{{ gitea_runner_home }}"
groups: docker
append: true
tags: gitea-actions
- name: check if gitea-runner lingering enabled
become: true
ansible.builtin.stat:
path: "/var/lib/systemd/linger/{{ gitea_runner_user }}"
register: gitea_runner_lingering
tags: gitea-actions
- name: enable gitea-runner lingering
become: true
ansible.builtin.command: loginctl enable-linger {{ gitea_runner_user }}
when: not gitea_runner_lingering.stat.exists
tags: gitea-actions
- name: create .config/systemd/user directory
become: true
become_user: "{{ gitea_runner_user }}"
ansible.builtin.file:
path: "{{ gitea_runner_home }}/.config/systemd/user"
state: directory
mode: "0755"
tags: gitea-actions

View File

@@ -0,0 +1,16 @@
[Unit]
Description=Gitea Actions runner
Documentation=https://gitea.com/gitea/act_runner
# Ordering only (not Requires=): start after the network and podman socket.
After=network.target podman.socket

[Service]
ExecStart={{ act_runner_bin }} daemon --config {{ act_runner_config_dir }}/config.yaml
WorkingDirectory={{ act_runner_work_dir }}
# TimeoutSec=0: never kill the daemon for slow start/stop (long-running jobs).
TimeoutSec=0
RestartSec=10
Restart=always
User={{ gitea_runner_user }}
# Point the docker-compatible CLI/API at the system podman socket
# (group access is granted via the podman.socket override).
Environment="DOCKER_HOST=unix:///run/podman/podman.sock"

[Install]
WantedBy=multi-user.target

View File

@@ -0,0 +1,30 @@
log:
level: info
runner:
file: {{ act_runner_work_dir }}/.runner
capacity: 1
timeout: 3h
insecure: false
fetch_timeout: 5s
fetch_interval: 2s
labels:
- ubuntu-latest:host
- ubuntu-22.04:host
- fedora:host
cache:
enabled: true
dir: {{ act_runner_work_dir }}/cache
container:
network: host
privileged: false
options:
workdir_parent:
valid_volumes: []
docker_host: ""
force_pull: false
host:
workdir_parent: {{ act_runner_work_dir }}/workdir

View File

@@ -16,6 +16,32 @@ partsy_path: "{{ podman_volumes }}/partsy"
photos_path: "{{ podman_volumes }}/photos" photos_path: "{{ podman_volumes }}/photos"
uptime_kuma_path: "{{ podman_volumes }}/uptime-kuma" uptime_kuma_path: "{{ podman_volumes }}/uptime-kuma"
zomboid_path: "{{ podman_volumes }}/zomboid" zomboid_path: "{{ podman_volumes }}/zomboid"
# Zomboid server mode: 'vanilla' or 'modded'
zomboid_server_mode: modded
# Zomboid RCON port for remote administration
zomboid_rcon_port: "27015"
# Server names for each mode
zomboid_server_names:
vanilla: zomboid
modded: moddedjoboid
# Mod configuration for modded server
# Generated by scripts/steam-workshop-query.py
# Removed: 3403870858 (LifestyleHobbies), 2937786633 (69mini), 3616176188 (GaelGunStore - buggy)
# Removed: 3422418897, 2544353492 (Ahu ToolWeapon - Lua crashes), 2940354599 (FWO Fitness - anim errors)
# Removed: 3390411200 (SLDarkerSnowB42), 3618557184 (HereGoesTheSun)
# Removed: 3480990544 (Constown42), 3602388131 (Greenleaf), 2463499011 (Grapeseed)
# Load order: Libraries first (damnlib, tsarslib), then dependent mods, then others
zomboid_mods:
workshop_items: >-
3171167894;3402491515;3330403100;2409333430;3073430075;3379334330;3110913021;3366300557;3034636011;3409287192;3005903549;3161951724;3413704851;3413706334;3287727378;3226885926;2625625421;3418252689;3418253716;3152529790;2478247379;2942793445;2991201484;2913633066;2873290424;3428008364;3253385114;2846036306;2642541073;3435796523;3008795514;3447272250;3026723485;2900580391;2870394916;3292659291;2969343830;2566953935;2962175696;3196180339;3258343790;3346905070;3320947974;3478633453;2952802178;3001592312;3052360250;3490370700;2932547723;2805630347;3504401781;2772575623;3110911330;3088951320;3213391371;2932549988;3041122351;2971246021;3539691958;3315443103;2886832257;2886832936;2886833398;2811383142;2799152995;3248388837;3566868353;3570973322;2897390033;3592777775;3596903773;3601417745;3614034284;3577903007;3407042038;3405178154;3402493701;3402812859;3616536783;3431734923;3429790870;2850935956;3307376332;3397182976;3432928943;3610005735;3540297822;3426448380;3579640010;3389448389;3393821407;3044705007;2866258937;3490188370;3508537032;3451167732;3461263912;2903771337;3629835761;3000725405
# Build 42 requires backslash prefix for each mod ID
# Load order: 1) damnlib 2) tsarslib 3) KI5 vehicles 4) Autotsar vehicles 5) Everything else
mod_ids: >-
\damnlib;\tsarslib;\KI5trailers;\91range;\93fordF350;\82porsche911;\90bmwE30;\91fordLTD;\89dodgeCaravan;\84jeepXJ;\63beetle;\76chevyKseries;\85chevyCaprice;\85pontiacParisienne;\92jeepYJ;\92jeepYJJP18;\87buickRegal;\isoContainers;\85buickLeSabre;\85oldsmobileDelta88;\93chevySuburban;\93chevySuburbanExpanded;\67commando;\90pierceArrow;\69camaro;\70barracuda;\70dodge;\86chevyCUCV;\81deloreanDMC12;\81deloreanDMC12BTTF;\92nissanGTR;\92amgeneralM998;\88toyotaHilux;\91geoMetro;\66pontiacLeMans;\67gt500;\49powerWagon;\86fordE150;\86fordE150dnd;\86fordE150mm;\86fordE150pd;\86fordE150expanded;\89volvo200;\93fordElgin;\86oshkoshP19A;\92fordCVPI;\87chevySuburban;\68firebird;\77firebird;\82firebird;\82firebirdKITT;\04vwTouran;\90fordF350ambulance;\93mustangSSP;\87toyotaMR2;\73fordFalcon;\73fordFalconPS;\93townCar;\84merc;\91nissan240sx;\59meteor;\ECTO1;\87fordB700;\93fordTaurus;\75grandPrix;\89trooper;\63Type2Van;\99fordCVPI;\91fordRanger;\98stagea;\82jeepJ10;\82jeepJ10t;\88chevyS10;\89fordBronco;\83amgeneralM923;\78amgeneralM35A2;\78amgeneralM35A2extra;\78amgeneralM49A2C;\78amgeneralM50A3;\78amgeneralM62;\80manKat1;\65banshee;\89defender;\97bushmaster;\84cadillacDeVille;\84buickElectra;\84oldsmobile98;\85chevyStepVan;\85chevyStepVanexpanded;\autotsartrailers;\ATA_Jeep;\ATA_Jeep_x10;\ATA_Jeep_x2;\ATA_Jeep_x4;\ATA_Mustang;\ATA_Mustang_x2;\ATA_Mustang_x4;\ATA_Bus;\VanillaFoodsExpanded;\TombWardrobeALT;\TombWardrobeALTVanilla;\TombBodyCompat;\TombBodyCompatBootsExp;\TombBody;\TombBodyCustom;\TombBodyTex;\TombBodyTexDOLL;\TombBodyTexNUDE;\SM4BootsExpandedB42;\SM4BootsExpandedFlatshoes;\GanydeBielovzki's Frockin Splendor!;\RandomClothing;\EFTBP;\AliceGear;\TableSaw;\stanks_suicide;\STA_PryOpen;\AutoReload;\DBFaster50;\DBFaster60;\DBFaster70;\DBFaster80;\FixBlowTorchPropaneTank;\MiniHealthPanel;\P4HasBeenRead;\Project_Cook;\NeatUI_Framework;\ModernStatus;\CleanHotBar;\REORDER_THE_HOTBAR;\Ladders42131;\ClimbDownRope
pihole_path: "{{ podman_volumes }}/pihole" pihole_path: "{{ podman_volumes }}/pihole"
sshpass_cron_path: "{{ podman_volumes }}/sshpass_cron" sshpass_cron_path: "{{ podman_volumes }}/sshpass_cron"
caddy_path: "{{ podman_volumes }}/caddy" caddy_path: "{{ podman_volumes }}/caddy"
@@ -43,6 +69,7 @@ cloud_server_name_io: cloud.debyl.io
home_server_name_io: home.debyl.io home_server_name_io: home.debyl.io
parts_server_name_io: parts.debyl.io parts_server_name_io: parts.debyl.io
photos_server_name_io: photos.debyl.io photos_server_name_io: photos.debyl.io
gitea_debyl_server_name: git.debyl.io
# Legacy nginx/ModSecurity configuration removed - Caddy provides built-in security # Legacy nginx/ModSecurity configuration removed - Caddy provides built-in security
@@ -77,3 +104,11 @@ caddy_security_headers:
X-Content-Type-Options: "nosniff" X-Content-Type-Options: "nosniff"
Referrer-Policy: "same-origin" Referrer-Policy: "same-origin"
X-Frame-Options: "SAMEORIGIN" X-Frame-Options: "SAMEORIGIN"
# Graylog logging stack
graylog_path: "{{ podman_volumes }}/graylog"
logs_server_name: logs.debyl.io
# gelf_auth_token: defined in vault - X-Gelf-Token header for Lambda GELF HTTP auth
# Fluent Bit is deployed as a systemd service (not container)
# for direct journal access - see containers/base/fluent-bit.yml

View File

@@ -411,6 +411,7 @@
- condition: device - condition: device
device_id: 03a12d2360d9954aed19c2449070725a device_id: 03a12d2360d9954aed19c2449070725a
domain: light domain: light
entity_id: 7c1e7db73799cc3f90948b5118596985
type: is_on type: is_on
then: then:
- type: turn_on - type: turn_on
@@ -422,6 +423,7 @@
- condition: device - condition: device
device_id: f31e4f9bf8fa3687a07aeb4430eaef38 device_id: f31e4f9bf8fa3687a07aeb4430eaef38
domain: light domain: light
entity_id: b79934d97f3bb9d8a3da47c76d03ded4
type: is_on type: is_on
then: then:
- type: turn_on - type: turn_on
@@ -433,6 +435,7 @@
- condition: device - condition: device
device_id: 3f7f65571d9bb0833433996f1f6725bd device_id: 3f7f65571d9bb0833433996f1f6725bd
domain: light domain: light
entity_id: 7407afe14783543252c666d5ff7c5d5c
type: is_on type: is_on
then: then:
- type: turn_on - type: turn_on
@@ -444,6 +447,7 @@
- condition: device - condition: device
device_id: 21eb2bd28aba2ee361a22af92e8b2d16 device_id: 21eb2bd28aba2ee361a22af92e8b2d16
domain: light domain: light
entity_id: 81c486d682afcc94e98e377475cc92fc
type: is_on type: is_on
then: then:
- type: turn_on - type: turn_on
@@ -467,6 +471,7 @@
- condition: device - condition: device
device_id: f31e4f9bf8fa3687a07aeb4430eaef38 device_id: f31e4f9bf8fa3687a07aeb4430eaef38
domain: light domain: light
entity_id: b79934d97f3bb9d8a3da47c76d03ded4
type: is_on type: is_on
then: then:
- type: turn_on - type: turn_on
@@ -478,6 +483,7 @@
- condition: device - condition: device
device_id: 03a12d2360d9954aed19c2449070725a device_id: 03a12d2360d9954aed19c2449070725a
domain: light domain: light
entity_id: 7c1e7db73799cc3f90948b5118596985
type: is_on type: is_on
then: then:
- type: turn_on - type: turn_on
@@ -489,6 +495,7 @@
- condition: device - condition: device
device_id: 800eddbeeda071225f181a14cb9527e0 device_id: 800eddbeeda071225f181a14cb9527e0
domain: light domain: light
entity_id: 521a92ddd8be76c7eddfc544f81f6020
type: is_on type: is_on
then: then:
- type: turn_on - type: turn_on
@@ -500,6 +507,7 @@
- condition: device - condition: device
device_id: 3f7f65571d9bb0833433996f1f6725bd device_id: 3f7f65571d9bb0833433996f1f6725bd
domain: light domain: light
entity_id: 7407afe14783543252c666d5ff7c5d5c
type: is_on type: is_on
then: then:
- type: turn_on - type: turn_on
@@ -511,6 +519,7 @@
- condition: device - condition: device
device_id: 21eb2bd28aba2ee361a22af92e8b2d16 device_id: 21eb2bd28aba2ee361a22af92e8b2d16
domain: light domain: light
entity_id: 81c486d682afcc94e98e377475cc92fc
type: is_on type: is_on
then: then:
- type: turn_on - type: turn_on
@@ -534,6 +543,7 @@
- condition: device - condition: device
device_id: f31e4f9bf8fa3687a07aeb4430eaef38 device_id: f31e4f9bf8fa3687a07aeb4430eaef38
domain: light domain: light
entity_id: b79934d97f3bb9d8a3da47c76d03ded4
type: is_on type: is_on
then: then:
- type: turn_on - type: turn_on
@@ -545,6 +555,7 @@
- condition: device - condition: device
device_id: 03a12d2360d9954aed19c2449070725a device_id: 03a12d2360d9954aed19c2449070725a
domain: light domain: light
entity_id: 7c1e7db73799cc3f90948b5118596985
type: is_on type: is_on
then: then:
- type: turn_on - type: turn_on
@@ -556,6 +567,7 @@
- condition: device - condition: device
device_id: 800eddbeeda071225f181a14cb9527e0 device_id: 800eddbeeda071225f181a14cb9527e0
domain: light domain: light
entity_id: 521a92ddd8be76c7eddfc544f81f6020
type: is_on type: is_on
then: then:
- type: turn_on - type: turn_on
@@ -567,6 +579,7 @@
- condition: device - condition: device
device_id: 3f7f65571d9bb0833433996f1f6725bd device_id: 3f7f65571d9bb0833433996f1f6725bd
domain: light domain: light
entity_id: 7407afe14783543252c666d5ff7c5d5c
type: is_on type: is_on
then: then:
- type: turn_on - type: turn_on
@@ -578,6 +591,7 @@
- condition: device - condition: device
device_id: 21eb2bd28aba2ee361a22af92e8b2d16 device_id: 21eb2bd28aba2ee361a22af92e8b2d16
domain: light domain: light
entity_id: 81c486d682afcc94e98e377475cc92fc
type: is_on type: is_on
then: then:
- type: turn_on - type: turn_on

View File

@@ -18,3 +18,9 @@ homeassistant:
media: /share media: /share
automation: !include automations.yaml automation: !include automations.yaml
input_boolean:
tv_mode:
name: TV Mode
initial: off
icon: mdi:television

View File

@@ -42,3 +42,11 @@
scope: user scope: user
tags: tags:
- zomboid - zomboid
- name: restart fluent-bit
become: true
ansible.builtin.systemd:
name: fluent-bit
state: restarted
tags:
- fluent-bit

View File

@@ -105,4 +105,6 @@
- name: create systemd startup job for awsddns-debyl - name: create systemd startup job for awsddns-debyl
include_tasks: podman/systemd-generate.yml include_tasks: podman/systemd-generate.yml
vars: vars:
container_name: awsddns-debyl container_name: awsddns-debyl
# NOTE: git.debyl.io is an ALIAS record to home.debyl.io - no DDNS needed

View File

@@ -0,0 +1,45 @@
---
# Fluent Bit - Log forwarder from journald to Graylog GELF
# Deployed as systemd service (not container) for direct journal access
# Clean up old container deployment if it exists
- name: stop and remove fluent-bit container if exists
become: true
become_user: "{{ podman_user }}"
containers.podman.podman_container:
name: fluent-bit
state: absent
ignore_errors: true
- name: disable old fluent-bit container systemd service
become: true
become_user: "{{ podman_user }}"
ansible.builtin.systemd:
name: fluent-bit
enabled: false
state: stopped
scope: user
ignore_errors: true
- name: install fluent-bit package
become: true
ansible.builtin.dnf:
name: fluent-bit
state: present
- name: deploy fluent-bit configuration
become: true
ansible.builtin.template:
src: fluent-bit/fluent-bit.conf.j2
dest: /etc/fluent-bit/fluent-bit.conf
owner: root
group: root
mode: '0644'
notify: restart fluent-bit
- name: enable and start fluent-bit service
become: true
ansible.builtin.systemd:
name: fluent-bit
enabled: true
state: started

View File

@@ -1,5 +1,5 @@
---
- import_tasks: ecr/podman-ecr-login.yml - import_tasks: gitea/podman-gitea-login.yml
- name: create nginx fulfillr-site directory - name: create nginx fulfillr-site directory
become: true become: true

View File

@@ -0,0 +1,171 @@
---
# Graylog Logging Stack
# Deploys MongoDB, OpenSearch, and Graylog
# System prerequisite: OpenSearch requires increased virtual memory
- name: set vm.max_map_count for OpenSearch
become: true
ansible.posix.sysctl:
name: vm.max_map_count
value: '262144'
state: present
sysctl_set: true
tags: graylog
# Create directory structure
- name: create graylog host directory volumes
become: true
ansible.builtin.file:
path: "{{ item }}"
state: directory
owner: "{{ podman_subuid.stdout }}"
group: "{{ podman_subuid.stdout }}"
mode: '0755'
notify: restorecon podman
loop:
- "{{ graylog_path }}/mongo"
- "{{ graylog_path }}/opensearch"
- "{{ graylog_path }}/graylog/data"
- "{{ graylog_path }}/graylog/data/config"
tags: graylog
# OpenSearch runs as UID 1000 inside the container
- name: unshare chown the opensearch data volume
  become: true
  become_user: "{{ podman_user }}"
  ansible.builtin.command:
    cmd: "podman unshare chown -R 1000:1000 {{ graylog_path }}/opensearch"
  # Re-applying the same ownership is harmless; never report "changed"
  changed_when: false
  tags: graylog
# Graylog runs as UID 1100 inside the container
- name: unshare chown the graylog data volume
  become: true
  become_user: "{{ podman_user }}"
  ansible.builtin.command:
    cmd: "podman unshare chown -R 1100:1100 {{ graylog_path }}/graylog"
  # Re-applying the same ownership is harmless; never report "changed"
  changed_when: false
  tags: graylog
# Graylog requires minimal config file
- name: create graylog.conf
become: true
ansible.builtin.copy:
dest: "{{ graylog_path }}/graylog/data/config/graylog.conf"
content: |
is_leader = true
data_dir = /usr/share/graylog/data
node_id_file = /usr/share/graylog/data/node-id
mode: '0644'
tags: graylog
# The copy task above writes graylog.conf as the podman user; hand it to
# the in-container graylog UID (1100) via the rootless user namespace.
- name: fix graylog.conf ownership
  become: true
  become_user: "{{ podman_user }}"
  ansible.builtin.command:
    cmd: "podman unshare chown 1100:1100 {{ graylog_path }}/graylog/data/config/graylog.conf"
  # Idempotent ownership re-apply; suppress the "changed" report
  changed_when: false
  tags: graylog
- name: flush handlers
ansible.builtin.meta: flush_handlers
tags: graylog
# MongoDB container
- import_tasks: podman/podman-check.yml
vars:
container_name: graylog-mongo
container_image: docker.io/mongo:6
tags: graylog
- name: create graylog-mongo container
become: true
become_user: "{{ podman_user }}"
containers.podman.podman_container:
name: graylog-mongo
image: docker.io/mongo:6
state: started
restart_policy: on-failure:3
log_driver: journald
volumes:
- "{{ graylog_path }}/mongo:/data/db:Z"
ports:
- "127.0.0.1:27017:27017/tcp"
tags: graylog
- name: create systemd startup job for graylog-mongo
include_tasks: podman/systemd-generate.yml
vars:
container_name: graylog-mongo
tags: graylog
# OpenSearch container
- import_tasks: podman/podman-check.yml
vars:
container_name: graylog-opensearch
container_image: docker.io/opensearchproject/opensearch:2
tags: graylog
- name: create graylog-opensearch container
become: true
become_user: "{{ podman_user }}"
containers.podman.podman_container:
name: graylog-opensearch
image: docker.io/opensearchproject/opensearch:2
state: started
restart_policy: on-failure:3
log_driver: journald
env:
discovery.type: single-node
DISABLE_SECURITY_PLUGIN: "true"
OPENSEARCH_JAVA_OPTS: "-Xms512m -Xmx512m"
volumes:
- "{{ graylog_path }}/opensearch:/usr/share/opensearch/data:z"
ports:
- "127.0.0.1:9200:9200/tcp"
tags: graylog
- name: create systemd startup job for graylog-opensearch
include_tasks: podman/systemd-generate.yml
vars:
container_name: graylog-opensearch
tags: graylog
# Graylog container
- import_tasks: podman/podman-check.yml
vars:
container_name: graylog
container_image: docker.io/graylog/graylog:6.0
tags: graylog
# Graylog uses host network to reach MongoDB/OpenSearch on 127.0.0.1
# Binds to: 9000 (web UI), 12202 (GELF HTTP input proxied via Caddy)
- name: create graylog container
become: true
become_user: "{{ podman_user }}"
containers.podman.podman_container:
name: graylog
image: docker.io/graylog/graylog:6.0
state: started
restart_policy: on-failure:3
log_driver: journald
network: host
env:
GRAYLOG_PASSWORD_SECRET: "{{ graylog_password_secret }}"
GRAYLOG_ROOT_PASSWORD_SHA2: "{{ graylog_root_password_sha2 }}"
GRAYLOG_HTTP_EXTERNAL_URI: "https://{{ logs_server_name }}/"
GRAYLOG_HTTP_BIND_ADDRESS: "0.0.0.0:9000"
GRAYLOG_ELASTICSEARCH_HOSTS: "http://127.0.0.1:9200"
GRAYLOG_MONGODB_URI: "mongodb://127.0.0.1:27017/graylog"
volumes:
- "{{ graylog_path }}/graylog/data:/usr/share/graylog/data:z"
requires:
- graylog-mongo
- graylog-opensearch
tags: graylog
- name: create systemd startup job for graylog
include_tasks: podman/systemd-generate.yml
vars:
container_name: graylog
tags: graylog

View File

@@ -34,8 +34,14 @@
image: "{{ image }}" image: "{{ image }}"
restart_policy: on-failure:3 restart_policy: on-failure:3
log_driver: journald log_driver: journald
network:
- host
env: env:
TZ: America/New_York TZ: America/New_York
# Zomboid RCON configuration for Discord restart command
ZOMBOID_RCON_HOST: "127.0.0.1"
ZOMBOID_RCON_PORT: "{{ zomboid_rcon_port }}"
ZOMBOID_RCON_PASSWORD: "{{ zomboid_admin_password }}"
volumes: volumes:
- "{{ gregtime_path }}/logs:/app/logs" - "{{ gregtime_path }}/logs:/app/logs"

View File

@@ -24,7 +24,7 @@
notify: restorecon podman notify: restorecon podman
loop: loop:
- configuration.yaml - configuration.yaml
# - automations.yaml - automations.yaml
- name: flush handlers - name: flush handlers
ansible.builtin.meta: flush_handlers ansible.builtin.meta: flush_handlers

View File

@@ -23,6 +23,26 @@
mode: 0755 mode: 0755
notify: restorecon podman notify: restorecon podman
# Render the SteamCMD install script used by the container entrypoint
# (mounted read-only into the container as /home/steam/install.scmd).
- name: copy zomboid steamcmd install script
  become: true
  ansible.builtin.template:
    src: zomboid/install.scmd.j2
    dest: "{{ zomboid_path }}/scripts/install.scmd"
    owner: "{{ podman_subuid.stdout }}"
    group: "{{ podman_user }}"
    # Quoted: unquoted 0644 is parsed by YAML as an octal integer, a
    # classic Ansible mode pitfall — always pass file modes as strings.
    mode: "0644"
  notify: restorecon podman
# Set volume permissions for steam user (UID 1000) inside container
# This uses podman unshare to set ownership correctly for rootless podman
- name: set zomboid volume permissions for steam user
  become: true
  become_user: "{{ podman_user }}"
  ansible.builtin.shell: |
    podman unshare chown -R 1000:1000 {{ zomboid_path }}/server
    podman unshare chown -R 1000:1000 {{ zomboid_path }}/data
  # chown re-applies identical ownership on every run; not a config change
  changed_when: false
- name: flush handlers - name: flush handlers
ansible.builtin.meta: flush_handlers ansible.builtin.meta: flush_handlers
@@ -40,19 +60,23 @@
restart_policy: on-failure:3 restart_policy: on-failure:3
log_driver: journald log_driver: journald
env: env:
SERVER_NAME: zomboid SERVER_NAME: "{{ zomboid_server_names[zomboid_server_mode] }}"
MIN_RAM: 8g MIN_RAM: 8g
MAX_RAM: 24g MAX_RAM: 24g
AUTO_UPDATE: "true" AUTO_UPDATE: "true"
ADMIN_PASSWORD: "{{ zomboid_admin_password }}" ADMIN_PASSWORD: "{{ zomboid_admin_password }}"
SERVER_PASSWORD: "{{ zomboid_password }}" SERVER_PASSWORD: "{{ zomboid_password }}"
PUID: "1000"
PGID: "1000"
volumes: volumes:
- "{{ zomboid_path }}/server:/home/steam/pzserver" - "{{ zomboid_path }}/server:/project-zomboid"
- "{{ zomboid_path }}/data:/home/steam/Zomboid" - "{{ zomboid_path }}/data:/project-zomboid-config"
- "{{ zomboid_path }}/scripts/entrypoint.sh:/entrypoint.sh:ro" - "{{ zomboid_path }}/scripts/entrypoint.sh:/entrypoint.sh:ro"
- "{{ zomboid_path }}/scripts/install.scmd:/home/steam/install.scmd:ro"
ports: ports:
- "16261:16261/udp" - "16261:16261/udp"
- "16262:16262/udp" - "16262:16262/udp"
- "{{ zomboid_rcon_port }}:{{ zomboid_rcon_port }}/tcp"
command: /bin/bash /entrypoint.sh command: /bin/bash /entrypoint.sh
- name: create systemd startup job for zomboid - name: create systemd startup job for zomboid
@@ -70,32 +94,73 @@
line: "Restart=always" line: "Restart=always"
notify: reload zomboid systemd notify: reload zomboid systemd
# Configuration management (requires server to have run once to generate ini) # Check if server INI exists (generated on first server run)
- name: configure zomboid server settings - name: check if zomboid server ini exists
become: true
ansible.builtin.stat:
path: "{{ zomboid_path }}/data/Server/{{ zomboid_server_names[zomboid_server_mode] }}.ini"
register: zomboid_ini_stat
tags: zomboid-conf
# Backup settings (requires server to have run once to generate ini)
- name: configure zomboid backup settings
become: true become: true
ansible.builtin.lineinfile: ansible.builtin.lineinfile:
path: "{{ zomboid_path }}/data/Server/zomboid.ini" path: "{{ zomboid_path }}/data/Server/{{ zomboid_server_names[zomboid_server_mode] }}.ini"
regexp: "^{{ item.key }}=" regexp: "^{{ item.key }}="
line: "{{ item.key }}={{ item.value }}" line: "{{ item.key }}={{ item.value }}"
loop: loop:
- { key: "PublicName", value: "Modded Joboid" } - { key: "SaveWorldEveryMinutes", value: "10" }
- { key: "MaxPlayers", value: "8" } - { key: "BackupsPeriod", value: "30" }
- { key: "BackupsCount", value: "10" }
# B42 Linux server fix: disable Lua checksum to allow mods to load
- { key: "DoLuaChecksum", value: "false" }
# Server password
- { key: "Password", value: "{{ zomboid_password }}" } - { key: "Password", value: "{{ zomboid_password }}" }
- { key: "Mods", value: "PzkVanillaPlusCarPack;PZKExtendedVehicleZones;PZKCarzoneWorkshop;Pogo;Pogo;Pogo;LethalHeadHit;VanillaFoodsExpanded;VanillaFoodsExpanded;RebalancedPropMoving;GaelGunStore_B42;STA_PryOpen;tsarslib;Ahu;Ahu;Ahu;ModernStatus;StandardizedVehicleUpgrades3V;StandardizedVehicleUpgrades3Core;survivingthroughseasons;survivingthroughseasons;RVInteriorExpansionPart2;RVInteriorExpansion;TchernoLib;HereGoesTheSun;hf_point_blank;WayMoreCars;WaterGoesBad;WaterGoesBad;PROJECTRVInterior42;ClimbWall;amclub;RepairableWindows;RepairableWindows;StarlitLibrary;StarlitLibrary;StarlitLibrary;ImmersiveBlackouts;ModLoadOrderSorter_b42;NeatUI_Framework;SomewhatWater;SomewhatWaterBright;VanillaVehiclesAnimated;VanillaVehiclesAnimated_SVU;VVA_nascarlights;VVA_cullseats;VVA_slowdoors;kitsunelib;ChuckleberryFinnAlertSystem;ImmersiveVehiclePaint;darkerMap;SLDarkerSnowB42;BecomeBraveB42;Louisville spawn v42;ItemretexturePSC" } when: zomboid_ini_stat.stat.exists
- { key: "WorkshopItems", value: "3217685049;3058134369;3543588687;3577903007;2699828474;3616176188;3579640010;3402491515;3422418897;3451167732;3304582091;3403490889;2753086629;3622163276;3618427553;3389605231;3618557184;2990322197;3520758551;2849467715;3543229299;3389681224;3404737883;3378304610;3378285185;3607686447;3423660713;3508537032;3582960654;3281755175;3390453390;3077900375;3464606086;2939187818;3390411200;3388028737;3387071727;3618491765" }
tags: zomboid-conf tags: zomboid-conf
# Sandbox settings (requires world reset to take effect) # Discord integration (uses Gregbot token, posts /all chat to Discord)
- name: configure zomboid sandbox settings - name: configure zomboid discord integration
become: true become: true
ansible.builtin.lineinfile: ansible.builtin.lineinfile:
path: "{{ zomboid_path }}/data/Server/zomboid_SandboxVars.lua" path: "{{ zomboid_path }}/data/Server/{{ zomboid_server_names[zomboid_server_mode] }}.ini"
regexp: "^\\s*{{ item.key }} = " regexp: "^{{ item.key }}="
line: " {{ item.key }} = {{ item.value }}," line: "{{ item.key }}={{ item.value }}"
backrefs: false
loop: loop:
- { key: "StartMonth", value: "12" } - { key: "DiscordEnable", value: "true" }
- { key: "StartDay", value: "15" } - { key: "DiscordToken", value: "{{ zomboid_discord_token }}" }
- { key: "DiscordChannel", value: "zomboidbot" }
- { key: "DiscordChannelID", value: "1451961291194241095" }
when: zomboid_ini_stat.stat.exists
tags: zomboid-conf
# RCON configuration for remote administration
- name: configure zomboid rcon
become: true
ansible.builtin.lineinfile:
path: "{{ zomboid_path }}/data/Server/{{ zomboid_server_names[zomboid_server_mode] }}.ini"
regexp: "^{{ item.key }}="
line: "{{ item.key }}={{ item.value }}"
loop:
- { key: "RCONPort", value: "{{ zomboid_rcon_port }}" }
- { key: "RCONPassword", value: "{{ zomboid_admin_password }}" }
when: zomboid_ini_stat.stat.exists
tags: zomboid-conf
# Mod configuration (only for modded server profile)
- name: configure zomboid mods for modded server
become: true
ansible.builtin.lineinfile:
path: "{{ zomboid_path }}/data/Server/{{ zomboid_server_names[zomboid_server_mode] }}.ini"
regexp: "^{{ item.key }}="
line: "{{ item.key }}={{ item.value }}"
loop:
- { key: "Mods", value: "{{ zomboid_mods.mod_ids }}" }
- { key: "WorkshopItems", value: "{{ zomboid_mods.workshop_items }}" }
when:
- zomboid_server_mode == 'modded'
- zomboid_ini_stat.stat.exists
tags: zomboid-conf tags: zomboid-conf
# World reset tasks REMOVED - too dangerous to have in automation # World reset tasks REMOVED - too dangerous to have in automation

View File

@@ -69,5 +69,7 @@
- 1080/tcp - 1080/tcp
- 1443/tcp - 1443/tcp
- 7000/tcp - 7000/tcp
# gelf-proxy (removed - now using GELF HTTP via Caddy)
- 12201/udp
notify: restart firewalld notify: restart firewalld
tags: firewall tags: firewall

View File

@@ -0,0 +1,8 @@
---
# Authenticate the rootless podman user against the Gitea container
# registry so private images under git.debyl.io can be pulled.
- name: podman login to Gitea Container Registry
  become: true
  become_user: "{{ podman_user }}"
  containers.podman.podman_login:
    registry: "git.debyl.io"
    username: "{{ gitea_registry_username }}"
    # Access-token credential; presumably sourced from vault and marked
    # no_log by the module — NOTE(review): confirm it never hits task output
    password: "{{ gitea_registry_token }}"

View File

@@ -54,9 +54,9 @@
- import_tasks: containers/home/photos.yml - import_tasks: containers/home/photos.yml
vars: vars:
db_image: docker.io/tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:90724186f0a3517cf6914295b5ab410db9ce23190a2d9d0b9dd6463e3fa298f0 db_image: docker.io/tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:90724186f0a3517cf6914295b5ab410db9ce23190a2d9d0b9dd6463e3fa298f0
ml_image: ghcr.io/immich-app/immich-machine-learning:v2.2.2 ml_image: ghcr.io/immich-app/immich-machine-learning:v2.4.0
redis_image: docker.io/redis:6.2-alpine@sha256:eaba718fecd1196d88533de7ba49bf903ad33664a92debb24660a922ecd9cac8 redis_image: docker.io/redis:6.2-alpine@sha256:eaba718fecd1196d88533de7ba49bf903ad33664a92debb24660a922ecd9cac8
image: ghcr.io/immich-app/immich-server:v2.2.2 image: ghcr.io/immich-app/immich-server:v2.4.0
tags: photos tags: photos
- import_tasks: containers/home/cloud.yml - import_tasks: containers/home/cloud.yml
@@ -73,7 +73,7 @@
- import_tasks: containers/debyltech/fulfillr.yml - import_tasks: containers/debyltech/fulfillr.yml
vars: vars:
image: "{{ aws_ecr_endpoint }}/fulfillr:20251105.0436" image: "git.debyl.io/debyltech/fulfillr:20260104.0001"
tags: debyltech, fulfillr tags: debyltech, fulfillr
- import_tasks: containers/debyltech/uptime-kuma.yml - import_tasks: containers/debyltech/uptime-kuma.yml
@@ -81,6 +81,12 @@
image: docker.io/louislam/uptime-kuma:1 image: docker.io/louislam/uptime-kuma:1
tags: debyltech, uptime-kuma tags: debyltech, uptime-kuma
- import_tasks: containers/debyltech/graylog.yml
tags: debyltech, graylog
- import_tasks: containers/base/fluent-bit.yml
tags: fluent-bit, graylog
- import_tasks: containers/home/nosql.yml - import_tasks: containers/home/nosql.yml
vars: vars:
image: docker.io/redis:7.2.1-alpine image: docker.io/redis:7.2.1-alpine
@@ -88,7 +94,7 @@
- import_tasks: containers/home/gregtime.yml - import_tasks: containers/home/gregtime.yml
vars: vars:
image: localhost/greg-time-bot:1.3.2 image: localhost/greg-time-bot:1.4.3
tags: gregtime tags: gregtime
- import_tasks: containers/home/zomboid.yml - import_tasks: containers/home/zomboid.yml

View File

@@ -177,6 +177,49 @@
} }
} }
# Graylog Logs - {{ logs_server_name }}
{{ logs_server_name }} {
    # GELF HTTP endpoint - open for Lambda (auth via header)
    # Must come BEFORE ip_restricted_site to allow external access
    @gelf_authorized {
        path /gelf
        header X-Gelf-Token "{{ gelf_auth_token }}"
    }
    # handle blocks are mutually exclusive; the first matching one wins,
    # so a token-bearing /gelf request is proxied before the 401 fallback.
    handle @gelf_authorized {
        reverse_proxy localhost:12202
    }
    # Reject unauthorized GELF requests
    handle /gelf {
        respond "Unauthorized" 401
    }
    # IP restriction for Graylog web UI (excludes /gelf which is handled above)
    @local {
        remote_ip {{ caddy_local_networks | join(' ') }}
    }
    @denied {
        not remote_ip {{ caddy_local_networks | join(' ') }}
        not path /gelf
    }
    # Non-local, non-GELF traffic is bounced to the public site
    handle @denied {
        redir https://debyl.io{uri} 302
    }
    # Local networks get the Graylog web UI (port 9000)
    handle @local {
        import common_headers
        reverse_proxy localhost:9000
    }
    log {
        output file /var/log/caddy/graylog.log
        format json
    }
}
# ============================================================================ # ============================================================================
# COMPLEX CONFIGURATIONS # COMPLEX CONFIGURATIONS
# ============================================================================ # ============================================================================
@@ -243,6 +286,20 @@
} }
} }
# Gitea - {{ gitea_debyl_server_name }}
{{ gitea_debyl_server_name }} {
import common_headers
reverse_proxy localhost:3100 {
flush_interval -1
}
log {
output file /var/log/caddy/gitea-debyl.log
format json
}
}
# Fulfillr - {{ fulfillr_server_name }} (Static + API with IP restrictions) # Fulfillr - {{ fulfillr_server_name }} (Static + API with IP restrictions)
{{ fulfillr_server_name }} { {{ fulfillr_server_name }} {
{{ ip_restricted_site() }} {{ ip_restricted_site() }}

View File

@@ -0,0 +1,32 @@
# Fluent Bit: forward Podman container logs from the systemd journal to a
# local Graylog GELF UDP input.
[SERVICE]
    # Flush buffered records to outputs every 5 seconds
    Flush 5
    Daemon Off
    Log_Level info
    Parsers_File parsers.conf

# Read from systemd journal - filter for Podman container logs
# Container logs come from conmon process with CONTAINER_NAME field
[INPUT]
    Name systemd
    Tag journal.*
    # Only journal entries whose emitting process is conmon
    Systemd_Filter _COMM=conmon
    # Start at the journal tail so a fresh install does not replay history
    Read_From_Tail On
    # Strip the leading underscore from journal field names — TODO confirm
    # against the installed fluent-bit version's systemd input docs
    Strip_Underscores On

# Extract container name for better filtering in Graylog
[FILTER]
    Name record_modifier
    Match journal.*
    Record host {{ ansible_hostname }}
    Record source podman

# Output to Graylog GELF UDP (local, port 12203)
# Graylog needs a GELF UDP input configured on this port
[OUTPUT]
    Name gelf
    Match journal.*
    Host 127.0.0.1
    Port 12203
    Mode udp
    # Use the journal MESSAGE field as the GELF short_message
    Gelf_Short_Message_Key MESSAGE
    Gelf_Host_Key host

View File

@@ -1,103 +1,89 @@
#!/bin/bash #!/bin/bash
# Project Zomboid Build 42 Server Entrypoint
# Based on IndifferentBroccoli/projectzomboid-server-docker
set -e set -e
# Configuration
INSTALL_DIR="/project-zomboid"
CONFIG_DIR="/project-zomboid-config"
STEAMCMD="/home/steam/steamcmd/steamcmd.sh" STEAMCMD="/home/steam/steamcmd/steamcmd.sh"
INSTALL_DIR="/home/steam/pzserver"
DATA_DIR="/home/steam/Zomboid"
SERVER_NAME="${SERVER_NAME:-zomboid}" SERVER_NAME="${SERVER_NAME:-zomboid}"
MIN_RAM="${MIN_RAM:-8g}" PUID="${PUID:-1000}"
MAX_RAM="${MAX_RAM:-24g}" PGID="${PGID:-1000}"
MIN_RAM="${MIN_RAM:-4g}"
MAX_RAM="${MAX_RAM:-8g}"
echo "=== Project Zomboid Build 42 Server ===" echo "=== Project Zomboid Build 42 Server ==="
echo "Server Name: ${SERVER_NAME}" echo "Server Name: ${SERVER_NAME}"
echo "RAM: ${MIN_RAM} - ${MAX_RAM}" echo "RAM: ${MIN_RAM} - ${MAX_RAM}"
# Fix ownership of mounted volumes (container runs as steam user, UID 1000) # Set user permissions (IndifferentBroccoli approach)
echo "=== Fixing volume permissions ===" echo "=== Setting file permissions ==="
chown -R steam:steam "${INSTALL_DIR}" || true usermod -o -u "${PUID}" steam
chown -R steam:steam "${DATA_DIR}" || true groupmod -o -g "${PGID}" steam
chmod -R 755 "${INSTALL_DIR}" || true chown -R steam:steam "${INSTALL_DIR}" "${CONFIG_DIR}"
chmod -R 755 "${DATA_DIR}" || true # Only chown writable parts of /home/steam (not read-only mounts)
chown steam:steam /home/steam
chown -R steam:steam /home/steam/steamcmd 2>/dev/null || true
chown -R steam:steam /home/steam/Steam 2>/dev/null || true
# Create required subdirectories with correct ownership # Create required directories
mkdir -p "${DATA_DIR}/Server" mkdir -p "${CONFIG_DIR}/Server"
mkdir -p "${DATA_DIR}/Saves/Multiplayer" mkdir -p "${CONFIG_DIR}/Saves"
mkdir -p "${DATA_DIR}/db" mkdir -p "${CONFIG_DIR}/db"
chown -R steam:steam "${DATA_DIR}"
# Ensure steam user has proper home directory setup
export HOME=/home/steam
# Initialize SteamCMD if needed (creates config directories)
if [ ! -d "/home/steam/Steam" ]; then
echo "=== Initializing SteamCMD ==="
su -c "${STEAMCMD} +quit" steam || true
fi
# Update/Install PZ dedicated server with Build 42 unstable branch # Update/Install PZ dedicated server with Build 42 unstable branch
if [ "${AUTO_UPDATE:-true}" = "true" ]; then if [ "${AUTO_UPDATE:-true}" = "true" ]; then
echo "=== Updating Project Zomboid Server (Build 42 unstable) ===" echo "=== Updating Project Zomboid Server (Build 42 unstable) ==="
# Run steamcmd as steam user with proper quoting for beta flag su -c "${STEAMCMD} +runscript /home/steam/install.scmd" steam
su -c "${STEAMCMD} +force_install_dir ${INSTALL_DIR} +login anonymous +app_update 380870 -beta unstable validate +quit" steam
echo "=== Update complete ===" echo "=== Update complete ==="
fi fi
# Ensure data directories exist (created earlier with correct permissions) # Configure JVM memory settings in ProjectZomboid64.json (Build 42 uses JSON config)
configure_memory() {
local json_file="${INSTALL_DIR}/ProjectZomboid64.json"
# Configure server settings on first run if [ ! -f "$json_file" ]; then
SERVER_INI="${DATA_DIR}/Server/${SERVER_NAME}.ini" echo "=== ProjectZomboid64.json not found, skipping memory config ==="
if [ ! -f "${SERVER_INI}" ]; then return 0
echo "=== First run detected, server will generate default config ===" fi
fi
# Handle admin password for first run echo "=== Configuring JVM memory: Xms=${MIN_RAM}, Xmx=${MAX_RAM} ==="
# PZ requires interactive password input on first run, so we create a db file
ADMIN_DB="${DATA_DIR}/db/${SERVER_NAME}.db"
if [ ! -f "${ADMIN_DB}" ] && [ -n "${ADMIN_PASSWORD}" ]; then
echo "=== Setting up admin account ==="
mkdir -p "${DATA_DIR}/db"
# The server will prompt for password on first run
# We'll use expect-like behavior or let it use defaults
fi
# Modify memory settings in ProjectZomboid64.json (Build 42 uses JSON config) # Update Xmx
PZ_JSON="${INSTALL_DIR}/ProjectZomboid64.json" sed -i "s/-Xmx[0-9]*[gGmM]*/-Xmx${MAX_RAM}/g" "$json_file"
if [ -f "${PZ_JSON}" ]; then
echo "=== Setting JVM memory: Xms=${MIN_RAM}, Xmx=${MAX_RAM} ===" # Update or add Xms
# Add -Xms if not present, otherwise update it if grep -q "\-Xms" "$json_file"; then
if grep -q "\-Xms" "${PZ_JSON}"; then sed -i "s/-Xms[0-9]*[gGmM]*/-Xms${MIN_RAM}/g" "$json_file"
sed -i "s/-Xms[0-9]*[gGmM]*/-Xms${MIN_RAM}/g" "${PZ_JSON}"
else else
# Insert -Xms before -Xmx # Insert -Xms before -Xmx
sed -i "s/\"-Xmx/\"-Xms${MIN_RAM}\",\n\t\t\"-Xmx/g" "${PZ_JSON}" sed -i "s/\"-Xmx/\"-Xms${MIN_RAM}\",\n\t\t\"-Xmx/g" "$json_file"
fi fi
sed -i "s/-Xmx[0-9]*[gGmM]*/-Xmx${MAX_RAM}/g" "${PZ_JSON}"
echo "=== Memory configuration complete ==="
}
configure_memory
# Check if first run (no admin DB)
ADMIN_DB="${CONFIG_DIR}/db/${SERVER_NAME}.db"
# Build server arguments
# Note: -modfolders is NOT used - mods are configured via INI only
# Reference: IndifferentBroccoli/projectzomboid-server-docker
SERVER_ARGS="-cachedir=${CONFIG_DIR} -servername ${SERVER_NAME}"
# Add admin password for first run
if [ ! -f "${ADMIN_DB}" ] && [ -n "${ADMIN_PASSWORD}" ]; then
echo "=== First run: setting admin password ==="
SERVER_ARGS="${SERVER_ARGS} -adminpassword ${ADMIN_PASSWORD}"
fi fi
# If server password is set, we'll need to configure it in the ini after first run # Note: Server password is set via INI file, not command line args
# For now, store it for later configuration
if [ -n "${SERVER_PASSWORD}" ]; then
echo "${SERVER_PASSWORD}" > "${DATA_DIR}/.server_password"
fi
if [ -n "${ADMIN_PASSWORD}" ]; then # Start server
echo "${ADMIN_PASSWORD}" > "${DATA_DIR}/.admin_password"
fi
# Change to install directory and start server
cd "${INSTALL_DIR}" cd "${INSTALL_DIR}"
echo "=== Starting Project Zomboid Server ===" echo "=== Starting Project Zomboid Server ==="
echo "Connect to: home.bdebyl.net:16261" echo "Connect to: home.bdebyl.net:16261"
# Start server - on first run this will prompt for admin password exec su -c "export LD_LIBRARY_PATH=${INSTALL_DIR}/jre64/lib:\${LD_LIBRARY_PATH} && ./start-server.sh ${SERVER_ARGS}" steam
# We handle this by providing input via stdin if password file exists
if [ -f "${DATA_DIR}/.admin_password" ] && [ ! -f "${ADMIN_DB}" ]; then
# First run with admin password
ADMIN_PASS=$(cat "${DATA_DIR}/.admin_password")
echo "=== First run: setting admin password ==="
printf "%s\n%s\n" "${ADMIN_PASS}" "${ADMIN_PASS}" | su -c "bash start-server.sh -servername ${SERVER_NAME}" steam
else
# Normal run
exec su -c "bash start-server.sh -servername ${SERVER_NAME}" steam
fi

View File

@@ -0,0 +1,18 @@
// SteamCMD script for Project Zomboid Server installation
// Based on IndifferentBroccoli/projectzomboid-server-docker
// Do not shutdown on a failed command
@ShutdownOnFailedCommand 0
// No password prompt as this is unattended
@NoPromptForPassword 1
// Set the game installation directory
// NOTE: force_install_dir should precede login (per SteamCMD guidance)
force_install_dir /project-zomboid
login anonymous
// Install/Update the Project Zomboid Dedicated Server - Unstable Branch (Build 42)
// "validate" re-checks installed files against the depot manifest
app_update 380870 -beta unstable validate
quit

Binary file not shown.