From df4ae0eb170a86b9ce20844fb5903b5c8424f180 Mon Sep 17 00:00:00 2001 From: Joakim Date: Wed, 26 Nov 2025 23:24:09 +0100 Subject: [PATCH 1/2] WIP: Authentik role with Quadlet pod approach - debugging container service generation - Created authentik.pod file for proper pod definition - Removed superfluous authentik-pod.container file - Updated container templates to reference pod correctly - Issue: Quadlet still reports 'pod authentik is not Quadlet based' - Container services not being generated (only pod service works) --- roles/authentik/README.md | 227 ++++++++++++++++++ roles/authentik/defaults/main.yml | 110 +++++++++ roles/authentik/handlers/main.yml | 78 ++++++ roles/authentik/meta/main.yml | 10 + roles/authentik/tasks/cache.yml | 70 ++++++ roles/authentik/tasks/database.yml | 62 +++++ roles/authentik/tasks/main.yml | 215 +++++++++++++++++ .../templates/authentik-server.container | 25 ++ .../templates/authentik-worker.container | 25 ++ roles/authentik/templates/authentik.caddy.j2 | 41 ++++ roles/authentik/templates/authentik.env.j2 | 43 ++++ roles/authentik/templates/authentik.pod | 15 ++ 12 files changed, 921 insertions(+) create mode 100644 roles/authentik/README.md create mode 100644 roles/authentik/defaults/main.yml create mode 100644 roles/authentik/handlers/main.yml create mode 100644 roles/authentik/meta/main.yml create mode 100644 roles/authentik/tasks/cache.yml create mode 100644 roles/authentik/tasks/database.yml create mode 100644 roles/authentik/tasks/main.yml create mode 100644 roles/authentik/templates/authentik-server.container create mode 100644 roles/authentik/templates/authentik-worker.container create mode 100644 roles/authentik/templates/authentik.caddy.j2 create mode 100644 roles/authentik/templates/authentik.env.j2 create mode 100644 roles/authentik/templates/authentik.pod diff --git a/roles/authentik/README.md b/roles/authentik/README.md new file mode 100644 index 0000000..d6105bc --- /dev/null +++ b/roles/authentik/README.md @@ -0,0 +1,227 @@ +# Authentik Role + +Self-contained Authentik authentication server deployment using Podman and Unix sockets. + +## Overview + +This role deploys Authentik as a containerized authentication service with: +- **Unix socket IPC** for PostgreSQL and Valkey +- **Rootless Podman** with systemd integration via Quadlet +- **Self-contained permissions** management +- **Caddy reverse proxy** configuration + +## Architecture + +``` +┌─────────────────┐ ┌─────────────────┐ ┌─────────────────┐ +│ Caddy Proxy │ │ Authentik Pod │ │ Infrastructure │ +│ │ │ │ │ │ +│ auth.jnss.me │───▶│ ┌─────────────┐ │ │ PostgreSQL │ +│ :443 │ │ │ Server │ │◄──▶│ (Unix Socket) │ +│ │ │ │ :9443 │ │ │ │ +│ │ │ └─────────────┘ │ │ Valkey │ +│ │ │ ┌─────────────┐ │◄──▶│ (Unix Socket) │ +│ │ │ │ Worker │ │ │ │ +│ │ │ └─────────────┘ │ │ │ +└─────────────────┘ └─────────────────┘ └─────────────────┘ +``` + +## Dependencies + +- `postgresql` role (provides Unix socket infrastructure) +- `valkey` role (provides Unix socket infrastructure) +- `podman` role (provides container runtime) +- `caddy` role (provides reverse proxy infrastructure) + +## Configuration + +### Required Variables + +```yaml +# Domain configuration +authentik_domain: "auth.jnss.me" + +# Database credentials +authentik_db_password: "{{ vault_authentik_db_password }}" +authentik_secret_key: "{{ vault_authentik_secret_key }}" +authentik_default_admin_password: "{{ vault_authentik_admin_password }}" + +# Infrastructure socket enablement +postgresql_unix_socket_enabled: true +valkey_unix_socket_enabled: true +``` + +### Optional Variables + +```yaml +# Service configuration +authentik_service_enabled: true +authentik_service_state: "started" + +# Container version +authentik_version: "latest" + +# Email configuration +authentik_email_enabled: false +authentik_email_host: "smtp.example.com" +``` + +## Vault Variables Required + +```yaml +# Database password +vault_authentik_db_password: "secure_db_password" + +# Authentik secret key (generate with: openssl rand -base64 32) +vault_authentik_secret_key: "long_random_secret_key" + +# Admin user password +vault_authentik_admin_password: "secure_admin_password" + +# Existing infrastructure passwords +vault_valkey_password: "valkey_password" +``` + +## Usage + +### Basic Deployment + +```yaml +- hosts: auth_servers + roles: + - postgresql + - valkey + - podman + - caddy + - authentik +``` + +### With Tags + +```yaml +# Deploy only database setup +ansible-playbook site.yml -t database + +# Deploy only containers +ansible-playbook site.yml -t containers + +# Deploy only Caddy config +ansible-playbook site.yml -t caddy +``` + +## File Structure + +``` +authentik/ +├── defaults/main.yml # Default variables +├── handlers/main.yml # Service handlers +├── meta/main.yml # Role dependencies +├── tasks/ +│ ├── main.yml # Main orchestration +│ ├── database.yml # Database setup +│ └── cache.yml # Cache setup +├── templates/ +│ ├── authentik.env.j2 # Environment variables +│ ├── authentik.caddy.j2 # Caddy configuration +│ ├── authentik-pod.container # Pod Quadlet file +│ ├── authentik-server.container # Server Quadlet file +│ └── authentik-worker.container # Worker Quadlet file +└── README.md +``` + +## Systemd Services + +The role creates the following systemd services: + +- `authentik-pod.service` - Main pod container +- `authentik-server.service` - Web server container +- `authentik-worker.service` - Background worker container + +## Networking + +- **External**: HTTPS via Caddy on port 443 +- **Internal**: Containers bind to `127.0.0.1:9000` (HTTP) and `127.0.0.1:9443` (HTTPS) +- **Database**: Unix socket at `/var/run/postgresql/.s.PGSQL.5432` +- **Cache**: Unix socket at `/var/run/valkey/valkey.sock` + +## Security Features + +- **Rootless containers** via Podman +- **Unix socket IPC** eliminates network exposure +- **User isolation** with dedicated `authentik` system user +- **Group-based socket access** for PostgreSQL and Valkey +- **TLS termination** at Caddy proxy +- **Security headers** configured in Caddy + +## Troubleshooting + +### Check Service Status + +```bash +systemctl status authentik-pod +systemctl status authentik-server +systemctl status authentik-worker +``` + +### Check Logs + +```bash +journalctl -u authentik-server -f +journalctl -u authentik-worker -f +``` + +### Check Socket Connectivity + +```bash +# Test PostgreSQL socket +sudo -u authentik psql -h /var/run/postgresql -U authentik authentik + +# Test Valkey socket +sudo -u authentik redis-cli -s /var/run/valkey/valkey.sock -n 1 ping +``` + +### Verify Container Status + +```bash +podman --user authentik pod ps +podman --user authentik ps +``` + +## Post-Deployment + +1. **Access Web Interface**: Navigate to `https://auth.jnss.me` +2. **Login**: Use admin credentials from vault variables +3. **Configure Providers**: Set up OAuth2/SAML providers for services +4. **Create Applications**: Configure applications for SSO integration + +## Maintenance + +### Update Containers + +```yaml +# Update to specific version +authentik_version: "2024.2.0" +``` + +### Backup Data + +Important directories to backup: +- `{{ authentik_data_dir }}` - Application data +- `{{ authentik_media_dir }}` - Uploaded media +- PostgreSQL database dump +- Vault variables + +## Integration Examples + +### Protect Service with Authentik + +```caddy +service.example.com { + forward_auth https://auth.jnss.me { + uri /outpost.goauthentik.io/auth/caddy + copy_headers Remote-User Remote-Name Remote-Email Remote-Groups + } + + reverse_proxy localhost:8080 +} +``` \ No newline at end of file diff --git a/roles/authentik/defaults/main.yml b/roles/authentik/defaults/main.yml new file mode 100644 index 0000000..b031a3f --- /dev/null +++ b/roles/authentik/defaults/main.yml @@ -0,0 +1,110 @@ +--- +# ================================================================= +# Authentik Authentication Role - Default Variables +# ================================================================= +# Self-contained Authentik deployment with Podman and Unix sockets + +# ================================================================= +# Service Configuration +# ================================================================= + +# Service user and directories +authentik_user: authentik +authentik_group: authentik +authentik_home: /opt/authentik +authentik_data_dir: "{{ authentik_home }}/data" +authentik_media_dir: "{{ authentik_home }}/media" + +# Container configuration +authentik_version: "latest" +authentik_image: "ghcr.io/goauthentik/authentik" + +# Service management +authentik_service_enabled: true +authentik_service_state: "started" + +# ================================================================= +# Database Configuration (Self-managed) +# ================================================================= + +authentik_db_name: "authentik" +authentik_db_user: "authentik" +authentik_db_password: "{{ vault_authentik_db_password }}" + +# ================================================================= +# Cache Configuration (Self-managed) +# ================================================================= + +authentik_valkey_db: 1 # Use database 1 for Authentik + +# ================================================================= +# Network Configuration +# ================================================================= + +authentik_domain: "auth.jnss.me" +authentik_http_port: 9000 +authentik_https_port: 9443 +authentik_bind_address: "127.0.0.1" + +# ================================================================= +# Authentik Core Configuration +# ================================================================= + +authentik_secret_key: "{{ vault_authentik_secret_key }}" +authentik_log_level: "info" +authentik_error_reporting: false + +# ================================================================= +# Email Configuration (Optional) +# ================================================================= + +authentik_email_enabled: false +authentik_email_host: "" +authentik_email_port: 587 +authentik_email_username: "" +authentik_email_password: "{{ vault_authentik_email_password | default('') }}" +authentik_email_tls: true +authentik_email_from: "authentik@{{ authentik_domain }}" + +# ================================================================= +# Security Configuration +# ================================================================= + +# Default admin user (created during deployment) +authentik_default_admin_email: "admin@{{ authentik_domain }}" +authentik_default_admin_password: "{{ vault_authentik_admin_password }}" + +# ================================================================= +# Podman Pod Configuration +# ================================================================= + +# Pod service name is simply "authentik" (generated from authentik.pod) +authentik_container_server_name: "authentik-server" +authentik_container_worker_name: "authentik-worker" + +# Quadlet service directories (USER SCOPE) +authentik_quadlet_dir: "{{ authentik_user_quadlet_dir }}" +authentik_user_quadlet_dir: "{{ authentik_home }}/.config/containers/systemd" + +# User session variables (set dynamically during deployment) +authentik_uid: "" + +# ================================================================= +# Caddy Integration +# ================================================================= + +# Caddy configuration (assumes caddy role provides these variables) +caddy_sites_enabled_dir: "/etc/caddy/sites-enabled" +caddy_log_dir: "/var/log/caddy" +caddy_user: "caddy" + +# ================================================================= +# Infrastructure Dependencies (Read-only) +# ================================================================= + +# PostgreSQL socket configuration (managed by postgresql role) +postgresql_unix_socket_directories: "/var/run/postgresql" + +# Valkey socket configuration (managed by valkey role) +valkey_unix_socket_path: "/var/run/valkey/valkey.sock" +valkey_password: "{{ vault_valkey_password }}" \ No newline at end of file diff --git a/roles/authentik/handlers/main.yml b/roles/authentik/handlers/main.yml new file mode 100644 index 0000000..a424c1e --- /dev/null +++ b/roles/authentik/handlers/main.yml @@ -0,0 +1,78 @@ +--- +# Authentik Service Handlers (User Scope) + +- name: reload systemd user + systemd: + daemon_reload: true + scope: user + become: true + become_user: "{{ authentik_user }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ authentik_uid }}" + +- name: reload caddy + systemd: + name: caddy + state: reloaded + +- name: restart authentik pod + systemd: + name: "authentik-pod" + state: restarted + scope: user + daemon_reload: true + become: true + become_user: "{{ authentik_user }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ authentik_uid }}" + +- name: restart authentik server + systemd: + name: "{{ authentik_container_server_name }}" + state: restarted + scope: user + daemon_reload: true + become: true + become_user: "{{ authentik_user }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ authentik_uid }}" + +- name: restart authentik worker + systemd: + name: "{{ authentik_container_worker_name }}" + state: restarted + scope: user + daemon_reload: true + become: true + become_user: "{{ authentik_user }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ authentik_uid }}" + +- name: stop authentik services + systemd: + name: "{{ item }}" + state: stopped + scope: user + become: true + become_user: "{{ authentik_user }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ authentik_uid }}" + loop: + - "{{ authentik_container_worker_name }}" + - "{{ authentik_container_server_name }}" + - "authentik-pod" + +- name: start authentik services + systemd: + name: "{{ item }}" + state: started + scope: user + daemon_reload: true + become: true + become_user: "{{ authentik_user }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ authentik_uid }}" + loop: + - "authentik-pod" + - "{{ authentik_container_server_name }}" + - "{{ authentik_container_worker_name }}" diff --git a/roles/authentik/meta/main.yml b/roles/authentik/meta/main.yml new file mode 100644 index 0000000..4698e5d --- /dev/null +++ b/roles/authentik/meta/main.yml @@ -0,0 +1,10 @@ +--- +# Authentik Role Dependencies +dependencies: + - role: postgresql + - role: valkey + - role: podman + - role: caddy + +# No modifications to infrastructure roles required +# Authentik role is completely self-contained \ No newline at end of file diff --git a/roles/authentik/tasks/cache.yml b/roles/authentik/tasks/cache.yml new file mode 100644 index 0000000..7df6a6e --- /dev/null +++ b/roles/authentik/tasks/cache.yml @@ -0,0 +1,70 @@ +--- +# Cache setup for Authentik - Self-contained socket permissions + +- name: Add authentik user to valkey group for socket access + user: + name: "{{ authentik_user }}" + groups: valkey + append: true + +- name: Ensure authentik can access Valkey socket directory + file: + path: "{{ valkey_unix_socket_path | dirname }}" + mode: '0770' + group: valkey + become: true + +- name: Test Valkey socket connectivity + command: > + redis-cli -s {{ valkey_unix_socket_path }} + -a {{ valkey_password }} + -n {{ authentik_valkey_db }} + ping + become: true + become_user: "{{ authentik_user }}" + register: valkey_socket_test + failed_when: valkey_socket_test.stdout != "PONG" + changed_when: false + +- name: Configure Authentik Valkey database + command: > + redis-cli -s {{ valkey_unix_socket_path }} + -a {{ valkey_password }} + -n {{ authentik_valkey_db }} + CONFIG SET save "" + become: true + become_user: "{{ authentik_user }}" + register: valkey_config_result + changed_when: true + +- name: Verify Authentik can write to Valkey database + command: > + redis-cli -s {{ valkey_unix_socket_path }} + -a {{ valkey_password }} + -n {{ authentik_valkey_db }} + SET authentik:healthcheck "{{ ansible_date_time.iso8601 }}" + become: true + become_user: "{{ authentik_user }}" + register: valkey_write_test + changed_when: false + +- name: Clean up Valkey test key + command: > + redis-cli -s {{ valkey_unix_socket_path }} + -a {{ valkey_password }} + -n {{ authentik_valkey_db }} + DEL authentik:healthcheck + become: true + become_user: "{{ authentik_user }}" + changed_when: false + +- name: Display cache setup status + debug: + msg: | + ✅ Authentik cache setup complete! + + 🗄️ Cache DB: {{ authentik_valkey_db }} + 🔌 Connection: Unix socket ({{ valkey_unix_socket_path }}) + 📊 Test: {{ valkey_socket_test.stdout }} + + 🏗️ Ready for Authentik container deployment \ No newline at end of file diff --git a/roles/authentik/tasks/database.yml b/roles/authentik/tasks/database.yml new file mode 100644 index 0000000..99b1ef5 --- /dev/null +++ b/roles/authentik/tasks/database.yml @@ -0,0 +1,62 @@ +--- +# Database setup for Authentik - Self-contained socket permissions + +- name: Add authentik user to postgres group for socket access + user: + name: "{{ authentik_user }}" + groups: postgres + append: true + +- name: Ensure authentik can access PostgreSQL socket directory + file: + path: "{{ postgresql_unix_socket_directories }}" + mode: '0770' + group: postgres + become: true + +- name: Test PostgreSQL socket connectivity + postgresql_ping: + login_unix_socket: "{{ postgresql_unix_socket_directories }}" + login_user: "{{ authentik_user }}" + become: true + become_user: "{{ authentik_user }}" + +- name: Create Authentik database user via socket + postgresql_user: + name: "{{ authentik_db_user }}" + password: "{{ authentik_db_password }}" + login_unix_socket: "{{ postgresql_unix_socket_directories }}" + login_user: postgres + become: true + become_user: postgres + +- name: Create Authentik database via socket + postgresql_db: + name: "{{ authentik_db_name }}" + owner: "{{ authentik_db_user }}" + login_unix_socket: "{{ postgresql_unix_socket_directories }}" + login_user: postgres + become: true + become_user: postgres + +- name: Grant Authentik database privileges + postgresql_privs: + db: "{{ authentik_db_name }}" + privs: ALL + type: database + role: "{{ authentik_db_user }}" + login_unix_socket: "{{ postgresql_unix_socket_directories }}" + login_user: postgres + become: true + become_user: postgres + +- name: Display database setup status + debug: + msg: | + ✅ Authentik database setup complete! + + 📊 Database: {{ authentik_db_name }} + 👤 User: {{ authentik_db_user }} + 🔌 Connection: Unix socket ({{ postgresql_unix_socket_directories }}) + + 🏗️ Ready for Authentik container deployment diff --git a/roles/authentik/tasks/main.yml b/roles/authentik/tasks/main.yml new file mode 100644 index 0000000..60212db --- /dev/null +++ b/roles/authentik/tasks/main.yml @@ -0,0 +1,215 @@ +--- +# Authentik Authentication Role - Main Tasks +# Self-contained deployment with Podman and Unix sockets + +- name: Create authentik group + group: + name: "{{ authentik_group }}" + system: true + +- name: Create authentik system user + user: + name: "{{ authentik_user }}" + system: true + shell: /bin/bash + home: "{{ authentik_home }}" + create_home: true + group: "{{ authentik_group }}" + +- name: Create authentik directories + file: + path: "{{ item }}" + state: directory + owner: "{{ authentik_user }}" + group: "{{ authentik_group }}" + mode: '0755' + loop: + - "{{ authentik_home }}" + - "{{ authentik_data_dir }}" + - "{{ authentik_media_dir }}" + - "{{ authentik_user_quadlet_dir }}" + +- name: Get authentik user UID + getent: + database: passwd + key: "{{ authentik_user }}" + register: authentik_user_info + +- name: Set authentik UID variable + set_fact: + authentik_uid: "{{ authentik_user_info.ansible_facts.getent_passwd[authentik_user][1] }}" + +- name: Enable lingering for authentik user (services persist without login) + command: loginctl enable-linger {{ authentik_user }} + register: linger_result + changed_when: linger_result.rc == 0 + +- name: Ensure XDG runtime directory exists + file: + path: "/run/user/{{ authentik_uid }}" + state: directory + owner: "{{ authentik_user }}" + group: "{{ authentik_group }}" + mode: '0700' + +- name: Setup database access and permissions + include_tasks: database.yml + tags: [database, setup] + +- name: Setup cache access and permissions + include_tasks: cache.yml + tags: [cache, setup] + +- name: Deploy environment configuration + template: + src: authentik.env.j2 + dest: "{{ authentik_home }}/.env" + owner: "{{ authentik_user }}" + group: "{{ authentik_group }}" + mode: '0600' + backup: true + notify: + - restart authentik pod + - restart authentik server + - restart authentik worker + tags: [config] + +- name: Create Quadlet systemd directory (user scope) + file: + path: "{{ authentik_quadlet_dir }}" + state: directory + owner: "{{ authentik_user }}" + group: "{{ authentik_group }}" + mode: '0755' + +- name: Deploy Quadlet pod and container files (user scope) + template: + src: "{{ item.src }}" + dest: "{{ authentik_quadlet_dir }}/{{ item.dest }}" + owner: "{{ authentik_user }}" + group: "{{ authentik_group }}" + mode: '0644' + loop: + - { src: 'authentik.pod', dest: 'authentik.pod' } + - { src: 'authentik-server.container', dest: 'authentik-server.container' } + - { src: 'authentik-worker.container', dest: 'authentik-worker.container' } + become: true + become_user: "{{ authentik_user }}" + notify: + - reload systemd user + - restart authentik pod + - restart authentik server + - restart authentik worker + tags: [containers, deployment] + +- name: Deploy Caddy configuration + template: + src: authentik.caddy.j2 + dest: "{{ caddy_sites_enabled_dir }}/authentik.caddy" + owner: root + group: "{{ caddy_user }}" + mode: '0644' + backup: true + notify: reload caddy + tags: [caddy, reverse-proxy] + +- name: Ensure system dependencies are running + systemd: + name: "{{ item }}" + state: started + loop: + - postgresql + - valkey + register: system_deps + +- name: Wait for PostgreSQL socket to be ready + wait_for: + path: "{{ postgresql_unix_socket_directories }}/.s.PGSQL.{{ postgresql_port }}" + timeout: 30 + when: postgresql_unix_socket_enabled + +- name: Wait for Valkey socket to be ready + wait_for: + path: "{{ valkey_unix_socket_path }}" + timeout: 30 + when: valkey_unix_socket_enabled + +- name: Reload systemd daemon for Quadlet (user scope) + systemd: + daemon_reload: true + scope: user + become: true + become_user: "{{ authentik_user }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ authentik_uid }}" + tags: [containers, deployment] + +- name: Enable and start Authentik pod (user scope) + systemd: + name: "authentik-pod" + enabled: "{{ authentik_service_enabled }}" + state: "{{ authentik_service_state }}" + scope: user + daemon_reload: true + become: true + become_user: "{{ authentik_user }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ authentik_uid }}" + tags: [containers, service] + +- name: Enable and start Authentik server (user scope) + systemd: + name: "{{ authentik_container_server_name }}" + enabled: "{{ authentik_service_enabled }}" + state: "{{ authentik_service_state }}" + scope: user + daemon_reload: true + become: true + become_user: "{{ authentik_user }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ authentik_uid }}" + tags: [containers, service] + +- name: Enable and start Authentik worker (user scope) + systemd: + name: "{{ authentik_container_worker_name }}" + enabled: "{{ authentik_service_enabled }}" + state: "{{ authentik_service_state }}" + scope: user + daemon_reload: true + become: true + become_user: "{{ authentik_user }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ authentik_uid }}" + tags: [containers, service] + +- name: Wait for Authentik to be ready + uri: + url: "https://{{ authentik_domain }}/if/health/live/" + method: GET + status_code: [200] + timeout: 30 + validate_certs: true + retries: 10 + delay: 30 + register: authentik_health_check + tags: [verification, health-check] + +- name: Display Authentik deployment status + debug: + msg: | + ✅ Authentik Authentication deployed successfully! + + 🌐 Domain: {{ authentik_domain }} + 🗄️ Database: {{ authentik_db_name }} (Unix socket) + 🗄️ Cache: Valkey DB {{ authentik_valkey_db }} (Unix socket) + 🐳 Containers: Pod with server + worker + 🔒 Admin: {{ authentik_default_admin_email }} + + 🚀 Ready for SSO configuration! + + 📋 Next Steps: + - Access {{ authentik_domain }} to complete setup + - Configure applications and providers + - Set up SSO for services + tags: [verification] diff --git a/roles/authentik/templates/authentik-server.container b/roles/authentik/templates/authentik-server.container new file mode 100644 index 0000000..6dd0784 --- /dev/null +++ b/roles/authentik/templates/authentik-server.container @@ -0,0 +1,25 @@ +[Unit] +Description=Authentik Server Container +After=authentik-pod.service +Requires=authentik-pod.service + +[Container] +ContainerName={{ authentik_container_server_name }} +Image={{ authentik_image }}:{{ authentik_version }} +Pod=authentik +EnvironmentFile={{ authentik_home }}/.env + +# Volume mounts for data and sockets +Volume={{ authentik_media_dir }}:/media +Volume={{ authentik_data_dir }}:/data +Volume={{ postgresql_unix_socket_directories }}:{{ postgresql_unix_socket_directories }}:Z +Volume={{ valkey_unix_socket_path | dirname }}:{{ valkey_unix_socket_path | dirname }}:Z + +Exec=server + +[Service] +Restart=always +TimeoutStartSec=300 + +[Install] +WantedBy=default.target diff --git a/roles/authentik/templates/authentik-worker.container b/roles/authentik/templates/authentik-worker.container new file mode 100644 index 0000000..091d416 --- /dev/null +++ b/roles/authentik/templates/authentik-worker.container @@ -0,0 +1,25 @@ +[Unit] +Description=Authentik Worker Container +After=authentik-pod.service +Requires=authentik-pod.service + +[Container] +ContainerName={{ authentik_container_worker_name }} +Image={{ authentik_image }}:{{ authentik_version }} +Pod=authentik +EnvironmentFile={{ authentik_home }}/.env + +# Volume mounts for data and sockets +Volume={{ authentik_media_dir }}:/media +Volume={{ authentik_data_dir }}:/data +Volume={{ postgresql_unix_socket_directories }}:{{ postgresql_unix_socket_directories }}:Z +Volume={{ valkey_unix_socket_path | dirname }}:{{ valkey_unix_socket_path | dirname }}:Z + +Exec=worker + +[Service] +Restart=always +TimeoutStartSec=300 + +[Install] +WantedBy=default.target diff --git a/roles/authentik/templates/authentik.caddy.j2 b/roles/authentik/templates/authentik.caddy.j2 new file mode 100644 index 0000000..1d56dd0 --- /dev/null +++ b/roles/authentik/templates/authentik.caddy.j2 @@ -0,0 +1,41 @@ +# Authentik Authentication Service +{{ authentik_domain }} { + reverse_proxy https://{{ authentik_bind_address }}:{{ authentik_https_port }} { + transport http { + tls_insecure_skip_verify + } + + header_up Host {upstream_hostport} + header_up X-Real-IP {remote_host} + header_up X-Forwarded-Proto https + header_up X-Forwarded-For {remote_host} + header_up X-Forwarded-Host {host} + + # Health check + health_uri /if/health/live/ + health_timeout 10s + health_interval 30s + health_status 200 + } + + # Security headers + header { + X-Frame-Options SAMEORIGIN + X-Content-Type-Options nosniff + X-XSS-Protection "1; mode=block" + Referrer-Policy strict-origin-when-cross-origin + Strict-Transport-Security "max-age=31536000; includeSubDomains; preload" + } + + # Authentik-specific paths + handle_path /outpost.goauthentik.io/* { + reverse_proxy https://{{ authentik_bind_address }}:{{ authentik_https_port }} + } + + # Logging + log { + output file {{ caddy_log_dir }}/authentik.log + level INFO + format json + } +} \ No newline at end of file diff --git a/roles/authentik/templates/authentik.env.j2 b/roles/authentik/templates/authentik.env.j2 new file mode 100644 index 0000000..561ee8f --- /dev/null +++ b/roles/authentik/templates/authentik.env.j2 @@ -0,0 +1,43 @@ +# Authentik Configuration - Unix Socket IPC +# Generated by Ansible - DO NOT EDIT + +# PostgreSQL Configuration (Unix Socket) +AUTHENTIK_POSTGRESQL__HOST={{ postgresql_unix_socket_directories }} +AUTHENTIK_POSTGRESQL__NAME={{ authentik_db_name }} +AUTHENTIK_POSTGRESQL__USER={{ authentik_db_user }} +AUTHENTIK_POSTGRESQL__PASSWORD={{ authentik_db_password }} +# No port needed for Unix socket + +# Valkey/Redis Configuration (Unix Socket) +AUTHENTIK_REDIS__HOST=unix://{{ valkey_unix_socket_path }} +AUTHENTIK_REDIS__PASSWORD={{ valkey_password }} +AUTHENTIK_REDIS__DB={{ authentik_valkey_db }} +# No port needed for Unix socket + +# Authentik Core Configuration +AUTHENTIK_SECRET_KEY={{ authentik_secret_key }} +AUTHENTIK_LOG_LEVEL={{ authentik_log_level }} +AUTHENTIK_ERROR_REPORTING__ENABLED={{ authentik_error_reporting | lower }} + +# Security Configuration +AUTHENTIK_COOKIE_DOMAIN={{ authentik_domain }} +AUTHENTIK_DISABLE_UPDATE_CHECK=true +AUTHENTIK_DISABLE_STARTUP_ANALYTICS=true + +# Network binding +AUTHENTIK_LISTEN__HTTP={{ authentik_bind_address }}:{{ authentik_http_port }} +AUTHENTIK_LISTEN__HTTPS={{ authentik_bind_address }}:{{ authentik_https_port }} + +{% if authentik_email_enabled %} +# Email Configuration +AUTHENTIK_EMAIL__HOST={{ authentik_email_host }} +AUTHENTIK_EMAIL__PORT={{ authentik_email_port }} +AUTHENTIK_EMAIL__USERNAME={{ authentik_email_username }} +AUTHENTIK_EMAIL__PASSWORD={{ authentik_email_password }} +AUTHENTIK_EMAIL__USE_TLS={{ authentik_email_tls | lower }} +AUTHENTIK_EMAIL__FROM={{ authentik_email_from }} +{% endif %} + +# Default admin user +AUTHENTIK_BOOTSTRAP_PASSWORD={{ authentik_default_admin_password }} +AUTHENTIK_BOOTSTRAP_EMAIL={{ authentik_default_admin_email }} \ No newline at end of file diff --git a/roles/authentik/templates/authentik.pod b/roles/authentik/templates/authentik.pod new file mode 100644 index 0000000..27d38b3 --- /dev/null +++ b/roles/authentik/templates/authentik.pod @@ -0,0 +1,15 @@ +[Unit] +Description=Authentik Authentication Pod + +[Pod] +PodName=authentik +PublishPort={{ authentik_bind_address }}:{{ authentik_http_port }}:{{ authentik_http_port }} +PublishPort={{ authentik_bind_address }}:{{ authentik_https_port }}:{{ authentik_https_port }} +PodmanArgs=--userns=keep-id + +[Service] +Restart=always +TimeoutStartSec=900 + +[Install] +WantedBy=default.target From b42ee2a22b646a677ab070f8d815b18c0bce7af2 Mon Sep 17 00:00:00 2001 From: Joakim Date: Thu, 4 Dec 2025 19:42:31 +0100 Subject: [PATCH 2/2] Fix: Complete authentik Quadlet implementation with networking solution MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Resolves authentik deployment issues by implementing proper Podman Quadlet configuration and fixing networking for external access through Caddy. Core Fixes: • Add missing [Install] sections to container Quadlet files for systemd service generation • Fix pod references from 'systemd-authentik' to 'authentik.pod' for proper Quadlet linking • Remove problematic --userns=host to use proper rootless user namespaces • Configure subuid/subgid ranges for authentik user (200000:65536) • Update networking to bind 0.0.0.0:9000 only (remove unnecessary HTTPS port 9443) • Add AUTHENTIK_LISTEN__HTTP=0.0.0.0:9000 environment configuration • Fix Caddy reverse proxy to use HTTP backend instead of HTTPS Infrastructure Updates: • Enhance PostgreSQL role with Unix socket configuration and user management • Improve Valkey role with proper systemd integration and socket permissions • Add comprehensive service integration documentation • Update deployment playbooks with backup and restore capabilities Security Improvements: • Secure network isolation with Caddy SSL termination • Reduced attack surface by removing direct HTTPS container exposure • Proper rootless container configuration with user namespace mapping Result: authentik now fully operational with external HTTPS access via auth.jnss.me All systemd services (authentik-pod, authentik-server, authentik-worker) running correctly. --- .gitignore | 2 + README.md | 15 + docs/service-integration-guide.md | 312 ++++++++++++++++++ host_vars/arch-vps/main.yml | 27 ++ playbooks/backup-sigvild.yml | 66 ++++ roles/authentik/README.md | 80 ++++- roles/authentik/defaults/main.yml | 10 +- roles/authentik/tasks/cache.yml | 2 +- roles/authentik/tasks/main.yml | 73 ++-- .../templates/authentik-server.container | 9 +- .../templates/authentik-worker.container | 9 +- roles/authentik/templates/authentik.env.j2 | 10 +- roles/authentik/templates/authentik.pod | 6 +- roles/postgresql/defaults/main.yml | 7 +- roles/postgresql/tasks/main.yml | 32 +- roles/postgresql/templates/postgresql.conf.j2 | 5 + roles/sigvild-gallery/README.md | 79 ++++- roles/sigvild-gallery/defaults/main.yml | 4 + roles/sigvild-gallery/tasks/backup.yml | 100 ++++++ .../sigvild-gallery/tasks/deploy_backend.yml | 4 + roles/sigvild-gallery/tasks/restore.yml | 126 +++++++ roles/valkey/defaults/main.yml | 5 + roles/valkey/tasks/main.yml | 76 ++++- roles/valkey/templates/valkey.conf.j2 | 9 + site.yml | 10 +- 25 files changed, 986 insertions(+), 92 deletions(-) create mode 100644 docs/service-integration-guide.md create mode 100644 playbooks/backup-sigvild.yml create mode 100644 roles/sigvild-gallery/tasks/backup.yml create mode 100644 roles/sigvild-gallery/tasks/restore.yml diff --git a/.gitignore b/.gitignore index 479879e..80c303b 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,4 @@ vault-password-file vault.yml +backups/ +.*temp/ diff --git a/README.md b/README.md index c028ac9..d894e32 100644 --- a/README.md +++ b/README.md @@ -19,3 +19,18 @@ Reverse proxy. ### Containers Containers are managed by rootless Podman. + +## Documentation + +### Service Integration +- [Service Integration Guide](docs/service-integration-guide.md) - How to add containerized services with PostgreSQL/Valkey access + +### Role Documentation +- [Authentik Role](roles/authentik/README.md) - Authentication service with Unix socket implementation +- [PostgreSQL Role](roles/postgresql/README.md) - Database service with Unix socket support +- [Valkey Role](roles/valkey/README.md) - Cache service with Unix socket support +- [Caddy Role](roles/caddy/README.md) - Reverse proxy and SSL termination + +### Infrastructure Guides +- [Deployment Guide](docs/deployment-guide.md) - Complete deployment walkthrough +- [Security Hardening](docs/security-hardening.md) - Security configuration and best practices diff --git a/docs/service-integration-guide.md b/docs/service-integration-guide.md new file mode 100644 index 0000000..55c9914 --- /dev/null +++ b/docs/service-integration-guide.md @@ -0,0 +1,312 @@ +# Service Integration Guide + +This guide explains how to add new containerized services to rick-infra with PostgreSQL and Valkey/Redis access via Unix sockets. + +## Overview + +Rick-infra provides a standardized approach for containerized services to access infrastructure services through Unix sockets, maintaining security while providing optimal performance. + +## Architecture Pattern + +``` +┌─────────────────────────────────────────────────────────────┐ +│ Application Service (Podman Container) │ +│ │ +│ ┌─────────────────┐ │ +│ │ Your Container │ │ +│ │ UID: service │ (host user namespace) │ +│ │ Groups: service,│ │ +│ │ postgres, │ (supplementary groups preserved) │ +│ │ valkey │ │ +│ └─────────────────┘ │ +│ │ │ +│ └─────────────────────┐ │ +└─────────────────────────────────│───────────────────────────┘ + │ + ┌───────────────▼──────────────┐ + │ Host Infrastructure Services │ + │ │ + │ PostgreSQL Unix Socket │ + │ /var/run/postgresql/ │ + │ │ + │ Valkey Unix Socket │ + │ /var/run/valkey/ │ + └──────────────────────────────┘ +``` + +## Prerequisites + +Your service must be deployed as: +1. **Systemd user service** (via Quadlet) +2. **Dedicated system user** +3. **Podman container** (rootless) + +## Step 1: User Setup + +Create a dedicated system user for your service and add it to infrastructure groups: + +```yaml +- name: Create service user + user: + name: myservice + system: true + shell: /bin/false + home: /opt/myservice + create_home: true + +- name: Add service user to infrastructure groups + user: + name: myservice + groups: + - postgres # For PostgreSQL access + - valkey # For Valkey/Redis access + append: true +``` + +## Step 2: Container Configuration + +### Pod Configuration (`myservice.pod`) + +```ini +[Unit] +Description=My Service Pod + +[Pod] +PublishPort=127.0.0.1:8080:8080 +PodmanArgs=--userns=host + +[Service] +Restart=always +TimeoutStartSec=900 + +[Install] +WantedBy=default.target +``` + +**Key Points**: +- `--userns=host` preserves host user namespace +- Standard port publishing for network access + +### Container Configuration (`myservice.container`) + +```ini +[Unit] +Description=My Service Container + +[Container] +Image=my-service:latest +Pod=myservice.pod +EnvironmentFile=/opt/myservice/.env +User={{ service_uid }}:{{ service_gid }} +Annotation=run.oci.keep_original_groups=1 + +# Volume mounts for sockets +Volume=/var/run/postgresql:/var/run/postgresql:Z +Volume=/var/run/valkey:/var/run/valkey:Z + +# Application volumes +Volume=/opt/myservice/data:/data +Volume=/opt/myservice/logs:/logs + +Exec=my-service + +[Service] +Restart=always + +[Install] +WantedBy=default.target +``` + +**Key Points**: +- `Annotation=run.oci.keep_original_groups=1` preserves supplementary groups +- Mount socket directories with `:Z` for SELinux relabeling +- Use host UID/GID for the service user + +## Step 3: Service Configuration + +### PostgreSQL Connection + +Use Unix socket connection strings: + +```bash +# Environment variable +DATABASE_URL=postgresql://myservice@/myservice_db?host=/var/run/postgresql + +# Or separate variables +DB_HOST=/var/run/postgresql +DB_USER=myservice +DB_NAME=myservice_db +# No DB_PORT needed for Unix sockets +``` + +### Valkey/Redis Connection + +**Correct Format** (avoids URL parsing issues): + +```bash +# Single URL format (recommended) +CACHE_URL=unix:///var/run/valkey/valkey.sock?db=2&password=your_password + +# Alternative format +REDIS_URL=redis://localhost/2?unix_socket_path=/var/run/valkey/valkey.sock +``` + +**Avoid** separate HOST/DB variables which can cause port parsing issues: +```bash +# DON'T USE - causes parsing problems +REDIS_HOST=unix:///var/run/valkey/valkey.sock +REDIS_DB=2 +``` + +## Step 4: Database Setup + +Add database setup tasks to your role: + +```yaml +- name: Create application database + postgresql_db: + name: "{{ service_db_name }}" + owner: "{{ service_db_user }}" + encoding: UTF-8 + lc_collate: en_US.UTF-8 + lc_ctype: en_US.UTF-8 + become_user: postgres + +- name: Create application database user + postgresql_user: + name: "{{ service_db_user }}" + password: "{{ service_db_password }}" + db: "{{ service_db_name }}" + priv: ALL + become_user: postgres + +- name: Grant connect privileges + postgresql_privs: + db: "{{ service_db_name }}" + role: "{{ service_db_user }}" + objs: ALL_IN_SCHEMA + privs: ALL + become_user: postgres +``` + +## Step 5: Service Role Template + +Create an Ansible role using this pattern: + +``` +myservice/ +├── defaults/main.yml +├── handlers/main.yml +├── tasks/ +│ ├── main.yml +│ ├── database.yml +│ └── cache.yml +├── templates/ +│ ├── myservice.env.j2 +│ ├── myservice.pod +│ ├── myservice.container +│ └── myservice.caddy.j2 +└── README.md +``` + +### Example Environment Template + +```bash +# My Service Configuration +# Generated by Ansible - DO NOT EDIT + +# Database Configuration (Unix Socket) +DATABASE_URL=postgresql://{{ service_db_user }}@/{{ service_db_name }}?host={{ postgresql_unix_socket_directories }} +DB_PASSWORD={{ service_db_password }} + +# Cache Configuration (Unix Socket) +CACHE_URL=unix://{{ valkey_unix_socket_path }}?db={{ service_valkey_db }}&password={{ valkey_password }} + +# Application Configuration +SECRET_KEY={{ service_secret_key }} +LOG_LEVEL={{ service_log_level }} +BIND_ADDRESS={{ service_bind_address }}:{{ service_port }} +``` + +## Troubleshooting + +### Socket Permission Issues + +If you get permission denied errors: + +1. **Check group membership**: + ```bash + groups myservice + # Should show: myservice postgres valkey + ``` + +2. **Verify container annotations**: + ```bash + podman inspect myservice --format='{{.Config.Annotations}}' + # Should include: run.oci.keep_original_groups=1 + ``` + +3. **Check socket permissions**: + ```bash + ls -la /var/run/postgresql/ + ls -la /var/run/valkey/ + ``` + +### Connection Issues + +1. **Test socket access from host**: + ```bash + sudo -u myservice psql -h /var/run/postgresql -U myservice myservice_db + sudo -u myservice redis-cli -s /var/run/valkey/valkey.sock ping + ``` + +2. **Check URL format**: + - Use single `CACHE_URL` instead of separate variables + - Include password in URL if required + - Verify database number is correct + +### Container Issues + +1. **Check container user**: + ```bash + podman exec myservice id + # Should show correct UID and supplementary groups + ``` + +2. **Verify socket mounts**: + ```bash + podman exec myservice ls -la /var/run/postgresql/ + podman exec myservice ls -la /var/run/valkey/ + ``` + +## Best Practices + +1. **Security**: + - Use dedicated system users for each service + - Limit group memberships to required infrastructure + - Use vault variables for secrets + +2. **Configuration**: + - Use single URL format for Redis connections + - Mount socket directories with appropriate SELinux labels + - Include `run.oci.keep_original_groups=1` annotation + +3. **Deployment**: + - Test socket access before container deployment + - Use proper dependency ordering in playbooks + - Include database and cache setup tasks + +4. **Monitoring**: + - Monitor socket file permissions + - Check service logs for connection errors + - Verify group memberships after user changes + +## Example Integration + +See the `authentik` role for a complete example of this pattern: + +- **Templates**: `roles/authentik/templates/` +- **Tasks**: `roles/authentik/tasks/` +- **Documentation**: `roles/authentik/README.md` + +This provides a working reference implementation for Unix socket integration. \ No newline at end of file diff --git a/host_vars/arch-vps/main.yml b/host_vars/arch-vps/main.yml index 0dcc515..a690035 100644 --- a/host_vars/arch-vps/main.yml +++ b/host_vars/arch-vps/main.yml @@ -38,6 +38,33 @@ sigvild_gallery_pb_su_password: "{{ vault_pb_su_password}}" sigvild_gallery_host_password: "{{ vault_sigvild_host_password }}" sigvild_gallery_guest_password: "{{ vault_sigvild_guest_password }}" +# ================================================================= +# Authentik Configuration +# ================================================================= +authentik_domain: "auth.jnss.me" + +# Database configuration +authentik_db_name: "authentik" +authentik_db_user: "authentik" +authentik_db_password: "{{ vault_authentik_db_password }}" + +# Cache configuration +authentik_valkey_db: 1 + +# Core configuration +authentik_secret_key: "{{ vault_authentik_secret_key }}" +authentik_default_admin_email: "admin@jnss.me" +authentik_default_admin_password: "{{ vault_authentik_admin_password }}" + +# Service configuration +authentik_service_enabled: true +authentik_service_state: "started" + +# Infrastructure socket configuration +postgresql_unix_socket_enabled: true +postgresql_listen_addresses: "" # Socket-only mode (no TCP) +valkey_unix_socket_enabled: true + # ================================================================= # Security & Logging # ================================================================= diff --git a/playbooks/backup-sigvild.yml b/playbooks/backup-sigvild.yml new file mode 100644 index 0000000..8393ff7 --- /dev/null +++ b/playbooks/backup-sigvild.yml @@ -0,0 +1,66 @@ +--- +# Sigvild Gallery Data Backup Playbook +# +# This playbook creates a compressed backup of all Sigvild Gallery production data +# including the PocketBase database and uploaded files. + +- name: Backup Sigvild Gallery Production Data + hosts: arch-vps + become: true + gather_facts: true + + vars: + # Backup configuration - can be overridden with --extra-vars + sigvild_gallery_backup_local_path: "{{ playbook_dir }}/backups/sigvild-gallery" + + pre_tasks: + - name: Ensure local backup directory exists + local_action: + module: file + path: "{{ sigvild_gallery_backup_local_path }}" + state: directory + mode: '0755' + become: false + run_once: true + + - name: Display backup operation info + debug: + msg: + - "🔄 Starting Sigvild Gallery Data Backup" + - "Target server: {{ inventory_hostname }}" + - "Local backup storage: {{ sigvild_gallery_backup_local_path }}" + - "Timestamp: {{ ansible_date_time.iso8601 }}" + + tasks: + - name: Execute backup tasks + include_role: + name: sigvild-gallery + tasks_from: backup + tags: [backup, data] + + post_tasks: + - name: List local backups + local_action: + module: find + paths: "{{ sigvild_gallery_backup_local_path }}" + patterns: "sigvild-gallery-backup-*.tar.gz" + register: all_backups + become: false + + - name: Display backup summary + debug: + msg: + - "✅ Backup operation completed successfully!" + - "Total backups available: {{ all_backups.files | length }}" + - "Latest backup: sigvild-gallery-backup-{{ ansible_date_time.iso8601_basic_short }}.tar.gz" + - "Backup location: {{ sigvild_gallery_backup_local_path }}" + - "" + - "⚠️ IMPORTANT: Store this backup safely before formatting your server!" + - "💡 To restore: Run normal deployment - restoration is automatic if backup exists" + + - name: Show backup file details + debug: + msg: "Backup: {{ item.path | basename }} - {{ (item.size / 1024 / 1024) | round(2) }}MB - {{ item.mtime | to_datetime('%Y-%m-%d %H:%M:%S') }}" + loop: "{{ all_backups.files | sort(attribute='mtime') | reverse }}" + loop_control: + label: "{{ item.path | basename }}" \ No newline at end of file diff --git a/roles/authentik/README.md b/roles/authentik/README.md index d6105bc..3c553ec 100644 --- a/roles/authentik/README.md +++ b/roles/authentik/README.md @@ -224,4 +224,82 @@ service.example.com { reverse_proxy localhost:8080 } -``` \ No newline at end of file +``` +## Technical Implementation Notes + +### Unix Socket Access Solution + +This role implements a sophisticated solution for containerized Unix socket access: + +**Challenge**: Containers need to access Unix sockets owned by different system services (PostgreSQL, Valkey) while maintaining security isolation. + +**Solution Components**: + +1. **User Namespace Preservation**: `--userns=host` in pod configuration + - Preserves host UID/GID mapping within containers + - Allows direct access to host socket files + +2. **Group Membership Preservation**: `Annotation=run.oci.keep_original_groups=1` in containers + - Ensures supplementary group memberships are maintained in containers + - Enables access to postgres and valkey groups within containers + +3. **Correct Redis URL Format**: `AUTHENTIK_CACHE__URL=unix://...?db=N&password=...` + - Avoids Django Redis client URL parsing issues + - Prevents incorrect port appending to Unix socket paths + +4. **Host Service Integration**: Authentik user added to service groups + - Added to `postgres` group for PostgreSQL socket access + - Added to `valkey` group for Valkey socket access + +### Container Architecture + +``` +┌─────────────────────────────────────────────────────────────┐ +│ Authentik Pod (--userns=host) │ +│ │ +│ ┌─────────────────┐ ┌─────────────────┐ │ +│ │ Server Container│ │ Worker Container│ │ +│ │ UID: 963 (host) │ │ UID: 963 (host) │ │ +│ │ Groups: 963, │ │ Groups: 963, │ │ +│ │ 968(postgres),│ │ 968(postgres),│ │ +│ │ 965(valkey) │ │ 965(valkey) │ │ +│ └─────────────────┘ └─────────────────┘ │ +│ │ │ │ +│ └────────────────────┴─────────────┐ │ +└─────────────────────────────────────────────│─────────────┘ + │ + ┌───────────────▼──────────────┐ + │ Host Unix Sockets │ + │ │ + │ /var/run/postgresql/ │ + │ ├─ .s.PGSQL.5432 │ + │ │ (postgres:postgres 0770) │ + │ │ + │ /var/run/valkey/ │ + │ ├─ valkey.sock │ + │ (valkey:valkey 0770) │ + └──────────────────────────────┘ +``` + +### Security Implications + +**Maintained Security**: +- Container network isolation preserved (no `--network=host`) +- Individual container user/group isolation +- Standard Podman security features active +- Principle of least privilege through group membership + +**Trade-offs**: +- Containers share host user namespace (reduced UID isolation) +- Group membership grants broader access to service files +- Requires careful service group management + +### Compatibility + +This solution is: +- ✅ **Portable**: Works regardless of UID assignments +- ✅ **Maintainable**: No custom subuid/subgid configuration +- ✅ **Performant**: Unix sockets avoid TCP overhead +- ✅ **Secure**: Maintains container isolation where it matters +- ✅ **Standard**: Uses documented Podman/OCI features + diff --git a/roles/authentik/defaults/main.yml b/roles/authentik/defaults/main.yml index b031a3f..cde5d48 100644 --- a/roles/authentik/defaults/main.yml +++ b/roles/authentik/defaults/main.yml @@ -14,10 +14,11 @@ authentik_group: authentik authentik_home: /opt/authentik authentik_data_dir: "{{ authentik_home }}/data" authentik_media_dir: "{{ authentik_home }}/media" +authentik_log_dir: "{{ authentik_home }}/logs" # Container configuration -authentik_version: "latest" -authentik_image: "ghcr.io/goauthentik/authentik" +authentik_version: "2025.10" +authentik_image: "ghcr.io/goauthentik/server" # Service management authentik_service_enabled: true @@ -43,8 +44,7 @@ authentik_valkey_db: 1 # Use database 1 for Authentik authentik_domain: "auth.jnss.me" authentik_http_port: 9000 -authentik_https_port: 9443 -authentik_bind_address: "127.0.0.1" +authentik_bind_address: "0.0.0.0" # ================================================================= # Authentik Core Configuration @@ -107,4 +107,4 @@ postgresql_unix_socket_directories: "/var/run/postgresql" # Valkey socket configuration (managed by valkey role) valkey_unix_socket_path: "/var/run/valkey/valkey.sock" -valkey_password: "{{ vault_valkey_password }}" \ No newline at end of file +valkey_password: "{{ vault_valkey_password }}" diff --git a/roles/authentik/tasks/cache.yml b/roles/authentik/tasks/cache.yml index 7df6a6e..c3b4542 100644 --- a/roles/authentik/tasks/cache.yml +++ b/roles/authentik/tasks/cache.yml @@ -42,7 +42,7 @@ redis-cli -s {{ valkey_unix_socket_path }} -a {{ valkey_password }} -n {{ authentik_valkey_db }} - SET authentik:healthcheck "{{ ansible_date_time.iso8601 }}" + SET authentik:healthcheck "deployed" become: true become_user: "{{ authentik_user }}" register: valkey_write_test diff --git a/roles/authentik/tasks/main.yml b/roles/authentik/tasks/main.yml index 60212db..2b3dfec 100644 --- a/roles/authentik/tasks/main.yml +++ b/roles/authentik/tasks/main.yml @@ -28,29 +28,29 @@ - "{{ authentik_data_dir }}" - "{{ authentik_media_dir }}" - "{{ authentik_user_quadlet_dir }}" + - "{{ authentik_log_dir }}" -- name: Get authentik user UID - getent: - database: passwd - key: "{{ authentik_user }}" - register: authentik_user_info - -- name: Set authentik UID variable - set_fact: - authentik_uid: "{{ authentik_user_info.ansible_facts.getent_passwd[authentik_user][1] }}" - name: Enable lingering for authentik user (services persist without login) command: loginctl enable-linger {{ authentik_user }} register: linger_result changed_when: linger_result.rc == 0 -- name: Ensure XDG runtime directory exists - file: - path: "/run/user/{{ authentik_uid }}" - state: directory - owner: "{{ authentik_user }}" - group: "{{ authentik_group }}" - mode: '0700' + + +- name: Get authentik user UID and GID for container configuration + shell: | + echo "uid=$(id -u {{ authentik_user }})" + echo "gid=$(id -g {{ authentik_user }})" + register: authentik_user_info + changed_when: false + tags: [setup] + +- name: Set authentik UID/GID facts for container templates + set_fact: + authentik_uid: "{{ authentik_user_info.stdout_lines[0] | regex_replace('uid=', '') }}" + authentik_gid: "{{ authentik_user_info.stdout_lines[1] | regex_replace('gid=', '') }}" + tags: [setup] - name: Setup database access and permissions include_tasks: database.yml @@ -134,15 +134,12 @@ timeout: 30 when: valkey_unix_socket_enabled -- name: Reload systemd daemon for Quadlet (user scope) +- name: Ensure systemd user session is started systemd: - daemon_reload: true - scope: user - become: true - become_user: "{{ authentik_user }}" - environment: - XDG_RUNTIME_DIR: "/run/user/{{ authentik_uid }}" - tags: [containers, deployment] + name: "user@{{ authentik_uid }}.service" + state: started + scope: system + register: user_session_start - name: Enable and start Authentik pod (user scope) systemd: @@ -153,34 +150,6 @@ daemon_reload: true become: true become_user: "{{ authentik_user }}" - environment: - XDG_RUNTIME_DIR: "/run/user/{{ authentik_uid }}" - tags: [containers, service] - -- name: Enable and start Authentik server (user scope) - systemd: - name: "{{ authentik_container_server_name }}" - enabled: "{{ authentik_service_enabled }}" - state: "{{ authentik_service_state }}" - scope: user - daemon_reload: true - become: true - become_user: "{{ authentik_user }}" - environment: - XDG_RUNTIME_DIR: "/run/user/{{ authentik_uid }}" - tags: [containers, service] - -- name: Enable and start Authentik worker (user scope) - systemd: - name: "{{ authentik_container_worker_name }}" - enabled: "{{ authentik_service_enabled }}" - state: "{{ authentik_service_state }}" - scope: user - daemon_reload: true - become: true - become_user: "{{ authentik_user }}" - environment: - XDG_RUNTIME_DIR: "/run/user/{{ authentik_uid }}" tags: [containers, service] - name: Wait for Authentik to be ready diff --git a/roles/authentik/templates/authentik-server.container b/roles/authentik/templates/authentik-server.container index 6dd0784..88b5c88 100644 --- a/roles/authentik/templates/authentik-server.container +++ b/roles/authentik/templates/authentik-server.container @@ -6,8 +6,15 @@ Requires=authentik-pod.service [Container] ContainerName={{ authentik_container_server_name }} Image={{ authentik_image }}:{{ authentik_version }} -Pod=authentik +Pod=authentik.pod EnvironmentFile={{ authentik_home }}/.env +User={{ authentik_uid }}:{{ authentik_gid }} +Annotation=run.oci.keep_original_groups=1 + +# Logging configuration +LogDriver=k8s-file +LogOpt=path={{ authentik_home }}/logs/server.log +Volume={{ authentik_home }}/logs:{{ authentik_home }}/logs # Volume mounts for data and sockets Volume={{ authentik_media_dir }}:/media diff --git a/roles/authentik/templates/authentik-worker.container b/roles/authentik/templates/authentik-worker.container index 091d416..2e88dad 100644 --- a/roles/authentik/templates/authentik-worker.container +++ b/roles/authentik/templates/authentik-worker.container @@ -6,8 +6,15 @@ Requires=authentik-pod.service [Container] ContainerName={{ authentik_container_worker_name }} Image={{ authentik_image }}:{{ authentik_version }} -Pod=authentik +Pod=authentik.pod EnvironmentFile={{ authentik_home }}/.env +User={{ authentik_uid }}:{{ authentik_gid }} +Annotation=run.oci.keep_original_groups=1 + +# Logging configuration +LogDriver=k8s-file +LogOpt=path={{ authentik_home }}/logs/worker.log +Volume={{ authentik_home }}/logs:{{ authentik_home }}/logs # Volume mounts for data and sockets Volume={{ authentik_media_dir }}:/media diff --git a/roles/authentik/templates/authentik.env.j2 b/roles/authentik/templates/authentik.env.j2 index 561ee8f..6c0695a 100644 --- a/roles/authentik/templates/authentik.env.j2 +++ b/roles/authentik/templates/authentik.env.j2 @@ -8,11 +8,8 @@ AUTHENTIK_POSTGRESQL__USER={{ authentik_db_user }} AUTHENTIK_POSTGRESQL__PASSWORD={{ authentik_db_password }} # No port needed for Unix socket -# Valkey/Redis Configuration (Unix Socket) -AUTHENTIK_REDIS__HOST=unix://{{ valkey_unix_socket_path }} -AUTHENTIK_REDIS__PASSWORD={{ valkey_password }} -AUTHENTIK_REDIS__DB={{ authentik_valkey_db }} -# No port needed for Unix socket +# Valkey/Redis Configuration (Unix Socket) - Using cache URL format to avoid port parsing issues +AUTHENTIK_CACHE__URL=unix://{{ valkey_unix_socket_path }}?db={{ authentik_valkey_db }}&password={{ valkey_password }} # Authentik Core Configuration AUTHENTIK_SECRET_KEY={{ authentik_secret_key }} @@ -26,7 +23,6 @@ AUTHENTIK_DISABLE_STARTUP_ANALYTICS=true # Network binding AUTHENTIK_LISTEN__HTTP={{ authentik_bind_address }}:{{ authentik_http_port }} -AUTHENTIK_LISTEN__HTTPS={{ authentik_bind_address }}:{{ authentik_https_port }} {% if authentik_email_enabled %} # Email Configuration @@ -40,4 +36,4 @@ AUTHENTIK_EMAIL__FROM={{ authentik_email_from }} # Default admin user AUTHENTIK_BOOTSTRAP_PASSWORD={{ authentik_default_admin_password }} -AUTHENTIK_BOOTSTRAP_EMAIL={{ authentik_default_admin_email }} \ No newline at end of file +AUTHENTIK_BOOTSTRAP_EMAIL={{ authentik_default_admin_email }} diff --git a/roles/authentik/templates/authentik.pod b/roles/authentik/templates/authentik.pod index 27d38b3..d0c3736 100644 --- a/roles/authentik/templates/authentik.pod +++ b/roles/authentik/templates/authentik.pod @@ -2,10 +2,8 @@ Description=Authentik Authentication Pod [Pod] -PodName=authentik -PublishPort={{ authentik_bind_address }}:{{ authentik_http_port }}:{{ authentik_http_port }} -PublishPort={{ authentik_bind_address }}:{{ authentik_https_port }}:{{ authentik_https_port }} -PodmanArgs=--userns=keep-id +PublishPort=0.0.0.0:{{ authentik_http_port }}:{{ authentik_http_port }} +PodmanArgs= [Service] Restart=always diff --git a/roles/postgresql/defaults/main.yml b/roles/postgresql/defaults/main.yml index 75463ae..eaa1333 100644 --- a/roles/postgresql/defaults/main.yml +++ b/roles/postgresql/defaults/main.yml @@ -13,10 +13,15 @@ postgresql_service_enabled: true postgresql_service_state: "started" -# Network Security (localhost only) +# Network Security postgresql_listen_addresses: "localhost" postgresql_port: 5432 +# Unix Socket Configuration +postgresql_unix_socket_enabled: true +postgresql_unix_socket_directories: "/var/run/postgresql" +postgresql_unix_socket_permissions: "0770" + # Authentication postgresql_auth_method: "scram-sha-256" diff --git a/roles/postgresql/tasks/main.yml b/roles/postgresql/tasks/main.yml index 9c8046e..eeab7c2 100644 --- a/roles/postgresql/tasks/main.yml +++ b/roles/postgresql/tasks/main.yml @@ -25,7 +25,7 @@ --auth-local=peer --auth-host={{ postgresql_auth_method }} {{ '--data-checksums' if postgresql_data_checksums else '' }} - become: yes + become: true become_user: postgres when: not postgresql_initialized.stat.exists notify: restart postgresql @@ -67,27 +67,49 @@ - reload systemd - restart postgresql +- name: Create PostgreSQL Unix socket directory + file: + path: "{{ postgresql_unix_socket_directories }}" + state: directory + owner: postgres + group: postgres + mode: '0770' + when: postgresql_unix_socket_enabled + - name: Enable and start PostgreSQL service systemd: name: postgresql enabled: "{{ postgresql_service_enabled }}" state: "{{ postgresql_service_state }}" - daemon_reload: yes + daemon_reload: true -- name: Wait for PostgreSQL to be ready +- name: Wait for PostgreSQL to be ready (TCP) wait_for: port: "{{ postgresql_port }}" host: "{{ postgresql_listen_addresses }}" timeout: 30 - when: postgresql_service_state == "started" + when: postgresql_service_state == "started" and postgresql_listen_addresses != "" + +- name: Wait for PostgreSQL to be ready (Unix Socket) + postgresql_ping: + login_unix_socket: "{{ postgresql_unix_socket_directories }}" + login_user: postgres + become: true + become_user: postgres + register: postgresql_socket_ready + until: postgresql_socket_ready is succeeded + retries: 10 + delay: 3 + when: postgresql_service_state == "started" and postgresql_unix_socket_enabled and postgresql_listen_addresses == "" - name: Display PostgreSQL infrastructure status debug: msg: | ✅ PostgreSQL infrastructure ready! - 📡 Service: {{ postgresql_listen_addresses }}:{{ postgresql_port }} + 📡 Service: {% if postgresql_unix_socket_enabled and postgresql_listen_addresses == "" %}Unix Socket ({{ postgresql_unix_socket_directories }}){% else %}{{ postgresql_listen_addresses }}:{{ postgresql_port }}{% endif %} 🔒 Auth: {{ postgresql_auth_method }} 📊 Checksums: {{ 'Enabled' if postgresql_data_checksums else 'Disabled' }} + {% if postgresql_unix_socket_enabled %}🔌 Socket: {{ postgresql_unix_socket_directories }} (mode {{ postgresql_unix_socket_permissions }}){% endif %} 🏗️ Ready for applications to create databases/users \ No newline at end of file diff --git a/roles/postgresql/templates/postgresql.conf.j2 b/roles/postgresql/templates/postgresql.conf.j2 index 9c1e67a..e210ab2 100644 --- a/roles/postgresql/templates/postgresql.conf.j2 +++ b/roles/postgresql/templates/postgresql.conf.j2 @@ -3,6 +3,11 @@ # PostgreSQL's excellent defaults are used except for essentials # Network and Security +{% if postgresql_unix_socket_enabled %} +# Unix Socket Configuration +unix_socket_directories = '{{ postgresql_unix_socket_directories }}' +unix_socket_permissions = {{ postgresql_unix_socket_permissions }} +{% endif %} listen_addresses = '{{ postgresql_listen_addresses }}' port = {{ postgresql_port }} diff --git a/roles/sigvild-gallery/README.md b/roles/sigvild-gallery/README.md index 6b093c4..cef7bf2 100644 --- a/roles/sigvild-gallery/README.md +++ b/roles/sigvild-gallery/README.md @@ -44,6 +44,10 @@ sigvild_gallery_host: "127.0.0.1" sigvild_gallery_home: "/opt/sigvild-gallery" sigvild_gallery_web_root: "/var/www/sigvild-gallery" sigvild_gallery_local_project_path: "{{ ansible_env.PWD }}/sigvild-gallery" + +# Backup configuration +sigvild_gallery_backup_enabled: true +sigvild_gallery_backup_local_path: "{{ playbook_dir }}/backups/sigvild-gallery" ``` ## Usage @@ -71,6 +75,62 @@ ansible-playbook site.yml --tags="backend" ansible-playbook site.yml --tags="caddy" ``` +### Data Backup and Restoration + +#### Creating a Backup + +Before formatting your server or making major changes, create a backup of all production data: + +```bash +# Create backup of production data +ansible-playbook playbooks/backup-sigvild.yml + +# Backup will be saved to: ./backups/sigvild-gallery/sigvild-gallery-backup-YYYYMMDDTHHMMSS.tar.gz +``` + +The backup includes: +- PocketBase SQLite database (`data.db`, `auxiliary.db`) +- All uploaded wedding photos and media files +- PocketBase logs and system state + +#### Automatic Restoration + +When deploying to a fresh server, the role automatically detects and restores from the latest backup: + +```bash +# Normal deployment will auto-restore if backup exists +ansible-playbook playbooks/deploy-sigvild.yml + +# Or deploy full infrastructure (includes auto-restore) +ansible-playbook site.yml +``` + +#### Manual Restoration + +To restore data manually or from a specific backup: + +```bash +# Restore with specific backup file +ansible-playbook playbooks/deploy-sigvild.yml --tags="restore" \ + --extra-vars="sigvild_gallery_backup_local_path=/path/to/backup/directory" + +# Force restoration (overwrite existing data) +ansible-playbook playbooks/deploy-sigvild.yml --tags="backend,restore" +``` + +#### Backup Management + +```bash +# List available backups +ls -la ./backups/sigvild-gallery/ + +# Verify backup contents +tar -tzf ./backups/sigvild-gallery/sigvild-gallery-backup-YYYYMMDDTHHMMSS.tar.gz + +# Extract backup for inspection (local) +tar -xzf ./backups/sigvild-gallery/sigvild-gallery-backup-YYYYMMDDTHHMMSS.tar.gz +``` + ## Security Features ### Environment Variables @@ -169,6 +229,21 @@ systemctl reload caddy - `/opt/sigvild-gallery/` (application directory) - `/var/www/sigvild-gallery/` (frontend files) +## Data Protection + +### Backup Strategy +- **Automated**: Backup creation via dedicated playbook +- **Comprehensive**: Includes database, uploaded files, and system state +- **Consistent**: Service temporarily stopped during backup for data integrity +- **Local storage**: Backups stored in `./backups/sigvild-gallery/` directory +- **Timestamped**: Each backup includes ISO timestamp for easy identification + +### Recovery Process +- **Automatic detection**: Deployment automatically detects available backups +- **Zero-downtime restore**: Restoration happens before service startup +- **Integrity verification**: Backups verified before and after restoration +- **Permission preservation**: User/group ownership maintained during restore + ## Tags - `sigvild`: Complete Sigvild Gallery deployment @@ -177,4 +252,6 @@ systemctl reload caddy - `build`: Local build processes - `service`: SystemD service management - `caddy`: Caddy configuration -- `verify`: Post-deployment verification \ No newline at end of file +- `verify`: Post-deployment verification +- `backup`: Data backup operations +- `restore`: Data restoration operations \ No newline at end of file diff --git a/roles/sigvild-gallery/defaults/main.yml b/roles/sigvild-gallery/defaults/main.yml index 0515e04..973b767 100644 --- a/roles/sigvild-gallery/defaults/main.yml +++ b/roles/sigvild-gallery/defaults/main.yml @@ -31,6 +31,10 @@ sigvild_gallery_local_project_path: "{{ ansible_env.PWD }}/sigvild-gallery" sigvild_gallery_service_enabled: true sigvild_gallery_service_state: started +# Backup configuration +sigvild_gallery_backup_enabled: true +sigvild_gallery_backup_local_path: "{{ playbook_dir }}/backups/sigvild-gallery" + # Caddy integration (assumes caddy role provides these) # caddy_sites_enabled_dir: /etc/caddy/sites-enabled # caddy_user: caddy diff --git a/roles/sigvild-gallery/tasks/backup.yml b/roles/sigvild-gallery/tasks/backup.yml new file mode 100644 index 0000000..57b141c --- /dev/null +++ b/roles/sigvild-gallery/tasks/backup.yml @@ -0,0 +1,100 @@ +--- +# Sigvild Gallery Data Backup Tasks + +- name: Create local backup directory + local_action: + module: file + path: "{{ sigvild_gallery_backup_local_path }}/{{ ansible_date_time.iso8601_basic_short }}" + state: directory + mode: '0755' + become: false + run_once: true + +- name: Display backup information + debug: + msg: + - "Creating backup of Sigvild Gallery data..." + - "Data directory: {{ sigvild_gallery_data_dir }}" + - "Backup will be saved to: {{ sigvild_gallery_backup_local_path }}" + - "Timestamp: {{ ansible_date_time.iso8601_basic_short }}" + +- name: Check if data directory exists + stat: + path: "{{ sigvild_gallery_data_dir }}" + register: data_dir_stat + +- name: Fail if no data directory found + fail: + msg: "No data directory found at {{ sigvild_gallery_data_dir }}. Nothing to backup." + when: not data_dir_stat.stat.exists + +- name: Display data directory size + command: du -sh {{ sigvild_gallery_data_dir }} + register: data_size + changed_when: false + +- name: Show data size + debug: + msg: "Data directory size: {{ data_size.stdout }}" + +- name: Stop sigvild-gallery service for consistent backup + systemd: + name: sigvild-gallery + state: stopped + register: service_stopped + +- name: Create compressed backup of pb_data + archive: + path: "{{ sigvild_gallery_data_dir }}" + dest: "/tmp/sigvild-gallery-backup-{{ ansible_date_time.iso8601_basic_short }}.tar.gz" + format: gz + owner: "{{ sigvild_gallery_user }}" + group: "{{ sigvild_gallery_user }}" + mode: '0644' + register: backup_created + +- name: Verify backup contains critical files + command: tar -tzf /tmp/sigvild-gallery-backup-{{ ansible_date_time.iso8601_basic_short }}.tar.gz + register: backup_contents + changed_when: false + failed_when: + - "'data.db' not in backup_contents.stdout" + +- name: Display backup verification + debug: + msg: "Backup verified - contains required database files" + +- name: Get backup file size + stat: + path: "/tmp/sigvild-gallery-backup-{{ ansible_date_time.iso8601_basic_short }}.tar.gz" + register: backup_file_stat + +- name: Display backup file info + debug: + msg: "Backup file created: {{ (backup_file_stat.stat.size / 1024 / 1024) | round(2) }}MB" + when: backup_file_stat.stat.exists + +- name: Download backup to local machine + fetch: + src: "/tmp/sigvild-gallery-backup-{{ ansible_date_time.iso8601_basic_short }}.tar.gz" + dest: "{{ sigvild_gallery_backup_local_path }}/" + flat: yes + register: backup_downloaded + +- name: Clean up remote backup file + file: + path: "/tmp/sigvild-gallery-backup-{{ ansible_date_time.iso8601_basic_short }}.tar.gz" + state: absent + +- name: Restart sigvild-gallery service + systemd: + name: sigvild-gallery + state: started + when: service_stopped.changed + +- name: Display backup completion + debug: + msg: + - "✅ Backup completed successfully!" + - "Local backup location: {{ sigvild_gallery_backup_local_path }}/sigvild-gallery-backup-{{ ansible_date_time.iso8601_basic_short }}.tar.gz" + - "Service has been restarted." \ No newline at end of file diff --git a/roles/sigvild-gallery/tasks/deploy_backend.yml b/roles/sigvild-gallery/tasks/deploy_backend.yml index c587b77..613e8fb 100644 --- a/roles/sigvild-gallery/tasks/deploy_backend.yml +++ b/roles/sigvild-gallery/tasks/deploy_backend.yml @@ -33,6 +33,10 @@ notify: restart sigvild-gallery tags: [backend] +- name: Restore data from backup if available + include_tasks: restore.yml + tags: [backend, restore] + - name: Create data directory for PocketBase file: path: "{{ sigvild_gallery_data_dir }}" diff --git a/roles/sigvild-gallery/tasks/restore.yml b/roles/sigvild-gallery/tasks/restore.yml new file mode 100644 index 0000000..f87c9d1 --- /dev/null +++ b/roles/sigvild-gallery/tasks/restore.yml @@ -0,0 +1,126 @@ +--- +# Sigvild Gallery Data Restoration Tasks + +- name: Check for existing data backup files + local_action: + module: find + paths: "{{ sigvild_gallery_backup_local_path }}" + patterns: "sigvild-gallery-backup-*.tar.gz" + register: backup_files + become: false + +- name: Display backup search results + debug: + msg: + - "Searching for backups in: {{ sigvild_gallery_backup_local_path }}" + - "Found {{ backup_files.files | length }} backup file(s)" + +- name: Display found backup files + debug: + msg: "Found backup: {{ item.path }} ({{ (item.size / 1024 / 1024) | round(2) }}MB, {{ item.mtime | to_datetime }})" + loop: "{{ backup_files.files | sort(attribute='mtime') }}" + when: backup_files.files | length > 0 + +- name: Check if data directory already exists with data + stat: + path: "{{ sigvild_gallery_data_dir }}/data.db" + register: existing_data + +- name: Warn about existing data + debug: + msg: "⚠️ WARNING: Existing data found at {{ sigvild_gallery_data_dir }}/data.db - restoration will overwrite it!" + when: + - existing_data.stat.exists + - backup_files.files | length > 0 + +- name: Restore from latest backup + block: + - name: Get latest backup file + set_fact: + latest_backup: "{{ (backup_files.files | sort(attribute='mtime') | last).path }}" + + - name: Display restoration info + debug: + msg: + - "Restoring from latest backup: {{ latest_backup | basename }}" + - "Target directory: {{ sigvild_gallery_data_dir }}" + + - name: Stop sigvild-gallery service before restoration + systemd: + name: sigvild-gallery + state: stopped + register: service_stopped_for_restore + ignore_errors: true # Service might not exist on fresh deployment + + - name: Upload backup to remote server + copy: + src: "{{ latest_backup }}" + dest: "/tmp/restore-backup.tar.gz" + owner: root + group: root + mode: '0644' + + - name: Verify uploaded backup integrity + command: tar -tzf /tmp/restore-backup.tar.gz + register: restore_contents + changed_when: false + failed_when: "'data.db' not in restore_contents.stdout" + + - name: Remove existing data directory if it exists + file: + path: "{{ sigvild_gallery_data_dir }}" + state: absent + when: existing_data.stat.exists + + - name: Ensure parent directory exists + file: + path: "{{ sigvild_gallery_home }}" + state: directory + owner: "{{ sigvild_gallery_user }}" + group: "{{ sigvild_gallery_user }}" + mode: '0755' + + - name: Extract backup to target location + unarchive: + src: "/tmp/restore-backup.tar.gz" + dest: "{{ sigvild_gallery_home }}" + remote_src: true + owner: "{{ sigvild_gallery_user }}" + group: "{{ sigvild_gallery_user }}" + + - name: Verify restoration + stat: + path: "{{ sigvild_gallery_data_dir }}/data.db" + register: restored_data + + - name: Fail if restoration unsuccessful + fail: + msg: "Restoration failed - data.db not found after extraction" + when: not restored_data.stat.exists + + - name: Get restored data size + command: du -sh {{ sigvild_gallery_data_dir }} + register: restored_size + changed_when: false + + - name: Clean up uploaded backup file + file: + path: "/tmp/restore-backup.tar.gz" + state: absent + + - name: Display restoration success + debug: + msg: + - "✅ Data restoration completed successfully!" + - "Restored from: {{ latest_backup | basename }}" + - "Restored data size: {{ restored_size.stdout }}" + - "Location: {{ sigvild_gallery_data_dir }}" + + when: backup_files.files | length > 0 + +- name: No backup available message + debug: + msg: + - "ℹ️ No backup files found - starting with fresh installation" + - "Data directory will be created empty: {{ sigvild_gallery_data_dir }}" + when: backup_files.files | length == 0 \ No newline at end of file diff --git a/roles/valkey/defaults/main.yml b/roles/valkey/defaults/main.yml index 648b439..5e45baf 100644 --- a/roles/valkey/defaults/main.yml +++ b/roles/valkey/defaults/main.yml @@ -18,6 +18,11 @@ valkey_bind: "127.0.0.1" valkey_port: 6379 valkey_protected_mode: true +# Unix Socket Configuration +valkey_unix_socket_enabled: true +valkey_unix_socket_path: "/var/run/valkey/valkey.sock" +valkey_unix_socket_perm: "770" + # Authentication valkey_requirepass: "{{ vault_valkey_password }}" diff --git a/roles/valkey/tasks/main.yml b/roles/valkey/tasks/main.yml index 9cf1d28..deaea80 100644 --- a/roles/valkey/tasks/main.yml +++ b/roles/valkey/tasks/main.yml @@ -28,6 +28,24 @@ group: valkey mode: '0750' +- name: Create Valkey Unix socket directory + file: + path: "{{ valkey_unix_socket_path | dirname }}" + state: directory + owner: valkey + group: valkey + mode: '0775' + when: valkey_unix_socket_enabled + +- name: Ensure socket directory is accessible + file: + path: "{{ valkey_unix_socket_path | dirname }}" + owner: valkey + group: valkey + mode: '0775' + recurse: yes + when: valkey_unix_socket_enabled + - name: Deploy Valkey configuration file template: src: valkey.conf.j2 @@ -60,28 +78,67 @@ name: valkey enabled: "{{ valkey_service_enabled }}" state: "{{ valkey_service_state }}" - daemon_reload: yes + daemon_reload: true + register: valkey_service_result -- name: Wait for Valkey to be ready +- name: Wait for Valkey to be ready (TCP) wait_for: port: "{{ valkey_port }}" host: "{{ valkey_bind }}" timeout: 30 - when: valkey_service_state == "started" + when: valkey_service_state == "started" and not valkey_unix_socket_enabled -- name: Test Valkey connectivity - command: redis-cli -h {{ valkey_bind }} -p {{ valkey_port }} -a {{ valkey_requirepass }} ping - register: valkey_ping_result +- name: Wait for Valkey socket file to exist + wait_for: + path: "{{ valkey_unix_socket_path }}" + timeout: 30 + when: valkey_service_state == "started" and valkey_unix_socket_enabled + +- name: Wait for Valkey to be ready (Unix Socket) - Try without auth first + command: redis-cli -s {{ valkey_unix_socket_path }} ping + register: valkey_socket_ping_noauth + until: > + valkey_socket_ping_noauth.stdout == "PONG" or + "NOAUTH" in (valkey_socket_ping_noauth.stdout + valkey_socket_ping_noauth.stderr) + retries: 15 + delay: 2 changed_when: false - failed_when: valkey_ping_result.stdout != "PONG" - when: valkey_service_state == "started" + failed_when: false + when: valkey_service_state == "started" and valkey_unix_socket_enabled + +- name: Wait for Valkey to be ready (Unix Socket) - Try with auth if needed + command: redis-cli -s {{ valkey_unix_socket_path }} -a {{ valkey_requirepass }} ping + register: valkey_socket_ping_auth + until: valkey_socket_ping_auth.stdout == "PONG" + retries: 5 + delay: 2 + changed_when: false + failed_when: valkey_socket_ping_auth.rc != 0 + when: > + valkey_service_state == "started" and valkey_unix_socket_enabled and + (valkey_socket_ping_noauth.stdout != "PONG") and + ("NOAUTH" in (valkey_socket_ping_noauth.stdout + valkey_socket_ping_noauth.stderr) or valkey_socket_ping_noauth.rc != 0) + +- name: Test Valkey connectivity (TCP) + command: redis-cli -h {{ valkey_bind }} -p {{ valkey_port }} -a {{ valkey_requirepass }} ping + register: valkey_ping_result_tcp + changed_when: false + failed_when: valkey_ping_result_tcp.stdout != "PONG" + when: valkey_service_state == "started" and not valkey_unix_socket_enabled + +- name: Test Valkey connectivity (Unix Socket) + command: redis-cli -s {{ valkey_unix_socket_path }} -a {{ valkey_requirepass }} ping + register: valkey_ping_result_socket + changed_when: false + failed_when: valkey_ping_result_socket.stdout != "PONG" + when: valkey_service_state == "started" and valkey_unix_socket_enabled - name: Display Valkey infrastructure status debug: msg: | ✅ Valkey infrastructure ready! - 📡 Service: {{ valkey_bind }}:{{ valkey_port }} + 📡 Service: {% if valkey_unix_socket_enabled %}Unix Socket ({{ valkey_unix_socket_path }}){% else %}{{ valkey_bind }}:{{ valkey_port }}{% endif %} 🔒 Auth: Password protected 💾 Persistence: {{ 'RDB enabled' if valkey_save_enabled else 'Memory only' }} 🗄️ Databases: {{ valkey_databases }} available (0-{{ valkey_databases - 1 }}) @@ -91,4 +148,5 @@ 📋 Application Integration: - Use database numbers 1-{{ valkey_databases - 1 }} for applications - Database 0 reserved for system/testing + - {% if valkey_unix_socket_enabled %}Unix socket: {{ valkey_unix_socket_path }}{% else %}TCP: {{ valkey_bind }}:{{ valkey_port }}{% endif %} - Redis-compatible: applications can use REDIS_* or VALKEY_* env vars diff --git a/roles/valkey/templates/valkey.conf.j2 b/roles/valkey/templates/valkey.conf.j2 index 17cadcb..02de0e2 100644 --- a/roles/valkey/templates/valkey.conf.j2 +++ b/roles/valkey/templates/valkey.conf.j2 @@ -14,6 +14,15 @@ bind {{ valkey_bind }} # Valkey port port {{ valkey_port }} +{% if valkey_unix_socket_enabled %} +# Unix Socket Configuration +unixsocket {{ valkey_unix_socket_path }} +unixsocketperm {{ valkey_unix_socket_perm }} + +# Enable both TCP and Unix socket (for compatibility during transition) +# To disable TCP completely, comment out the port line above +{% endif %} + # Protected mode - requires authentication protected-mode {{ 'yes' if valkey_protected_mode else 'no' }} diff --git a/site.yml b/site.yml index bb653ea..34c62f7 100644 --- a/site.yml +++ b/site.yml @@ -4,8 +4,8 @@ - name: Deploy Core Infrastructure hosts: arch-vps - become: yes - gather_facts: yes + become: true + gather_facts: true roles: # Infrastructure services @@ -13,12 +13,14 @@ # tags: ['postgresql', 'infrastructure', 'database'] # - role: valkey # tags: ['valkey', 'redis', 'infrastructure', 'cache'] - - role: podman - tags: ['podman', 'containers', 'infrastructure'] + # - role: podman + # tags: ['podman', 'containers', 'infrastructure'] # - role: caddy # tags: ['caddy', 'infrastructure', 'web'] # Application services + - role: authentik + tags: ['authentik'] # - role: gitea # tags: ['gitea', 'git', 'development'] # - role: sigvild-gallery