Compare commits

42903b4fe1...feat-backu (267 commits)
.ansible-lint.yml (new file, 6 lines)
@@ -0,0 +1,6 @@
---
exclude_paths:
  - ".ansible/"
  - ".gitea/"
  - "galaxy.roles/"
  - "Taskfile.yml"
.crushignore (new file, 4 lines)
@@ -0,0 +1,4 @@
ansible-vault-password-file

*secrets.yml
*secrets.toml
@@ -6,11 +6,9 @@ insert_final_newline = true
 indent_style = space
 indent_size = 4
 
-[*.yml]
+[*.{yml,yaml,yml.j2}]
 indent_size = 2
+trim_trailing_whitespace = true
 
 [Vagrantfile]
 indent_size = 2
-
-[Makefile]
-indent_style = tab
.gitea/workflows/lint.yml (new file, 50 lines)
@@ -0,0 +1,50 @@
name: Linting

on: push

jobs:
  yamllint:
    name: YAML Lint
    runs-on: ubuntu-latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.x'

      - name: Install yamllint
        run: pip install yamllint

      - name: Run yamllint
        run: yamllint . --format colored

  ansible-lint:
    name: Ansible Lint
    runs-on: ubuntu-latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.x'

      - name: Install dependencies
        run: pip install ansible ansible-lint

      # Create an empty vault password file if it is referenced in the config but missing
      - name: Fix vault issue
        run: |
          if grep -q "vault_password_file" ansible.cfg && [ ! -f ansible-vault-password-file ]; then
            echo "Creating empty vault password file for CI..."
            echo "foobar" > ansible-vault-password-file
          fi

      - name: Run ansible-lint
        run: ansible-lint .
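The same checks can be reproduced locally before pushing; a minimal sketch, assuming yamllint and ansible-lint are installed via pip:

```bash
# Install the linters used by the workflow above
pip install yamllint ansible ansible-lint

# Same vault workaround as in CI: ansible.cfg references a vault password file
[ -f ansible-vault-password-file ] || echo "foobar" > ansible-vault-password-file

# Run the linters with the repository configs (.yamllint.yml, .ansible-lint.yml)
yamllint . --format colored
ansible-lint .
```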
.gitignore (vendored)
@@ -1,5 +1,11 @@
+/.ansible
 /.idea
 /.vagrant
-/ansible/galaxy.roles/
-ansible-vault-password-file
+/.vscode
+/galaxy.roles/
+/ansible-vault-password-file
+/temp
 *.retry
+
+__pycache__
.yamllint.yml (new file, 25 lines)
@@ -0,0 +1,25 @@
extends: default

ignore:
  - ".ansible/"
  - "galaxy.roles/"

rules:
  # Rules required by ansible-lint
  comments:
    min-spaces-from-content: 1
  comments-indentation: false
  braces:
    max-spaces-inside: 1
  octal-values:
    forbid-implicit-octal: true
    forbid-explicit-octal: true

  # Additional settings (optional)
  line-length:
    max: 120
    allow-non-breakable-words: true
    allow-non-breakable-inline-mappings: true
  document-start: disable  # Do not require --- at the beginning of a file
  truthy:
    level: warning
AGENTS.md (new file, 69 lines)
@@ -0,0 +1,69 @@
# AGENTS GUIDE

## Overview
Ansible-based server automation for personal services. Playbooks provision Dockerized apps (e.g., gitea, authelia, homepage, miniflux, wakapi, memos) via per-app users, Caddy proxy, and Yandex Docker Registry. Secrets are managed with Ansible Vault.

## Project Layout
- Playbooks: `playbook-*.yml` (per service), `playbook-all-*.yml` for grouped actions.
- Inventory: `production.yml` (ungrouped host `server`).
- Variables: `vars/*.yml` (app configs, images), secrets in `vars/secrets.yml` (vault-encrypted).
- Roles: custom roles under `roles/` (e.g., `eget`, `owner`, `secrets`) plus galaxy roles fetched to `galaxy.roles/`.
- Files/templates: service docker-compose and backup templates under `files/`, shared templates under `templates/`.
- Scripts: helper Python scripts in `scripts/` (SMTP utilities) and `files/backups/backup-all.py`.
- CI: `.gitea/workflows/lint.yml` runs yamllint and ansible-lint.
- Hooks: `lefthook.yml` references local hooks in `/home/av/projects/private/git-hooks` (gitleaks, vault check).
- Formatting: `.editorconfig` enforces LF, trailing newline, 4-space indent; YAML/Jinja use 2-space indent.

## Setup
- Copy vault password sample: `cp ansible-vault-password-file.dist ansible-vault-password-file` (needed for ansible and CI).
- Install galaxy roles: `ansible-galaxy role install --role-file requirements.yml --force` (or `task install-roles`).
- Ensure `yq`, `task`, `ansible` installed per README requirements.

## Tasks (taskfile)
- `task install-roles` — install galaxy roles into `galaxy.roles/`.
- `task ssh` — SSH to target using inventory (`production.yml`).
- `task btop` — run `btop` on remote.
- `task encrypt|decrypt -- <files>` — ansible-vault helpers.
- Authelia helpers:
  - `task authelia-cli -- <args>` — run authelia CLI in docker.
  - `task authelia-validate-config` — render `files/authelia/configuration.template.yml` with secrets and validate via authelia docker image.
  - `task authelia-gen-random-string LEN=64` — generate random string.
  - `task authelia-gen-secret-and-hash LEN=72` — generate hashed secret.
- `task format-py-files` — run Black via docker (pyfound/black).

## Ansible Usage
- Inventory: `production.yml` with `server` host. `ansible.cfg` points to `./ansible-vault-password-file` and `./galaxy.roles` for roles path.
- Typical deploy example (from README): `ansible-playbook -i production.yml --diff playbook-gitea.yml`.
- Per-app playbooks: `playbook-<app>.yml`; grouped runs: `playbook-all-setup.yml`, `playbook-all-applications.yml`, `playbook-upgrade.yml`, etc.
- Secrets: encrypted `vars/secrets.yml`; additional `files/<app>/secrets.yml` used for templating (e.g., Authelia). Respect `.crushignore` ignoring vault files.
- Templates: many `docker-compose.template.yml` and `*.template.sh` files under `files/*` plus shared `templates/env.j2`. Use `vars/*.yml` to supply values.
- Custom roles:
  - `roles/eget`: installs `eget` tool; see defaults/vars for version/source.
  - `roles/owner`: manages user/group and env template.
  - `roles/secrets`: manages vault-related items.

## Linting & CI
- Local lint configs: `.yamllint.yml`, `.ansible-lint.yml` (excludes `.ansible/`, `.gitea/`, `galaxy.roles/`, `Taskfile.yml`).
- CI (.gitea/workflows/lint.yml) installs `yamllint` and `ansible-lint` and runs `yamllint .` then `ansible-lint .`; creates dummy vault file if missing.
- Pre-commit via lefthook (local hooks path): runs `gitleaks git --staged` and secret-file vault check script.

## Coding/Templating Conventions
- Indentation: 2 spaces for YAML/Jinja (`.editorconfig`), 4 spaces default elsewhere.
- End-of-line: LF; ensure final newline.
- Template suffixes `.template.yml`, `.yml.j2`, `.template.sh` are rendered via Ansible `template` module.
- Avoid committing real secrets; `.crushignore` excludes `ansible-vault-password-file` and `*secrets.yml`.
- Service directories under `files/` hold docker-compose and backup templates; ensure per-app users and registry settings align with `vars/*.yml`.

## Testing/Validation
- YAML lint: `yamllint .` (CI default).
- Ansible lint: `ansible-lint .` (CI default).
- Authelia config validation: `task authelia-validate-config` (renders with secrets then validates via docker).
- Black formatting for Python helpers: `task format-py-files`.
- Python types validation with mypy: `mypy <file.py>`.

## Operational Notes
- Deployments rely on `production.yml` inventory and per-app playbooks; run with `--diff` for visibility.
- Yandex Docker Registry auth helper: `files/yandex-docker-registry-auth.sh`.
- Backups: templates and scripts under `files/backups/` per service; `backup-all.py` orchestrates.
- Home network/DNS reference in README (Yandex domains).
- Ensure `ansible-vault-password-file` present for vault operations and CI.
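A short sketch of the workflow the guide describes, using only commands named above (the playbook name follows the README example):

```bash
# One-time setup
cp ansible-vault-password-file.dist ansible-vault-password-file
task install-roles               # ansible-galaxy role install --role-file requirements.yml --force

# Deploy a single application with a visible diff
ansible-playbook -i production.yml --diff playbook-gitea.yml

# Day-to-day helpers
task ssh                         # SSH to the host from production.yml
task encrypt -- vars/secrets.yml
task authelia-validate-config
```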
Makefile (deleted, 81 lines)
@@ -1,81 +0,0 @@
PLAYBOOK := ansible/configuration.yml
STAGE := vagrant

ifeq ($(STAGE), prod)
ANSIBLE_HOST_KEY_CHECKING := True
INVENTORY := ansible/hosts_prod
USER_ARGS := --user="major" --become
else
ANSIBLE_HOST_KEY_CHECKING := False
INVENTORY := ansible/hosts_vagrant
USER_ARGS := --user="root"
endif

ifneq ($(TAGS),)
TAGS_ARGS := --tags="$(TAGS)"
else
TAGS_ARGS :=
endif

# Tasks

install-roles:
	ansible-galaxy install \
	--role-file "ansible/requirements.yml" \
	--force

rebuild-test-machine:
	vagrant destroy -f && vagrant up

edit-vars:
	EDITOR=micro \
	ansible-vault edit ansible/vars/vars.yml

configure:
	ANSIBLE_HOST_KEY_CHECKING=$(ANSIBLE_HOST_KEY_CHECKING) \
	ansible-playbook \
	$(USER_ARGS) \
	$(TAGS_ARGS) \
	--inventory="$(INVENTORY)" \
	--extra-vars='ansible_python_interpreter=/usr/bin/python3' \
	-vvv \
	$(PLAYBOOK)

configure-prod:
	$(MAKE) configure STAGE="prod"

configure-monitoring:
	$(MAKE) configure STAGE="prod" TAGS="monitoring"

configure-apps:
	$(MAKE) configure TAGS="webserver,apps,env"

configure-apps-in-prod:
	$(MAKE) configure STAGE="prod" TAGS="webserver,apps,env"

configure-users:
	$(MAKE) configure TAGS="apps,env"

dry-run:
	ANSIBLE_HOST_KEY_CHECKING=$(ANSIBLE_HOST_KEY_CHECKING) \
	ansible-playbook \
	$(USER_ARGS) \
	$(TAGS_ARGS) \
	--inventory="$(INVENTORY)" \
	--extra-vars='ansible_python_interpreter=/usr/bin/python3' \
	--check \
	--diff -vvv \
	$(PLAYBOOK)

list-tags:
	ansible-playbook \
	--inventory="$(INVENTORY)" \
	--list-tags \
	$(PLAYBOOK)

lint:
	ansible-lint "./ansible/configuration.yml" --exclude="./ansible/galaxy.roles/" -v || true
	ansible-lint "./ansible/roles/ssl-certificate/tasks/main.yml" -v || true

caddy-hash-password:
	docker run -it caddy:2.5.2 caddy hash-password --plaintext="$(PASS)"
README.md
@@ -2,30 +2,39 @@
 Configuration of a virtual server for personal projects.
 
+> The solutions in this project are not the most optimal.
+> But they have been helping me maintain a server for my personal projects for many years.
 
 ## Requirements
 
 - [ansible](https://docs.ansible.com/ansible/latest/getting_started/index.html)
+- [task](https://taskfile.dev/)
+- [yq](https://github.com/mikefarah/yq)
 
 ## Installation
 
-    $ cp ansible-vault-password-file.dist ansible-vault-password-file
-    $ ansible-galaxy install --role-file ansible/requirements.yml
+```bash
+$ cp ansible-vault-password-file.dist ansible-vault-password-file
+$ ansible-galaxy install --role-file requirements.yml
+```
 
 ## Structure
 
-- A dedicated user is created for each application.
+- A dedicated user is created for each application (optional).
 - An SSH key is used for access.
 - Docker is used to run and isolate applications. Yandex Docker Registry is configured for pulling images.
-- Exposure to the external network goes through the Caddy proxy server.
+- Exposure to the external network goes through the proxy server [Caddy](https://caddyserver.com/).
-- Sensitive data in `ansible/vars/vars.yaml` is encrypted with Ansible Vault.
+- Sensitive data in `vars/vars.yaml` is encrypted with Ansible Vault.
 - [netdata](https://github.com/netdata/netdata) is installed for server monitoring.
 
-## Frequent commands
+## DNS setup
 
-Application configuration (when a new application needs to be added):
+In the Yandex organization: https://admin.yandex.ru/domains/vakhrushev.me?action=set_dns&uid=46045840
 
-    $ make configure-apps-in-prod
+## Application deployment
 
-Monitoring configuration (when netdata needs to be updated):
+Deployment of all applications via ansible:
 
-    $ make configure-monitoring
+```bash
+ansible-playbook -i production.yml --diff playbook-gitea.yml
+```
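As a complement to the deploy command above, changes can be previewed first; a minimal sketch using standard ansible-playbook flags (the playbook name is just the same example):

```bash
# Dry run: report what would change without applying anything
ansible-playbook -i production.yml --check --diff playbook-gitea.yml
```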
Taskfile.yml
@@ -4,24 +4,77 @@ version: '3'
 vars:
   USER_ID:
-    sh: id -u
+    sh: 'id -u'
   GROUP_ID:
-    sh: id -g
+    sh: 'id -g'
-  ANSIBLE_HOST_KEY_CHECKING: 'True'
+  HOSTS_FILE: 'production.yml'
+  REMOTE_USER:
+    sh: 'yq .ungrouped.hosts.server.ansible_user {{.HOSTS_FILE}}'
+  REMOTE_HOST:
+    sh: 'yq .ungrouped.hosts.server.ansible_host {{.HOSTS_FILE}}'
+  AUTHELIA_DOCKER: 'docker run --rm -v $PWD:/data authelia/authelia:4.39.4 authelia'
 
 tasks:
-  configure-monitoring:
+  install-roles:
     cmds:
-      - >-
-        ansible-playbook
-        --user="major"
-        --become
-        --tags="monitoring"
-        --inventory="ansible/hosts_prod"
-        --extra-vars="ansible_python_interpreter=/usr/bin/python3"
-        -vvv
-        ansible/configuration.yml
+      - ansible-galaxy role install --role-file requirements.yml --force
+
+  ssh:
+    cmds:
+      - ssh {{.REMOTE_USER}}@{{.REMOTE_HOST}}
+
+  btop:
+    cmds:
+      - ssh {{.REMOTE_USER}}@{{.REMOTE_HOST}} -t btop
+
+  encrypt:
+    cmds:
+      - ansible-vault encrypt {{.CLI_ARGS}}
+
+  decrypt:
+    cmds:
+      - ansible-vault decrypt {{.CLI_ARGS}}
+
+  authelia-cli:
+    cmds:
+      - "{{.AUTHELIA_DOCKER}} {{.CLI_ARGS}}"
+
+  authelia-validate-config:
+    vars:
+      DEST_FILE: "temp/configuration.yml"
+    cmds:
+      - >
+        ansible localhost
+        --module-name template
+        --args "src=files/authelia/configuration.template.yml dest={{.DEST_FILE}}"
+        --extra-vars "@vars/secrets.yml"
+        --extra-vars "@files/authelia/secrets.yml"
+      - defer: rm -f {{.DEST_FILE}}
+      - >
+        {{.AUTHELIA_DOCKER}}
+        validate-config --config /data/{{.DEST_FILE}}
+
+  authelia-gen-random-string:
+    summary: |
+      Generate random string.
+      Usage example:
+      task authelia-gen-random-string LEN=64
+    vars:
+      LEN: '{{ .LEN | default 10 }}'
+    cmds:
+      - >
+        {{.AUTHELIA_DOCKER}}
+        crypto rand --length {{.LEN}} --charset alphanumeric
+
+  authelia-gen-secret-and-hash:
+    vars:
+      LEN: '{{ .LEN | default 72 }}'
+    cmds:
+      - >
+        {{.AUTHELIA_DOCKER}}
+        crypto hash generate pbkdf2 --variant sha512 --random --random.length {{.LEN}} --random.charset rfc3986
+
   format-py-files:
     cmds:
-      - docker run --rm -u {{.USER_ID}}:{{.GROUP_ID}} -v $PWD/app:/app -w /app pyfound/black:latest_release black .
+      - >-
+        docker run --rm -u {{.USER_ID}}:{{.GROUP_ID}} -v $PWD:/app -w /app pyfound/black:latest_release black .
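Example invocations of the tasks defined above (the file arguments are illustrative):

```bash
task install-roles
task ssh
task encrypt -- vars/secrets.yml files/authelia/secrets.yml
task authelia-gen-random-string LEN=64
task authelia-validate-config
```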
Vagrantfile (vendored, deleted, 28 lines)
@@ -1,28 +0,0 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :

# This file is intended for starting a test virtual machine
# on which the server-configuration roles can be tried out.

ENV["LC_ALL"] = "en_US.UTF-8"

Vagrant.configure("2") do |config|

  config.vm.box = "ubuntu/bionic64"
  config.vm.provider "virtualbox" do |v|
    v.memory = 2048
    v.cpus = 2
  end

  config.vm.network "private_network", ip: "192.168.50.10"

  # Private key for accessing the machine
  config.vm.provision "shell" do |s|
    ssh_pub_key = File.readlines("#{Dir.home}/.ssh/id_rsa.pub").first.strip
    s.inline = <<-SHELL
      echo #{ssh_pub_key} >> /home/vagrant/.ssh/authorized_keys
      echo #{ssh_pub_key} >> /root/.ssh/authorized_keys
    SHELL
  end
end
@@ -1,3 +1,4 @@
 [defaults]
-roles_path = ./ansible/galaxy.roles
+host_key_checking = True
 vault_password_file = ./ansible-vault-password-file
+roles_path = ./galaxy.roles
@@ -1,170 +0,0 @@
---
- hosts: all
  vars:
    base_port: 41080
    notes_port: "{{ base_port + 1 }}"
    dayoff_port: "{{ base_port + 2 }}"
    homepage_port: "{{ base_port + 3 }}"
    netdata_port: "{{ base_port + 4 }}"
    wiki_port: "{{ base_port + 5 }}"
    nomie_port: "{{ base_port + 6 }}"
    nomie_db_port: "{{ base_port + 7 }}"
    gitea_port: "{{ base_port + 8 }}"
  vars_files:
    - vars/vars.yml

  tasks:

    - name: 'Install additional packages.'
      apt:
        name: '{{ packages }}'
        update_cache: yes
      vars:
        packages:
          - git
          - python3-pip
          - acl

    - import_role:
        name: yatesr.timezone
      vars:
        timezone: UTC

    - import_role:
        name: geerlingguy.security
      vars:
        security_ssh_permit_root_login: "yes"
        security_autoupdate_enabled: "no"
        security_fail2ban_enabled: "yes"

    - name: 'Install python docker lib.'
      pip:
        name: docker
      tags:
        - docker

    - import_role:
        name: geerlingguy.docker
      tags:
        - docker

    - name: 'Ensure networkd service is started (required by Caddy).'
      systemd:
        name: systemd-networkd
        state: started
        enabled: true
      tags:
        - webserver

    - import_role:
        name: caddy_ansible.caddy_ansible
      vars:
        caddy_github_token: '{{ caddy_vars.github_token }}'
        caddy_config: '{{ lookup("template", "templates/Caddyfile.j2") }}'
        caddy_update: False
        caddy_setcap: True
        caddy_systemd_capabilities_enabled: True
        caddy_systemd_capabilities: "CAP_NET_BIND_SERVICE"
      tags:
        - webserver

    - import_role:
        name: netdata
      vars:
        netdata_version: 'v1.43.1'
        netdata_exposed_port: '{{ netdata_port }}'
      tags:
        - monitoring

    # Applications

    - import_role:
        name: docker-app
      vars:
        username: homepage
        extra_groups:
          - docker
        ssh_keys:
          - '{{ lookup("file", "files/av_id_rsa.pub") }}'
        env:
          DOCKER_PREFIX: homepage
          PROJECT_NAME: homepage
          IMAGE_PREFIX: homepage
          CONTAINER_PREFIX: homepage
          WEB_SERVER_PORT: '127.0.0.1:{{ homepage_port }}'
      tags:
        - apps

    - import_role:
        name: docker-app
      vars:
        username: dayoff
        extra_groups:
          - docker
        ssh_keys:
          - '{{ lookup("file", "files/av_id_rsa.pub") }}'
          - '{{ lookup("file", "files/dayoff_id_rsa.pub") }}'
        env:
          DOCKER_PREFIX: dayoff
          PROJECT_NAME: dayoff
          IMAGE_PREFIX: dayoff
          CONTAINER_PREFIX: dayoff
          WEB_SERVER_PORT: '127.0.0.1:{{ dayoff_port }}'
      tags:
        - apps

    - import_role:
        name: docker-app
      vars:
        username: wiki
        extra_groups:
          - docker
        ssh_keys:
          - '{{ lookup("file", "files/av_id_rsa.pub") }}'
        env:
          PROJECT_NAME: wiki
          DOCKER_PREFIX: wiki
          IMAGE_PREFIX: wiki
          CONTAINER_PREFIX: wiki
          WEB_SERVER_PORT: '127.0.0.1:{{ wiki_port }}'
      tags:
        - apps

    - import_role:
        name: docker-app
      vars:
        username: nomie
        extra_groups:
          - docker
        ssh_keys:
          - '{{ lookup("file", "files/av_id_rsa.pub") }}'
        env:
          PROJECT_NAME: nomie
          DOCKER_PREFIX: nomie
          IMAGE_PREFIX: nomie
          CONTAINER_PREFIX: nomie
          WEB_SERVER_PORT: '127.0.0.1:{{ nomie_port }}'
          COUCH_DB_PORT: '127.0.0.1:{{ nomie_db_port }}'
          COUCH_DB_USER: 'couch-admin'
          COUCH_DB_PASSWORD: '{{ nomie.couch_db_password }}'
      tags:
        - apps

    - import_role:
        name: docker-app
      vars:
        username: gitea
        extra_groups:
          - docker
        ssh_keys:
          - '{{ lookup("file", "files/av_id_rsa.pub") }}'
        env:
          PROJECT_NAME: gitea
          DOCKER_PREFIX: gitea
          IMAGE_PREFIX: gitea
          CONTAINER_PREFIX: gitea
          WEB_SERVER_PORT: '127.0.0.1:{{ gitea_port }}'
          USER_UID: '{{ uc_result.uid }}'
          USER_GID: '{{ uc_result.group }}'
      tags:
        - apps
@@ -1,25 +0,0 @@
$ANSIBLE_VAULT;1.1;AES256
(vault-encrypted data)
@@ -1 +0,0 @@
158.160.115.150
@@ -1 +0,0 @@
192.168.50.10
@@ -1,12 +0,0 @@
---
- src: yatesr.timezone
  version: 1.2.0

- src: geerlingguy.security
  version: 2.2.0

- src: geerlingguy.docker
  version: 6.1.0

- src: caddy_ansible.caddy_ansible
  version: v3.2.0
@@ -1,24 +0,0 @@
---
- name: 'Create owner.'
  import_role:
    name: owner
  vars:
    owner_name: '{{ username }}'
    owner_group: '{{ username }}'
    owner_extra_groups: '{{ extra_groups | default([]) }}'
    owner_ssh_keys: '{{ ssh_keys | default([]) }}'
    owner_env: '{{ env | default({}) }}'

- name: 'Create web dir.'
  file:
    path: '/var/www/{{ username }}'
    state: directory
    owner: '{{ username }}'
    group: '{{ username }}'
    recurse: True

- name: 'Login to yandex docker registry.'
  ansible.builtin.script:
    cmd: 'files/yandex-docker-registry-auth.sh'
  become: yes
  become_user: '{{ username }}'
@@ -1,4 +0,0 @@
---
netdata_version: 'v1.18.0'
netdata_image: 'netdata/netdata:{{ netdata_version }}'
netdata_exposed_port: '19999'
@@ -1,22 +0,0 @@
---
- name: 'Grab docker group id.'
  shell: grep docker /etc/group | cut -d ':' -f 3
  register: docker_group

- name: 'Create NetData container from {{ netdata_image }}'
  docker_container:
    name: netdata
    image: '{{ netdata_image }}'
    restart_policy: 'always'
    published_ports:
      - '127.0.0.1:{{ netdata_exposed_port }}:19999'
    volumes:
      - '/proc:/host/proc:ro'
      - '/sys:/host/sys:ro'
      - '/var/run/docker.sock:/var/run/docker.sock:ro'
    capabilities:
      - 'SYS_PTRACE'
    security_opts:
      - 'apparmor:unconfined'
    env:
      PGID: '{{ docker_group.stdout|default(999) }}'
@@ -1,5 +0,0 @@
---
owner_name: ''
owner_group: '{{ owner_name }}'
owner_ssh_keys: []
owner_env: {}
@@ -1,60 +0,0 @@
---
- name: 'Check app requirements for user "{{ owner_name }}".'
  fail:
    msg: You must set owner name.
  when: not owner_name

- name: 'Create group "{{ owner_group }}".'
  group:
    name: '{{ owner_group }}'
    state: present

- name: 'Create user "{{ owner_name }}".'
  user:
    name: '{{ owner_name }}'
    group: '{{ owner_group }}'
    groups: '{{ owner_extra_groups }}'
    shell: /bin/bash
  register: uc_result

- name: 'Set up user ssh keys for user "{{ owner_name }}".'
  authorized_key:
    user: '{{ owner_name }}'
    key: '{{ item }}'
    state: present
  with_items: '{{ owner_ssh_keys }}'
  when: owner_ssh_keys | length > 0

- name: 'Prepare env variables.'
  set_fact:
    env_dict: '{{ owner_env | combine({
      "CURRENT_UID": uc_result.uid | default(owner_name),
      "CURRENT_GID": uc_result.group | default(owner_group) }) }}'
  tags:
    - env

- name: 'Set up environment variables for user "{{ owner_name }}".'
  template:
    src: env.j2
    dest: '/home/{{ owner_name }}/.env'
    owner: '{{ owner_name }}'
    group: '{{ owner_group }}'
  tags:
    - env

- name: 'Remove absent environment variables for user "{{ owner_name }}" from bashrc.'
  lineinfile:
    path: '/home/{{ owner_name }}/.bashrc'
    regexp: '^export {{ item.key }}='
    state: absent
  with_dict: '{{ env_dict }}'
  tags:
    - env

- name: 'Include environment variables for user "{{ owner_name }}" in bashrc.'
  lineinfile:
    path: '/home/{{ owner_name }}/.bashrc'
    regexp: '^export \$\(grep -v'
    line: 'export $(grep -v "^#" "$HOME"/.env | xargs)'
  tags:
    - env
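For clarity, the mechanism used by the last tasks above works roughly like this (the .env values below are illustrative, not taken from the repository):

```bash
# Hypothetical /home/<owner>/.env rendered from templates/env.j2:
#   PROJECT_NAME=gitea
#   CURRENT_UID=1001
#   CURRENT_GID=1001
# The line installed into .bashrc exports every non-comment entry of that file:
export $(grep -v "^#" "$HOME"/.env | xargs)
```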
@@ -1,64 +0,0 @@
# -------------------------------------------------------------------
# Global options
# -------------------------------------------------------------------
{
    grace_period 15s
}

# -------------------------------------------------------------------
# Proxy services
# -------------------------------------------------------------------

vakhrushev.me {
    tls anwinged@ya.ru

    reverse_proxy {
        to 127.0.0.1:{{ homepage_port }}
    }
}

# NetData proxy
status.vakhrushev.me, :29999 {
    tls anwinged@ya.ru

    reverse_proxy {
        to 127.0.0.1:{{ netdata_port }}
    }

    basicauth / {
        {{ netdata.login }} {{ netdata.password_hash }}
    }
}

wiki.vakhrushev.me {
    tls anwinged@ya.ru

    reverse_proxy {
        to 127.0.0.1:{{ wiki_port }}
    }
}

nomie.vakhrushev.me {
    tls anwinged@ya.ru

    reverse_proxy {
        to 127.0.0.1:{{ nomie_port }}
    }
}

nomie-db.vakhrushev.me {
    tls anwinged@ya.ru

    reverse_proxy {
        to 127.0.0.1:{{ nomie_db_port }}
    }
}

git.vakhrushev.me {
    tls anwinged@ya.ru

    reverse_proxy {
        to 127.0.0.1:{{ gitea_port }}
    }
}
}
@@ -1,26 +0,0 @@
$ANSIBLE_VAULT;1.1;AES256
(vault-encrypted data)
@@ -1,3 +0,0 @@
WEB_SERVER_PORT=9494
USER_UID=1000
USER_GID=1000
app/gitea/.gitignore (vendored, deleted)
@@ -1 +0,0 @@
data/
@@ -1,18 +0,0 @@
version: "3"

services:

  server:
    image: gitea/gitea:1.20.5
    environment:
      - "USER_UID=${USER_UID}"
      - "USER_GID=${USER_GID}"
      - "GITEA__server__SSH_PORT=2222"
    restart: unless-stopped
    volumes:
      - ./data:/data
      - /etc/timezone:/etc/timezone:ro
      - /etc/localtime:/etc/localtime:ro
    ports:
      - "${WEB_SERVER_PORT}:3000"
      - "2222:22"
@@ -1,21 +0,0 @@
from fabric import Connection
from invoke import task
import shlex

APP_NAME = "gitea"
SSH_HOST = f"{APP_NAME}@158.160.115.150"


@task
def deploy(c):
    print("Ready to setup remote host")
    with Connection(SSH_HOST) as c:
        c.put(
            local="docker-compose.yml",
            remote=f"/home/{APP_NAME}/docker-compose.yml",
        )
        c.run("cp .env .env.prod")
        c.run("mkdir -p data")
        c.run(
            f"docker-compose --project-name {shlex.quote(APP_NAME)} --env-file=.env.prod up --detach --remove-orphans"
        )
@@ -1,33 +0,0 @@
version: "3"

services:

  db:
    image: postgres:15.2-alpine
    environment:
      POSTGRES_DB: wiki
      POSTGRES_PASSWORD: wikijsrocks
      POSTGRES_USER: wikijs
    logging:
      driver: "none"
    restart: unless-stopped
    volumes:
      - db-data:/var/lib/postgresql/data

  wiki:
    image: ghcr.io/requarks/wiki:2.5.300
    depends_on:
      - db
    environment:
      DB_TYPE: postgres
      DB_HOST: db
      DB_PORT: 5432
      DB_USER: wikijs
      DB_PASS: wikijsrocks
      DB_NAME: wiki
    restart: unless-stopped
    ports:
      - "${WEB_SERVER_PORT}:3000"

volumes:
  db-data:
@@ -1,26 +0,0 @@
from fabric import Connection
from invoke import task
import subprocess
import shlex

APP_NAME = "wiki"
SSH_HOST = f"{APP_NAME}@158.160.115.150"
DOCKER_REGISTRY = "cr.yandex/crplfk0168i4o8kd7ade"


def run(args):
    return subprocess.run(args, check=True, capture_output=True).stdout


@task
def deploy(c):
    print("Ready to setup remote host")
    with Connection(SSH_HOST) as c:
        c.put(
            "docker-compose.yml",
            remote=f"/home/{APP_NAME}/docker-compose.yml",
        )
        c.run("cp .env .env.prod")
        c.run(
            f"docker-compose --project-name {shlex.quote(APP_NAME)} --env-file=.env.prod up --detach --remove-orphans"
        )
files/authelia/backup.template.sh (new file, 10 lines)
@@ -0,0 +1,10 @@
#!/usr/bin/env bash

set -eu
set -o pipefail

echo "{{ app_name }}: backup data with gobackups"

(cd "{{ base_dir }}" && gobackup perform --config "{{ gobackup_config }}")

echo "{{ app_name }}: done."
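A hypothetical rendering of the template above, with example values substituted for the Jinja variables (app_name=authelia; the directory and config paths are assumptions, not taken from the diff):

```bash
#!/usr/bin/env bash
set -eu
set -o pipefail

echo "authelia: backup data with gobackups"

# base_dir and gobackup_config come from the playbook vars; these paths are illustrative
(cd "/home/authelia" && gobackup perform --config "/home/authelia/gobackup.yml")

echo "authelia: done."
```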
files/authelia/configuration.template.yml (new file, 1706 lines)
File diff suppressed because it is too large.
files/authelia/docker-compose.template.yml (new file, 26 lines)
@@ -0,0 +1,26 @@
services:

  authelia_app:
    container_name: 'authelia_app'
    image: 'docker.io/authelia/authelia:4.39.14'
    user: '{{ user_create_result.uid }}:{{ user_create_result.group }}'
    restart: 'unless-stopped'
    networks:
      - "web_proxy_network"
      - "monitoring_network"
    volumes:
      - "{{ config_dir }}:/config"
      - "{{ data_dir }}:/data"

  authelia_redis:
    image: valkey/valkey:9.0-alpine
    container_name: authelia_redis
    restart: unless-stopped
    networks:
      - "monitoring_network"

networks:
  web_proxy_network:
    external: true
  monitoring_network:
    external: true
files/authelia/gobackup.template.yml (new file, 16 lines)
@@ -0,0 +1,16 @@
# https://gobackup.github.io/configuration

models:

  authelia:
    compress_with:
      type: 'tgz'
    storages:
      local:
        type: 'local'
        path: '{{ backups_dir }}'
        keep: 3
    databases:
      users:
        type: sqlite
        path: "{{ (data_dir, 'authelia_storage.sqlite3') | path_join }}"
files/authelia/secrets.yml (new file, 147 lines)
@@ -0,0 +1,147 @@
$ANSIBLE_VAULT;1.1;AES256
(vault-encrypted data)
37
files/authelia/users.secrets.yml
Normal file
@@ -0,0 +1,37 @@
|
|||||||
|
$ANSIBLE_VAULT;1.1;AES256
|
||||||
|
33323463653739626134366261626263396338333966376262313263613131343962326432613263
|
||||||
|
6430616564313432666436376432383539626231616438330a646161313364353566373833353337
|
||||||
|
64633361306564646564663736663937303435356332316432666135353863393439663235646462
|
||||||
|
3136303031383835390a396531366636386133656366653835633833633733326561383066656464
|
||||||
|
31613933333731643065316130303561383563626636346633396266346332653234373732326535
|
||||||
|
39663765353938333835646563663633393835633163323435303164663261303661666435306239
|
||||||
|
34353264633736383565306336633565376436646536623835613330393466363935303031346664
|
||||||
|
63626465656435383162633761333131393934666632336539386435613362353135383538643836
|
||||||
|
66373261306139353134393839333539366531393163393266386531613732366431663865343134
|
||||||
|
64363933616338663966353431396133316561653366396130653232636561343739336265386339
|
||||||
|
38646238653436663531633465616164303633356233363433623038666465326339656238653233
|
||||||
|
36323162303233633935646132353835336364303833636563346535316166346533636536656665
|
||||||
|
64323030616665316133363739393364306462316135636630613262646436643062373138656431
|
||||||
|
35663334616239623534383564643738616264373762663034376332323637626337306639653830
|
||||||
|
65386339666465343931303933663561643664313364386662656663643336636264636333666435
|
||||||
|
66366531613538363233346137383462326334306534333564636232393931393433386664363036
|
||||||
|
39623134636331646536323531653063326231613363366562643561353939633062663132303035
|
||||||
|
38303265326136303633666566613966636133666336396133333033643434303138303065666463
|
||||||
|
36643765316134636133333937396332613233383932663265386264623133633364646237346465
|
||||||
|
32623965653662336335366639643765393636623236323036396538353666646132393636663536
|
||||||
|
65646638643236313762373135336430643731643961386264303134366633353934366431333430
|
||||||
|
34313362633836613166336437323835626537653237666139383230663835626630623933383834
|
||||||
|
32636136663830643661363663303136393733646133626538333836666135653936323832336433
|
||||||
|
64396234396430326334656561393264366263313730306631383037643135613765373861356561
|
||||||
|
37363933383238316232336564363364376637626630373963666262376165343838303530653764
|
||||||
|
64343937666365646666363939383662313334656236326566373565643637313434616261616635
|
||||||
|
35646131396432623534396133666239613036386332663038353531313935636139363136666562
|
||||||
|
62616234663935383262626235313337623332333733383035666633393965336535316234323561
|
||||||
|
37353563623138343339616565653465633633383563636631356333303435376536393634343031
|
||||||
|
63653062303432366230643333353634383061313135616533643935316263393366653335353964
|
||||||
|
36363135356365373064613338393261326265396330323930613538326330663532616163666564
|
||||||
|
39313631633434353938626637626462376139383536306531633733646331303030333238373161
|
||||||
|
36336364383939663132366461383264346631366566363638333738386235623264623331343738
|
||||||
|
34316436393363323165396430343163653837623035626236313663643038336666633535666462
|
||||||
|
33323566353062653964643362363233346264396365336637376661323730336437333031363830
|
||||||
|
38303962646561346262
|
||||||
488
files/backups/backup-all.py
Normal file
@@ -0,0 +1,488 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
"""
|
||||||
|
Backup script for all applications
|
||||||
|
Automatically discovers and runs backup scripts for all users,
|
||||||
|
then creates restic backups and sends notifications.
|
||||||
|
"""
|
||||||
|
import itertools
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
import subprocess
|
||||||
|
import logging
|
||||||
|
import pwd
|
||||||
|
from abc import ABC
|
||||||
|
from dataclasses import dataclass
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Dict, List, Optional, Any
|
||||||
|
import requests
|
||||||
|
import tomllib
|
||||||
|
|
||||||
|
# Default config path
|
||||||
|
CONFIG_PATH = Path("/etc/backup/config.toml")
|
||||||
|
|
||||||
|
# File name to store directories and files to back up
|
||||||
|
BACKUP_TARGETS_FILE = "backup-targets"
|
||||||
|
|
||||||
|
# Default directory for backups (relative to the app dir)
|
||||||
|
# Used when the backup-targets file does not exist
|
||||||
|
BACKUP_DEFAULT_DIR = "backups"
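# Example backup-targets file (hypothetical contents; one path per line,
# '#' comments and blank lines are ignored, and relative paths resolve against
# the application directory -- see get_backup_directories below):
#   # paths to include in the restic backup
#   backups
#   /var/lib/example-app/exports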
|
||||||
|
|
||||||
|
# Configure logging
|
||||||
|
logging.basicConfig(
|
||||||
|
level=logging.INFO,
|
||||||
|
format="%(asctime)s - %(levelname)s - %(message)s",
|
||||||
|
handlers=[
|
||||||
|
logging.StreamHandler(sys.stdout),
|
||||||
|
logging.FileHandler("/var/log/backup-all.log"),
|
||||||
|
],
|
||||||
|
)
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class Config:
|
||||||
|
host_name: str
|
||||||
|
roots: List[Path]
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class Application:
|
||||||
|
path: Path
|
||||||
|
owner: str
|
||||||
|
|
||||||
|
|
||||||
|
class Storage(ABC):
|
||||||
|
def backup(self, backup_dirs: List[str]) -> bool:
|
||||||
|
"""Backup directories"""
|
||||||
|
raise NotImplementedError()
|
||||||
|
|
||||||
|
|
||||||
|
class ResticStorage(Storage):
|
||||||
|
TYPE_NAME = "restic"
|
||||||
|
|
||||||
|
def __init__(self, name: str, params: Dict[str, Any]):
|
||||||
|
self.name = name
|
||||||
|
self.restic_repository = str(params.get("restic_repository", ""))
|
||||||
|
self.restic_password = str(params.get("restic_password", ""))
|
||||||
|
self.aws_access_key_id = str(params.get("aws_access_key_id", ""))
|
||||||
|
self.aws_secret_access_key = str(params.get("aws_secret_access_key", ""))
|
||||||
|
self.aws_default_region = str(params.get("aws_default_region", ""))
|
||||||
|
|
||||||
|
if not all(
|
||||||
|
[
|
||||||
|
self.restic_repository,
|
||||||
|
self.restic_password,
|
||||||
|
self.aws_access_key_id,
|
||||||
|
self.aws_secret_access_key,
|
||||||
|
self.aws_default_region,
|
||||||
|
]
|
||||||
|
):
|
||||||
|
raise ValueError(
|
||||||
|
f"Missing storage configuration values for backend ResticStorage: '{self.name}'"
|
||||||
|
)
|
||||||
|
|
||||||
|
def backup(self, backup_dirs: List[str]) -> bool:
|
||||||
|
if not backup_dirs:
|
||||||
|
logger.warning("No backup directories found")
|
||||||
|
return True
|
||||||
|
try:
|
||||||
|
return self.__backup_internal(backup_dirs)
|
||||||
|
except Exception as exc: # noqa: BLE001
|
||||||
|
logger.error("Restic backup process failed: %s", exc)
|
||||||
|
return False
|
||||||
|
|
||||||
|
def __backup_internal(self, backup_dirs: List[str]) -> bool:
|
||||||
|
logger.info("Starting restic backup")
|
||||||
|
logger.info("Destination: %s", self.restic_repository)
|
||||||
|
|
||||||
|
env = os.environ.copy()
|
||||||
|
env.update(
|
||||||
|
{
|
||||||
|
"RESTIC_REPOSITORY": self.restic_repository,
|
||||||
|
"RESTIC_PASSWORD": self.restic_password,
|
||||||
|
"AWS_ACCESS_KEY_ID": self.aws_access_key_id,
|
||||||
|
"AWS_SECRET_ACCESS_KEY": self.aws_secret_access_key,
|
||||||
|
"AWS_DEFAULT_REGION": self.aws_default_region,
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
backup_cmd = ["restic", "backup", "--verbose"] + backup_dirs
|
||||||
|
result = subprocess.run(backup_cmd, env=env, capture_output=True, text=True)
|
||||||
|
|
||||||
|
if result.returncode != 0:
|
||||||
|
logger.error("Restic backup failed: %s", result.stderr)
|
||||||
|
return False
|
||||||
|
|
||||||
|
logger.info("Restic backup completed successfully")
|
||||||
|
|
||||||
|
check_cmd = ["restic", "check"]
|
||||||
|
result = subprocess.run(check_cmd, env=env, capture_output=True, text=True)
|
||||||
|
|
||||||
|
if result.returncode != 0:
|
||||||
|
logger.error("Restic check failed: %s", result.stderr)
|
||||||
|
return False
|
||||||
|
|
||||||
|
logger.info("Restic check completed successfully")
|
||||||
|
|
||||||
|
forget_cmd = [
|
||||||
|
"restic",
|
||||||
|
"forget",
|
||||||
|
"--compact",
|
||||||
|
"--prune",
|
||||||
|
"--keep-daily",
|
||||||
|
"90",
|
||||||
|
"--keep-monthly",
|
||||||
|
"36",
|
||||||
|
]
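# Retention policy: keep the last 90 daily and 36 monthly snapshots;
# --prune removes repository data no longer referenced by any kept snapshot.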
|
||||||
|
result = subprocess.run(forget_cmd, env=env, capture_output=True, text=True)
|
||||||
|
|
||||||
|
if result.returncode != 0:
|
||||||
|
logger.error("Restic forget/prune failed: %s", result.stderr)
|
||||||
|
return False
|
||||||
|
|
||||||
|
logger.info("Restic forget/prune completed successfully")
|
||||||
|
|
||||||
|
result = subprocess.run(check_cmd, env=env, capture_output=True, text=True)
|
||||||
|
|
||||||
|
if result.returncode != 0:
|
||||||
|
logger.error("Final restic check failed: %s", result.stderr)
|
||||||
|
return False
|
||||||
|
|
||||||
|
logger.info("Final restic check completed successfully")
|
||||||
|
return True
|
||||||
|
|
||||||
|
|
||||||
|
class Notifier(ABC):
|
||||||
|
def send(self, html_message: str):
|
||||||
|
raise NotImplementedError()
|
||||||
|
|
||||||
|
|
||||||
|
class TelegramNotifier(Notifier):
|
||||||
|
TYPE_NAME = "telegram"
|
||||||
|
|
||||||
|
def __init__(self, name: str, params: Dict[str, Any]):
|
||||||
|
self.name = name
|
||||||
|
self.telegram_bot_token = str(params.get("telegram_bot_token", ""))
|
||||||
|
self.telegram_chat_id = str(params.get("telegram_chat_id", ""))
|
||||||
|
if not all(
|
||||||
|
[
|
||||||
|
self.telegram_bot_token,
|
||||||
|
self.telegram_chat_id,
|
||||||
|
]
|
||||||
|
):
|
||||||
|
raise ValueError(
|
||||||
|
f"Missing notification configuration values for backend {name}"
|
||||||
|
)
|
||||||
|
|
||||||
|
def send(self, html_message: str):
|
||||||
|
url = f"https://api.telegram.org/bot{self.telegram_bot_token}/sendMessage"
|
||||||
|
data = {
|
||||||
|
"chat_id": self.telegram_chat_id,
|
||||||
|
"parse_mode": "HTML",
|
||||||
|
"text": html_message,
|
||||||
|
}
|
||||||
|
|
||||||
|
response = requests.post(url, data=data, timeout=30)
|
||||||
|
|
||||||
|
if response.status_code == 200:
|
||||||
|
logger.info("Telegram notification sent successfully")
|
||||||
|
else:
|
||||||
|
logger.error(
|
||||||
|
f"Failed to send Telegram notification: {response.status_code} - {response.text}"
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class BackupManager:
|
||||||
|
def __init__(
|
||||||
|
self,
|
||||||
|
config: Config,
|
||||||
|
roots: List[Path],
|
||||||
|
storages: List[Storage],
|
||||||
|
notifiers: List[Notifier],
|
||||||
|
):
|
||||||
|
self.errors: List[str] = []
|
||||||
|
self.warnings: List[str] = []
|
||||||
|
self.successful_backups: List[str] = []
|
||||||
|
self.config = config
|
||||||
|
self.roots: List[Path] = roots
|
||||||
|
self.storages = storages
|
||||||
|
self.notifiers = notifiers
|
||||||
|
|
||||||
|
def find_applications(self) -> List[Application]:
|
||||||
|
"""Get all application directories and their owners."""
|
||||||
|
applications: List[Application] = []
|
||||||
|
source_dirs = itertools.chain(*(root.iterdir() for root in self.roots))
|
||||||
|
|
||||||
|
for app_dir in source_dirs:
|
||||||
|
if "lost+found" in str(app_dir):
|
||||||
|
continue
|
||||||
|
if app_dir.is_dir():
|
||||||
|
try:
|
||||||
|
stat_info = app_dir.stat()
|
||||||
|
owner = pwd.getpwuid(stat_info.st_uid).pw_name
|
||||||
|
applications.append(Application(path=app_dir, owner=owner))
|
||||||
|
except (KeyError, OSError) as e:
|
||||||
|
logger.warning(f"Could not get owner for {app_dir}: {e}")
|
||||||
|
|
||||||
|
return applications
|
||||||
|
|
||||||
|
def find_backup_script(self, app_dir: str) -> Optional[str]:
|
||||||
|
"""Find backup script in user's home directory"""
|
||||||
|
possible_scripts = [
|
||||||
|
os.path.join(app_dir, "backup.sh"),
|
||||||
|
os.path.join(app_dir, "backup"),
|
||||||
|
]
|
||||||
|
|
||||||
|
for script_path in possible_scripts:
|
||||||
|
if os.path.exists(script_path):
|
||||||
|
# Check if file is executable
|
||||||
|
if os.access(script_path, os.X_OK):
|
||||||
|
return script_path
|
||||||
|
else:
|
||||||
|
logger.warning(
|
||||||
|
f"Backup script {script_path} exists but is not executable"
|
||||||
|
)
|
||||||
|
|
||||||
|
return None
|
||||||
|
|
||||||
|
def run_app_backup(self, script_path: str, app_dir: str, username: str) -> bool:
|
||||||
|
"""Run backup script as the specified user"""
|
||||||
|
try:
|
||||||
|
logger.info(f"Running backup script {script_path} (user {username})")
|
||||||
|
|
||||||
|
# Use su to run the script as the user
|
||||||
|
cmd = ["su", "--login", username, "--command", script_path]
|
||||||
|
|
||||||
|
result = subprocess.run(
|
||||||
|
cmd,
|
||||||
|
cwd=app_dir,
|
||||||
|
capture_output=True,
|
||||||
|
text=True,
|
||||||
|
timeout=3600, # 1 hour timeout
|
||||||
|
)
|
||||||
|
|
||||||
|
if result.returncode == 0:
|
||||||
|
logger.info(f"Backup script for {username} completed successfully")
|
||||||
|
self.successful_backups.append(username)
|
||||||
|
return True
|
||||||
|
else:
|
||||||
|
error_msg = f"Backup script {script_path} failed with return code {result.returncode}"
|
||||||
|
if result.stderr:
|
||||||
|
error_msg += f": {result.stderr}"
|
||||||
|
logger.error(error_msg)
|
||||||
|
self.errors.append(f"App {username}: {error_msg}")
|
||||||
|
return False
|
||||||
|
|
||||||
|
except subprocess.TimeoutExpired:
|
||||||
|
error_msg = f"Backup script {script_path} timed out"
|
||||||
|
logger.error(error_msg)
|
||||||
|
self.errors.append(f"App {username}: {error_msg}")
|
||||||
|
return False
|
||||||
|
except Exception as e:
|
||||||
|
error_msg = f"Failed to run backup script {script_path}: {str(e)}"
|
||||||
|
logger.error(error_msg)
|
||||||
|
self.errors.append(f"App {username}: {error_msg}")
|
||||||
|
return False
|
||||||
|
|
||||||
|
def get_backup_directories(self) -> List[str]:
|
||||||
|
"""Collect backup targets according to backup-targets rules"""
|
||||||
|
backup_dirs: List[str] = []
|
||||||
|
applications = self.find_applications()
|
||||||
|
|
||||||
|
def parse_targets_file(targets_file: Path) -> List[str]:
|
||||||
|
"""Parse backup-targets file, skipping comments and empty lines."""
|
||||||
|
targets: List[str] = []
|
||||||
|
try:
|
||||||
|
for raw_line in targets_file.read_text(encoding="utf-8").splitlines():
|
||||||
|
line = raw_line.strip()
|
||||||
|
if not line or line.startswith("#"):
|
||||||
|
continue
|
||||||
|
targets.append(line)
|
||||||
|
except OSError as e:
|
||||||
|
warning_msg = f"Could not read backup targets file {targets_file}: {e}"
|
||||||
|
logger.warning(warning_msg)
|
||||||
|
self.warnings.append(warning_msg)
|
||||||
|
return targets
|
||||||
|
|
||||||
|
for app in applications:
|
||||||
|
app_dir = app.path
|
||||||
|
targets_file = app_dir / BACKUP_TARGETS_FILE
|
||||||
|
resolved_targets: List[Path] = []
|
||||||
|
|
||||||
|
if targets_file.exists():
|
||||||
|
# Read custom targets defined by the application.
|
||||||
|
for target_line in parse_targets_file(targets_file):
|
||||||
|
target_path = Path(target_line)
|
||||||
|
if not target_path.is_absolute():
|
||||||
|
target_path = (app_dir / target_path).resolve()
|
||||||
|
else:
|
||||||
|
target_path = target_path.resolve()
|
||||||
|
if target_path.exists():
|
||||||
|
resolved_targets.append(target_path)
|
||||||
|
else:
|
||||||
|
warning_msg = (
|
||||||
|
f"Backup target does not exist for {app_dir}: {target_path}"
|
||||||
|
)
|
||||||
|
logger.warning(warning_msg)
|
||||||
|
self.warnings.append(warning_msg)
|
||||||
|
else:
|
||||||
|
# Fallback to default backups directory when no list is provided.
|
||||||
|
default_target = (app_dir / BACKUP_DEFAULT_DIR).resolve()
|
||||||
|
if default_target.exists():
|
||||||
|
resolved_targets.append(default_target)
|
||||||
|
else:
|
||||||
|
warning_msg = f"Default backup path does not exist for {app_dir}: {default_target}"
|
||||||
|
logger.warning(warning_msg)
|
||||||
|
self.warnings.append(warning_msg)
|
||||||
|
|
||||||
|
for target in resolved_targets:
|
||||||
|
target_str = str(target)
|
||||||
|
if target_str not in backup_dirs:
|
||||||
|
backup_dirs.append(target_str)
|
||||||
|
|
||||||
|
return backup_dirs
|
||||||
|
|
||||||
|
def send_notification(self, success: bool) -> None:
|
||||||
|
"""Send notification to Notifiers"""
|
||||||
|
|
||||||
|
if success and not self.errors:
|
||||||
|
message = f"<b>{self.config.host_name}</b>: бекап успешно завершен!"
|
||||||
|
if self.successful_backups:
|
||||||
|
message += f"\n\nУспешные бекапы: {', '.join(self.successful_backups)}"
|
||||||
|
else:
|
||||||
|
message = f"<b>{self.config.host_name}</b>: бекап завершен с ошибками!"
|
||||||
|
|
||||||
|
if self.successful_backups:
|
||||||
|
message += (
|
||||||
|
f"\n\n✅ Успешные бекапы: {', '.join(self.successful_backups)}"
|
||||||
|
)
|
||||||
|
|
||||||
|
if self.warnings:
|
||||||
|
message += f"\n\n⚠️ Предупреждения:\n" + "\n".join(self.warnings)
|
||||||
|
|
||||||
|
if self.errors:
|
||||||
|
message += f"\n\n❌ Ошибки:\n" + "\n".join(self.errors)
|
||||||
|
|
||||||
|
for notificator in self.notifiers:
|
||||||
|
try:
|
||||||
|
notificator.send(message)
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Failed to send notification: {str(e)}")
|
||||||
|
|
||||||
|
def run_backup_process(self) -> bool:
|
||||||
|
"""Main backup process"""
|
||||||
|
logger.info("Starting backup process")
|
||||||
|
|
||||||
|
# Get all home directories
|
||||||
|
applications = self.find_applications()
|
||||||
|
logger.info(f"Found {len(applications)} application directories")
|
||||||
|
|
||||||
|
# Process each user's backup
|
||||||
|
for app in applications:
|
||||||
|
app_dir = str(app.path)
|
||||||
|
username = app.owner
|
||||||
|
logger.info(f"Processing backup for app: {app_dir} (user {username})")
|
||||||
|
|
||||||
|
# Find backup script
|
||||||
|
backup_script = self.find_backup_script(app_dir)
|
||||||
|
|
||||||
|
if backup_script is None:
|
||||||
|
warning_msg = (
|
||||||
|
f"No backup script found for app: {app_dir} (user {username})"
|
||||||
|
)
|
||||||
|
logger.warning(warning_msg)
|
||||||
|
self.warnings.append(warning_msg)
|
||||||
|
continue
|
||||||
|
|
||||||
|
self.run_app_backup(backup_script, app_dir, username)
|
||||||
|
|
||||||
|
# Get backup directories
|
||||||
|
backup_dirs = self.get_backup_directories()
|
||||||
|
logger.info(f"Found backup directories: {backup_dirs}")
|
||||||
|
|
||||||
|
overall_success = True
|
||||||
|
|
||||||
|
for storage in self.storages:
|
||||||
|
backup_result = storage.backup(backup_dirs)
|
||||||
|
if not backup_result:
|
||||||
|
self.errors.append("Restic backup failed")
|
||||||
|
|
||||||
|
# Determine overall success
|
||||||
|
overall_success = overall_success and backup_result
|
||||||
|
|
||||||
|
# Send notification
|
||||||
|
self.send_notification(overall_success)
|
||||||
|
|
||||||
|
logger.info("Backup process completed")
|
||||||
|
|
||||||
|
if self.errors:
|
||||||
|
logger.error(f"Backup completed with {len(self.errors)} errors")
|
||||||
|
return False
|
||||||
|
elif self.warnings:
|
||||||
|
logger.warning(f"Backup completed with {len(self.warnings)} warnings")
|
||||||
|
return True
|
||||||
|
else:
|
||||||
|
logger.info("Backup completed successfully")
|
||||||
|
return True
|
||||||
|
|
||||||
|
|
||||||
|
def initialize(config_path: Path) -> BackupManager:
|
||||||
|
try:
|
||||||
|
with config_path.open("rb") as config_file:
|
||||||
|
raw_config = tomllib.load(config_file)
|
||||||
|
except OSError as e:
|
||||||
|
logger.error(f"Failed to read config file {config_path}: {e}")
|
||||||
|
raise
|
||||||
|
|
||||||
|
host_name = str(raw_config.get("host_name", "unknown"))
|
||||||
|
|
||||||
|
roots_raw = raw_config.get("roots") or []
|
||||||
|
if not isinstance(roots_raw, list) or not roots_raw:
|
||||||
|
raise ValueError("roots must be a non-empty list of paths in config.toml")
|
||||||
|
roots = [Path(root) for root in roots_raw]
|
||||||
|
|
||||||
|
storage_raw = raw_config.get("storage") or {}
|
||||||
|
storages: List[Storage] = []
|
||||||
|
for name, params in storage_raw.items():
|
||||||
|
if not isinstance(params, dict):
|
||||||
|
raise ValueError(f"Storage config for {name} must be a table")
|
||||||
|
storage_type = params.get("type", "")
|
||||||
|
if storage_type == ResticStorage.TYPE_NAME:
|
||||||
|
storages.append(ResticStorage(name, params))
|
||||||
|
if not storages:
|
||||||
|
raise ValueError("At least one storage backend must be configured")
|
||||||
|
|
||||||
|
notifications_raw = raw_config.get("notifier") or {}
|
||||||
|
notifiers: List[Notifier] = []
|
||||||
|
for name, params in notifications_raw.items():
|
||||||
|
if not isinstance(params, dict):
|
||||||
|
raise ValueError(f"Notificator config for {name} must be a table")
|
||||||
|
notifier_type = params.get("type", "")
|
||||||
|
if notifier_type == TelegramNotifier.TYPE_NAME:
|
||||||
|
notifiers.append(TelegramNotifier(name, params))
|
||||||
|
if not notifiers:
|
||||||
|
raise ValueError("At least one notification backend must be configured")
|
||||||
|
|
||||||
|
config = Config(host_name=host_name, roots=roots)
|
||||||
|
|
||||||
|
return BackupManager(
|
||||||
|
config=config, roots=roots, storages=storages, notifiers=notifiers
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def main():
|
||||||
|
try:
|
||||||
|
backup_manager = initialize(CONFIG_PATH)
|
||||||
|
success = backup_manager.run_backup_process()
|
||||||
|
if not success:
|
||||||
|
sys.exit(1)
|
||||||
|
except KeyboardInterrupt:
|
||||||
|
logger.info("Backup process interrupted by user")
|
||||||
|
sys.exit(130)
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Unexpected error in backup process: {str(e)}")
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
main()
|
||||||
18
files/backups/config.template.toml
Normal file
@@ -0,0 +1,18 @@
|
|||||||
|
host_name = "{{ notifications_name }}"
|
||||||
|
|
||||||
|
roots = [
|
||||||
|
"{{ application_dir }}"
|
||||||
|
]
|
||||||
|
|
||||||
|
[storage.yandex_cloud_s3]
|
||||||
|
type = "restic"
|
||||||
|
restic_repository = "{{ restic_repository }}"
|
||||||
|
restic_password = "{{ restic_password }}"
|
||||||
|
aws_access_key_id = "{{ restic_s3_access_key }}"
|
||||||
|
aws_secret_access_key = "{{ restic_s3_access_secret }}"
|
||||||
|
aws_default_region = "{{ restic_s3_region }}"
|
||||||
|
|
||||||
|
[notifier.server_notifications_channel]
|
||||||
|
type = "telegram"
|
||||||
|
telegram_bot_token = "{{ notifications_tg_bot_token }}"
|
||||||
|
telegram_chat_id = "{{ notifications_tg_chat_id }}"
|
||||||
12
files/backups/restic-shell.sh.j2
Normal file
@@ -0,0 +1,12 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
|
||||||
|
set -eu
|
||||||
|
set -o pipefail
|
||||||
|
|
||||||
|
export RESTIC_REPOSITORY={{ restic_repository }}
|
||||||
|
export RESTIC_PASSWORD={{ restic_password }}
|
||||||
|
export AWS_ACCESS_KEY_ID={{ restic_s3_access_key }}
|
||||||
|
export AWS_SECRET_ACCESS_KEY={{ restic_s3_access_secret }}
|
||||||
|
export AWS_DEFAULT_REGION={{ restic_s3_region }}
|
||||||
|
|
||||||
|
restic "$@"
|
||||||
136
files/caddyproxy/Caddyfile.j2
Normal file
@@ -0,0 +1,136 @@
|
|||||||
|
# -------------------------------------------------------------------
|
||||||
|
# Global options
|
||||||
|
# -------------------------------------------------------------------
|
||||||
|
{
|
||||||
|
grace_period 15s
|
||||||
|
|
||||||
|
admin :2019
|
||||||
|
|
||||||
|
# Enable metrics in Prometheus format
|
||||||
|
# https://caddyserver.com/docs/metrics
|
||||||
|
metrics
|
||||||
|
}
|
||||||
|
|
||||||
|
# -------------------------------------------------------------------
|
||||||
|
# Applications
|
||||||
|
# -------------------------------------------------------------------
|
||||||
|
|
||||||
|
vakhrushev.me {
|
||||||
|
tls anwinged@ya.ru
|
||||||
|
|
||||||
|
reverse_proxy {
|
||||||
|
to homepage_app:80
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
auth.vakhrushev.me {
|
||||||
|
tls anwinged@ya.ru
|
||||||
|
|
||||||
|
reverse_proxy authelia_app:9091
|
||||||
|
}
|
||||||
|
|
||||||
|
status.vakhrushev.me, :29999 {
|
||||||
|
tls anwinged@ya.ru
|
||||||
|
|
||||||
|
forward_auth authelia_app:9091 {
|
||||||
|
uri /api/authz/forward-auth
|
||||||
|
copy_headers Remote-User Remote-Groups Remote-Email Remote-Name
|
||||||
|
}
|
||||||
|
|
||||||
|
reverse_proxy netdata:19999
|
||||||
|
}
|
||||||
|
|
||||||
|
git.vakhrushev.me {
|
||||||
|
tls anwinged@ya.ru
|
||||||
|
|
||||||
|
reverse_proxy {
|
||||||
|
to gitea_app:3000
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
outline.vakhrushev.me {
|
||||||
|
tls anwinged@ya.ru
|
||||||
|
|
||||||
|
reverse_proxy {
|
||||||
|
to outline_app:3000
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
gramps.vakhrushev.me {
|
||||||
|
tls anwinged@ya.ru
|
||||||
|
|
||||||
|
reverse_proxy {
|
||||||
|
to gramps_app:5000
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
miniflux.vakhrushev.me {
|
||||||
|
tls anwinged@ya.ru
|
||||||
|
|
||||||
|
reverse_proxy {
|
||||||
|
to miniflux_app:8080
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
wakapi.vakhrushev.me {
|
||||||
|
tls anwinged@ya.ru
|
||||||
|
|
||||||
|
reverse_proxy {
|
||||||
|
to wakapi_app:3000
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
wanderer.vakhrushev.me {
|
||||||
|
tls anwinged@ya.ru
|
||||||
|
|
||||||
|
reverse_proxy {
|
||||||
|
to wanderer_web:3000
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
memos.vakhrushev.me {
|
||||||
|
tls anwinged@ya.ru
|
||||||
|
|
||||||
|
reverse_proxy {
|
||||||
|
to memos_app:5230
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
wanderbase.vakhrushev.me {
|
||||||
|
tls anwinged@ya.ru
|
||||||
|
|
||||||
|
forward_auth authelia_app:9091 {
|
||||||
|
uri /api/authz/forward-auth
|
||||||
|
copy_headers Remote-User Remote-Groups Remote-Email Remote-Name
|
||||||
|
}
|
||||||
|
|
||||||
|
reverse_proxy {
|
||||||
|
to wanderer_db:8090
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
rssbridge.vakhrushev.me {
|
||||||
|
tls anwinged@ya.ru
|
||||||
|
|
||||||
|
forward_auth authelia_app:9091 {
|
||||||
|
uri /api/authz/forward-auth
|
||||||
|
copy_headers Remote-User Remote-Groups Remote-Email Remote-Name
|
||||||
|
}
|
||||||
|
|
||||||
|
reverse_proxy {
|
||||||
|
to rssbridge_app:80
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
dozzle.vakhrushev.me {
|
||||||
|
tls anwinged@ya.ru
|
||||||
|
|
||||||
|
forward_auth authelia_app:9091 {
|
||||||
|
uri /api/authz/forward-auth
|
||||||
|
copy_headers Remote-User Remote-Groups Remote-Email Remote-Name Remote-Filter
|
||||||
|
}
|
||||||
|
|
||||||
|
reverse_proxy dozzle_app:8080
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
22
files/caddyproxy/docker-compose.yml.j2
Normal file
@@ -0,0 +1,22 @@
|
|||||||
|
services:
|
||||||
|
|
||||||
|
{{ service_name }}:
|
||||||
|
image: caddy:2.10.2
|
||||||
|
restart: unless-stopped
|
||||||
|
container_name: {{ service_name }}
|
||||||
|
ports:
|
||||||
|
- "80:80"
|
||||||
|
- "443:443"
|
||||||
|
- "443:443/udp"
|
||||||
|
cap_add:
|
||||||
|
- NET_ADMIN
|
||||||
|
volumes:
|
||||||
|
- {{ caddy_file_dir }}:/etc/caddy
|
||||||
|
- {{ data_dir }}:/data
|
||||||
|
- {{ config_dir }}:/config
|
||||||
|
networks:
|
||||||
|
- "web_proxy_network"
|
||||||
|
|
||||||
|
networks:
|
||||||
|
web_proxy_network:
|
||||||
|
external: true
|
||||||
23
files/dozzle/docker-compose.template.yml
Normal file
@@ -0,0 +1,23 @@
|
|||||||
|
services:
|
||||||
|
|
||||||
|
dozzle_app:
|
||||||
|
image: amir20/dozzle:v8.14.11
|
||||||
|
container_name: dozzle_app
|
||||||
|
restart: unless-stopped
|
||||||
|
volumes:
|
||||||
|
- "/var/run/docker.sock:/var/run/docker.sock"
|
||||||
|
networks:
|
||||||
|
- "web_proxy_network"
|
||||||
|
environment:
|
||||||
|
DOZZLE_HOSTNAME: vakhrushev.me
|
||||||
|
DOZZLE_AUTH_PROVIDER: forward-proxy
|
||||||
|
healthcheck:
|
||||||
|
test: ["CMD", "/dozzle", "healthcheck"]
|
||||||
|
interval: 3s
|
||||||
|
timeout: 30s
|
||||||
|
retries: 5
|
||||||
|
start_period: 30s
|
||||||
|
|
||||||
|
networks:
|
||||||
|
web_proxy_network:
|
||||||
|
external: true
|
||||||
21
files/gitea/backup.sh.j2
Normal file
@@ -0,0 +1,21 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
|
||||||
|
set -eu
|
||||||
|
set -o pipefail
|
||||||
|
|
||||||
|
echo "Gitea: backup data with gitea dump"
|
||||||
|
|
||||||
|
(cd "{{ base_dir }}" && \
|
||||||
|
docker compose exec \
|
||||||
|
-u "{{ user_create_result.uid }}:{{ user_create_result.group }}" \
|
||||||
|
-w /backups gitea_app \
|
||||||
|
gitea dump -c /data/gitea/conf/app.ini \
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
echo "Gitea: remove old backups"
|
||||||
|
|
||||||
|
keep-files.py "{{ backups_dir }}" --keep 3
|
||||||
|
|
||||||
|
|
||||||
|
echo "Gitea: done."
|
||||||
32
files/gitea/docker-compose.yml.j2
Normal file
@@ -0,0 +1,32 @@
|
|||||||
|
services:
|
||||||
|
|
||||||
|
gitea_app:
|
||||||
|
image: gitea/gitea:1.25.3
|
||||||
|
restart: unless-stopped
|
||||||
|
container_name: gitea_app
|
||||||
|
ports:
|
||||||
|
- "2222:22"
|
||||||
|
volumes:
|
||||||
|
- {{ data_dir }}:/data
|
||||||
|
- {{ backups_dir }}:/backups
|
||||||
|
- /etc/timezone:/etc/timezone:ro
|
||||||
|
- /etc/localtime:/etc/localtime:ro
|
||||||
|
networks:
|
||||||
|
- "web_proxy_network"
|
||||||
|
environment:
|
||||||
|
- "USER_UID={{ user_create_result.uid }}"
|
||||||
|
- "USER_GID={{ user_create_result.group }}"
|
||||||
|
- "GITEA__server__SSH_PORT=2222"
|
||||||
|
|
||||||
|
# Mailer
|
||||||
|
- "GITEA__mailer__ENABLED=true"
|
||||||
|
- "GITEA__mailer__PROTOCOL=smtp+starttls"
|
||||||
|
- "GITEA__mailer__SMTP_ADDR={{ postbox_host }}"
|
||||||
|
- "GITEA__mailer__SMTP_PORT={{ postbox_port }}"
|
||||||
|
- "GITEA__mailer__USER={{ postbox_user }}"
|
||||||
|
- "GITEA__mailer__PASSWD={{ postbox_pass }}"
|
||||||
|
- "GITEA__mailer__FROM=gitea@vakhrushev.me"
|
||||||
|
|
||||||
|
networks:
|
||||||
|
web_proxy_network:
|
||||||
|
external: true
|
||||||
10
files/gramps/backup.template.sh
Normal file
@@ -0,0 +1,10 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
|
||||||
|
set -eu
|
||||||
|
set -o pipefail
|
||||||
|
|
||||||
|
echo "Gramps: backup data with gobackups"
|
||||||
|
|
||||||
|
(cd "{{ base_dir }}" && gobackup perform --config "{{ gobackup_config }}")
|
||||||
|
|
||||||
|
echo "Gramps: done."
|
||||||
68
files/gramps/docker-compose.template.yml
Normal file
@@ -0,0 +1,68 @@
|
|||||||
|
# See versions: https://github.com/gramps-project/gramps-web/pkgs/container/grampsweb
|
||||||
|
|
||||||
|
services:
|
||||||
|
|
||||||
|
gramps_app: &gramps_app
|
||||||
|
image: ghcr.io/gramps-project/grampsweb:25.11.2
|
||||||
|
container_name: gramps_app
|
||||||
|
depends_on:
|
||||||
|
- gramps_redis
|
||||||
|
restart: unless-stopped
|
||||||
|
networks:
|
||||||
|
- "gramps_network"
|
||||||
|
- "web_proxy_network"
|
||||||
|
volumes:
|
||||||
|
- "{{ (data_dir, 'gramps_db') | path_join }}:/root/.gramps/grampsdb" # persist Gramps database
|
||||||
|
- "{{ (data_dir, 'gramps_users') | path_join }}:/app/users" # persist user database
|
||||||
|
- "{{ (data_dir, 'gramps_index') | path_join }}:/app/indexdir" # persist search index
|
||||||
|
- "{{ (data_dir, 'gramps_secret') | path_join }}:/app/secret" # persist flask secret
|
||||||
|
- "{{ (cache_dir, 'gramps_thumb_cache') | path_join }}:/app/thumbnail_cache" # persist thumbnails
|
||||||
|
- "{{ (cache_dir, 'gramps_cache') | path_join }}:/app/cache" # persist export and report caches
|
||||||
|
- "{{ media_dir }}:/app/media" # persist media files
|
||||||
|
environment:
|
||||||
|
GRAMPSWEB_TREE: "Gramps" # will create a new tree if not exists
|
||||||
|
GRAMPSWEB_SECRET_KEY: "{{ gramps_secret_key }}"
|
||||||
|
GRAMPSWEB_BASE_URL: "https://gramps.vakhrushev.me"
|
||||||
|
GRAMPSWEB_REGISTRATION_DISABLED: "true"
|
||||||
|
GRAMPSWEB_CELERY_CONFIG__broker_url: "redis://gramps_redis:6379/0"
|
||||||
|
GRAMPSWEB_CELERY_CONFIG__result_backend: "redis://gramps_redis:6379/0"
|
||||||
|
GRAMPSWEB_RATELIMIT_STORAGE_URI: "redis://gramps_redis:6379/1"
|
||||||
|
GUNICORN_NUM_WORKERS: 2
|
||||||
|
|
||||||
|
# Email options
|
||||||
|
GRAMPSWEB_EMAIL_HOST: "{{ postbox_host }}"
|
||||||
|
GRAMPSWEB_EMAIL_PORT: "{{ postbox_port }}"
|
||||||
|
GRAMPSWEB_EMAIL_HOST_USER: "{{ postbox_user }}"
|
||||||
|
GRAMPSWEB_EMAIL_HOST_PASSWORD: "{{ postbox_pass }}"
|
||||||
|
GRAMPSWEB_EMAIL_USE_TLS: "false"
|
||||||
|
GRAMPSWEB_DEFAULT_FROM_EMAIL: "gramps@vakhrushev.me"
|
||||||
|
|
||||||
|
# media storage
|
||||||
|
GRAMPSWEB_MEDIA_BASE_DIR: "/app/media"
|
||||||
|
|
||||||
|
gramps_celery:
|
||||||
|
<<: *gramps_app # YAML merge key copying the entire grampsweb service config
|
||||||
|
container_name: gramps_celery
|
||||||
|
depends_on:
|
||||||
|
- gramps_redis
|
||||||
|
restart: unless-stopped
|
||||||
|
ports: []
|
||||||
|
networks:
|
||||||
|
- "gramps_network"
|
||||||
|
command: celery -A gramps_webapi.celery worker --loglevel=INFO --concurrency=1
|
||||||
|
|
||||||
|
gramps_redis:
|
||||||
|
image: valkey/valkey:9.0-alpine
|
||||||
|
container_name: gramps_redis
|
||||||
|
restart: unless-stopped
|
||||||
|
networks:
|
||||||
|
- "gramps_network"
|
||||||
|
- "monitoring_network"
|
||||||
|
|
||||||
|
networks:
|
||||||
|
gramps_network:
|
||||||
|
driver: bridge
|
||||||
|
web_proxy_network:
|
||||||
|
external: true
|
||||||
|
monitoring_network:
|
||||||
|
external: true
|
||||||
25
files/gramps/gobackup.template.yml
Normal file
@@ -0,0 +1,25 @@
|
|||||||
|
# https://gobackup.github.io/configuration
|
||||||
|
|
||||||
|
models:
|
||||||
|
|
||||||
|
gramps:
|
||||||
|
compress_with:
|
||||||
|
type: 'tgz'
|
||||||
|
storages:
|
||||||
|
local:
|
||||||
|
type: 'local'
|
||||||
|
path: '{{ backups_dir }}'
|
||||||
|
keep: 3
|
||||||
|
databases:
|
||||||
|
users:
|
||||||
|
type: sqlite
|
||||||
|
path: "{{ (data_dir, 'gramps_users/users.sqlite') | path_join }}"
|
||||||
|
search_index:
|
||||||
|
type: sqlite
|
||||||
|
path: "{{ (data_dir, 'gramps_index/search_index.db') | path_join }}"
|
||||||
|
sqlite:
|
||||||
|
type: sqlite
|
||||||
|
path: "{{ (data_dir, 'gramps_db/59a0f3d6-1c3d-4410-8c1d-1c9c6689659f/sqlite.db') | path_join }}"
|
||||||
|
undo:
|
||||||
|
type: sqlite
|
||||||
|
path: "{{ (data_dir, 'gramps_db/59a0f3d6-1c3d-4410-8c1d-1c9c6689659f/undo.db') | path_join }}"
|
||||||
65
files/gramps/gramps_rename.py
Executable file
@@ -0,0 +1,65 @@
|
|||||||
|
#!/usr/bin/env python3.12
|
||||||
|
|
||||||
|
import argparse
|
||||||
|
import sys
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
|
||||||
|
def parse_args() -> argparse.Namespace:
|
||||||
|
parser = argparse.ArgumentParser(
|
||||||
|
description="Rename Gramps document files by appending extensions from a list."
|
||||||
|
)
|
||||||
|
parser.add_argument("directory", type=Path, help="Directory containing hashed files")
|
||||||
|
parser.add_argument("names_file", type=Path, help="Text file with target names")
|
||||||
|
return parser.parse_args()
|
||||||
|
|
||||||
|
|
||||||
|
def read_names(path: Path) -> list[str]:
|
||||||
|
if not path.is_file():
|
||||||
|
raise FileNotFoundError(f"Names file not found: {path}")
|
||||||
|
|
||||||
|
names = []
|
||||||
|
for line in path.read_text(encoding="utf-8").splitlines():
|
||||||
|
name = line.strip()
|
||||||
|
if name:
|
||||||
|
names.append(name)
|
||||||
|
return names
|
||||||
|
|
||||||
|
|
||||||
|
def rename_files(directory: Path, names: list[str]) -> None:
|
||||||
|
if not directory.is_dir():
|
||||||
|
raise NotADirectoryError(f"Directory not found: {directory}")
|
||||||
|
|
||||||
|
for name in names:
|
||||||
|
hash_part, dot, _ = name.partition(".")
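# Hypothetical example: the entry "0f3a9b.jpg" renames the file "0f3a9b"
# in the directory to "0f3a9b.jpg"; entries without a dot are skipped below.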
|
||||||
|
if not dot:
|
||||||
|
print(f"Skipping invalid entry (missing extension): {name}", file=sys.stderr)
|
||||||
|
continue
|
||||||
|
|
||||||
|
source = directory / hash_part
|
||||||
|
target = directory / name
|
||||||
|
|
||||||
|
if target.exists():
|
||||||
|
print(f"Target already exists, skipping: {target}", file=sys.stderr)
|
||||||
|
continue
|
||||||
|
|
||||||
|
if not source.exists():
|
||||||
|
print(f"Source not found, skipping: {source}", file=sys.stderr)
|
||||||
|
continue
|
||||||
|
|
||||||
|
source.rename(target)
|
||||||
|
print(f"Renamed {source.name} -> {target.name}")
|
||||||
|
|
||||||
|
|
||||||
|
def main() -> None:
|
||||||
|
args = parse_args()
|
||||||
|
try:
|
||||||
|
names = read_names(args.names_file)
|
||||||
|
rename_files(args.directory, names)
|
||||||
|
except Exception as exc: # noqa: BLE001
|
||||||
|
print(str(exc), file=sys.stderr)
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
main()
|
||||||
13
files/homepage/docker-compose.template.yml
Normal file
@@ -0,0 +1,13 @@
|
|||||||
|
services:
|
||||||
|
|
||||||
|
homepage_app:
|
||||||
|
# noinspection ComposeUnknownValues
|
||||||
|
image: "{{ registry_homepage_nginx_image }}"
|
||||||
|
container_name: homepage_app
|
||||||
|
restart: unless-stopped
|
||||||
|
networks:
|
||||||
|
- "web_proxy_network"
|
||||||
|
|
||||||
|
networks:
|
||||||
|
web_proxy_network:
|
||||||
|
external: true
|
||||||
48
files/keep-files.py
Normal file
@@ -0,0 +1,48 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
|
||||||
|
import os
|
||||||
|
import argparse
|
||||||
|
|
||||||
|
|
||||||
|
def main():
|
||||||
|
parser = argparse.ArgumentParser(
|
||||||
|
description="Retain specified number of files in a directory sorted by name, delete others."
|
||||||
|
)
|
||||||
|
parser.add_argument("directory", type=str, help="Path to target directory")
|
||||||
|
parser.add_argument(
|
||||||
|
"--keep", type=int, default=2, help="Number of files to retain (default: 2)"
|
||||||
|
)
|
||||||
|
args = parser.parse_args()
|
||||||
|
|
||||||
|
# Validate arguments
|
||||||
|
if args.keep < 0:
|
||||||
|
parser.error("--keep value cannot be negative")
|
||||||
|
|
||||||
|
if not os.path.isdir(args.directory):
|
||||||
|
parser.error(f"Directory not found: {args.directory}")
|
||||||
|
|
||||||
|
# Get list of files (exclude subdirectories)
|
||||||
|
files = []
|
||||||
|
with os.scandir(args.directory) as entries:
|
||||||
|
for entry in entries:
|
||||||
|
if entry.is_file():
|
||||||
|
files.append(entry.name)
|
||||||
|
|
||||||
|
# Sort files alphabetically
|
||||||
|
sorted_files = sorted(files)
|
||||||
|
|
||||||
|
# Identify files to delete
|
||||||
|
to_delete = sorted_files[:-args.keep] if args.keep > 0 else sorted_files.copy()
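# Names sort lexicographically, so with timestamped file names (such as the
# dumps produced by the miniflux backup script) the newest --keep files survive.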
|
||||||
|
|
||||||
|
# Delete files and print results
|
||||||
|
for filename in to_delete:
|
||||||
|
filepath = os.path.join(args.directory, filename)
|
||||||
|
try:
|
||||||
|
os.remove(filepath)
|
||||||
|
print(f"Deleted: {filename}")
|
||||||
|
except Exception as e:
|
||||||
|
print(f"Error deleting {filename}: {str(e)}")
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
main()
|
||||||
10
files/memos/backup.sh.j2
Normal file
@@ -0,0 +1,10 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
|
||||||
|
set -eu
|
||||||
|
set -o pipefail
|
||||||
|
|
||||||
|
echo "{{ app_name }}: backup data with gobackups"
|
||||||
|
|
||||||
|
(cd "{{ base_dir }}" && gobackup perform --config "{{ gobackup_config }}")
|
||||||
|
|
||||||
|
echo "{{ app_name }}: done."
|
||||||
23
files/memos/docker-compose.template.yml
Normal file
@@ -0,0 +1,23 @@
|
|||||||
|
# See versions: https://hub.docker.com/r/neosmemo/memos/tags
|
||||||
|
|
||||||
|
services:
|
||||||
|
|
||||||
|
memos_app:
|
||||||
|
image: neosmemo/memos:0.25.3
|
||||||
|
container_name: memos_app
|
||||||
|
restart: unless-stopped
|
||||||
|
user: "{{ user_create_result.uid }}:{{ user_create_result.group }}"
|
||||||
|
networks:
|
||||||
|
- "web_proxy_network"
|
||||||
|
volumes:
|
||||||
|
- "{{ data_dir }}:/var/opt/memos"
|
||||||
|
environment:
|
||||||
|
- MEMOS_MODE=prod
|
||||||
|
- MEMOS_PORT=5230
|
||||||
|
- MEMOS_STORAGE_TYPE=local
|
||||||
|
- MEMOS_STORAGE_PATH=assets/{uuid}
|
||||||
|
- MEMOS_MAX_FILE_SIZE=52428800
|
||||||
|
|
||||||
|
networks:
|
||||||
|
web_proxy_network:
|
||||||
|
external: true
|
||||||
16
files/memos/gobackup.yml.j2
Normal file
@@ -0,0 +1,16 @@
|
|||||||
|
# https://gobackup.github.io/configuration
|
||||||
|
|
||||||
|
models:
|
||||||
|
|
||||||
|
memos:
|
||||||
|
compress_with:
|
||||||
|
type: 'tgz'
|
||||||
|
storages:
|
||||||
|
local:
|
||||||
|
type: 'local'
|
||||||
|
path: '{{ backups_dir }}'
|
||||||
|
keep: 3
|
||||||
|
databases:
|
||||||
|
users:
|
||||||
|
type: sqlite
|
||||||
|
path: "{{ (data_dir, 'memos_prod.db') | path_join }}"
|
||||||
25
files/miniflux/backup.template.sh
Normal file
@@ -0,0 +1,25 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
|
||||||
|
set -eu
|
||||||
|
set -o pipefail
|
||||||
|
|
||||||
|
TIMESTAMP=$(date +%Y%m%d_%H%M%S)
|
||||||
|
BACKUP_FILE="miniflux_postgres_${TIMESTAMP}.sql.gz"
|
||||||
|
|
||||||
|
echo "miniflux: backing up postgresql database"
|
||||||
|
|
||||||
|
docker compose --file "{{ base_dir }}/docker-compose.yml" exec \
|
||||||
|
miniflux_postgres \
|
||||||
|
pg_dump \
|
||||||
|
-U "{{ miniflux_postgres_user }}" \
|
||||||
|
"{{ miniflux_postgres_database }}" \
|
||||||
|
| gzip > "{{ postgres_backups_dir }}/${BACKUP_FILE}"
|
||||||
|
|
||||||
|
echo "miniflux: PostgreSQL backup saved to {{ postgres_backups_dir }}/${BACKUP_FILE}"
|
||||||
|
|
||||||
|
echo "miniflux: removing old backups"
|
||||||
|
|
||||||
|
# Keep only the 3 most recent backups
|
||||||
|
keep-files.py "{{ postgres_backups_dir }}" --keep 3
|
||||||
|
|
||||||
|
echo "miniflux: backup completed successfully."
|
||||||
63
files/miniflux/docker-compose.template.yml
Normal file
@@ -0,0 +1,63 @@
|
|||||||
|
# See sample https://miniflux.app/docs/docker.html#docker-compose
|
||||||
|
# See env https://miniflux.app/docs/configuration.html
|
||||||
|
|
||||||
|
services:
|
||||||
|
miniflux_app:
|
||||||
|
image: miniflux/miniflux:2.2.10
|
||||||
|
container_name: miniflux_app
|
||||||
|
user: "{{ user_create_result.uid }}:{{ user_create_result.group }}"
|
||||||
|
depends_on:
|
||||||
|
miniflux_postgres:
|
||||||
|
condition: service_healthy
|
||||||
|
restart: 'unless-stopped'
|
||||||
|
networks:
|
||||||
|
- "miniflux_network"
|
||||||
|
- "web_proxy_network"
|
||||||
|
volumes:
|
||||||
|
- "{{ secrets_dir }}:/secrets:ro"
|
||||||
|
environment:
|
||||||
|
- DATABASE_URL_FILE=/secrets/miniflux_database_url
|
||||||
|
- RUN_MIGRATIONS=1
|
||||||
|
- CREATE_ADMIN=1
|
||||||
|
- ADMIN_USERNAME_FILE=/secrets/miniflux_admin_user
|
||||||
|
- ADMIN_PASSWORD_FILE=/secrets/miniflux_admin_password
|
||||||
|
- BASE_URL=https://miniflux.vakhrushev.me
|
||||||
|
- DISABLE_LOCAL_AUTH=1
|
||||||
|
- OAUTH2_OIDC_DISCOVERY_ENDPOINT=https://auth.vakhrushev.me
|
||||||
|
- OAUTH2_CLIENT_ID_FILE=/secrets/miniflux_oidc_client_id
|
||||||
|
- OAUTH2_CLIENT_SECRET_FILE=/secrets/miniflux_oidc_client_secret
|
||||||
|
- OAUTH2_OIDC_PROVIDER_NAME=Authelia
|
||||||
|
- OAUTH2_PROVIDER=oidc
|
||||||
|
- OAUTH2_REDIRECT_URL=https://miniflux.vakhrushev.me/oauth2/oidc/callback
|
||||||
|
- OAUTH2_USER_CREATION=1
|
||||||
|
- METRICS_COLLECTOR=1
|
||||||
|
- METRICS_ALLOWED_NETWORKS=0.0.0.0/0
|
||||||
|
|
||||||
|
miniflux_postgres:
|
||||||
|
image: postgres:16.3-bookworm
|
||||||
|
container_name: miniflux_postgres
|
||||||
|
user: "{{ user_create_result.uid }}:{{ user_create_result.group }}"
|
||||||
|
restart: 'unless-stopped'
|
||||||
|
environment:
|
||||||
|
- POSTGRES_USER={{ miniflux_postgres_user }}
|
||||||
|
- POSTGRES_PASSWORD_FILE=/secrets/miniflux_postgres_password
|
||||||
|
- POSTGRES_DB={{ miniflux_postgres_database }}
|
||||||
|
networks:
|
||||||
|
- "miniflux_network"
|
||||||
|
- "monitoring_network"
|
||||||
|
volumes:
|
||||||
|
- "/etc/passwd:/etc/passwd:ro"
|
||||||
|
- "{{ secrets_dir }}:/secrets:ro"
|
||||||
|
- "{{ postgres_data_dir }}:/var/lib/postgresql/data"
|
||||||
|
healthcheck:
|
||||||
|
test: ["CMD", "pg_isready", "--username={{ miniflux_postgres_user }}", "--dbname={{ miniflux_postgres_database }}"]
|
||||||
|
interval: 10s
|
||||||
|
start_period: 30s
|
||||||
|
|
||||||
|
networks:
|
||||||
|
miniflux_network:
|
||||||
|
driver: bridge
|
||||||
|
web_proxy_network:
|
||||||
|
external: true
|
||||||
|
monitoring_network:
|
||||||
|
external: true
|
||||||
43
files/netdata/docker-compose.template.yml
Normal file
@@ -0,0 +1,43 @@
|
|||||||
|
services:
|
||||||
|
|
||||||
|
netdata:
|
||||||
|
image: netdata/netdata:v2.8.4
|
||||||
|
container_name: netdata
|
||||||
|
restart: unless-stopped
|
||||||
|
cap_add:
|
||||||
|
- SYS_PTRACE
|
||||||
|
- SYS_ADMIN
|
||||||
|
security_opt:
|
||||||
|
- apparmor:unconfined
|
||||||
|
networks:
|
||||||
|
- "web_proxy_network"
|
||||||
|
- "monitoring_network"
|
||||||
|
volumes:
|
||||||
|
- "{{ config_dir }}:/etc/netdata"
|
||||||
|
- "{{ (data_dir, 'lib') | path_join }}:/var/lib/netdata"
|
||||||
|
- "{{ (data_dir, 'cache') | path_join }}:/var/cache/netdata"
|
||||||
|
|
||||||
|
# Netdata system volumes
|
||||||
|
- "/:/host/root:ro,rslave"
|
||||||
|
|
||||||
|
- "/etc/group:/host/etc/group:ro"
|
||||||
|
- "/etc/hostname:/host/etc/hostname:ro"
|
||||||
|
- "/etc/localtime:/etc/localtime:ro"
|
||||||
|
- "/etc/os-release:/host/etc/os-release:ro"
|
||||||
|
- "/etc/passwd:/host/etc/passwd:ro"
|
||||||
|
|
||||||
|
- "/proc:/host/proc:ro"
|
||||||
|
- "/run/dbus:/run/dbus:ro"
|
||||||
|
- "/sys:/host/sys:ro"
|
||||||
|
- "/var/log:/host/var/log:ro"
|
||||||
|
- "/var/run:/host/var/run:ro"
|
||||||
|
- "/var/run/docker.sock:/var/run/docker.sock:ro"
|
||||||
|
environment:
|
||||||
|
PGID: "{{ netdata_docker_group_output.stdout | default(999) }}"
|
||||||
|
NETDATA_EXTRA_DEB_PACKAGES: "fail2ban"
|
||||||
|
|
||||||
|
networks:
|
||||||
|
web_proxy_network:
|
||||||
|
external: true
|
||||||
|
monitoring_network:
|
||||||
|
external: true
|
||||||
3
files/netdata/go.d/fail2ban.conf
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
jobs:
|
||||||
|
- name: fail2ban
|
||||||
|
update_every: 60 # Collect Fail2Ban jails statistics every N seconds
|
||||||
9
files/netdata/go.d/postgres.conf
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
update_every: 60
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
|
||||||
|
- name: outline_db
|
||||||
|
dsn: 'postgresql://netdata:{{ netdata_postgres_password }}@outline_postgres:5432/outline'
|
||||||
|
|
||||||
|
- name: miniflux_db
|
||||||
|
dsn: 'postgresql://netdata:{{ netdata_postgres_password }}@miniflux_postgres:5432/miniflux'
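# Note: these jobs assume a 'netdata' PostgreSQL role with monitoring
# privileges exists in each database; provisioning it is outside this file.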
|
||||||
24
files/netdata/go.d/prometheus.conf
Normal file
@@ -0,0 +1,24 @@
|
|||||||
|
update_every: 15
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
|
||||||
|
- name: caddyproxy
|
||||||
|
url: http://caddyproxy:2019/metrics
|
||||||
|
selector:
|
||||||
|
allow:
|
||||||
|
- "caddy_http_*"
|
||||||
|
|
||||||
|
- name: authelia
|
||||||
|
url: http://authelia_app:9959/metrics
|
||||||
|
selector:
|
||||||
|
allow:
|
||||||
|
- "authelia_*"
|
||||||
|
|
||||||
|
- name: miniflux
|
||||||
|
url: http://miniflux_app:8080/metrics
|
||||||
|
selector:
|
||||||
|
allow:
|
||||||
|
- "miniflux_*"
|
||||||
|
|
||||||
|
- name: transcriber
|
||||||
|
url: http://transcriber_app:8080/metrics
|
||||||
702
files/netdata/netdata.template.conf
Normal file
@@ -0,0 +1,702 @@
|
|||||||
|
# netdata configuration
|
||||||
|
#
|
||||||
|
# You can download the latest version of this file, using:
|
||||||
|
#
|
||||||
|
# wget -O /etc/netdata/netdata.conf http://localhost:19999/netdata.conf
|
||||||
|
# or
|
||||||
|
# curl -o /etc/netdata/netdata.conf http://localhost:19999/netdata.conf
|
||||||
|
#
|
||||||
|
# You can uncomment and change any of the options below.
|
||||||
|
# The value shown in the commented settings, is the default value.
|
||||||
|
#
|
||||||
|
|
||||||
|
# global netdata configuration
|
||||||
|
|
||||||
|
[global]
|
||||||
|
# run as user = netdata
|
||||||
|
# host access prefix = /host
|
||||||
|
# pthread stack size = 8MiB
|
||||||
|
# cpu cores = 2
|
||||||
|
# libuv worker threads = 16
|
||||||
|
# profile = standalone
|
||||||
|
# hostname = rivendell-v2
|
||||||
|
# glibc malloc arena max for plugins = 1
|
||||||
|
# glibc malloc arena max for netdata = 1
|
||||||
|
# crash reports = all
|
||||||
|
# timezone = Etc/UTC
|
||||||
|
# OOM score = 0
|
||||||
|
# process scheduling policy = keep
|
||||||
|
# is ephemeral node = no
|
||||||
|
# has unstable connection = no
|
||||||
|
|
||||||
|
[db]
|
||||||
|
#| >>> [db].update every <<<
|
||||||
|
#| datatype: duration (seconds), default value: 1s
|
||||||
|
update every = 10s
|
||||||
|
|
||||||
|
# enable replication = yes
|
||||||
|
# replication period = 1d
|
||||||
|
# replication step = 1h
|
||||||
|
# replication threads = 1
|
||||||
|
# replication prefetch = 10
|
||||||
|
# db = dbengine
|
||||||
|
# memory deduplication (ksm) = auto
|
||||||
|
# cleanup orphan hosts after = 1h
|
||||||
|
# cleanup ephemeral hosts after = off
|
||||||
|
# cleanup obsolete charts after = 1h
|
||||||
|
# gap when lost iterations above = 1
|
||||||
|
# dbengine page type = gorilla
|
||||||
|
# dbengine page cache size = 32MiB
|
||||||
|
# dbengine extent cache size = off
|
||||||
|
# dbengine enable journal integrity check = no
|
||||||
|
# dbengine use all ram for caches = no
|
||||||
|
# dbengine out of memory protection = 391.49MiB
|
||||||
|
# dbengine use direct io = yes
|
||||||
|
# dbengine journal v2 unmount time = 2m
|
||||||
|
# dbengine pages per extent = 109
|
||||||
|
# storage tiers = 3
|
||||||
|
# dbengine tier backfill = new
|
||||||
|
# dbengine tier 1 update every iterations = 60
|
||||||
|
# dbengine tier 2 update every iterations = 60
|
||||||
|
# dbengine tier 0 retention size = 1024MiB
|
||||||
|
# dbengine tier 0 retention time = 14d
|
||||||
|
# dbengine tier 1 retention size = 1024MiB
|
||||||
|
# dbengine tier 1 retention time = 3mo
|
||||||
|
# dbengine tier 2 retention size = 1024MiB
|
||||||
|
# dbengine tier 2 retention time = 2y
|
||||||
|
# extreme cardinality protection = yes
|
||||||
|
# extreme cardinality keep instances = 1000
|
||||||
|
# extreme cardinality min ephemerality = 50
|
||||||
|
|
||||||
|
[directories]
|
||||||
|
# config = /etc/netdata
|
||||||
|
# stock config = /usr/lib/netdata/conf.d
|
||||||
|
# log = /var/log/netdata
|
||||||
|
# web = /usr/share/netdata/web
|
||||||
|
# cache = /var/cache/netdata
|
||||||
|
# lib = /var/lib/netdata
|
||||||
|
# cloud.d = /var/lib/netdata/cloud.d
|
||||||
|
# plugins = "/usr/libexec/netdata/plugins.d" "/etc/netdata/custom-plugins.d"
|
||||||
|
# registry = /var/lib/netdata/registry
|
||||||
|
# home = /etc/netdata
|
||||||
|
# stock health config = /usr/lib/netdata/conf.d/health.d
|
||||||
|
# health config = /etc/netdata/health.d
|
||||||
|
|
||||||
|
[logs]
|
||||||
|
# facility = daemon
|
||||||
|
# logs flood protection period = 1m
|
||||||
|
# logs to trigger flood protection = 1000
|
||||||
|
# level = info
|
||||||
|
# debug = /var/log/netdata/debug.log
|
||||||
|
# daemon = /var/log/netdata/daemon.log
|
||||||
|
# collector = /var/log/netdata/collector.log
|
||||||
|
# access = /var/log/netdata/access.log
|
||||||
|
# health = /var/log/netdata/health.log
|
||||||
|
# debug flags = 0x0000000000000000
|
||||||
|
|
||||||
|
[environment variables]
|
||||||
|
# PATH = /usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/sbin:/usr/sbin:/usr/local/bin:/usr/local/sbin
|
||||||
|
# PYTHONPATH =
|
||||||
|
# TZ = :/etc/localtime
|
||||||
|
|
||||||
|
[cloud]
|
||||||
|
# conversation log = no
|
||||||
|
# scope = full
|
||||||
|
# query threads = 6
|
||||||
|
# proxy = env
|
||||||
|
|
||||||
|
[ml]
|
||||||
|
# enabled = auto
|
||||||
|
# training window = 6h
|
||||||
|
# min training window = 15m
|
||||||
|
# max training vectors = 1440
|
||||||
|
# max samples to smooth = 3
|
||||||
|
# train every = 3h
|
||||||
|
# number of models per dimension = 18
|
||||||
|
# delete models older than = 7d
|
||||||
|
# num samples to diff = 1
|
||||||
|
# num samples to lag = 5
|
||||||
|
# maximum number of k-means iterations = 1000
|
||||||
|
# dimension anomaly score threshold = 0.99000
|
||||||
|
# host anomaly rate threshold = 1.00000
|
||||||
|
# anomaly detection grouping method = average
|
||||||
|
# anomaly detection grouping duration = 5m
|
||||||
|
# num training threads = 1
|
||||||
|
# flush models batch size = 256
|
||||||
|
# dimension anomaly rate suppression window = 15m
|
||||||
|
# dimension anomaly rate suppression threshold = 450
|
||||||
|
# enable statistics charts = yes
|
||||||
|
# hosts to skip from training = !*
|
||||||
|
# charts to skip from training = netdata.*
|
||||||
|
# stream anomaly detection charts = yes
|
||||||
|
|
||||||
|
[health]
|
||||||
|
# silencers file = /var/lib/netdata/health.silencers.json
|
||||||
|
# enabled = yes
|
||||||
|
# enable stock health configuration = yes
|
||||||
|
# use summary for notifications = yes
|
||||||
|
# default repeat warning = off
|
||||||
|
# default repeat critical = off
|
||||||
|
# in memory max health log entries = 1000
|
||||||
|
# health log retention = 5d
|
||||||
|
# script to execute on alarm = /usr/libexec/netdata/plugins.d/alarm-notify.sh
|
||||||
|
# enabled alarms = *
|
||||||
|
# run at least every = 10s
|
||||||
|
# postpone alarms during hibernation for = 1m
|
||||||
|
|
||||||
|
[web]
|
||||||
|
#| >>> [web].default port <<<
|
||||||
|
#| migrated from: [global].default port
|
||||||
|
# default port = 19999
|
||||||
|
|
||||||
|
# ssl key = /etc/netdata/ssl/key.pem
|
||||||
|
# ssl certificate = /etc/netdata/ssl/cert.pem
|
||||||
|
# tls version = 1.3
|
||||||
|
# tls ciphers = none
|
||||||
|
# ses max tg_des_window = 15
|
||||||
|
# des max tg_des_window = 15
|
||||||
|
# mode = static-threaded
|
||||||
|
# listen backlog = 4096
|
||||||
|
# bind to = *
|
||||||
|
# bearer token protection = no
|
||||||
|
# disconnect idle clients after = 1m
|
||||||
|
# timeout for first request = 1m
|
||||||
|
# accept a streaming request every = off
|
||||||
|
# respect do not track policy = no
|
||||||
|
# x-frame-options response header =
|
||||||
|
# allow connections from = localhost *
|
||||||
|
# allow connections by dns = heuristic
|
||||||
|
# allow dashboard from = localhost *
|
||||||
|
# allow dashboard by dns = heuristic
|
||||||
|
# allow badges from = *
|
||||||
|
# allow badges by dns = heuristic
|
||||||
|
# allow streaming from = *
|
||||||
|
# allow streaming by dns = heuristic
|
||||||
|
# allow netdata.conf from = localhost fd* 10.* 192.168.* 172.16.* 172.17.* 172.18.* 172.19.* 172.20.* 172.21.* 172.22.* 172.23.* 172.24.* 172.25.* 172.26.* 172.27.* 172.28.* 172.29.* 172.30.* 172.31.* UNKNOWN
|
||||||
|
# allow netdata.conf by dns = no
|
||||||
|
# allow management from = localhost
|
||||||
|
# allow management by dns = heuristic
|
||||||
|
# enable gzip compression = yes
|
||||||
|
# gzip compression strategy = default
|
||||||
|
# gzip compression level = 3
|
||||||
|
# ssl skip certificate verification = no
|
||||||
|
# web server threads = 6
|
||||||
|
# web server max sockets = 131072
|
||||||
|
|
||||||
|
[registry]
|
||||||
|
# enabled = no
|
||||||
|
# registry db file = /var/lib/netdata/registry/registry.db
|
||||||
|
# registry log file = /var/lib/netdata/registry/registry-log.db
|
||||||
|
# registry save db every new entries = 1000000
|
||||||
|
# registry expire idle persons = 1y
|
||||||
|
# registry domain =
|
||||||
|
# registry to announce = https://registry.my-netdata.io
|
||||||
|
# registry hostname = rivendell-v2
|
||||||
|
# verify browser cookies support = yes
|
||||||
|
# enable cookies SameSite and Secure = yes
|
||||||
|
# max URL length = 1024
|
||||||
|
# max URL name length = 50
|
||||||
|
# netdata management api key file = /var/lib/netdata/netdata.api.key
|
||||||
|
# allow from = *
|
||||||
|
# allow by dns = heuristic
|
||||||
|
|
||||||
|
[pulse]
|
||||||
|
# extended = no
|
||||||
|
# update every = 10s
|
||||||
|
|
||||||
|
[plugins]
|
||||||
|
#| >>> [plugins].perf <<<
|
||||||
|
#| datatype: yes or no, default value: yes
|
||||||
|
perf = no
|
||||||
|
|
||||||
|
#| >>> [plugins].python.d <<<
|
||||||
|
#| datatype: yes or no, default value: yes
|
||||||
|
python.d = no
|
||||||
|
|
||||||
|
#| >>> [plugins].charts.d <<<
|
||||||
|
#| datatype: yes or no, default value: yes
|
||||||
|
charts.d = no
|
||||||
|
|
||||||
|
#| >>> [plugins].otel <<<
|
||||||
|
#| datatype: yes or no, default value: yes
|
||||||
|
otel = no
|
||||||
|
|
||||||
|
#| >>> [plugins].statsd <<<
|
||||||
|
#| datatype: yes or no, default value: yes
|
||||||
|
statsd = no
|
||||||
|
|
||||||
|
# idlejitter = yes
|
||||||
|
# netdata pulse = yes
|
||||||
|
# profile = no
|
||||||
|
# tc = yes
|
||||||
|
# diskspace = yes
|
||||||
|
# proc = yes
|
||||||
|
# cgroups = yes
|
||||||
|
# timex = yes
|
||||||
|
# enable running new plugins = yes
|
||||||
|
# check for new plugins every = 1m
|
||||||
|
# slabinfo = no
|
||||||
|
# freeipmi = no
|
||||||
|
# debugfs = yes
|
||||||
|
# ioping = yes
|
||||||
|
# network-viewer = yes
|
||||||
|
# apps = yes
|
||||||
|
# go.d = yes
|
||||||
|
# systemd-units = yes
|
||||||
|
# systemd-journal = yes
|
||||||
|
|
||||||
|
[statsd]
|
||||||
|
# update every (flushInterval) = 10s
|
||||||
|
# udp messages to process at once = 10
|
||||||
|
# create private charts for metrics matching = *
|
||||||
|
# max private charts hard limit = 1000
|
||||||
|
# set charts as obsolete after = off
|
||||||
|
# decimal detail = 1000
|
||||||
|
# disconnect idle tcp clients after = 10m
|
||||||
|
# private charts hidden = no
|
||||||
|
# histograms and timers percentile (percentThreshold) = 95.00000
|
||||||
|
# dictionaries max unique dimensions = 200
|
||||||
|
# add dimension for number of events received = no
|
||||||
|
# gaps on gauges (deleteGauges) = no
|
||||||
|
# gaps on counters (deleteCounters) = no
|
||||||
|
# gaps on meters (deleteMeters) = no
|
||||||
|
# gaps on sets (deleteSets) = no
|
||||||
|
# gaps on histograms (deleteHistograms) = no
|
||||||
|
# gaps on timers (deleteTimers) = no
|
||||||
|
# gaps on dictionaries (deleteDictionaries) = no
|
||||||
|
# statsd server max TCP sockets = 131072
|
||||||
|
|
||||||
|
[plugin:idlejitter]
|
||||||
|
# loop time = 20ms
|
||||||
|
|
||||||
|
[plugin:timex]
|
||||||
|
# update every = 10s
|
||||||
|
# clock synchronization state = yes
|
||||||
|
# time offset = yes
|
||||||
|
|
||||||
|
[plugin:proc]
|
||||||
|
# /proc/net/dev = yes
|
||||||
|
# /proc/pagetypeinfo = no
|
||||||
|
# /proc/stat = yes
|
||||||
|
# /proc/uptime = yes
|
||||||
|
# /proc/loadavg = yes
|
||||||
|
# /proc/sys/fs/file-nr = yes
|
||||||
|
# /proc/sys/kernel/random/entropy_avail = yes
|
||||||
|
# /run/reboot_required = yes
|
||||||
|
# /proc/pressure = yes
|
||||||
|
# /proc/interrupts = yes
|
||||||
|
# /proc/softirqs = yes
|
||||||
|
# /proc/vmstat = yes
|
||||||
|
# /proc/meminfo = yes
|
||||||
|
# /sys/kernel/mm/ksm = yes
|
||||||
|
# /sys/block/zram = yes
|
||||||
|
# /sys/devices/system/edac/mc = yes
|
||||||
|
# /sys/devices/pci/aer = yes
|
||||||
|
# /sys/devices/system/node = yes
|
||||||
|
# /proc/net/wireless = yes
|
||||||
|
# /proc/net/sockstat = yes
|
||||||
|
# /proc/net/sockstat6 = yes
|
||||||
|
# /proc/net/netstat = yes
|
||||||
|
# /proc/net/sctp/snmp = yes
|
||||||
|
# /proc/net/softnet_stat = yes
|
||||||
|
# /proc/net/ip_vs/stats = yes
|
||||||
|
# /sys/class/infiniband = yes
|
||||||
|
# /proc/net/stat/conntrack = yes
|
||||||
|
# /proc/net/stat/synproxy = yes
|
||||||
|
# /proc/diskstats = yes
|
||||||
|
# /proc/mdstat = yes
|
||||||
|
# /proc/net/rpc/nfsd = yes
|
||||||
|
# /proc/net/rpc/nfs = yes
|
||||||
|
# /proc/spl/kstat/zfs/arcstats = yes
|
||||||
|
# /sys/fs/btrfs = yes
|
||||||
|
# ipc = yes
|
||||||
|
# /sys/class/power_supply = yes
|
||||||
|
# /sys/class/drm = yes
|
||||||
|
|
||||||
|
[plugin:cgroups]
|
||||||
|
#| >>> [plugin:cgroups].update every <<<
|
||||||
|
#| datatype: duration (seconds), default value: 10s
|
||||||
|
update every = 20s
|
||||||
|
|
||||||
|
#| >>> [plugin:cgroups].check for new cgroups every <<<
|
||||||
|
#| datatype: duration (seconds), default value: 10s
|
||||||
|
check for new cgroups every = 20s
|
||||||
|
|
||||||
|
# use unified cgroups = auto
|
||||||
|
# max cgroups to allow = 1000
|
||||||
|
# max cgroups depth to monitor = 0
|
||||||
|
# enable by default cgroups matching = !*/init.scope !/system.slice/run-*.scope *user.slice/docker-* !*user.slice* *.scope !/machine.slice/*/.control !/machine.slice/*/payload* !/machine.slice/*/supervisor /machine.slice/*.service */kubepods/pod*/* */kubepods/*/pod*/* */*-kubepods-pod*/* */*-kubepods-*-pod*/* !*kubepods* !*kubelet* !*/vcpu* !*/emulator !*.mount !*.partition !*.service !*.service/udev !*.socket !*.slice !*.swap !*.user !/ !/docker !*/libvirt !/lxc !/lxc/*/* !/lxc.monitor* !/lxc.pivot !/lxc.payload !*lxcfs.service/.control !/machine !/qemu !/system !/systemd !/user *
|
||||||
|
# enable by default cgroups names matching = *
|
||||||
|
# search for cgroups in subpaths matching = !*/init.scope !*-qemu !*.libvirt-qemu !/init.scope !/system !/systemd !/user !/lxc/*/* !/lxc.monitor !/lxc.payload/*/* !/lxc.payload.* *
|
||||||
|
# script to get cgroup names = /usr/libexec/netdata/plugins.d/cgroup-name.sh
|
||||||
|
# script to get cgroup network interfaces = /usr/libexec/netdata/plugins.d/cgroup-network
|
||||||
|
# run script to rename cgroups matching = !/ !*.mount !*.socket !*.partition /machine.slice/*.service !*.service !*.slice !*.swap !*.user !init.scope !*.scope/vcpu* !*.scope/emulator *.scope *docker* *lxc* *qemu* */kubepods/pod*/* */kubepods/*/pod*/* */*-kubepods-pod*/* */*-kubepods-*-pod*/* !*kubepods* !*kubelet* *.libvirt-qemu *
|
||||||
|
# cgroups to match as systemd services = !/system.slice/*/*.service /system.slice/*.service
|
||||||
|
|
||||||
|
[plugin:proc:diskspace]
|
||||||
|
#| >>> [plugin:proc:diskspace].update every <<<
|
||||||
|
#| datatype: duration (seconds), default value: 10s
|
||||||
|
update every = 1m
|
||||||
|
|
||||||
|
# remove charts of unmounted disks = yes
|
||||||
|
# check for new mount points every = 15s
|
||||||
|
# exclude space metrics on paths = /dev /dev/shm /proc/* /sys/* /var/run/user/* /run/lock /run/user/* /snap/* /var/lib/docker/* /var/lib/containers/storage/* /run/credentials/* /run/containerd/* /rpool /rpool/*
|
||||||
|
# exclude space metrics on filesystems = *gvfs *gluster* *s3fs *ipfs *davfs2 *httpfs *sshfs *gdfs *moosefs fusectl autofs cgroup cgroup2 hugetlbfs devtmpfs fuse.lxcfs
|
||||||
|
# exclude inode metrics on filesystems = msdosfs msdos vfat overlayfs aufs* *unionfs
|
||||||
|
# space usage for all disks = auto
|
||||||
|
# inodes usage for all disks = auto
|
||||||
|
|
||||||
|
[plugin:tc]
|
||||||
|
# script to run to get tc values = /usr/libexec/netdata/plugins.d/tc-qos-helper.sh
|
||||||
|
|
||||||
|
[plugin:go.d]
|
||||||
|
# update every = 10s
|
||||||
|
# command options =
|
||||||
|
|
||||||
|
[plugin:apps]
|
||||||
|
# update every = 10s
|
||||||
|
# command options =
|
||||||
|
|
||||||
|
[plugin:systemd-journal]
|
||||||
|
# update every = 10s
|
||||||
|
# command options =
|
||||||
|
|
||||||
|
[plugin:network-viewer]
|
||||||
|
# update every = 10s
|
||||||
|
# command options =
|
||||||
|
|
||||||
|
[plugin:debugfs]
|
||||||
|
# update every = 10s
|
||||||
|
# command options =
|
||||||
|
|
||||||
|
[plugin:ioping]
|
||||||
|
# update every = 10s
|
||||||
|
# command options =
|
||||||
|
|
||||||
|
[plugin:proc:/proc/net/dev]
|
||||||
|
# compressed packets for all interfaces = no
|
||||||
|
# disable by default interfaces matching = lo fireqos* *-ifb fwpr* fwbr* fwln* ifb4*
|
||||||
|
|
||||||
|
[plugin:proc:/proc/stat]
|
||||||
|
# cpu utilization = yes
|
||||||
|
# per cpu core utilization = no
|
||||||
|
# cpu interrupts = yes
|
||||||
|
# context switches = yes
|
||||||
|
# processes started = yes
|
||||||
|
# processes running = yes
|
||||||
|
# keep per core files open = yes
|
||||||
|
# keep cpuidle files open = yes
|
||||||
|
# core_throttle_count = auto
|
||||||
|
# package_throttle_count = no
|
||||||
|
# cpu frequency = yes
|
||||||
|
# cpu idle states = no
|
||||||
|
# core_throttle_count filename to monitor = /host/sys/devices/system/cpu/%s/thermal_throttle/core_throttle_count
|
||||||
|
# package_throttle_count filename to monitor = /host/sys/devices/system/cpu/%s/thermal_throttle/package_throttle_count
|
||||||
|
# scaling_cur_freq filename to monitor = /host/sys/devices/system/cpu/%s/cpufreq/scaling_cur_freq
|
||||||
|
# time_in_state filename to monitor = /host/sys/devices/system/cpu/%s/cpufreq/stats/time_in_state
|
||||||
|
# schedstat filename to monitor = /host/proc/schedstat
|
||||||
|
# cpuidle name filename to monitor = /host/sys/devices/system/cpu/cpu%zu/cpuidle/state%zu/name
|
||||||
|
# cpuidle time filename to monitor = /host/sys/devices/system/cpu/cpu%zu/cpuidle/state%zu/time
|
||||||
|
# filename to monitor = /host/proc/stat
|
||||||
|
|
||||||
|
[plugin:proc:/proc/uptime]
|
||||||
|
# filename to monitor = /host/proc/uptime
|
||||||
|
|
||||||
|
[plugin:proc:/proc/loadavg]
|
||||||
|
# filename to monitor = /host/proc/loadavg
|
||||||
|
# enable load average = yes
|
||||||
|
# enable total processes = yes
|
||||||
|
|
||||||
|
[plugin:proc:/proc/sys/fs/file-nr]
|
||||||
|
# filename to monitor = /host/proc/sys/fs/file-nr
|
||||||
|
|
||||||
|
[plugin:proc:/proc/sys/kernel/random/entropy_avail]
|
||||||
|
# filename to monitor = /host/proc/sys/kernel/random/entropy_avail
|
||||||
|
|
||||||
|
[plugin:proc:/proc/pressure]
|
||||||
|
# base path of pressure metrics = /proc/pressure
|
||||||
|
# enable cpu some pressure = yes
|
||||||
|
# enable cpu full pressure = no
|
||||||
|
# enable memory some pressure = yes
|
||||||
|
# enable memory full pressure = yes
|
||||||
|
# enable io some pressure = yes
|
||||||
|
# enable io full pressure = yes
|
||||||
|
# enable irq some pressure = no
|
||||||
|
# enable irq full pressure = yes
|
||||||
|
|
||||||
|
[plugin:proc:/proc/interrupts]
|
||||||
|
# interrupts per core = no
|
||||||
|
# filename to monitor = /host/proc/interrupts
|
||||||
|
|
||||||
|
[plugin:proc:/proc/softirqs]
|
||||||
|
# interrupts per core = no
|
||||||
|
# filename to monitor = /host/proc/softirqs
|
||||||
|
|
||||||
|
[plugin:proc:/proc/vmstat]
|
||||||
|
# filename to monitor = /host/proc/vmstat
|
||||||
|
# swap i/o = auto
|
||||||
|
# disk i/o = yes
|
||||||
|
# memory page faults = yes
|
||||||
|
# out of memory kills = yes
|
||||||
|
# system-wide numa metric summary = auto
|
||||||
|
# transparent huge pages = auto
|
||||||
|
# zswap i/o = auto
|
||||||
|
# memory ballooning = auto
|
||||||
|
# kernel same memory = auto
|
||||||
|
|
||||||
|
[plugin:proc:/sys/devices/system/node]
|
||||||
|
# directory to monitor = /host/sys/devices/system/node
|
||||||
|
# enable per-node numa metrics = auto
|
||||||
|
|
||||||
|
[plugin:proc:/proc/meminfo]
|
||||||
|
# system ram = yes
|
||||||
|
# system swap = auto
|
||||||
|
# hardware corrupted ECC = auto
|
||||||
|
# committed memory = yes
|
||||||
|
# writeback memory = yes
|
||||||
|
# kernel memory = yes
|
||||||
|
# slab memory = yes
|
||||||
|
# hugepages = auto
|
||||||
|
# transparent hugepages = auto
|
||||||
|
# memory reclaiming = yes
|
||||||
|
# high low memory = yes
|
||||||
|
# cma memory = auto
|
||||||
|
# direct maps = yes
|
||||||
|
# filename to monitor = /host/proc/meminfo
|
||||||
|
|
||||||
|
[plugin:proc:/sys/kernel/mm/ksm]
|
||||||
|
# /sys/kernel/mm/ksm/pages_shared = /host/sys/kernel/mm/ksm/pages_shared
|
||||||
|
# /sys/kernel/mm/ksm/pages_sharing = /host/sys/kernel/mm/ksm/pages_sharing
|
||||||
|
# /sys/kernel/mm/ksm/pages_unshared = /host/sys/kernel/mm/ksm/pages_unshared
|
||||||
|
# /sys/kernel/mm/ksm/pages_volatile = /host/sys/kernel/mm/ksm/pages_volatile
|
||||||
|
|
||||||
|
[plugin:proc:/sys/devices/system/edac/mc]
|
||||||
|
# directory to monitor = /host/sys/devices/system/edac/mc
|
||||||
|
|
||||||
|
[plugin:proc:/sys/class/pci/aer]
|
||||||
|
# enable root ports = no
|
||||||
|
# enable pci slots = no
|
||||||
|
|
||||||
|
[plugin:proc:/proc/net/wireless]
|
||||||
|
# filename to monitor = /host/proc/net/wireless
|
||||||
|
# status for all interfaces = auto
|
||||||
|
# quality for all interfaces = auto
|
||||||
|
# discarded packets for all interfaces = auto
|
||||||
|
# missed beacon for all interface = auto
|
||||||
|
|
||||||
|
[plugin:proc:/proc/net/sockstat]
|
||||||
|
# ipv4 sockets = auto
|
||||||
|
# ipv4 TCP sockets = auto
|
||||||
|
# ipv4 TCP memory = auto
|
||||||
|
# ipv4 UDP sockets = auto
|
||||||
|
# ipv4 UDP memory = auto
|
||||||
|
# ipv4 UDPLITE sockets = auto
|
||||||
|
# ipv4 RAW sockets = auto
|
||||||
|
# ipv4 FRAG sockets = auto
|
||||||
|
# ipv4 FRAG memory = auto
|
||||||
|
# update constants every = 1m
|
||||||
|
# filename to monitor = /host/proc/net/sockstat
|
||||||
|
|
||||||
|
[plugin:proc:/proc/net/sockstat6]
|
||||||
|
# ipv6 TCP sockets = auto
|
||||||
|
# ipv6 UDP sockets = auto
|
||||||
|
# ipv6 UDPLITE sockets = auto
|
||||||
|
# ipv6 RAW sockets = auto
|
||||||
|
# ipv6 FRAG sockets = auto
|
||||||
|
# filename to monitor = /host/proc/net/sockstat6
|
||||||
|
|
||||||
|
[plugin:proc:/proc/net/netstat]
|
||||||
|
# bandwidth = auto
|
||||||
|
# input errors = auto
|
||||||
|
# multicast bandwidth = auto
|
||||||
|
# broadcast bandwidth = auto
|
||||||
|
# multicast packets = auto
|
||||||
|
# broadcast packets = auto
|
||||||
|
# ECN packets = auto
|
||||||
|
# TCP reorders = auto
|
||||||
|
# TCP SYN cookies = auto
|
||||||
|
# TCP out-of-order queue = auto
|
||||||
|
# TCP connection aborts = auto
|
||||||
|
# TCP memory pressures = auto
|
||||||
|
# TCP SYN queue = auto
|
||||||
|
# TCP accept queue = auto
|
||||||
|
# filename to monitor = /host/proc/net/netstat
|
||||||
|
|
||||||
|
[plugin:proc:/proc/net/snmp]
|
||||||
|
# ipv4 packets = auto
|
||||||
|
# ipv4 fragments sent = auto
|
||||||
|
# ipv4 fragments assembly = auto
|
||||||
|
# ipv4 errors = auto
|
||||||
|
# ipv4 TCP connections = auto
|
||||||
|
# ipv4 TCP packets = auto
|
||||||
|
# ipv4 TCP errors = auto
|
||||||
|
# ipv4 TCP opens = auto
|
||||||
|
# ipv4 TCP handshake issues = auto
|
||||||
|
# ipv4 UDP packets = auto
|
||||||
|
# ipv4 UDP errors = auto
|
||||||
|
# ipv4 ICMP packets = auto
|
||||||
|
# ipv4 ICMP messages = auto
|
||||||
|
# ipv4 UDPLite packets = auto
|
||||||
|
# filename to monitor = /host/proc/net/snmp
|
||||||
|
|
||||||
|
[plugin:proc:/proc/net/snmp6]
|
||||||
|
# ipv6 packets = auto
|
||||||
|
# ipv6 fragments sent = auto
|
||||||
|
# ipv6 fragments assembly = auto
|
||||||
|
# ipv6 errors = auto
|
||||||
|
# ipv6 UDP packets = auto
|
||||||
|
# ipv6 UDP errors = auto
|
||||||
|
# ipv6 UDPlite packets = auto
|
||||||
|
# ipv6 UDPlite errors = auto
|
||||||
|
# bandwidth = auto
|
||||||
|
# multicast bandwidth = auto
|
||||||
|
# broadcast bandwidth = auto
|
||||||
|
# multicast packets = auto
|
||||||
|
# icmp = auto
|
||||||
|
# icmp redirects = auto
|
||||||
|
# icmp errors = auto
|
||||||
|
# icmp echos = auto
|
||||||
|
# icmp group membership = auto
|
||||||
|
# icmp router = auto
|
||||||
|
# icmp neighbor = auto
|
||||||
|
# icmp mldv2 = auto
|
||||||
|
# icmp types = auto
|
||||||
|
# ect = auto
|
||||||
|
# filename to monitor = /host/proc/net/snmp6
|
||||||
|
|
||||||
|
[plugin:proc:/proc/net/sctp/snmp]
|
||||||
|
# established associations = auto
|
||||||
|
# association transitions = auto
|
||||||
|
# fragmentation = auto
|
||||||
|
# packets = auto
|
||||||
|
# packet errors = auto
|
||||||
|
# chunk types = auto
|
||||||
|
# filename to monitor = /host/proc/net/sctp/snmp
|
||||||
|
|
||||||
|
[plugin:proc:/proc/net/softnet_stat]
|
||||||
|
# softnet_stat per core = no
|
||||||
|
# filename to monitor = /host/proc/net/softnet_stat
|
||||||
|
|
||||||
|
[plugin:proc:/proc/net/ip_vs_stats]
|
||||||
|
# IPVS bandwidth = yes
|
||||||
|
# IPVS connections = yes
|
||||||
|
# IPVS packets = yes
|
||||||
|
# filename to monitor = /host/proc/net/ip_vs_stats
|
||||||
|
|
||||||
|
[plugin:proc:/sys/class/infiniband]
|
||||||
|
# dirname to monitor = /host/sys/class/infiniband
|
||||||
|
# bandwidth counters = yes
|
||||||
|
# packets counters = yes
|
||||||
|
# errors counters = yes
|
||||||
|
# hardware packets counters = auto
|
||||||
|
# hardware errors counters = auto
|
||||||
|
# monitor only active ports = auto
|
||||||
|
# disable by default interfaces matching =
|
||||||
|
# refresh ports state every = 30s
|
||||||
|
|
||||||
|
[plugin:proc:/proc/net/stat/nf_conntrack]
|
||||||
|
# filename to monitor = /host/proc/net/stat/nf_conntrack
|
||||||
|
# netfilter new connections = no
|
||||||
|
# netfilter connection changes = no
|
||||||
|
# netfilter connection expectations = no
|
||||||
|
# netfilter connection searches = no
|
||||||
|
# netfilter errors = no
|
||||||
|
# netfilter connections = yes
|
||||||
|
|
||||||
|
[plugin:proc:/proc/sys/net/netfilter/nf_conntrack_max]
|
||||||
|
# filename to monitor = /host/proc/sys/net/netfilter/nf_conntrack_max
|
||||||
|
# read every seconds = 10
|
||||||
|
|
||||||
|
[plugin:proc:/proc/sys/net/netfilter/nf_conntrack_count]
|
||||||
|
# filename to monitor = /host/proc/sys/net/netfilter/nf_conntrack_count
|
||||||
|
|
||||||
|
[plugin:proc:/proc/net/stat/synproxy]
|
||||||
|
# SYNPROXY cookies = auto
|
||||||
|
# SYNPROXY SYN received = auto
|
||||||
|
# SYNPROXY connections reopened = auto
|
||||||
|
# filename to monitor = /host/proc/net/stat/synproxy
|
||||||
|
|
||||||
|
[plugin:proc:/proc/diskstats]
|
||||||
|
# enable new disks detected at runtime = yes
|
||||||
|
# performance metrics for physical disks = auto
|
||||||
|
# performance metrics for virtual disks = auto
|
||||||
|
# performance metrics for partitions = no
|
||||||
|
# bandwidth for all disks = auto
|
||||||
|
# operations for all disks = auto
|
||||||
|
# merged operations for all disks = auto
|
||||||
|
# i/o time for all disks = auto
|
||||||
|
# queued operations for all disks = auto
|
||||||
|
# utilization percentage for all disks = auto
|
||||||
|
# extended operations for all disks = auto
|
||||||
|
# backlog for all disks = auto
|
||||||
|
# bcache for all disks = auto
|
||||||
|
# bcache priority stats update every = off
|
||||||
|
# remove charts of removed disks = yes
|
||||||
|
# path to get block device = /host/sys/block/%s
|
||||||
|
# path to get block device bcache = /host/sys/block/%s/bcache
|
||||||
|
# path to get virtual block device = /host/sys/devices/virtual/block/%s
|
||||||
|
# path to get block device infos = /host/sys/dev/block/%lu:%lu/%s
|
||||||
|
# path to device mapper = /host/dev/mapper
|
||||||
|
# path to /dev/disk = /host/dev/disk
|
||||||
|
# path to /sys/block = /host/sys/block
|
||||||
|
# path to /dev/disk/by-label = /host/dev/disk/by-label
|
||||||
|
# path to /dev/disk/by-id = /host/dev/disk/by-id
|
||||||
|
# path to /dev/vx/dsk = /host/dev/vx/dsk
|
||||||
|
# name disks by id = no
|
||||||
|
# preferred disk ids = *
|
||||||
|
# exclude disks = loop* ram*
|
||||||
|
# filename to monitor = /host/proc/diskstats
|
||||||
|
# performance metrics for disks with major 253 = yes
|
||||||
|
|
||||||
|
[plugin:proc:/proc/mdstat]
|
||||||
|
# faulty devices = yes
|
||||||
|
# nonredundant arrays availability = yes
|
||||||
|
# mismatch count = auto
|
||||||
|
# disk stats = yes
|
||||||
|
# operation status = yes
|
||||||
|
# make charts obsolete = yes
|
||||||
|
# filename to monitor = /host/proc/mdstat
|
||||||
|
# mismatch_cnt filename to monitor = /host/sys/block/%s/md/mismatch_cnt
|
||||||
|
|
||||||
|
[plugin:proc:/proc/net/rpc/nfsd]
|
||||||
|
# filename to monitor = /host/proc/net/rpc/nfsd
|
||||||
|
|
||||||
|
[plugin:proc:/proc/net/rpc/nfs]
|
||||||
|
# filename to monitor = /host/proc/net/rpc/nfs
|
||||||
|
|
||||||
|
[plugin:proc:/proc/spl/kstat/zfs/arcstats]
|
||||||
|
# filename to monitor = /host/proc/spl/kstat/zfs/arcstats
|
||||||
|
|
||||||
|
[plugin:proc:/sys/fs/btrfs]
|
||||||
|
# path to monitor = /host/sys/fs/btrfs
|
||||||
|
# check for btrfs changes every = 1m
|
||||||
|
# physical disks allocation = auto
|
||||||
|
# data allocation = auto
|
||||||
|
# metadata allocation = auto
|
||||||
|
# system allocation = auto
|
||||||
|
# commit stats = auto
|
||||||
|
# error stats = auto
|
||||||
|
|
||||||
|
[plugin:proc:ipc]
|
||||||
|
# message queues = yes
|
||||||
|
# semaphore totals = yes
|
||||||
|
# shared memory totals = yes
|
||||||
|
# msg filename to monitor = /host/proc/sysvipc/msg
|
||||||
|
# shm filename to monitor = /host/proc/sysvipc/shm
|
||||||
|
# max dimensions in memory allowed = 50
|
||||||
|
|
||||||
|
[plugin:proc:/sys/class/power_supply]
|
||||||
|
# battery capacity = yes
|
||||||
|
# battery power = yes
|
||||||
|
# battery charge = no
|
||||||
|
# battery energy = no
|
||||||
|
# power supply voltage = no
|
||||||
|
# keep files open = auto
|
||||||
|
# directory to monitor = /host/sys/class/power_supply
|
||||||
|
|
||||||
|
[plugin:proc:/sys/class/drm]
|
||||||
|
# directory to monitor = /host/sys/class/drm
|
||||||
|
|
||||||
|
[plugin:systemd-units]
|
||||||
|
# update every = 10s
|
||||||
|
# command options =
|
||||||
25  files/outline/backup.template.sh  Normal file
@@ -0,0 +1,25 @@
#!/usr/bin/env bash

set -eu
set -o pipefail

TIMESTAMP=$(date +%Y%m%d_%H%M%S)
BACKUP_FILE="outline_postgres_${TIMESTAMP}.sql.gz"

echo "Outline: backing up PostgreSQL database"

docker compose --file "{{ base_dir }}/docker-compose.yml" exec \
    outline_postgres \
    pg_dump \
    -U "{{ outline_postgres_user }}" \
    "{{ outline_postgres_database }}" \
    | gzip > "{{ postgres_backups_dir }}/${BACKUP_FILE}"

echo "Outline: PostgreSQL backup saved to {{ postgres_backups_dir }}/${BACKUP_FILE}"

echo "Outline: removing old backups"

# Keep only the 3 most recent backups
keep-files.py "{{ postgres_backups_dir }}" --keep 3

echo "Outline: backup completed successfully."
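A dump produced by this script can be restored by streaming it back into the outline_postgres container. A minimal sketch reusing the same templated variables; the file name is a placeholder, and -T disables TTY allocation so the piped SQL passes through unchanged:

# Sketch: restore one of the compressed dumps created above.
gunzip -c "{{ postgres_backups_dir }}/outline_postgres_YYYYMMDD_HHMMSS.sql.gz" \
    | docker compose --file "{{ base_dir }}/docker-compose.yml" exec -T \
        outline_postgres \
        psql -U "{{ outline_postgres_user }}" "{{ outline_postgres_database }}"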
88  files/outline/docker-compose.template.yml  Normal file
@@ -0,0 +1,88 @@
services:

  # See sample https://github.com/outline/outline/blob/main/.env.sample

  outline_app:
    image: outlinewiki/outline:1.1.0
    container_name: outline_app
    restart: unless-stopped
    depends_on:
      - outline_postgres
      - outline_redis
    networks:
      - "outline_network"
      - "web_proxy_network"
    environment:
      NODE_ENV: 'production'
      URL: 'https://outline.vakhrushev.me'
      FORCE_HTTPS: 'true'
      SECRET_KEY: '{{ outline_secret_key }}'
      UTILS_SECRET: '{{ outline_utils_secret }}'
      DATABASE_URL: 'postgres://{{ outline_postgres_user }}:{{ outline_postgres_password }}@outline_postgres:5432/{{ outline_postgres_database }}' # yamllint disable-line rule:line-length
      PGSSLMODE: 'disable'
      REDIS_URL: 'redis://outline_redis:6379'

      FILE_STORAGE: 's3'
      FILE_STORAGE_UPLOAD_MAX_SIZE: '262144000'
      AWS_ACCESS_KEY_ID: '{{ outline_s3_access_key }}'
      AWS_SECRET_ACCESS_KEY: '{{ outline_s3_secret_key }}'
      AWS_REGION: '{{ outline_s3_region }}'
      AWS_S3_ACCELERATE_URL: ''
      AWS_S3_UPLOAD_BUCKET_URL: '{{ outline_s3_url }}'
      AWS_S3_UPLOAD_BUCKET_NAME: '{{ outline_s3_bucket }}'
      AWS_S3_FORCE_PATH_STYLE: 'true'
      AWS_S3_ACL: 'private'

      OIDC_CLIENT_ID: '{{ outline_oidc_client_id | replace("$", "$$") }}'
      OIDC_CLIENT_SECRET: '{{ outline_oidc_client_secret | replace("$", "$$") }}'
      OIDC_AUTH_URI: 'https://auth.vakhrushev.me/api/oidc/authorization'
      OIDC_TOKEN_URI: 'https://auth.vakhrushev.me/api/oidc/token'
      OIDC_USERINFO_URI: 'https://auth.vakhrushev.me/api/oidc/userinfo'
      OIDC_LOGOUT_URI: 'https://auth.vakhrushev.me/logout'
      OIDC_USERNAME_CLAIM: 'email'
      OIDC_SCOPES: 'openid profile email'
      OIDC_DISPLAY_NAME: 'Authelia'

      SMTP_HOST: '{{ postbox_host }}'
      SMTP_PORT: '{{ postbox_port }}'
      SMTP_USERNAME: '{{ postbox_user }}'
      SMTP_PASSWORD: '{{ postbox_pass }}'
      SMTP_FROM_EMAIL: 'outline@vakhrushev.me'
      SMTP_TLS_CIPHERS: 'TLSv1.2'
      SMTP_SECURE: 'false'

  outline_redis:
    image: valkey/valkey:9.0-alpine
    container_name: outline_redis
    restart: unless-stopped
    networks:
      - "outline_network"
      - "monitoring_network"

  outline_postgres:
    image: postgres:16.3-bookworm
    container_name: outline_postgres
    user: "{{ user_create_result.uid }}:{{ user_create_result.group }}"
    restart: unless-stopped
    volumes:
      - "/etc/passwd:/etc/passwd:ro"
      - "{{ postgres_data_dir }}:/var/lib/postgresql/data"
    environment:
      POSTGRES_USER: '{{ outline_postgres_user }}'
      POSTGRES_PASSWORD: '{{ outline_postgres_password }}'
      POSTGRES_DB: '{{ outline_postgres_database }}'
    networks:
      - "outline_network"
      - "monitoring_network"
    healthcheck:
      test: ["CMD", "pg_isready", "--username={{ outline_postgres_user }}", "--dbname={{ outline_postgres_database }}"]
      interval: 10s
      start_period: 30s

networks:
  outline_network:
    driver: bridge
  web_proxy_network:
    external: true
  monitoring_network:
    external: true
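SECRET_KEY and UTILS_SECRET are filled in from vaulted Ansible variables; Outline's .env.sample describes them as random hex strings. One possible way to generate suitable values before adding them to the secrets file, assuming openssl is available locally:

# Sketch: generate values for outline_secret_key and outline_utils_secret.
openssl rand -hex 32   # outline_secret_key
openssl rand -hex 32   # outline_utils_secret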
12  files/rssbridge/docker-compose.yml.j2  Normal file
@@ -0,0 +1,12 @@
services:

  rssbridge_app:
    image: rssbridge/rss-bridge:2025-08-05
    container_name: rssbridge_app
    restart: unless-stopped
    networks:
      - "web_proxy_network"

networks:
  web_proxy_network:
    external: true
44  files/transcriber/config.secrets.toml  Normal file
@@ -0,0 +1,44 @@
$ANSIBLE_VAULT;1.1;AES256
|
||||||
|
33396537353265633634336630353330653337623861373731613734663938633837613437366537
|
||||||
|
3439383366633266623463366530626662346338393165630a663539313066663061353635666366
|
||||||
|
61393437393131333166626165306563366661353338363138633239666566313330363331666537
|
||||||
|
3763356535396334380a386362383436363732353234333033613133383264643934306432313335
|
||||||
|
34646164323664636532663835306230386633316539373564383163346663376666633564326134
|
||||||
|
30666135626637343963383766383836653135633739636261353666303666633566346562643962
|
||||||
|
63376165636434343066306539653637343736323437653465656436323533636237643333326438
|
||||||
|
35626239323530643066363533323039393237333338316135313838643464306161646635313062
|
||||||
|
36386565626435373333393566393831366538363864313737306565343162316536353539333864
|
||||||
|
63376264643566613266373665666363366662643262616634333132386535383731396462633430
|
||||||
|
32343738343039616139343833366661303430383766376139636434616565356161396433643035
|
||||||
|
37363165383935373937346464343738643430333764336264373931616332393964346566636638
|
||||||
|
39303434343461326464623363323937396663376335316237373166306134636432376435663033
|
||||||
|
34346436623435626363636237373965633139343661623135633764303862353465306235666563
|
||||||
|
66653764666635636462636434663264646665383236343166643133613966366334653030653262
|
||||||
|
38326437313939616332636638323033346139343732653933356239306132613665376163646164
|
||||||
|
30316663643666633334653133613764396165646533636534613931663138666366316235396466
|
||||||
|
61313964396264626339306135376635633133366433303033633363396132303938363638346333
|
||||||
|
66326466326134313535393831343262363862663065323135643630316431336531373833316363
|
||||||
|
64376338653366353031333836643137333736363534363164306331313337353663653961623665
|
||||||
|
64626562366637336637353433303261303964633236356162363139396339396136393237643935
|
||||||
|
34316266326561663834353762343766363933313463313263393063343562613933393361653861
|
||||||
|
38363635323231666438366536626435373365323733663139666534636564623666356436346539
|
||||||
|
63326436386436356636633637373738343032353664323736653939346234643165313461643833
|
||||||
|
35666439613136396264313033336539313537613238393262306365656238396464373936616538
|
||||||
|
64316365616464386638313331653030346330393665353539393834346135643434363736323135
|
||||||
|
37663433326439356663633162616435313061353662373766633731636439636266666466613363
|
||||||
|
39343930386534376330663230623832643933336235636166626534366664366562356165373764
|
||||||
|
63343432323864366162376263656565646661633536666336643030363039616666343063386165
|
||||||
|
37343238303034313832393538313632396261316232376635633732656663396631323261363433
|
||||||
|
38373738363833323934353739643538376237316535623035383965613965636337646537326537
|
||||||
|
64663837643632666334393634323264613139353332306263613165383733386662366333316139
|
||||||
|
63373839346265366166333331353231663763306163323063613138323835313831303666306561
|
||||||
|
39316666343761303464333535336361333462623363633333383363303134336139356436666165
|
||||||
|
62616364373030613837353939363636653537373965613531636130383266643637333233316137
|
||||||
|
39353866366239643265366162663031346439663234363935353138323739393337313835313062
|
||||||
|
33373263326565383735366364316461323930336437623834356132346633636364313732383661
|
||||||
|
66346634613762613037386238656334616430633037343066623463313035646339313638653137
|
||||||
|
65643166316664626236633332326136303235623934306462643636373437373630346435633835
|
||||||
|
66346364393236393563623032306631396561623263653236393939313333373635303365316638
|
||||||
|
66373037333565323733656331636337336665363038353635383531386366633632363031623430
|
||||||
|
31356461663438653736316464363231303938653932613561633139316361633461626361383132
|
||||||
|
396436303634613135383839396566393135
|
||||||
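The transcriber config above is committed encrypted with Ansible Vault. It can be inspected or modified with the standard ansible-vault commands; whether a prompt or a vault password file is used depends on the local setup:

# View or edit the encrypted transcriber config in place.
ansible-vault view files/transcriber/config.secrets.toml --ask-vault-pass
ansible-vault edit files/transcriber/config.secrets.toml --ask-vault-pass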
24  files/transcriber/docker-compose.template.yml  Normal file
@@ -0,0 +1,24 @@
services:

  transcriber_app:
    # noinspection ComposeUnknownValues
    image: "{{ registry_transcriber_image }}"
    container_name: transcriber_app
    user: "{{ user_create_result.uid }}:{{ user_create_result.group }}"
    restart: unless-stopped
    volumes:
      - "{{ config_file }}:/config/config.toml:ro"
      - "{{ data_dir }}:/data"
    networks:
      - "web_proxy_network"
      - "monitoring_network"
    environment:
      - "USER_UID={{ user_create_result.uid }}"
      - "USER_GID={{ user_create_result.group }}"
    command: ./transcriber --config=/config/config.toml

networks:
  web_proxy_network:
    external: true
  monitoring_network:
    external: true
10  files/wakapi/backup.sh.j2  Normal file
@@ -0,0 +1,10 @@
#!/usr/bin/env bash

set -eu
set -o pipefail

echo "{{ app_name }}: backup data with gobackups"

(cd "{{ base_dir }}" && gobackup perform --config "{{ gobackup_config }}")

echo "{{ app_name }}: done."
36  files/wakapi/docker-compose.yml.j2  Normal file
@@ -0,0 +1,36 @@
# See versions: https://github.com/muety/wakapi/pkgs/container/wakapi

services:

  wakapi_app:
    image: ghcr.io/muety/wakapi:2.16.1
    container_name: wakapi_app
    restart: unless-stopped
    user: '{{ user_create_result.uid }}:{{ user_create_result.group }}'
    networks:
      - "web_proxy_network"
    volumes:
      - "{{ data_dir }}:/data"
    environment:
      WAKAPI_PUBLIC_URL: "https://wakapi.vakhrushev.me"
      WAKAPI_PASSWORD_SALT: "{{ wakapi_password_salt }}"
      WAKAPI_ALLOW_SIGNUP: "false"
      WAKAPI_DISABLE_FRONTPAGE: "true"
      WAKAPI_COOKIE_MAX_AGE: 31536000
      # OIDC
      # WAKAPI_OIDC_PROVIDER_NAME: "authelia"
      # WAKAPI_OIDC_PROVIDER_CLIENT_ID: "{{ wakapi_oidc_client_id }}"
      # WAKAPI_OIDC_PROVIDER_CLIENT_SECRET: "{{ wakapi_oidc_client_secret }}"
      # WAKAPI_OIDC_PROVIDER_ENDPOINT: "https://auth.vakhrushev.me/.well-known/openid-configuration"
      # Mail
      WAKAPI_MAIL_SENDER: "Wakapi <wakapi@vakhrushev.me>"
      WAKAPI_MAIL_PROVIDER: "smtp"
      WAKAPI_MAIL_SMTP_HOST: "{{ postbox_host }}"
      WAKAPI_MAIL_SMTP_PORT: "{{ postbox_port }}"
      WAKAPI_MAIL_SMTP_USER: "{{ postbox_user }}"
      WAKAPI_MAIL_SMTP_PASS: "{{ postbox_pass }}"
      WAKAPI_MAIL_SMTP_TLS: "false"

networks:
  web_proxy_network:
    external: true
files/wakapi/gobackup.yml.j2
Normal file
16
files/wakapi/gobackup.yml.j2
Normal file
@@ -0,0 +1,16 @@
|
|||||||
|
# https://gobackup.github.io/configuration
|
||||||
|
|
||||||
|
models:
|
||||||
|
|
||||||
|
gramps:
|
||||||
|
compress_with:
|
||||||
|
type: 'tgz'
|
||||||
|
storages:
|
||||||
|
local:
|
||||||
|
type: 'local'
|
||||||
|
path: '{{ backups_dir }}'
|
||||||
|
keep: 3
|
||||||
|
databases:
|
||||||
|
wakapi:
|
||||||
|
type: sqlite
|
||||||
|
path: "{{ (data_dir, 'wakapi.db') | path_join }}"
|
||||||
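With this model, gobackup writes compressed archives into backups_dir and prunes everything beyond the last three. A rough restore sketch; the archive extension and internal layout depend on the gobackup version and model name, so list the contents first:

# Sketch: inspect the newest wakapi archive and unpack it for a restore.
latest="$(ls -1t '{{ backups_dir }}'/*.tar.gz | head -n 1)"
tar -tzf "$latest"                         # confirm the layout before extracting
mkdir -p /tmp/wakapi-restore
tar -xzf "$latest" -C /tmp/wakapi-restore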
10  files/wanderer/backup.template.sh  Normal file
@@ -0,0 +1,10 @@
#!/usr/bin/env bash

set -eu
set -o pipefail

echo "{{ app_name }}: backup data with gobackups"

(cd "{{ base_dir }}" && gobackup perform --config "{{ gobackup_config }}")

echo "{{ app_name }}: done."
109  files/wanderer/docker-compose.template.yml  Normal file
@@ -0,0 +1,109 @@
x-common-env: &cenv
  MEILI_URL: http://wanderer_search:7700
  MEILI_MASTER_KEY: "{{ wanderer_melli_master_key }}"

services:

  wanderer_search:
    container_name: wanderer_search
    image: getmeili/meilisearch:v1.20.0
    user: "{{ user_create_result.uid }}:{{ user_create_result.group }}"
    environment:
      <<: *cenv
      MEILI_NO_ANALYTICS: "true"
    ports:
      - "127.0.0.1:7700:7700"
    networks:
      - wanderer_network
    volumes:
      - ./data/ms_data:/meili_data
    restart: unless-stopped
    healthcheck:
      test: curl --fail http://localhost:7700/health || exit 1
      interval: 15s
      retries: 10
      start_period: 20s
      timeout: 10s

  wanderer_db:
    container_name: wanderer_db
    image: "flomp/wanderer-db:{{ wanderer_version }}"
    user: "{{ user_create_result.uid }}:{{ user_create_result.group }}"
    depends_on:
      wanderer_search:
        condition: service_healthy
    environment:
      <<: *cenv
      POCKETBASE_ENCRYPTION_KEY: "{{ wanderer_pocketbase_enc_key }}"
      ORIGIN: "{{ wanderer_origin }}"
    ports:
      - "127.0.0.1:8090:8090"
    networks:
      - wanderer_network
      - web_proxy_network
    restart: unless-stopped
    volumes:
      - ./data/pb_data:/pb_data
    healthcheck:
      test: wget --spider -q http://localhost:8090/health || exit 1
      interval: 15s
      retries: 10
      start_period: 20s
      timeout: 10s

  wanderer_web:
    container_name: wanderer_web
    image: "flomp/wanderer-web:{{ wanderer_version }}"
    user: "{{ user_create_result.uid }}:{{ user_create_result.group }}"
    depends_on:
      wanderer_search:
        condition: service_healthy
      wanderer_db:
        condition: service_healthy
    environment:
      <<: *cenv
      ORIGIN: "{{ wanderer_origin }}"
      BODY_SIZE_LIMIT: Infinity
      PUBLIC_POCKETBASE_URL: http://wanderer_db:8090
      PUBLIC_DISABLE_SIGNUP: "true"
      UPLOAD_FOLDER: /app/uploads
      UPLOAD_USER:
      UPLOAD_PASSWORD:
      PUBLIC_VALHALLA_URL: https://valhalla1.openstreetmap.de
      PUBLIC_NOMINATIM_URL: https://nominatim.openstreetmap.org
    volumes:
      - ./data/uploads:/app/uploads
      # - ./data/about.md:/app/build/client/md/about.md
    ports:
      - "127.0.0.1:3000:3000"
    networks:
      - wanderer_network
      - web_proxy_network
    restart: unless-stopped
    healthcheck:
      test: curl --fail http://localhost:3000/ || exit 1
      interval: 15s
      retries: 10
      start_period: 20s
      timeout: 10s

  # valhalla:
  #   image: ghcr.io/gis-ops/docker-valhalla/valhalla:latest
  #   ports:
  #     - "8002:8002"
  #   volumes:
  #     - ./data/valhalla:/custom_files
  #   environment:
  #     - tile_urls=https://download.geofabrik.de/europe/germany/bayern/oberbayern-latest.osm.pbf
  #     - use_tiles_ignore_pbf=True
  #     - force_rebuild=False
  #     - force_rebuild_elevation=False
  #     - build_elevation=True
  #     - build_admins=True
  #     - build_time_zones=True

networks:
  wanderer_network:
    driver: bridge
  web_proxy_network:
    external: true
32  files/wanderer/gobackup.template.yml  Normal file
@@ -0,0 +1,32 @@
# https://gobackup.github.io/configuration

models:

  application:
    compress_with:
      type: 'tgz'
    storages:
      local:
        type: 'local'
        path: '{{ backups_dir }}'
        keep: 3
    # databases:
    #   users:
    #     type: sqlite
    #     path: "{{ (data_dir, 'gramps_users/users.sqlite') | path_join }}"
    #   search_index:
    #     type: sqlite
    #     path: "{{ (data_dir, 'gramps_index/search_index.db') | path_join }}"
    #   sqlite:
    #     type: sqlite
    #     path: "{{ (data_dir, 'gramps_db/59a0f3d6-1c3d-4410-8c1d-1c9c6689659f/sqlite.db') | path_join }}"
    #   undo:
    #     type: sqlite
    #     path: "{{ (data_dir, 'gramps_db/59a0f3d6-1c3d-4410-8c1d-1c9c6689659f/undo.db') | path_join }}"
    archive:
      includes:
        - "{{ data_dir }}"
      # excludes:
      #   - "{{ (data_dir, 'gramps_cache') | path_join }}"
      #   - "{{ (data_dir, 'gramps_thumb_cache') | path_join }}"
      #   - "{{ (data_dir, 'gramps_tmp') | path_join }}"
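For wanderer only the archive section is active, so the whole data directory (PocketBase data, uploads, Meilisearch index) lands in the tgz; the commented database entries are leftovers from the gramps config this file was copied from. To exercise the rendered backup on the host, something like the following should work, assuming the app user follows the same app_user naming convention as the other playbooks:

# Sketch: run the rendered backup script as the wanderer user and check the output.
sudo -u wanderer bash "{{ base_dir }}/backup.sh"
ls -lh "{{ backups_dir }}"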
@@ -1,5 +1,6 @@
 #!/usr/bin/env sh
 
+# Must be executed for every user
 # See https://cloud.yandex.ru/docs/container-registry/tutorials/run-docker-on-vm#run
 
 set -eu
25  lefthook.yml  Normal file
@@ -0,0 +1,25 @@
# Refer to the following link for an explanation:
# https://lefthook.dev/configuration/

glob_matcher: doublestar

templates:
  av-hooks-dir: "/home/av/projects/private/git-hooks"

pre-commit:
  jobs:

    - name: "gitleaks"
      run: "gitleaks git --staged"

    - name: "check secret files"
      run: "python3 {av-hooks-dir}/pre-commit/check-secrets-encrypted-with-ansible-vault.py"

    - name: "format python"
      glob: "**/*.py"
      run: "black --quiet {staged_files}"
      stage_fixed: true

    - name: "mypy"
      glob: "**/*.py"
      run: "mypy {staged_files}"
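These jobs only fire once lefthook has installed itself into .git/hooks, and they can be exercised without making a commit:

# Install the hooks defined above and run the pre-commit jobs against staged files.
lefthook install
lefthook run pre-commit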
48  playbook-all-applications.yml  Normal file
@@ -0,0 +1,48 @@
---
- name: 'Configure netdata'
  ansible.builtin.import_playbook: playbook-netdata.yml

#

- name: 'Configure dozzle'
  ansible.builtin.import_playbook: playbook-dozzle.yml

- name: 'Configure gitea'
  ansible.builtin.import_playbook: playbook-gitea.yml

- name: 'Configure gramps'
  ansible.builtin.import_playbook: playbook-gramps.yml

- name: 'Configure memos'
  ansible.builtin.import_playbook: playbook-memos.yml

- name: 'Configure miniflux'
  ansible.builtin.import_playbook: playbook-miniflux.yml

- name: 'Configure outline'
  ansible.builtin.import_playbook: playbook-outline.yml

- name: 'Configure rssbridge'
  ansible.builtin.import_playbook: playbook-rssbridge.yml

- name: 'Configure wakapi'
  ansible.builtin.import_playbook: playbook-wakapi.yml

- name: 'Configure wanderer'
  ansible.builtin.import_playbook: playbook-wanderer.yml

#

- name: 'Configure homepage'
  ansible.builtin.import_playbook: playbook-homepage.yml

- name: 'Configure transcriber'
  ansible.builtin.import_playbook: playbook-transcriber.yml

#

- name: 'Configure authelia'
  ansible.builtin.import_playbook: playbook-authelia.yml

- name: 'Configure caddy proxy'
  ansible.builtin.import_playbook: playbook-caddyproxy.yml
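The umbrella playbook is normally run against the whole inventory with the vault password supplied; the inventory file name below is an assumption. Re-deploying a single application afterwards can rely on the run-app tag defined in the per-app playbooks:

# Sketch: full deploy, then container-only redeploy of one app via its tag.
ansible-playbook -i hosts.yml playbook-all-applications.yml --ask-vault-pass
ansible-playbook -i hosts.yml playbook-outline.yml --ask-vault-pass --tags run-app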
12  playbook-all-setup.yml  Normal file
@@ -0,0 +1,12 @@
---
- name: 'Configure system'
  ansible.builtin.import_playbook: playbook-system.yml

- name: 'Configure docker'
  ansible.builtin.import_playbook: playbook-docker.yml

- name: 'Configure eget applications'
  ansible.builtin.import_playbook: playbook-eget.yml

- name: 'Configure backups'
  ansible.builtin.import_playbook: playbook-backups.yml
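Because the setup chain touches system-level state (docker, cron, sudoers), a check-mode pass with diff output is a cheap way to preview what would change; the inventory name is again an assumption:

# Sketch: dry-run the base setup before applying it for real.
ansible-playbook -i hosts.yml playbook-all-setup.yml --ask-vault-pass --check --diff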
96
playbook-authelia.yml
Normal file
96
playbook-authelia.yml
Normal file
@@ -0,0 +1,96 @@
|
|||||||
|
---
|
||||||
|
- name: "Configure authelia application"
|
||||||
|
hosts: all
|
||||||
|
|
||||||
|
vars_files:
|
||||||
|
- vars/secrets.yml
|
||||||
|
- files/authelia/secrets.yml
|
||||||
|
|
||||||
|
vars:
|
||||||
|
app_name: "authelia"
|
||||||
|
app_user: "{{ app_name }}"
|
||||||
|
app_owner_uid: 1011
|
||||||
|
app_owner_gid: 1012
|
||||||
|
base_dir: "{{ (application_dir, app_name) | path_join }}"
|
||||||
|
data_dir: "{{ (base_dir, 'data') | path_join }}"
|
||||||
|
config_dir: "{{ (base_dir, 'config') | path_join }}"
|
||||||
|
backups_dir: "{{ (base_dir, 'backups') | path_join }}"
|
||||||
|
gobackup_config: "{{ (base_dir, 'gobackup.yml') | path_join }}"
|
||||||
|
|
||||||
|
tasks:
|
||||||
|
- name: "Create user and environment"
|
||||||
|
ansible.builtin.import_role:
|
||||||
|
name: owner
|
||||||
|
vars:
|
||||||
|
owner_name: "{{ app_user }}"
|
||||||
|
owner_uid: "{{ app_owner_uid }}"
|
||||||
|
owner_gid: "{{ app_owner_gid }}"
|
||||||
|
owner_extra_groups: ["docker"]
|
||||||
|
|
||||||
|
- name: "Create internal application directories"
|
||||||
|
ansible.builtin.file:
|
||||||
|
path: "{{ item }}"
|
||||||
|
state: "directory"
|
||||||
|
owner: "{{ app_user }}"
|
||||||
|
group: "{{ app_user }}"
|
||||||
|
mode: "0700"
|
||||||
|
loop:
|
||||||
|
- "{{ base_dir }}"
|
||||||
|
- "{{ data_dir }}"
|
||||||
|
- "{{ config_dir }}"
|
||||||
|
- "{{ backups_dir }}"
|
||||||
|
|
||||||
|
- name: "Copy users file"
|
||||||
|
ansible.builtin.copy:
|
||||||
|
src: "files/{{ app_name }}/users.secrets.yml"
|
||||||
|
dest: "{{ (config_dir, 'users.yml') | path_join }}"
|
||||||
|
owner: "{{ app_user }}"
|
||||||
|
group: "{{ app_user }}"
|
||||||
|
mode: "0600"
|
||||||
|
|
||||||
|
- name: "Copy configuration file"
|
||||||
|
ansible.builtin.template:
|
||||||
|
src: "files/{{ app_name }}/configuration.template.yml"
|
||||||
|
dest: "{{ (config_dir, 'configuration.yml') | path_join }}"
|
||||||
|
owner: "{{ app_user }}"
|
||||||
|
group: "{{ app_user }}"
|
||||||
|
mode: "0600"
|
||||||
|
|
||||||
|
- name: "Copy gobackup config"
|
||||||
|
ansible.builtin.template:
|
||||||
|
src: "files/{{ app_name }}/gobackup.template.yml"
|
||||||
|
dest: "{{ gobackup_config }}"
|
||||||
|
owner: "{{ app_user }}"
|
||||||
|
group: "{{ app_user }}"
|
||||||
|
mode: "0640"
|
||||||
|
|
||||||
|
- name: "Copy backup script"
|
||||||
|
ansible.builtin.template:
|
||||||
|
src: "files/{{ app_name }}/backup.template.sh"
|
||||||
|
dest: "{{ (base_dir, 'backup.sh') | path_join }}"
|
||||||
|
owner: "{{ app_user }}"
|
||||||
|
group: "{{ app_user }}"
|
||||||
|
mode: "0750"
|
||||||
|
|
||||||
|
- name: "Copy docker compose file"
|
||||||
|
ansible.builtin.template:
|
||||||
|
src: "./files/{{ app_name }}/docker-compose.template.yml"
|
||||||
|
dest: "{{ base_dir }}/docker-compose.yml"
|
||||||
|
owner: "{{ app_user }}"
|
||||||
|
group: "{{ app_user }}"
|
||||||
|
mode: "0640"
|
||||||
|
|
||||||
|
- name: "Run application with docker compose"
|
||||||
|
community.docker.docker_compose_v2:
|
||||||
|
project_src: "{{ base_dir }}"
|
||||||
|
state: "present"
|
||||||
|
remove_orphans: true
|
||||||
|
tags:
|
||||||
|
- run-app
|
||||||
|
|
||||||
|
- name: "Restart application with docker compose"
|
||||||
|
community.docker.docker_compose_v2:
|
||||||
|
project_src: "{{ base_dir }}"
|
||||||
|
state: "restarted"
|
||||||
|
tags:
|
||||||
|
- run-app
|
||||||
79  playbook-backups.yml  Normal file
@@ -0,0 +1,79 @@
---
- name: "Configure restic and backup schedule"
  hosts: all

  vars_files:
    - vars/secrets.yml

  vars:
    backup_config_dir: "/etc/backup"
    backup_config_file: "{{ (backup_config_dir, 'config.toml') | path_join }}"

    restic_shell_script: "{{ (bin_prefix, 'restic-shell.sh') | path_join }}"
    backup_all_script: "{{ (bin_prefix, 'backup-all.py') | path_join }}"

  tasks:
    - name: "Create backup config directory"
      ansible.builtin.file:
        path: "{{ backup_config_dir }}"
        state: "directory"
        owner: root
        group: root
        mode: "0755"

    - name: "Create backup config file"
      ansible.builtin.template:
        src: "files/backups/config.template.toml"
        dest: "{{ backup_config_file }}"
        owner: root
        group: root
        mode: "0640"

    - name: "Allow user to run the backup script without a password"
      ansible.builtin.lineinfile:
        path: /etc/sudoers
        state: present
        line: "{{ primary_user }} ALL=(ALL) NOPASSWD: {{ backup_all_script }}"
        validate: /usr/sbin/visudo -cf %s # IMPORTANT: validate the syntax before saving
        create: no # the file must already exist

    - name: "Copy restic shell script"
      ansible.builtin.template:
        src: "files/backups/restic-shell.sh.j2"
        dest: "{{ restic_shell_script }}"
        owner: root
        group: root
        mode: "0700"

    - name: "Copy backup all script"
      ansible.builtin.copy:
        src: "files/backups/backup-all.py"
        dest: "{{ backup_all_script }}"
        owner: root
        group: root
        mode: "0700"

    - name: "Setup paths for backup cron file"
      ansible.builtin.cron:
        cron_file: "ansible_restic_backup"
        user: "root"
        env: true
        name: "PATH"
        job: "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin"

    - name: "Setup mail for backup cron file"
      ansible.builtin.cron:
        cron_file: "ansible_restic_backup"
        user: "root"
        env: true
        name: "MAILTO"
        job: ""

    - name: "Create a cron file for backups under /etc/cron.d"
      ansible.builtin.cron:
        name: "restic backup"
        minute: "0"
        hour: "1"
        job: "{{ backup_all_script }} 2>&1 | logger -t backup"
        cron_file: "ansible_restic_backup"
        user: "root"
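Once this playbook has run, backups are driven entirely by the root cron entry, which pipes its output to syslog under the backup tag. A manual run plus a quick look at the log confirms the wiring; /usr/local/bin as bin_prefix is an assumption:

# Sketch: trigger a backup by hand and read what cron would have logged.
sudo /usr/local/bin/backup-all.py
journalctl -t backup --since today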
81
playbook-caddyproxy.yml
Normal file
81
playbook-caddyproxy.yml
Normal file
@@ -0,0 +1,81 @@
|
|||||||
|
---
|
||||||
|
- name: "Configure caddy reverse proxy service"
|
||||||
|
hosts: all
|
||||||
|
|
||||||
|
vars_files:
|
||||||
|
- vars/secrets.yml
|
||||||
|
|
||||||
|
vars:
|
||||||
|
app_name: "caddyproxy"
|
||||||
|
app_user: "{{ app_name }}"
|
||||||
|
app_owner_uid: 1010
|
||||||
|
app_owner_gid: 1011
|
||||||
|
base_dir: "{{ (application_dir, app_name) | path_join }}"
|
||||||
|
|
||||||
|
data_dir: "{{ (base_dir, 'data') | path_join }}"
|
||||||
|
config_dir: "{{ (base_dir, 'config') | path_join }}"
|
||||||
|
caddy_file_dir: "{{ (base_dir, 'caddy_file') | path_join }}"
|
||||||
|
|
||||||
|
service_name: "{{ app_name }}"
|
||||||
|
|
||||||
|
tasks:
|
||||||
|
- name: "Create user and environment"
|
||||||
|
ansible.builtin.import_role:
|
||||||
|
name: owner
|
||||||
|
vars:
|
||||||
|
owner_name: "{{ app_user }}"
|
||||||
|
owner_uid: "{{ app_owner_uid }}"
|
||||||
|
owner_gid: "{{ app_owner_gid }}"
|
||||||
|
owner_extra_groups: ["docker"]
|
||||||
|
|
||||||
|
- name: "Create internal application directories"
|
||||||
|
ansible.builtin.file:
|
||||||
|
path: "{{ item }}"
|
||||||
|
state: "directory"
|
||||||
|
owner: "{{ app_user }}"
|
||||||
|
group: "{{ app_user }}"
|
||||||
|
mode: "0770"
|
||||||
|
loop:
|
||||||
|
- "{{ base_dir }}"
|
||||||
|
- "{{ data_dir }}"
|
||||||
|
- "{{ config_dir }}"
|
||||||
|
- "{{ caddy_file_dir }}"
|
||||||
|
|
||||||
|
- name: "Copy caddy file"
|
||||||
|
ansible.builtin.template:
|
||||||
|
src: "./files/{{ app_name }}/Caddyfile.j2"
|
||||||
|
dest: "{{ (caddy_file_dir, 'Caddyfile') | path_join }}"
|
||||||
|
owner: "{{ app_user }}"
|
||||||
|
group: "{{ app_user }}"
|
||||||
|
mode: "0640"
|
||||||
|
|
||||||
|
- name: "Copy docker compose file"
|
||||||
|
ansible.builtin.template:
|
||||||
|
src: "./files/{{ app_name }}/docker-compose.yml.j2"
|
||||||
|
dest: "{{ base_dir }}/docker-compose.yml"
|
||||||
|
owner: "{{ app_user }}"
|
||||||
|
group: "{{ app_user }}"
|
||||||
|
mode: "0640"
|
||||||
|
|
||||||
|
- name: "Run application with docker compose"
|
||||||
|
community.docker.docker_compose_v2:
|
||||||
|
project_src: "{{ base_dir }}"
|
||||||
|
state: "present"
|
||||||
|
remove_orphans: true
|
||||||
|
tags:
|
||||||
|
- run-app
|
||||||
|
|
||||||
|
# - name: "Reload caddy"
|
||||||
|
# community.docker.docker_compose_v2_exec:
|
||||||
|
# project_src: '{{ base_dir }}'
|
||||||
|
# service: "{{ service_name }}"
|
||||||
|
# command: caddy reload --config /etc/caddy/Caddyfile
|
||||||
|
# tags:
|
||||||
|
# - run-app
|
||||||
|
|
||||||
|
- name: "Restart application with docker compose"
|
||||||
|
community.docker.docker_compose_v2:
|
||||||
|
project_src: "{{ base_dir }}"
|
||||||
|
state: "restarted"
|
||||||
|
tags:
|
||||||
|
- run-app
|
||||||
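The actual Caddyfile is rendered from files/caddyproxy/Caddyfile.j2, referenced by the playbook above. Purely as an illustration, a site block proxying to a container on the shared web_proxy_network usually looks like the sketch below; the upstream name and port 3000 (Outline's default HTTP port) are assumptions, not values taken from the template:

# Sketch: what one entry in the templated Caddyfile might look like.
cat >> Caddyfile <<'EOF'
outline.vakhrushev.me {
    reverse_proxy outline_app:3000
}
EOF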
40
playbook-docker.yml
Normal file
40
playbook-docker.yml
Normal file
@@ -0,0 +1,40 @@
|
|||||||
|
---
|
||||||
|
- name: "Configure docker parameters"
|
||||||
|
hosts: all
|
||||||
|
|
||||||
|
vars_files:
|
||||||
|
- vars/secrets.yml
|
||||||
|
|
||||||
|
tasks:
|
||||||
|
# - name: "Install python docker lib from pip"
|
||||||
|
# ansible.builtin.pip:
|
||||||
|
# name: docker
|
||||||
|
|
||||||
|
- name: "Install docker"
|
||||||
|
ansible.builtin.import_role:
|
||||||
|
name: geerlingguy.docker
|
||||||
|
vars:
|
||||||
|
docker_edition: "ce"
|
||||||
|
docker_packages:
|
||||||
|
- "docker-{{ docker_edition }}"
|
||||||
|
- "docker-{{ docker_edition }}-cli"
|
||||||
|
- "docker-{{ docker_edition }}-rootless-extras"
|
||||||
|
docker_users:
|
||||||
|
- "{{ primary_user }}"
|
||||||
|
|
||||||
|
- name: Create a network for web proxy
|
||||||
|
community.docker.docker_network:
|
||||||
|
name: "web_proxy_network"
|
||||||
|
driver: "bridge"
|
||||||
|
|
||||||
|
- name: Create a network for monitoring
|
||||||
|
community.docker.docker_network:
|
||||||
|
name: "monitoring_network"
|
||||||
|
driver: "bridge"
|
||||||
|
|
||||||
|
- name: "Schedule docker image prune"
|
||||||
|
ansible.builtin.cron:
|
||||||
|
name: "docker image prune"
|
||||||
|
minute: "0"
|
||||||
|
hour: "3"
|
||||||
|
job: "/usr/bin/docker image prune -af"
|
||||||
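The two bridge networks created here are what the per-application compose files attach to as external networks; after the playbook has run they should both be listed:

# Both shared networks should exist once playbook-docker.yml has been applied.
docker network ls --filter name=web_proxy_network --filter name=monitoring_network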
playbook-dozzle.yml (Normal file, 49 lines)
@@ -0,0 +1,49 @@

---
- name: "Configure dozzle application"
  hosts: all

  vars_files:
    - vars/secrets.yml

  vars:
    app_name: "dozzle"
    app_user: "{{ app_name }}"
    app_owner_uid: 1016
    app_owner_gid: 1017
    base_dir: "{{ (application_dir, app_name) | path_join }}"

  tasks:
    - name: "Create user and environment"
      ansible.builtin.import_role:
        name: owner
      vars:
        owner_name: "{{ app_user }}"
        owner_uid: "{{ app_owner_uid }}"
        owner_gid: "{{ app_owner_gid }}"
        owner_extra_groups: ["docker"]

    - name: "Create internal application directories"
      ansible.builtin.file:
        path: "{{ item }}"
        state: "directory"
        owner: "{{ app_user }}"
        group: "{{ app_user }}"
        mode: "0770"
      loop:
        - "{{ base_dir }}"

    - name: "Copy docker compose file"
      ansible.builtin.template:
        src: "./files/{{ app_name }}/docker-compose.template.yml"
        dest: "{{ base_dir }}/docker-compose.yml"
        owner: "{{ app_user }}"
        group: "{{ app_user }}"
        mode: "0640"

    - name: "Run application with docker compose"
      community.docker.docker_compose_v2:
        project_src: "{{ base_dir }}"
        state: "present"
        remove_orphans: true
      tags:
        - run-app
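
The dozzle compose template is not included in this diff. A minimal sketch of what files/dozzle/docker-compose.template.yml could contain, assuming the upstream amir20/dozzle image and the proxy network from playbook-docker.yml (both assumptions here):

# Hypothetical files/dozzle/docker-compose.template.yml (illustration only).
services:
  dozzle:
    image: amir20/dozzle:latest                        # assumed upstream image
    restart: unless-stopped
    volumes:
      # Dozzle only needs read access to the Docker socket to stream container logs.
      - /var/run/docker.sock:/var/run/docker.sock:ro
    networks:
      - web_proxy_network

networks:
  web_proxy_network:
    external: true
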
playbook-eget.yml (Normal file, 62 lines)
@@ -0,0 +1,62 @@

---
- name: "Install eget"
  hosts: all

  vars_files:
    - vars/secrets.yml

  # See: https://github.com/zyedidia/eget/releases

  vars:
    eget_install_dir: "{{ bin_prefix }}"
    eget_bin_path: '{{ (eget_install_dir, "eget") | path_join }}'

  tasks:
    - name: "Install eget"
      ansible.builtin.import_role:
        name: eget
      vars:
        eget_version: "1.3.4"
        eget_install_path: "{{ eget_bin_path }}"

    - name: "Install rclone"
      ansible.builtin.command:
        cmd: >
          {{ eget_bin_path }} rclone/rclone --quiet --upgrade-only --to {{ eget_install_dir }} --asset zip
          --tag v1.72.0
      changed_when: false

    - name: "Install restic"
      ansible.builtin.command:
        cmd: >
          {{ eget_bin_path }} restic/restic --quiet --upgrade-only --to {{ eget_install_dir }}
          --tag v0.18.1
      changed_when: false

    - name: "Install btop"
      ansible.builtin.command:
        cmd: >
          {{ eget_bin_path }} aristocratos/btop --quiet --upgrade-only --to {{ eget_install_dir }}
          --tag v1.4.5
      changed_when: false

    - name: "Install gobackup"
      ansible.builtin.command:
        cmd: >
          {{ eget_bin_path }} gobackup/gobackup --quiet --upgrade-only --to {{ eget_install_dir }}
          --tag v2.17.0
      changed_when: false

    - name: "Install task"
      ansible.builtin.command:
        cmd: >
          {{ eget_bin_path }} go-task/task --quiet --upgrade-only --to {{ eget_install_dir }} --asset tar.gz
          --tag v3.45.5
      changed_when: false

    - name: "Install dust"
      ansible.builtin.command:
        cmd: >
          {{ eget_bin_path }} bootandy/dust --quiet --upgrade-only --to {{ eget_install_dir }} --asset gnu
          --tag v1.2.3
      changed_when: false
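
All of these tasks follow the same eget invocation pattern: a pinned --tag, --upgrade-only so reruns are cheap, and installation into {{ eget_install_dir }}. Adding another GitHub-released binary is therefore one more task of the same shape; a sketch with a purely hypothetical repository, tag, and asset filter:

# Hypothetical additional task following the same pattern (not part of this diff).
- name: "Install example-tool"
  ansible.builtin.command:
    cmd: >
      {{ eget_bin_path }} example-org/example-tool --quiet --upgrade-only --to {{ eget_install_dir }} --asset tar.gz
      --tag v0.1.0
  changed_when: false
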
playbook-gitea.yml (Normal file, 61 lines)
@@ -0,0 +1,61 @@

---
- name: "Configure gitea application"
  hosts: all

  vars_files:
    - vars/secrets.yml

  vars:
    app_name: "gitea"
    app_user: "{{ app_name }}"
    app_owner_uid: 1005
    app_owner_gid: 1006
    base_dir: "{{ (application_dir, app_name) | path_join }}"
    data_dir: "{{ (base_dir, 'data') | path_join }}"
    backups_dir: "{{ (base_dir, 'backups') | path_join }}"

  tasks:
    - name: "Create user and environment"
      ansible.builtin.import_role:
        name: owner
      vars:
        owner_name: "{{ app_user }}"
        owner_uid: "{{ app_owner_uid }}"
        owner_gid: "{{ app_owner_gid }}"
        owner_extra_groups: ["docker"]

    - name: "Create internal application directories"
      ansible.builtin.file:
        path: "{{ item }}"
        state: "directory"
        owner: "{{ app_user }}"
        group: "{{ app_user }}"
        mode: "0770"
      loop:
        - "{{ base_dir }}"
        - "{{ data_dir }}"
        - "{{ backups_dir }}"

    - name: "Copy backup script"
      ansible.builtin.template:
        src: "files/{{ app_name }}/backup.sh.j2"
        dest: "{{ base_dir }}/backup.sh"
        owner: "{{ app_user }}"
        group: "{{ app_user }}"
        mode: "0750"

    - name: "Copy docker compose file"
      ansible.builtin.template:
        src: "./files/{{ app_name }}/docker-compose.yml.j2"
        dest: "{{ base_dir }}/docker-compose.yml"
        owner: "{{ app_user }}"
        group: "{{ app_user }}"
        mode: "0640"

    - name: "Run application with docker compose"
      community.docker.docker_compose_v2:
        project_src: "{{ base_dir }}"
        state: "present"
        remove_orphans: true
      tags:
        - run-app
playbook-gramps.yml (Normal file, 95 lines)
@@ -0,0 +1,95 @@

---
- name: "Configure gramps application"
  hosts: all

  vars_files:
    - vars/secrets.yml

  vars:
    app_name: "gramps"
    app_user: "{{ app_name }}"
    app_owner_uid: 1009
    app_owner_gid: 1010
    base_dir: "{{ (application_dir, app_name) | path_join }}"
    data_dir: "{{ (base_dir, 'data') | path_join }}"
    media_dir: "{{ (base_dir, 'media') | path_join }}"
    cache_dir: "{{ (base_dir, 'cache') | path_join }}"
    backups_dir: "{{ (base_dir, 'backups') | path_join }}"
    gobackup_config: "{{ (base_dir, 'gobackup.yml') | path_join }}"

  tasks:
    - name: "Create user and environment"
      ansible.builtin.import_role:
        name: owner
      vars:
        owner_name: "{{ app_user }}"
        owner_uid: "{{ app_owner_uid }}"
        owner_gid: "{{ app_owner_gid }}"
        owner_extra_groups: ["docker"]

    - name: "Create application internal directories"
      ansible.builtin.file:
        path: "{{ item }}"
        state: "directory"
        owner: "{{ app_user }}"
        group: "{{ app_user }}"
        mode: "0750"
      loop:
        - "{{ base_dir }}"
        - "{{ data_dir }}"
        - "{{ media_dir }}"
        - "{{ cache_dir }}"
        - "{{ backups_dir }}"

    - name: "Copy gobackup config"
      ansible.builtin.template:
        src: "./files/{{ app_name }}/gobackup.template.yml"
        dest: "{{ gobackup_config }}"
        owner: "{{ app_user }}"
        group: "{{ app_user }}"
        mode: "0640"

    - name: "Copy backup script"
      ansible.builtin.template:
        src: "files/{{ app_name }}/backup.template.sh"
        dest: "{{ base_dir }}/backup.sh"
        owner: "{{ app_user }}"
        group: "{{ app_user }}"
        mode: "0750"

    - name: "Create backup targets file"
      ansible.builtin.lineinfile:
        path: "{{ base_dir }}/backup-targets"
        line: "{{ item }}"
        create: true
        owner: "{{ app_user }}"
        group: "{{ app_user }}"
        mode: "0750"
      loop:
        - "{{ data_dir }}"
        - "{{ media_dir }}"
        - "{{ backups_dir }}"

    - name: "Copy rename script"
      ansible.builtin.copy:
        src: "files/{{ app_name }}/gramps_rename.py"
        dest: "{{ base_dir }}/gramps_rename.py"
        owner: "{{ app_user }}"
        group: "{{ app_user }}"
        mode: "0750"

    - name: "Copy docker compose file"
      ansible.builtin.template:
        src: "./files/{{ app_name }}/docker-compose.template.yml"
        dest: "{{ base_dir }}/docker-compose.yml"
        owner: "{{ app_user }}"
        group: "{{ app_user }}"
        mode: "0640"

    - name: "Run application with docker compose"
      community.docker.docker_compose_v2:
        project_src: "{{ base_dir }}"
        state: "present"
        remove_orphans: true
      tags:
        - run-app
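
The gobackup.template.yml rendered into {{ gobackup_config }} is not shown in this diff. A rough sketch of what such a config could look like, assuming gobackup's models/archive/storages layout and reusing the directory variables from the playbook; the real template will differ:

# Hypothetical files/gramps/gobackup.template.yml (illustration only, layout assumed).
models:
  gramps:
    archive:
      includes:
        - "{{ data_dir }}"
        - "{{ media_dir }}"
    compress_with:
      type: tgz
    storages:
      local:
        type: local
        path: "{{ backups_dir }}"
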
playbook-homepage-registry.yml (Normal file, 20 lines)
@@ -0,0 +1,20 @@

---
- name: "Upload local homepage images to registry"
  hosts: all
  gather_facts: false

  vars_files:
    - vars/secrets.yml
    - vars/homepage.yml
    - vars/homepage.images.yml

  tasks:
    - name: "Push web service image to remote registry"
      community.docker.docker_image:
        state: present
        source: local
        name: "{{ homepage_nginx_image }}"
        repository: "{{ registry_homepage_nginx_image }}"
        push: true
      delegate_to: 127.0.0.1
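
This play expects homepage_nginx_image and registry_homepage_nginx_image to come from vars/homepage.images.yml, which is not part of this diff. A sketch of what that vars file might define, with purely hypothetical image names and tags:

# Hypothetical vars/homepage.images.yml (values are placeholders).
homepage_nginx_image: "homepage-nginx:local"                      # image built locally
registry_homepage_nginx_image: "cr.example.com/homepage/nginx:1"  # remote registry target
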
playbook-homepage.yml (Normal file, 48 lines)
@@ -0,0 +1,48 @@

---
- name: "Setup and deploy homepage service"
  hosts: all

  vars_files:
    - vars/secrets.yml
    - vars/homepage.yml
    - vars/homepage.images.yml

  tasks:
    - name: "Create user and environment"
      ansible.builtin.import_role:
        name: owner
      vars:
        owner_name: "{{ app_user }}"
        owner_uid: "{{ app_owner_uid }}"
        owner_gid: "{{ app_owner_gid }}"
        owner_extra_groups: ["docker"]

    - name: "Create application internal directories"
      ansible.builtin.file:
        path: "{{ item }}"
        state: "directory"
        owner: "{{ app_user }}"
        group: "{{ app_user }}"
        mode: "0750"
      loop:
        - "{{ base_dir }}"

    - name: "Login to yandex docker registry."
      ansible.builtin.script:
        cmd: "files/yandex-docker-registry-auth.sh"

    - name: "Copy docker compose file"
      ansible.builtin.template:
        src: "./files/{{ app_name }}/docker-compose.template.yml"
        dest: "{{ base_dir }}/docker-compose.yml"
        owner: "{{ app_user }}"
        group: "{{ app_user }}"
        mode: "0640"

    - name: "Run application with docker compose"
      community.docker.docker_compose_v2:
        project_src: "{{ base_dir }}"
        state: "present"
        remove_orphans: true
      tags:
        - run-app
playbook-memos.yml (Normal file, 82 lines)
@@ -0,0 +1,82 @@

---
- name: "Configure memos application"
  hosts: all

  vars_files:
    - vars/secrets.yml

  vars:
    app_name: "memos"
    app_user: "{{ app_name }}"
    app_owner_uid: 1019
    app_owner_gid: 1020
    base_dir: "{{ (application_dir, app_name) | path_join }}"
    data_dir: "{{ (base_dir, 'data') | path_join }}"
    backups_dir: "{{ (base_dir, 'backups') | path_join }}"
    gobackup_config: "{{ (base_dir, 'gobackup.yml') | path_join }}"

  tasks:
    - name: "Create user and environment"
      ansible.builtin.import_role:
        name: owner
      vars:
        owner_name: "{{ app_user }}"
        owner_uid: "{{ app_owner_uid }}"
        owner_gid: "{{ app_owner_gid }}"
        owner_extra_groups: ["docker"]

    - name: "Create application internal directories"
      ansible.builtin.file:
        path: "{{ item }}"
        state: "directory"
        owner: "{{ app_user }}"
        group: "{{ app_user }}"
        mode: "0750"
      loop:
        - "{{ base_dir }}"
        - "{{ data_dir }}"
        - "{{ backups_dir }}"

    - name: "Copy gobackup config"
      ansible.builtin.template:
        src: "./files/{{ app_name }}/gobackup.yml.j2"
        dest: "{{ gobackup_config }}"
        owner: "{{ app_user }}"
        group: "{{ app_user }}"
        mode: "0640"

    - name: "Copy backup script"
      ansible.builtin.template:
        src: "files/{{ app_name }}/backup.sh.j2"
        dest: "{{ base_dir }}/backup.sh"
        owner: "{{ app_user }}"
        group: "{{ app_user }}"
        mode: "0750"

    - name: "Create backup targets file"
      ansible.builtin.lineinfile:
        path: "{{ base_dir }}/backup-targets"
        line: "{{ item }}"
        create: true
        owner: "{{ app_user }}"
        group: "{{ app_user }}"
        mode: "0750"
      loop:
        - "{{ data_dir }}"
        - "{{ backups_dir }}"

    - name: "Copy docker compose file"
      ansible.builtin.template:
        src: "./files/{{ app_name }}/docker-compose.template.yml"
        dest: "{{ base_dir }}/docker-compose.yml"
        owner: "{{ app_user }}"
        group: "{{ app_user }}"
        mode: "0640"

    - name: "Run application with docker compose"
      community.docker.docker_compose_v2:
        project_src: "{{ base_dir }}"
        state: "present"
        remove_orphans: true
      tags:
        - run-app
playbook-miniflux.yml (Normal file, 81 lines)
@@ -0,0 +1,81 @@

---
- name: "Configure miniflux application"
  hosts: all

  vars_files:
    - vars/secrets.yml

  vars:
    app_name: "miniflux"
    app_user: "{{ app_name }}"
    app_owner_uid: 1013
    app_owner_gid: 1014
    base_dir: "{{ (application_dir, app_name) | path_join }}"
    data_dir: "{{ (base_dir, 'data') | path_join }}"
    secrets_dir: "{{ (base_dir, 'secrets') | path_join }}"
    postgres_data_dir: "{{ (base_dir, 'data', 'postgres') | path_join }}"
    postgres_backups_dir: "{{ (base_dir, 'backups', 'postgres') | path_join }}"

  tasks:
    - name: "Create user and environment"
      ansible.builtin.import_role:
        name: owner
      vars:
        owner_name: "{{ app_user }}"
        owner_uid: "{{ app_owner_uid }}"
        owner_gid: "{{ app_owner_gid }}"
        owner_extra_groups: ["docker"]

    - name: "Create internal directories"
      ansible.builtin.file:
        path: "{{ item }}"
        state: "directory"
        owner: "{{ app_user }}"
        group: "{{ app_user }}"
        mode: "0770"
      loop:
        - "{{ base_dir }}"
        - "{{ data_dir }}"
        - "{{ secrets_dir }}"
        - "{{ postgres_data_dir }}"
        - "{{ postgres_backups_dir }}"

    - name: "Copy secrets"
      ansible.builtin.import_role:
        name: secrets
      vars:
        secrets_dest: "{{ secrets_dir }}"
        secrets_user: "{{ app_user }}"
        secrets_group: "{{ app_user }}"
        secrets_vars:
          - "miniflux_database_url"
          - "miniflux_admin_user"
          - "miniflux_admin_password"
          - "miniflux_oidc_client_id"
          - "miniflux_oidc_client_secret"
          - "miniflux_postgres_password"

    - name: "Copy docker compose file"
      ansible.builtin.template:
        src: "./files/{{ app_name }}/docker-compose.template.yml"
        dest: "{{ base_dir }}/docker-compose.yml"
        owner: "{{ app_user }}"
        group: "{{ app_user }}"
        mode: "0640"

    - name: "Copy backup script"
      ansible.builtin.template:
        src: "./files/{{ app_name }}/backup.template.sh"
        dest: "{{ base_dir }}/backup.sh"
        owner: "{{ app_user }}"
        group: "{{ app_user }}"
        mode: "0750"

    - name: "Run application with docker compose"
      community.docker.docker_compose_v2:
        project_src: "{{ base_dir }}"
        state: "present"
        recreate: "always"
        remove_orphans: true
      tags:
        - run-app
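
The secrets role is local to the repository and not shown here. Assuming it writes one file per listed variable into {{ secrets_dir }} (an assumption), the compose template could surface those files to the containers as compose secrets, for example relying on Miniflux's documented *_FILE environment variable convention; everything below is a sketch, not the actual template:

# Hypothetical excerpt of files/miniflux/docker-compose.template.yml (illustration only).
services:
  miniflux:
    image: miniflux/miniflux:latest                        # assumed upstream image
    environment:
      # Miniflux can read a setting from a file when the variable name carries the _FILE suffix.
      DATABASE_URL_FILE: /run/secrets/miniflux_database_url
    secrets:
      - miniflux_database_url

secrets:
  miniflux_database_url:
    file: "{{ secrets_dir }}/miniflux_database_url"        # assumed output of the secrets role
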
playbook-netdata.yml (Normal file, 108 lines)
@@ -0,0 +1,108 @@

---
- name: "Install Netdata monitoring service"
  hosts: all

  vars_files:
    - vars/secrets.yml

  vars:
    app_name: "netdata"
    app_user: "{{ app_name }}"
    app_owner_uid: 1012
    app_owner_gid: 1013
    base_dir: "{{ (application_dir, app_name) | path_join }}"
    config_dir: "{{ (base_dir, 'config') | path_join }}"
    config_go_d_dir: "{{ (config_dir, 'go.d') | path_join }}"
    data_dir: "{{ (base_dir, 'data') | path_join }}"

  tasks:
    - name: "Create user and environment"
      ansible.builtin.import_role:
        name: owner
      vars:
        owner_name: "{{ app_user }}"
        owner_uid: "{{ app_owner_uid }}"
        owner_gid: "{{ app_owner_gid }}"
        owner_extra_groups: ["docker"]

    - name: "Create internal application directories"
      ansible.builtin.file:
        path: "{{ item }}"
        state: "directory"
        owner: "{{ app_user }}"
        group: "{{ app_user }}"
        mode: "0770"
      loop:
        - "{{ base_dir }}"
        - "{{ data_dir }}"
        - "{{ config_dir }}"
        - "{{ config_go_d_dir }}"

    - name: "Copy netdata config file"
      ansible.builtin.template:
        src: "files/{{ app_name }}/netdata.template.conf"
        dest: "{{ config_dir }}/netdata.conf"
        owner: "{{ app_user }}"
        group: "{{ app_user }}"
        mode: "0640"

    - name: "Find all go.d plugin config files"
      ansible.builtin.find:
        paths: "files/{{ app_name }}/go.d"
        file_type: file
      delegate_to: localhost
      register: go_d_source_files

    - name: "Template all go.d plugin config files"
      ansible.builtin.template:
        src: "{{ item.path }}"
        dest: "{{ config_go_d_dir }}/{{ item.path | basename }}"
        owner: "{{ app_user }}"
        group: "{{ app_user }}"
        mode: "0640"
      loop: "{{ go_d_source_files.files }}"

    - name: "Find existing go.d config files on server"
      ansible.builtin.find:
        paths: "{{ config_go_d_dir }}"
        file_type: file
      register: go_d_existing_files

    - name: "Remove go.d config files that don't exist in source"
      ansible.builtin.file:
        path: "{{ item.path }}"
        state: absent
      loop: "{{ go_d_existing_files.files }}"
      when: (item.path | basename) not in (go_d_source_files.files | map(attribute='path') | map('basename') | list)

    - name: "Grab docker group id."
      ansible.builtin.shell:
        cmd: |
          set -o pipefail
          grep docker /etc/group | cut -d ':' -f 3
        executable: /bin/bash
      register: netdata_docker_group_output
      changed_when: netdata_docker_group_output.rc != 0

    - name: "Copy docker compose file"
      ansible.builtin.template:
        src: "./files/{{ app_name }}/docker-compose.template.yml"
        dest: "{{ base_dir }}/docker-compose.yml"
        owner: "{{ app_user }}"
        group: "{{ app_user }}"
        mode: "0640"

    - name: "Run application with docker compose"
      community.docker.docker_compose_v2:
        project_src: "{{ base_dir }}"
        state: "present"
        remove_orphans: true
      tags:
        - run-app

    - name: "Restart application with docker compose"
      community.docker.docker_compose_v2:
        project_src: "{{ base_dir }}"
        state: "restarted"
      tags:
        - run-app
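
The go.d files synchronized above live in files/netdata/go.d and are not part of this diff; each one configures a go.d.plugin collector as a YAML jobs list. A sketch of one such file, using a hypothetical prometheus-collector job with a placeholder endpoint:

# Hypothetical files/netdata/go.d/prometheus.conf (illustration only; endpoint is a placeholder).
jobs:
  - name: example_exporter
    url: http://127.0.0.1:9100/metrics
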
playbook-outline.yml (Normal file, 63 lines)
@@ -0,0 +1,63 @@

---
- name: "Configure outline application"
  hosts: all

  vars_files:
    - vars/secrets.yml

  vars:
    app_name: "outline"
    app_user: "{{ app_name }}"
    app_owner_uid: 1007
    app_owner_gid: 1008
    base_dir: "{{ (application_dir, app_name) | path_join }}"
    data_dir: "{{ (base_dir, 'data') | path_join }}"
    postgres_data_dir: "{{ (base_dir, 'data', 'postgres') | path_join }}"
    postgres_backups_dir: "{{ (base_dir, 'backups', 'postgres') | path_join }}"

  tasks:
    - name: "Create user and environment"
      ansible.builtin.import_role:
        name: owner
      vars:
        owner_name: "{{ app_user }}"
        owner_uid: "{{ app_owner_uid }}"
        owner_gid: "{{ app_owner_gid }}"
        owner_extra_groups: ["docker"]

    - name: "Create internal directories"
      ansible.builtin.file:
        path: "{{ item }}"
        state: "directory"
        owner: "{{ app_user }}"
        group: "{{ app_user }}"
        mode: "0770"
      loop:
        - "{{ base_dir }}"
        - "{{ data_dir }}"
        - "{{ postgres_data_dir }}"
        - "{{ postgres_backups_dir }}"

    - name: "Copy docker compose file"
      ansible.builtin.template:
        src: "./files/{{ app_name }}/docker-compose.template.yml"
        dest: "{{ base_dir }}/docker-compose.yml"
        owner: "{{ app_user }}"
        group: "{{ app_user }}"
        mode: "0640"

    - name: "Copy backup script"
      ansible.builtin.template:
        src: "./files/{{ app_name }}/backup.template.sh"
        dest: "{{ base_dir }}/backup.sh"
        owner: "{{ app_user }}"
        group: "{{ app_user }}"
        mode: "0750"

    - name: "Run application with docker compose"
      community.docker.docker_compose_v2:
        project_src: "{{ base_dir }}"
        state: "present"
        remove_orphans: true
      tags:
        - run-app
playbook-remove-user-and-app.yml (Normal file, 31 lines)
@@ -0,0 +1,31 @@

---
- name: "Remove application user and its data"
  hosts: all

  vars_files:
    - vars/secrets.yml

  vars:
    user_name: "<put-name-here>"

  tasks:
    - name: 'Remove user "{{ user_name }}"'
      ansible.builtin.user:
        name: "{{ user_name }}"
        state: absent
        remove: true

    - name: 'Remove group "{{ user_name }}"'
      ansible.builtin.group:
        name: "{{ user_name }}"
        state: absent

    - name: "Remove web dir"
      ansible.builtin.file:
        path: "/var/www/{{ user_name }}"
        state: absent

    - name: "Remove home dir"
      ansible.builtin.file:
        path: "/home/{{ user_name }}"
        state: absent
playbook-rssbridge.yml (Normal file, 49 lines)
@@ -0,0 +1,49 @@

---
- name: "Configure rssbridge application"
  hosts: all

  vars_files:
    - vars/secrets.yml

  vars:
    app_name: "rssbridge"
    app_user: "{{ app_name }}"
    app_owner_uid: 1014
    app_owner_gid: 1015
    base_dir: "{{ (application_dir, app_name) | path_join }}"

  tasks:
    - name: "Create user and environment"
      ansible.builtin.import_role:
        name: owner
      vars:
        owner_name: "{{ app_user }}"
        owner_uid: "{{ app_owner_uid }}"
        owner_gid: "{{ app_owner_gid }}"
        owner_extra_groups: ["docker"]

    - name: "Create internal application directories"
      ansible.builtin.file:
        path: "{{ item }}"
        state: "directory"
        owner: "{{ app_user }}"
        group: "{{ app_user }}"
        mode: "0770"
      loop:
        - "{{ base_dir }}"

    - name: "Copy docker compose file"
      ansible.builtin.template:
        src: "./files/{{ app_name }}/docker-compose.yml.j2"
        dest: "{{ base_dir }}/docker-compose.yml"
        owner: "{{ app_user }}"
        group: "{{ app_user }}"
        mode: "0640"

    - name: "Run application with docker compose"
      community.docker.docker_compose_v2:
        project_src: "{{ base_dir }}"
        state: "present"
        remove_orphans: true
      tags:
        - run-app
playbook-system.yml (Normal file, 58 lines)
@@ -0,0 +1,58 @@

---
- name: "Configure base system parameters"
  hosts: all

  vars_files:
    - vars/secrets.yml

  vars:
    apt_packages:
      - acl
      - curl
      - fuse
      - git
      - htop
      - jq
      - make
      - python3-pip
      - sqlite3
      - tree

  tasks:
    - name: "Install additional apt packages"
      ansible.builtin.apt:
        name: "{{ apt_packages }}"
        update_cache: true

    - name: "Configure security settings"
      ansible.builtin.import_role:
        name: geerlingguy.security
      vars:
        security_ssh_permit_root_login: "yes"
        security_autoupdate_enabled: "no"
        security_fail2ban_enabled: true

    - name: "Copy keep files script"
      ansible.builtin.copy:
        src: "files/keep-files.py"
        dest: "{{ bin_prefix }}/keep-files.py"
        owner: root
        group: root
        mode: "0755"

    - name: "Create directory for mount"
      ansible.builtin.file:
        path: "/mnt/applications"
        state: "directory"
        mode: "0755"
      tags:
        - mount-storage

    - name: "Mount external storages"
      ansible.posix.mount:
        path: "/mnt/applications"
        src: "UUID=3942bffd-8328-4536-8e88-07926fb17d17"
        fstype: ext4
        state: mounted
      tags:
        - mount-storage
playbook-transcriber-registry.yml (Normal file, 20 lines)
@@ -0,0 +1,20 @@

---
- name: "Upload local transcriber images to registry"
  hosts: all
  gather_facts: false

  vars_files:
    - vars/secrets.yml
    - vars/transcriber.yml
    - vars/transcriber.images.yml

  tasks:
    - name: "Push web service image to remote registry"
      community.docker.docker_image:
        state: present
        source: local
        name: "{{ transcriber_image }}"
        repository: "{{ registry_transcriber_image }}"
        push: true
      delegate_to: 127.0.0.1
playbook-transcriber.yml (Normal file, 59 lines)
@@ -0,0 +1,59 @@

---
- name: "Deploy transcriber application"
  hosts: all

  vars_files:
    - vars/secrets.yml
    - vars/transcriber.yml
    - vars/transcriber.images.yml

  tasks:
    - name: "Create user and environment"
      ansible.builtin.import_role:
        name: owner
      vars:
        owner_name: "{{ app_user }}"
        owner_uid: "{{ app_owner_uid }}"
        owner_gid: "{{ app_owner_gid }}"
        owner_extra_groups: ["docker"]

    - name: "Create application internal directories"
      ansible.builtin.file:
        path: "{{ item }}"
        state: "directory"
        owner: "{{ app_user }}"
        group: "{{ app_user }}"
        mode: "0750"
      loop:
        - "{{ base_dir }}"
        - "{{ config_dir }}"
        - "{{ data_dir }}"
        - "{{ backups_dir }}"

    - name: "Copy configuration file with secrets"
      ansible.builtin.copy:
        src: "files/{{ app_name }}/config.secrets.toml"
        dest: "{{ config_file }}"
        owner: "{{ app_user }}"
        group: "{{ app_user }}"
        mode: "0600"

    - name: "Login to yandex docker registry."
      ansible.builtin.script:
        cmd: "files/yandex-docker-registry-auth.sh"

    - name: "Copy docker compose file"
      ansible.builtin.template:
        src: "./files/{{ app_name }}/docker-compose.template.yml"
        dest: "{{ base_dir }}/docker-compose.yml"
        owner: "{{ app_user }}"
        group: "{{ app_user }}"
        mode: "0640"

    - name: "Run application with docker compose"
      community.docker.docker_compose_v2:
        project_src: "{{ base_dir }}"
        state: "present"
        remove_orphans: true
      tags:
        - run-app
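
Unlike the other playbooks, the transcriber play keeps its app_name, ownership, and directory variables in vars/transcriber.yml, which is not included in this diff. A sketch of the variables that file would need to define for the tasks above to resolve; only the keys are implied by the playbook, all values here are placeholders:

# Hypothetical vars/transcriber.yml (illustration only; values are placeholders).
app_name: "transcriber"
app_user: "{{ app_name }}"
app_owner_uid: 1021
app_owner_gid: 1022
base_dir: "{{ (application_dir, app_name) | path_join }}"
config_dir: "{{ (base_dir, 'config') | path_join }}"
data_dir: "{{ (base_dir, 'data') | path_join }}"
backups_dir: "{{ (base_dir, 'backups') | path_join }}"
config_file: "{{ (config_dir, 'config.secrets.toml') | path_join }}"
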
playbook-upgrade.yml (Normal file, 41 lines)
@@ -0,0 +1,41 @@

---
- name: "Update and upgrade system packages"
  hosts: all

  vars_files:
    - vars/secrets.yml

  tasks:
    - name: Perform an upgrade of packages
      ansible.builtin.apt:
        upgrade: "yes"
        update_cache: true

    - name: Check if a reboot is required
      ansible.builtin.stat:
        path: /var/run/reboot-required
        get_checksum: false
      register: reboot_required_file

    - name: Reboot the server (if required)
      ansible.builtin.reboot:
      when: reboot_required_file.stat.exists

    - name: Remove dependencies that are no longer required
      ansible.builtin.apt:
        autoremove: true

    - name: Check if Docker is available
      ansible.builtin.stat:
        path: /usr/bin/docker
      register: docker_exists

    - name: Clean up unnecessary Docker data
      ansible.builtin.command:
        cmd: docker system prune --all --force
      register: docker_prune_result
      when: docker_exists.stat.exists
      failed_when:
        - docker_prune_result.rc is defined
        - docker_prune_result.rc != 0
      changed_when: "'Total reclaimed space' in docker_prune_result.stdout"
playbook-wakapi.yml (Normal file, 70 lines)
@@ -0,0 +1,70 @@

---
- name: "Configure wakapi application"
  hosts: all

  vars_files:
    - vars/secrets.yml

  vars:
    app_name: "wakapi"
    app_user: "{{ app_name }}"
    app_owner_uid: 1015
    app_owner_gid: 1016
    base_dir: "{{ (application_dir, app_name) | path_join }}"
    data_dir: "{{ (base_dir, 'data') | path_join }}"
    backups_dir: "{{ (base_dir, 'backups') | path_join }}"
    gobackup_config: "{{ (base_dir, 'gobackup.yml') | path_join }}"

  tasks:
    - name: "Create user and environment"
      ansible.builtin.import_role:
        name: owner
      vars:
        owner_name: "{{ app_user }}"
        owner_uid: "{{ app_owner_uid }}"
        owner_gid: "{{ app_owner_gid }}"
        owner_extra_groups: ["docker"]

    - name: "Create application internal directories"
      ansible.builtin.file:
        path: "{{ item }}"
        state: "directory"
        owner: "{{ app_user }}"
        group: "{{ app_user }}"
        mode: "0750"
      loop:
        - "{{ base_dir }}"
        - "{{ data_dir }}"
        - "{{ backups_dir }}"

    - name: "Copy gobackup config"
      ansible.builtin.template:
        src: "./files/{{ app_name }}/gobackup.yml.j2"
        dest: "{{ gobackup_config }}"
        owner: "{{ app_user }}"
        group: "{{ app_user }}"
        mode: "0640"

    - name: "Copy backup script"
      ansible.builtin.template:
        src: "files/{{ app_name }}/backup.sh.j2"
        dest: "{{ base_dir }}/backup.sh"
        owner: "{{ app_user }}"
        group: "{{ app_user }}"
        mode: "0750"

    - name: "Copy docker compose file"
      ansible.builtin.template:
        src: "./files/{{ app_name }}/docker-compose.yml.j2"
        dest: "{{ base_dir }}/docker-compose.yml"
        owner: "{{ app_user }}"
        group: "{{ app_user }}"
        mode: "0640"

    - name: "Run application with docker compose"
      community.docker.docker_compose_v2:
        project_src: "{{ base_dir }}"
        state: "present"
        remove_orphans: true
      tags:
        - run-app
playbook-wanderer.yml (Normal file, 92 lines)
@@ -0,0 +1,92 @@

---
- name: "Configure wanderer application"
  hosts: all

  vars_files:
    - vars/secrets.yml

  vars:
    app_name: "wanderer"
    app_user: "{{ app_name }}"
    app_owner_uid: 1018
    app_owner_gid: 1019
    base_dir: "{{ (application_dir, app_name) | path_join }}"
    data_dir: "{{ (base_dir, 'data') | path_join }}"
    backups_dir: "{{ (base_dir, 'backups') | path_join }}"
    gobackup_config: "{{ (base_dir, 'gobackup.yml') | path_join }}"

    wanderer_version: "v0.18.3"
    wanderer_origin: "https://wanderer.vakhrushev.me"

  tasks:
    - name: "Create user and environment"
      ansible.builtin.import_role:
        name: owner
      vars:
        owner_name: "{{ app_user }}"
        owner_uid: "{{ app_owner_uid }}"
        owner_gid: "{{ app_owner_gid }}"
        owner_extra_groups: ["docker"]

    - name: "Create application internal directories"
      ansible.builtin.file:
        path: "{{ item }}"
        state: "directory"
        owner: "{{ app_user }}"
        group: "{{ app_user }}"
        mode: "0750"
      loop:
        - "{{ base_dir }}"
        - "{{ data_dir }}"
        - "{{ (data_dir, 'pb_data') | path_join }}"
        - "{{ (data_dir, 'uploads') | path_join }}"
        - "{{ (data_dir, 'ms_data') | path_join }}"
        - "{{ backups_dir }}"

    - name: "Copy gobackup config"
      ansible.builtin.template:
        src: "./files/{{ app_name }}/gobackup.template.yml"
        dest: "{{ gobackup_config }}"
        owner: "{{ app_user }}"
        group: "{{ app_user }}"
        mode: "0640"

    # - name: "Copy backup script"
    #   ansible.builtin.template:
    #     src: "files/{{ app_name }}/backup.template.sh"
    #     dest: "{{ base_dir }}/backup.sh"
    #     owner: "{{ app_user }}"
    #     group: "{{ app_user }}"
    #     mode: "0750"

    - name: "Disable backup script"
      ansible.builtin.file:
        dest: "{{ base_dir }}/backup.sh"
        state: absent

    - name: "Create backup targets file"
      ansible.builtin.lineinfile:
        path: "{{ base_dir }}/backup-targets"
        line: "{{ item }}"
        create: true
        owner: "{{ app_user }}"
        group: "{{ app_user }}"
        mode: "0750"
      loop:
        - "{{ data_dir }}"

    - name: "Copy docker compose file"
      ansible.builtin.template:
        src: "./files/{{ app_name }}/docker-compose.template.yml"
        dest: "{{ base_dir }}/docker-compose.yml"
        owner: "{{ app_user }}"
        group: "{{ app_user }}"
        mode: "0640"

    - name: "Run application with docker compose"
      community.docker.docker_compose_v2:
        project_src: "{{ base_dir }}"
        state: "present"
        remove_orphans: true
      tags:
        - run-app
production.yml (Normal file, 7 lines)
@@ -0,0 +1,7 @@

---
ungrouped:
  hosts:
    server:
      ansible_host: "158.160.46.255"
      ansible_user: "major"
      ansible_become: true
requirements.yml (Normal file, 9 lines)
@@ -0,0 +1,9 @@

---
- src: yatesr.timezone
  version: 1.2.2

- src: geerlingguy.security
  version: 3.0.0

- src: geerlingguy.docker
  version: 7.4.7
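
requirements.yml only pins the external Galaxy roles; the owner, eget, and secrets roles imported throughout the playbooks are local to the repository and not shown in this diff. As a rough sketch of the contract the owner role appears to fulfil (a dedicated group and system user per application, with optional extra groups such as docker), its tasks might look like the following; this is an illustration of the apparent interface, not the actual role:

# Hypothetical roles/owner/tasks/main.yml (illustration only).
- name: "Create group for application owner"
  ansible.builtin.group:
    name: "{{ owner_name }}"
    gid: "{{ owner_gid }}"

- name: "Create application owner user"
  ansible.builtin.user:
    name: "{{ owner_name }}"
    uid: "{{ owner_uid }}"
    group: "{{ owner_name }}"
    groups: "{{ owner_extra_groups | default([]) }}"
    append: true
    shell: /usr/sbin/nologin
    create_home: true
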
Some files were not shown because too many files have changed in this diff.