Compare commits

...

2 commits

Author   SHA1        Message                     Date
Sangelo  04e9440630  [c] migrate to gitpot.org   2024-04-08 15:19:55 +02:00
Sangelo  7fbd76bb16  [a] Conduit Matrix Service  2024-02-15 09:34:37 +01:00
36 changed files with 791 additions and 536 deletions

View file

@@ -4,8 +4,8 @@ collections:
  - name: community.general
  - name: ansible.posix
  - name: https://gitpot.dev/lunivity/ansible-core.git
  - name: https://gitpot.org/lunivity/ansible-core.git
    type: git
  - name: https://gitpot.dev/lunivity/ansible-common.git
    type: git
  - name: https://gitpot.org/lunivity/ansible-common.git
    type: git
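Since these collections are pulled straight from git, pinning a ref keeps runs reproducible; a minimal sketch of one entry in the standard ansible-galaxy requirements format (the version value here is illustrative, not taken from the repo):

collections:
  - name: https://gitpot.org/lunivity/ansible-core.git
    type: git
    version: main

Installed as usual with ansible-galaxy collection install -r requirements.yml.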

View file

@@ -1,20 +0,0 @@
proxmox_id: 1005
common_firewall_enable: false
core_groups:
  - name: "headscale"
    state: present
core_users:
  - name: "headscale"
    password: "{{ sec_headscale_pass }}"
    groups: ['docker', 'headscale']
    state: present
    authorized_keys:
      - "sangelo"
      - "sangelo-access"
# Headscale
headscale_server_config_server_url: https://vpn.lunivity.com
headscale_server_config_ip_prefixes: '{{ sec_headscale_server_config_ip_prefixes }}'
headscale_server_config_disable_check_updates: true

View file

@@ -1,18 +0,0 @@
proxmox_id: 1015
common_firewall_enable: false
core_groups:
  - name: "headscale"
    state: present
core_users:
  - name: "headscale"
    password: "{{ sec_headscale_pass }}"
    groups: ['headscale']
    state: present
    authorized_keys:
      - "sangelo"
      - "sangelo-access"
# Install Tailscale CLI?
tailscale_install_cli: false

View file

@@ -0,0 +1,26 @@
proxmox_id: 2010
common_firewall_enable: false
core_groups:
  - name: "hedgedoc"
    state: present
  - name: "mgmt"
    state: present
core_users:
  - name: "sangelo"
    password: "{{ sec_hedgedoc_mgmt_pass }}"
    groups: ["sudo", "mgmt"]
    state: present
    authorized_keys:
      - "sangelo"
      - "sangelo-access"
  - name: "hedgedoc"
    password: "{{ sec_hedgedoc_pass }}"
    groups: ["docker", "hedgedoc"]
    state: present
    # authorized_keys:
    #   - "sangelo"
    #   - "sangelo-access"

View file

@@ -0,0 +1,66 @@
proxmox_id: 2007
common_firewall_enable: false
core_groups:
  - name: "mgmt"
    state: present
  - name: "mumble"
    state: present
core_users:
  - name: "sangelo"
    password: "{{ sec_mumble_mgmt_pass }}"
    groups: ["sudo", "mgmt"]
    state: present
    authorized_keys:
      - "sangelo"
      - "sangelo-access"
  - name: "mumble"
    password: "{{ sec_mumble_pass }}"
    groups: ["docker", "mumble"]
    state: present
    authorized_keys:
      - "sangelo"
      - "sangelo-access"
# mumble config
mumble_base_dir: "/var/mumble"
mumble_config_dir: "config"
mumble_user: mumble
mumble_group: mumble
# # run compose task?
# core_docker_compose: true
# # compose user and group
# core_docker_compose_user: '{{ mumble_user }}'
# core_docker_compose_group: '{{ mumble_group }}'
# # the docker compose service's name (cosmetic)
# core_docker_compose_name: "Mumble"
# # create directories
# core_docker_compose_mkdir: true
# # directories to create
# core_docker_compose_dirs:
#   - "{{ mumble_base_dir }}"
#   - "{{ mumble_base_dir }}/{{ mumble_config_dir }}"
# # chmod directories with this value
# core_docker_compose_dirs_mode: '0750'
# # copy templates
# core_docker_compose_cp_templates: false
# # chmod templates with this value
# core_docker_compose_templates_mode: '0650'
# # chmod the main docker compose file with this value
# core_docker_compose_mode: '0650'
# # docker-compose.yml template source
# core_docker_compose_file:
#   src: "{{ playbook_dir }}/templates/mumble/docker-compose.yml.j2"
#   dest: "{{ mumble_base_dir }}/docker-compose.yml"
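If the shared compose role were enabled for this host, the commented block would be uncommented roughly as below (values copied from the comments above; whether the role accepts exactly this variable set is an assumption, not verified here):

core_docker_compose: true
core_docker_compose_user: '{{ mumble_user }}'
core_docker_compose_group: '{{ mumble_group }}'
core_docker_compose_name: "Mumble"
core_docker_compose_mkdir: true
core_docker_compose_dirs:
  - "{{ mumble_base_dir }}"
  - "{{ mumble_base_dir }}/{{ mumble_config_dir }}"
core_docker_compose_file:
  src: "{{ playbook_dir }}/templates/mumble/docker-compose.yml.j2"
  dest: "{{ mumble_base_dir }}/docker-compose.yml"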

View file

@@ -9,7 +9,7 @@ core_groups:
core_users:
  - name: "sangelo"
    password: "!"
    password: "{{ sec_outline_mgmt_pass }}"
    groups: ["sudo", "mgmt"]
    state: present
    authorized_keys:

View file

@@ -0,0 +1,43 @@
proxmox_id: 2009
common_firewall_enable: false
core_groups:
  - name: "conduit"
    state: present
  - name: "mgmt"
    state: present
core_users:
  - name: "sangelo"
    password: "{{ sec_conduit_mgmt_pass }}"
    groups: ["sudo", "mgmt"]
    state: present
    authorized_keys:
      - "sangelo"
      - "sangelo-access"
  - name: "conduit"
    password: "{{ sec_conduit_pass }}"
    groups: ["docker", "conduit"]
    state: present
    # authorized_keys:
    #   - "sangelo"
    #   - "sangelo-access"
# conduit configuration overrides
conduit_server_base_dir: /srv/conduit
conduit_cinny_config_dir: cinny # cinny's config dir (inside the base dir), if cinny is enabled
conduit_data_dir: data # the directory (inside the base dir) in which the database will be stored
conduit_watchtower_enable: true # enable automatic container updates with watchtower
conduit_use_cinny: true # include cinny in the docker compose
conduit_cinny_default_homeserver: 0
conduit_cinny_homeserver_list: '["stardust.foo", "matrix.org"]'
conduit_cinny_allow_custom_homeservers: 'false'
conduit_server_name: stardust.foo # your server's name
conduit_allow_registration: 'false' # allow public registration to this homeserver
conduit_setup_bots: true # set up bots
conduit_include_mumble: true # set up mumble

View file

@@ -0,0 +1,24 @@
proxmox_id: 1005
common_firewall_enable: false
core_groups:
  - name: "wormhole"
    state: present
  - name: "mgmt"
core_users:
  - name: "sangelo"
    password: "{{ sec_wormhole_mgmt_pass }}"
    groups: ["sudo", "mgmt"]
    state: present
    authorized_keys:
      - "sangelo"
      - "sangelo-access"
  - name: "wormhole"
    password: "{{ sec_wormhole_pass }}"
    groups: ['docker', 'wormhole']
    state: present
    authorized_keys:
      - "sangelo"
      - "sangelo-access"

View file

@@ -1,13 +1,16 @@
# Headscale Nodes
[headscale]
# VPN Nodes
[wormhole]
10.1.0.5
[headscale_exit_nodes]
10.1.0.15
[mumble]
10.2.0.7
[outline]
10.2.0.8
[stardust]
10.2.0.9
[gitpot]
; 10.5.0.1
@@ -17,3 +20,7 @@
[gitpot_runners]
10.5.1.1
; 10.5.1.2
[hedgedoc]
10.2.0.10

View file

@@ -1,16 +0,0 @@
- name: Headscale VPN Server
  hosts: headscale_exit_nodes
  remote_user: root
  roles:
    - lunivity.common.all
    - lunivity.core.users
    - headscale
  tasks:
    - name: Install Tailscale CLI
      ansible.builtin.shell: |
        curl -fsSL https://tailscale.com/install.sh | sh
      args:
        executable: /bin/bash
      when: tailscale_install_cli
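As an aside, the curl | sh task removed above re-ran the installer on every play; if something like it is ever reinstated, a creates guard makes it idempotent. A sketch, assuming the installer drops the binary at /usr/bin/tailscale (an assumption, not verified):

- name: Install Tailscale CLI
  ansible.builtin.shell: |
    curl -fsSL https://tailscale.com/install.sh | sh
  args:
    executable: /bin/bash
    creates: /usr/bin/tailscale  # assumed install path; skips the task once the binary exists
  when: tailscale_install_cli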

View file

@@ -1,9 +1,8 @@
- name: Headscale VPN Server
  hosts: headscale
- name: Wireguard VPN Server
  hosts: wormhole
  remote_user: root
  roles:
    - lunivity.common.all
    - lunivity.core.docker
    - lunivity.core.users
    - headscale

View file

@@ -0,0 +1,9 @@
---
- name: Install Hedgedoc
  hosts: hedgedoc
  remote_user: root
  roles:
    - lunivity.common.all
    - lunivity.core.docker
    - lunivity.core.users

View file

@@ -0,0 +1,10 @@
---
- name: Install Mumble
  hosts: mumble
  remote_user: root
  roles:
    - lunivity.common.all
    - lunivity.core.users
    - lunivity.core.docker
    - mumble

View file

@@ -0,0 +1,10 @@
---
- name: Install Stardust Conduit
  hosts: stardust
  remote_user: root
  roles:
    - lunivity.common.all
    - lunivity.core.docker
    - lunivity.core.users
    - conduit-rs

View file

@@ -0,0 +1,36 @@
# server configuration #
conduit_server_base_dir: /srv/conduit
conduit_cinny_config_dir: cinny # cinny's config dir, if cinny is enabled
conduit_data_dir: data # the directory in which the database will be stored
conduit_user: conduit # the user to own the files we're creating
conduit_group: conduit # the group to own the files we're creating
# docker-compose configuration #
conduit_image_version: latest # the container tag to pull
conduit_cinny_image_version: latest # the container tag to pull
conduit_mumble_image_version: latest # the container tag to pull
conduit_watchtower_enable: true # enable automatic container updates with watchtower
conduit_use_cinny: false # include cinny in the docker compose
conduit_setup_bots: false # set up bots
# cinny configuration #
conduit_cinny_default_homeserver: 0 # the default server to choose from the list below. 0 is the first item, 1 the second, 2 the third, etc.
conduit_cinny_homeserver_list: '["your.server.name"]' # list of homeservers in the server picker
conduit_cinny_allow_custom_homeservers: 'false' # allow logging in with custom homeservers not listed above
conduit_cinny_port: 8080 # cinny's port to be accessed from the outside
conduit_cinny_internal_port: 80 # cinny's port inside the container
# conduit configuration #
conduit_server_name: your.server.name # your server's name
conduit_address: 0.0.0.0 # the address that conduit will bind to
conduit_internal_database_path: /var/lib/matrix-conduit/ # database path inside the container
conduit_database_backend: rocksdb # the database backend
conduit_port: 8448 # the port on which conduit will be available to the outside
conduit_internal_port: 6167 # the port conduit listens on inside the container
conduit_max_request_size: 20_000_000 # the maximum request size, in bytes (default: ~20 MB)
conduit_allow_registration: 'true' # allow public registration to this homeserver
conduit_allow_federation: 'true' # allow federation with other matrix instances
conduit_allow_check_for_updates: 'true' # allow conduit to check for updates
conduit_trusted_servers: '["matrix.org"]' # an array of trusted servers
conduit_max_concurrent_requests: 100 # the maximum number of concurrent requests; disabled by default in the conduit.env file (commented out)
conduit_log: warn,rocket=off,_=off,sled=off # logging, commented out in the conduit.env file

View file

@@ -0,0 +1,60 @@
---
# roles/conduit-rs/tasks/bots.yml
- name: Make sure Conduit's base directory exists
  ansible.builtin.file:
    path: '{{ conduit_server_base_dir }}'
    state: directory
    owner: '{{ conduit_user }}'
    group: '{{ conduit_group }}'
    mode: '0750'
- name: Make sure bot directory exists
  ansible.builtin.file:
    path: '{{ conduit_server_base_dir }}/bots'
    state: directory
    owner: '{{ conduit_user }}'
    group: '{{ conduit_group }}'
    mode: '0750'
- name: Make sure draupnir directory exists
  ansible.builtin.file:
    path: '{{ conduit_bots_draupnir_dirs }}'
    state: directory
    owner: '{{ conduit_user }}'
    group: '{{ conduit_group }}'
    mode: '0750'
  loop_control:
    loop_var: conduit_bots_draupnir_dirs
  loop:
    - "{{ conduit_server_base_dir }}/bots/draupnir"
    - "{{ conduit_server_base_dir }}/bots/draupnir/config"
- name: Make sure pantalaimon directory exists
  ansible.builtin.file:
    path: '{{ conduit_server_base_dir }}/bots/pantalaimon'
    state: directory
    owner: '{{ conduit_user }}'
    group: '{{ conduit_group }}'
    mode: '0750'
- name: Download pantalaimon repository
  ansible.builtin.git:
    repo: 'https://github.com/matrix-org/pantalaimon.git'
    dest: '{{ conduit_server_base_dir }}/bots/pantalaimon-git'
    version: 'master'
    force: yes
- name: Create bot configs
  ansible.builtin.template:
    src: '{{ conduit_configs.src }}'
    dest: '{{ conduit_server_base_dir }}/{{ conduit_configs.dest }}'
    owner: '{{ conduit_user }}'
    group: '{{ conduit_group }}'
    mode: '0640'
  loop_control:
    loop_var: conduit_configs
  loop:
    - { src: bots/bots-docker-compose.yml.j2, dest: bots/docker-compose.yml }
    - { src: bots/pantalaimon.conf.j2, dest: bots/pantalaimon/pantalaimon.conf }
    - { src: bots/draupnir.yaml.j2, dest: bots/draupnir/config/production.yaml }
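A structural note: the bots, draupnir, and pantalaimon directory tasks above could also collapse into one loop; a sketch that should behave the same under the same variables:

- name: Make sure bot directories exist
  ansible.builtin.file:
    path: '{{ item }}'
    state: directory
    owner: '{{ conduit_user }}'
    group: '{{ conduit_group }}'
    mode: '0750'
  loop:
    - '{{ conduit_server_base_dir }}/bots'
    - '{{ conduit_server_base_dir }}/bots/draupnir'
    - '{{ conduit_server_base_dir }}/bots/draupnir/config'
    - '{{ conduit_server_base_dir }}/bots/pantalaimon'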

View file

@@ -0,0 +1,55 @@
---
# roles/conduit-rs/tasks/main.yml
- name: Make sure Conduit's base directory exists
  ansible.builtin.file:
    path: '{{ conduit_server_base_dir }}'
    state: directory
    owner: '{{ conduit_user }}'
    group: '{{ conduit_group }}'
    mode: '0750'
# - name: Make sure Conduit's data directory exists
#   ansible.builtin.file:
#     path: '{{ conduit_server_base_dir }}/{{ conduit_data_dir }}/'
#     state: directory
#     owner: 'root'
#     group: 'docker'
#     mode: '0770'
- name: Make sure Cinny's configuration directories exist
  ansible.builtin.file:
    path: '{{ conduit_server_base_dir }}/{{ conduit_cinny_config_dir }}/'
    state: directory
    owner: '{{ conduit_user }}'
    group: '{{ conduit_group }}'
    mode: '0750'
  when: conduit_use_cinny
- name: Create Cinny configuration file
  ansible.builtin.template:
    src: cinny-config.json.j2
    dest: '{{ conduit_server_base_dir }}/{{ conduit_cinny_config_dir }}/config.json'
    owner: '{{ conduit_user }}'
    group: '{{ conduit_group }}'
    mode: '0644'
  when: conduit_use_cinny
- name: Create Conduit configuration files
  ansible.builtin.template:
    src: '{{ conduit_configs.src }}'
    dest: '{{ conduit_server_base_dir }}/{{ conduit_configs.dest }}'
    owner: '{{ conduit_user }}'
    group: '{{ conduit_group }}'
    mode: '0640'
  loop_control:
    loop_var: conduit_configs
  loop:
    - { src: docker-compose.yml.j2, dest: docker-compose.yml }
    - { src: conduit.env.j2, dest: conduit.env }
    - { src: coturn.conf.j2, dest: coturn.conf }
- name: Set up bots
  ansible.builtin.include_tasks:
    file: bots.yml
  when: conduit_setup_bots

View file

@@ -0,0 +1,18 @@
version: "3.3"
services:
  pantalaimon:
    build: ./pantalaimon-git
    container_name: pantalaimon
    restart: unless-stopped
    volumes:
      - ./pantalaimon:/data
    ports:
      - 8008:8008
  draupnir:
    image: gnuxie/draupnir:latest
    restart: unless-stopped
    volumes:
      - ./draupnir:/data
    depends_on:
      - pantalaimon

View file

@@ -0,0 +1,295 @@
# Endpoint URL that Draupnir uses to interact with the matrix homeserver (client-server API),
# set this to the pantalaimon URL if you're using that.
homeserverUrl: "http://pantalaimon:8008"
{# homeserverUrl: "https://stardust.foo" #}
# Endpoint URL that Draupnir could use to fetch events related to reports (client-server API and /_synapse/),
# only set this to the public-internet homeserver client API URL, do NOT set this to the pantalaimon URL.
rawHomeserverUrl: "https://stardust.foo"
# Matrix Access Token to use, Draupnir will only use this if pantalaimon.use is false.
# This option can be loaded from a file by passing "--access-token-path <path>" at the command line,
# which would allow using secret management systems such as systemd's service credentials.
accessToken: "{{ sec_conduit_bots_draupnir_access_token }}"
# Options related to Pantalaimon (https://github.com/matrix-org/pantalaimon)
pantalaimon:
# Whether or not Draupnir will use pantalaimon to access the matrix homeserver,
# set to `true` if you're using pantalaimon.
#
# Be sure to point homeserverUrl to the pantalaimon instance.
#
# Draupnir will log in using the given username and password once,
# then store the resulting access token in a file under dataPath.
use: true
# The username to login with.
username: draupnir
# The password Draupnir will login with.
#
# After successfully logging in once, this will be ignored, so this value can be blanked after first startup.
# This option can be loaded from a file by passing "--pantalaimon-password-path <path>" at the command line,
# which would allow using secret management systems such as systemd's service credentials.
password: "{{ sec_conduit_bots_draupnir_pass }}"
# Experimental usage of the matrix-bot-sdk rust crypto.
# This can not be used with Pantalaimon.
# Make sure to set up the bot as if you are not using pantalaimon for this.
#
# Warning: At this time this is not considered production safe.
experimentalRustCrypto: false
# The path Draupnir will store its state/data in, leave default ("/data/storage") when using containers.
dataPath: "/data/storage"
# If true (the default), Draupnir will only accept invites from users present in managementRoom.
autojoinOnlyIfManager: true
# If `autojoinOnlyIfManager` is false, only the members in this space can invite
# the bot to new rooms.
acceptInvitesFromSpace: "!example:example.org"
# Whether Draupnir should report ignored invites to the management room (if autojoinOnlyIfManager is true).
recordIgnoredInvites: false
# The room ID (or room alias) of the management room, anyone in this room can issue commands to Draupnir.
#
# Draupnir has no more granular access controls other than this, be sure you trust everyone in this room - secure it!
#
# This should be a room alias or room ID - not a matrix.to URL.
#
# Note: By default, Draupnir is fairly verbose - expect a lot of messages in this room.
# (see verboseLogging to adjust this a bit.)
managementRoom: "#bot-management:stardust.foo"
# Deprecated and will be removed in a future version.
# Running with verboseLogging is unsupported.
# Whether Draupnir should log a lot more messages in the room,
# mainly involves "all-OK" messages, and debugging messages for when draupnir checks bans in a room.
verboseLogging: false
# The log level of terminal (or container) output,
# can be one of DEBUG, INFO, WARN and ERROR, in increasing order of importance and severity.
#
# This should be at INFO or DEBUG in order to get support for Draupnir problems.
logLevel: "INFO"
# Whether or not Draupnir should synchronize policy lists immediately after startup.
# Equivalent to running '!draupnir sync'.
syncOnStartup: true
# Whether or not Draupnir should check moderation permissions in all protected rooms on startup.
# Equivalent to running `!draupnir verify`.
verifyPermissionsOnStartup: true
# Whether or not Draupnir should actually apply bans and policy lists,
# turn on to trial some untrusted configuration or lists.
noop: false
# Whether or not Draupnir should apply `m.room.server_acl` events.
# DO NOT change this to `true` unless you are very confident that you know what you are doing.
disableServerACL: false
# Whether Draupnir should check member lists quicker (by using a different endpoint),
# keep in mind that enabling this will miss invited (but not joined) users.
#
# Turn on if your bot is in (very) large rooms, or in large amounts of rooms.
fasterMembershipChecks: false
# A case-insensitive list of ban reasons to have the bot also automatically redact the user's messages for.
#
# If the bot sees you ban a user with a reason that is an (exact case-insensitive) match to this list,
# it will also remove the user's messages automatically.
#
# Typically this is useful to avoid having to give two commands to the bot.
# Advanced: Use asterisks to have the reason match using "globs"
# (e.g. "spam*testing" would match "spam for testing" as well as "spamtesting").
#
# See here for more info: https://www.digitalocean.com/community/tools/glob
# Note: Keep in mind that glob is NOT regex!
automaticallyRedactForReasons:
- "spam"
- "advertising"
- "harassment"
- "*rule*"
# A list of rooms to protect. Draupnir will add this to the list it knows from its account data.
#
# It won't, however, add it to the account data.
# Manually add the room via '!draupnir rooms add' to have it stay protected regardless if this config value changes.
#
# Note: These must be matrix.to URLs
protectedRooms:
- "https://matrix.to/#/#yourroom:example.org"
# Whether or not to add all joined rooms to the "protected rooms" list
# (excluding the management room and watched policy list rooms, see below).
#
# Note that this effectively makes the protectedRooms and associated commands useless
# for regular rooms.
#
# Note: the management room is *excluded* from this condition.
# Explicitly add it as a protected room to protect it.
#
# Note: Ban list rooms the bot is watching but didn't create will not be protected.
# Explicitly add these rooms as a protected room list if you want them protected.
protectAllJoinedRooms: false
# Increase this delay to have Draupnir wait longer between two consecutive backgrounded
# operations. The total duration of operations will be longer, but the homeserver won't
# be affected as much. Conversely, decrease this delay to have Draupnir chain operations
# faster. The total duration of operations will generally be shorter, but the performance
# of the homeserver may be more impacted.
backgroundDelayMS: 500
# Server administration commands, these commands will only work if Draupnir is
# a global server administrator, and the bot's server is a Synapse instance.
admin:
# Whether or not Draupnir can temporarily take control of any eligible account from the local homeserver who's in the room
# (with enough permissions) to "make" a user an admin.
#
# This only works if a local user with enough admin permissions is present in the room.
enableMakeRoomAdminCommand: false
# Misc options for command handling and commands
commands:
# Whether or not the `!draupnir` prefix is necessary to submit commands.
#
# If `true`, will allow commands like `!ban`, `!help`, etc.
#
# Note: Draupnir can also be pinged by display name instead of having to use
# the !draupnir prefix. For example, "my_moderator_bot: ban @spammer:example.org"
# will address only my_moderator_bot.
allowNoPrefix: false
# Any additional bot prefixes that Draupnir will listen to. i.e. adding `mod` will allow `!mod help`.
additionalPrefixes:
- "draupnir"
- "mod"
# Whether or not commands with a wildcard (*) will require an additional `--force` argument
# in the command to be able to be submitted.
confirmWildcardBan: true
# The default reasons to be prompted with if the reason is missing from a ban command.
ban:
defaultReasons:
- "rule-breaking"
- "rule 1: respect and harassment"
- "rule 2: spam"
- "rule 3: advertising"
- "rule 4: age restricted content"
- "rule 5: terms of service"
- "rule 6: common sense"
- "spam"
- "brigading"
- "harassment"
- "disagreement"
# Configuration specific to certain toggle-able protections
protections:
# Configuration for the wordlist plugin, which can ban users based if they say certain
# blocked words shortly after joining.
wordlist:
# A list of case-insensitive keywords that the WordList protection will watch for from new users.
#
# WordList will ban users who use these words when first joining a room, so take caution when selecting them.
#
# For advanced usage, regex can also be used, see the following links for more information;
# - https://www.digitalocean.com/community/tutorials/an-introduction-to-regular-expressions
# - https://regexr.com/
# - https://regexone.com/
words:
- "LoReM"
- "IpSuM"
- "DoLoR"
- "aMeT"
# For how long (in minutes) the user is "new" to the WordList plugin.
#
# After this time, the user will no longer be banned for using a word in the above wordlist.
#
# Set to zero to disable the timeout and make users *always* appear "new".
# (users will always be banned if they say a bad word)
minutesBeforeTrusting: 20
# Options for advanced monitoring of the health of the bot.
health:
# healthz options. These options are best for use in container environments
# like Kubernetes to detect how healthy the service is. The bot will report
# that it is unhealthy until it is able to process user requests. Typically
# this means that it'll flag itself as unhealthy for a number of minutes
# before saying "Now monitoring rooms" and flagging itself healthy.
#
# Health is flagged through HTTP status codes, defined below.
healthz:
# Whether the healthz integration should be enabled (default false)
enabled: false
# The port to expose the webserver on. Defaults to 8080.
port: 8080
# The address to listen for requests on. Defaults to all addresses.
address: "0.0.0.0"
# The path to expose the monitoring endpoint at. Defaults to `/healthz`
endpoint: "/healthz"
# The HTTP status code which reports that the bot is healthy/ready to
# process requests. Typically this should not be changed. Defaults to
# 200.
healthyStatus: 200
# The HTTP status code which reports that the bot is not healthy/ready.
# Defaults to 418.
unhealthyStatus: 418
# Sentry options. Sentry is a tool used to receive/collate/triage runtime
# errors and performance issues. Skip this section if you do not wish to use
# Sentry.
sentry:
# The key used to upload Sentry data to the server.
# dsn: "https://XXXXXXXXX@example.com/YYY
# Frequency of performance monitoring.
# A number in [0.0, 1.0], where 0.0 means "don't bother with tracing"
# and 1.0 means "trace performance at every opportunity".
# tracesSampleRate: 0.5
# Options for exposing web APIs.
web:
# Whether to enable web APIs.
enabled: false
# The port to expose the webserver on. Defaults to 8080.
port: 8080
# The address to listen for requests on. Defaults to only the current
# computer.
address: localhost
# Alternative setting to open to the entire web. Be careful,
# as this will increase your security perimeter:
#
# address: "0.0.0.0"
# A web API designed to intercept Matrix API
# POST /_matrix/client/r0/rooms/{roomId}/report/{eventId}
# and display readable abuse reports in the moderation room.
#
# If you wish to take advantage of this feature, you will need
# to configure a reverse proxy, see e.g. test/nginx.conf
abuseReporting:
# Whether to enable this feature.
enabled: false
# Whether or not to actively poll synapse for abuse reports, to be used
# instead of intercepting client calls to synapse's abuse endpoint, when that
# isn't possible/practical.
pollReports: false
# Whether or not new reports, received either by webapi or polling,
# should be printed to our managementRoom.
displayReports: true

View file

@@ -0,0 +1,11 @@
[Default]
LogLevel = Debug
SSL = True
[local-matrix]
Homeserver = https://stardust.foo
ListenAddress = 0.0.0.0
ListenPort = 8008
SSL = False
UseKeyring = False
IgnoreVerification = True

View file

@@ -0,0 +1,5 @@
{
  "defaultHomeserver": {{ conduit_cinny_default_homeserver }},
  "homeserverList": {{ conduit_cinny_homeserver_list }},
  "allowCustomHomeservers": {{ conduit_cinny_allow_custom_homeservers }}
}
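Rendered with the stardust overrides shown earlier in this diff (default homeserver 0, the two-entry server list, custom homeservers off), this template would produce roughly:

{
  "defaultHomeserver": 0,
  "homeserverList": ["stardust.foo", "matrix.org"],
  "allowCustomHomeservers": false
}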

View file

@@ -0,0 +1,13 @@
CONDUIT_SERVER_NAME: '{{ conduit_server_name }}'
CONDUIT_DATABASE_PATH: '{{ conduit_internal_database_path }}' # /var/lib/matrix-conduit/
CONDUIT_DATABASE_BACKEND: '{{ conduit_database_backend }}' # rocksdb
CONDUIT_PORT: {{ conduit_internal_port }} # 6167
CONDUIT_MAX_REQUEST_SIZE: {{ conduit_max_request_size }} # 20_000_000 # in bytes, ~20 MB
CONDUIT_ALLOW_REGISTRATION: '{{ conduit_allow_registration }}' # 'true'
CONDUIT_ALLOW_FEDERATION: '{{ conduit_allow_federation }}' # 'true'
CONDUIT_ALLOW_CHECK_FOR_UPDATES: '{{ conduit_allow_check_for_updates }}' # 'true'
CONDUIT_TRUSTED_SERVERS: '{{ conduit_trusted_servers }}' # '["matrix.org"]'
#CONDUIT_MAX_CONCURRENT_REQUESTS: '{{ conduit_max_concurrent_requests }}' # 100
#CONDUIT_LOG: {{ conduit_log }} # warn,rocket=off,_=off,sled=off
CONDUIT_ADDRESS: {{ conduit_address }} # 0.0.0.0
CONDUIT_CONFIG: '' # Ignore this
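For the stardust host, combining the role defaults with the group_vars overrides from earlier in this diff, the rendered conduit.env would come out roughly as follows (the two commented-out variables omitted):

CONDUIT_SERVER_NAME: 'stardust.foo'
CONDUIT_DATABASE_PATH: '/var/lib/matrix-conduit/'
CONDUIT_DATABASE_BACKEND: 'rocksdb'
CONDUIT_PORT: 6167
CONDUIT_MAX_REQUEST_SIZE: 20_000_000
CONDUIT_ALLOW_REGISTRATION: 'false'
CONDUIT_ALLOW_FEDERATION: 'true'
CONDUIT_ALLOW_CHECK_FOR_UPDATES: 'true'
CONDUIT_TRUSTED_SERVERS: '["matrix.org"]'
CONDUIT_ADDRESS: 0.0.0.0
CONDUIT_CONFIG: ''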

View file

@@ -0,0 +1,3 @@
use-auth-secret
static-auth-secret={{ sec_conduit_coturn_auth_secret }}
realm=matrix.stardust.foo

View file

@@ -0,0 +1,43 @@
# Conduit
version: '3'
services:
{% if conduit_watchtower_enable == true %}
  watchtower:
    image: containrrr/watchtower
    container_name: watchtower
    restart: unless-stopped
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
{% endif %}
  homeserver:
    image: matrixconduit/matrix-conduit:{{ conduit_image_version }}
    restart: unless-stopped
    ports:
      - {{ conduit_port }}:{{ conduit_internal_port }}
    volumes:
      {# - {{ conduit_server_base_dir }}/{{ conduit_data_dir }}:{{ conduit_internal_database_path }} #}
      - db:{{ conduit_internal_database_path }}
    env_file:
      - conduit.env
  turn:
    container_name: coturn-server
    image: docker.io/coturn/coturn
    restart: unless-stopped
    network_mode: "host"
    volumes:
      - ./coturn.conf:/etc/coturn/turnserver.conf
{% if conduit_use_cinny == true %}
  cinny:
    image: 'ghcr.io/cinnyapp/cinny:{{ conduit_cinny_image_version }}'
    restart: unless-stopped
    ports:
      - 8080:80
    volumes:
      - {{ conduit_server_base_dir }}/{{ conduit_cinny_config_dir }}/config.json:/app/config.json
    depends_on:
      - homeserver
{% endif %}
volumes:
  db:

View file

@@ -1,5 +1,5 @@
gitpot_runner_version: 3.3.0
gitpot_runner_instance: https://gitpot.dev
gitpot_runner_instance: https://gitpot.org
gitpot_runner_dind_port: 2376
# Create Shared Secret with `openssl rand -hex 20`. A token from the Forgejo Web Interface cannot be used here.

View file

@@ -45,7 +45,8 @@ services:
done ;
forgejo-runner generate-config > config.yml ;
sed -i -e "s|network: .*|network: host|" config.yml ;
sed -i -e "s|labels: \[\]|labels: \[\"docker:docker://alpine:3.18\"\]|" config.yml ;
sed -i -e "s|labels: \[\]|labels: \[\"docker:docker://alpine:3.18\", \"ubuntu-latest:docker://ghcr.io/catthehacker/ubuntu:act-latest\", \"ubuntu-22.04:docker://ghcr.io/catthehacker/ubuntu:act-22.04\"\]|" config.yml ;
sed -i -e "/valid_volumes: \[\]/ {s/valid_volumes: \[\]/valid_volumes:\n - \"\*\*\"/}" config.yml ;
chown -R 1000:1000 /data
'
# restart: always
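The sed lines above patch the generated config.yml in place; after this change the relevant section should look roughly like this (reconstructed from the sed expressions, not from an actual generated file):

labels:
  - "docker:docker://alpine:3.18"
  - "ubuntu-latest:docker://ghcr.io/catthehacker/ubuntu:act-latest"
  - "ubuntu-22.04:docker://ghcr.io/catthehacker/ubuntu:act-22.04"
valid_volumes:
  - "**"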

View file

@@ -0,0 +1,12 @@
version: "3"
services:
  grafana:
    image: grafana/grafana:latest
    ports:
      - "3000:3000"
    env_file:
      - grafana.env
    volumes:
      - grafana_data:/var/lib/grafana
volumes:
  grafana_data:

View file

@@ -0,0 +1,10 @@
GF_AUTH_GENERIC_OAUTH_ENABLED=true
GF_AUTH_GENERIC_OAUTH_ALLOW_SIGN_UP=true
GF_AUTH_GENERIC_OAUTH_NAME=Authentik
GF_AUTH_GENERIC_OAUTH_SCOPES=openid profile email
GF_AUTH_GENERIC_OAUTH_AUTH_URL=https://auth.lunivity.com/application/o/authorize/
GF_AUTH_GENERIC_OAUTH_TOKEN_URL=https://auth.lunivity.com/application/o/token/
GF_AUTH_GENERIC_OAUTH_API_URL=https://auth.lunivity.com/application/o/userinfo/
GF_AUTH_GENERIC_OAUTH_CLIENT_ID=w0ox6ckj0hOohUFY8xHuqfwt5onN1aFfe2VwiExu
GF_AUTH_GENERIC_OAUTH_CLIENT_SECRET=p2ELUPvMFrR8ytYHp4Q1adlJbZGgFxqp136Q5WyAnwV3IdE4YzqWJbs7tSeFIGKnviYV1uio9Rg1BnL0uCM2ZbgFnHQSJNRacHt4iA2ko0EdVQyWqdnUUoDJ6eidkQPl
GF_AUTH_GENERIC_OAUTH_TLS_SKIP_VERIFY_INSECURE=true

View file

@@ -1,66 +0,0 @@
# Who will run the headscale docker stack? The user needs to exist and be in the docker group.
headscale_user: headscale
headscale_group: headscale
# Enable automatic updates with watchtower
headscale_watchtower_enable: true
# Headscale's base directory for configs, data, etc.
headscale_server_base_dir: /etc/headscale
# Data and Config directories, inside the base dir specified above
headscale_server_config_dir: config
headscale_server_data_dir: data
headscale_server_keys_dir: keys
# Headscale's exposed port
# headscale_server_port: 27896
headscale_server_port: 8080
headscale_server_metrics_port: 9090
# Headscale UI's exposed port
headscale_web_port_http: 9480
# Config
# General (headscale container config)
# Change to your hostname or host IP
headscale_server_config_server_url: https://vpn.example.com
# Listen Addresses
headscale_server_config_listen_addr: 0.0.0.0:8080
headscale_server_config_metrics_listen_addr: 0.0.0.0:9090
# The default /var/lib/headscale path is not writable in the container
headscale_server_config_private_key_path: /keys/private.key
# The default /var/lib/headscale path is not writable in the container
headscale_server_config_noise_private_key_path: /keys/noise_private.key
# The default /var/lib/headscale path is not writable in the container
headscale_server_config_db_type: sqlite3
headscale_server_config_db_path: /data/db.sqlite
# headscale_server_config_grpc_listen_addr: 127.0.0.1:50443
# headscale_server_config_grpc_allow_insecure: false
# IP Prefixes
# headscale_server_config_ip_prefixes:
# - ''
headscale_server_config_disable_check_updates: false
# headscale_server_config_ephemeral_node_inactivity_timeout: 30m
# Derp
# headscale_server_config_derp_server_enabled: false
# headscale_server_config_derp_server_region_id: 999
# headscale_server_config_derp_server_region_code: "headscale"
# headscale_server_config_derp_server_region_name: "Headscale Embedded DERP"
# headscale_server_config_derp_server_stun_listen_addr: "0.0.0.0:3478"
# headscale_server_config_derp_server_private_key_path: /var/lib/headscale/derp_server_private.key
# headscale_server_config_derp_urls:
# - https://controlplane.tailscale.com/derpmap/default
# headscale_server_config_derp_paths: []
# headscale_server_config_derp_auto_update_enabled: true
# headscale_server_config_derp_update_frequency: 24h
# to be continued if necessary

View file

@@ -1,17 +0,0 @@
---
# roles/headscale/handlers/main.yml
- name: docker compose up
  block:
    - name: remove existing containers
      ansible.builtin.command:
        cmd: docker compose -f '{{ headscale_server_base_dir }}/docker-compose.yml' down
      args:
        chdir: '{{ headscale_server_base_dir }}'
    - name: create containers
      ansible.builtin.command:
        cmd: docker compose -f '{{ headscale_server_base_dir }}/docker-compose.yml' up -d
      args:
        chdir: '{{ headscale_server_base_dir }}'
      register: output

View file

@@ -1,333 +0,0 @@
# Managed by Ansible
# --- BEGIN ANSIBLE BLOCK --- #
---
# headscale will look for a configuration file named `config.yaml` (or `config.json`) in the following order:
#
# - `/etc/headscale`
# - `~/.headscale`
# - current working directory
# The url clients will connect to.
# Typically this will be a domain like:
#
# https://myheadscale.example.com:443
#
server_url: {{ headscale_server_config_server_url }}
# Address to listen to / bind to on the server
#
# For production:
# listen_addr: 0.0.0.0:8080
listen_addr: {{ headscale_server_config_listen_addr }}
# Address to listen to /metrics, you may want
# to keep this endpoint private to your internal
# network
#
metrics_listen_addr: {{ headscale_server_config_metrics_listen_addr }}
# Address to listen for gRPC.
# gRPC is used for controlling a headscale server
# remotely with the CLI
# Note: Remote access _only_ works if you have
# valid certificates.
#
# For production:
# grpc_listen_addr: 0.0.0.0:50443
grpc_listen_addr: 127.0.0.1:50443
# Allow the gRPC admin interface to run in INSECURE
# mode. This is not recommended as the traffic will
# be unencrypted. Only enable if you know what you
# are doing.
grpc_allow_insecure: false
private_key_path: {{ headscale_server_config_private_key_path }}
# The Noise section includes specific configuration for the
# TS2021 Noise protocol
noise:
# The Noise private key is used to encrypt the
# traffic between headscale and Tailscale clients when
# using the new Noise-based protocol.
private_key_path: {{ headscale_server_config_noise_private_key_path }}
# private_key_path: /var/lib/headscale/noise_private.key
# List of IP prefixes to allocate tailaddresses from.
# Each prefix consists of either an IPv4 or IPv6 address,
# and the associated prefix length, delimited by a slash.
# It must be within IP ranges supported by the Tailscale
# client - i.e., subnets of 100.64.0.0/10 and fd7a:115c:a1e0::/48.
# See below:
# IPv6: https://github.com/tailscale/tailscale/blob/22ebb25e833264f58d7c3f534a8b166894a89536/net/tsaddr/tsaddr.go#LL81C52-L81C71
# IPv4: https://github.com/tailscale/tailscale/blob/22ebb25e833264f58d7c3f534a8b166894a89536/net/tsaddr/tsaddr.go#L33
# Any other range is NOT supported, and it will cause unexpected issues.
ip_prefixes: {{ headscale_server_config_ip_prefixes }}
# DERP is a relay system that Tailscale uses when a direct
# connection cannot be established.
# https://tailscale.com/blog/how-tailscale-works/#encrypted-tcp-relays-derp
#
# headscale needs a list of DERP servers that can be presented
# to the clients.
derp:
server:
# If enabled, runs the embedded DERP server and merges it into the rest of the DERP config
# The Headscale server_url defined above MUST be using https, DERP requires TLS to be in place
enabled: false
# Region ID to use for the embedded DERP server.
# The local DERP prevails if the region ID collides with other region ID coming from
# the regular DERP config.
region_id: 999
# Region code and name are displayed in the Tailscale UI to identify a DERP region
region_code: "headscale"
region_name: "Headscale Embedded DERP"
# Listens over UDP at the configured address for STUN connections - to help with NAT traversal.
# When the embedded DERP server is enabled stun_listen_addr MUST be defined.
#
# For more details on how this works, check this great article: https://tailscale.com/blog/how-tailscale-works/
stun_listen_addr: "0.0.0.0:3478"
# Private key used to encrypt the traffic between headscale DERP
# and Tailscale clients.
# The private key file will be autogenerated if it's missing.
#
private_key_path: /var/lib/headscale/derp_server_private.key
# List of externally available DERP maps encoded in JSON
urls:
- https://controlplane.tailscale.com/derpmap/default
# Locally available DERP map files encoded in YAML
#
# This option is mostly interesting for people hosting
# their own DERP servers:
# https://tailscale.com/kb/1118/custom-derp-servers/
#
# paths:
# - /etc/headscale/derp-example.yaml
paths: []
# If enabled, a worker will be set up to periodically
# refresh the given sources and update the derpmap.
auto_update_enabled: true
# How often should we check for DERP updates?
update_frequency: 24h
# Disables the automatic check for headscale updates on startup
disable_check_updates: false
# Time before an inactive ephemeral node is deleted.
ephemeral_node_inactivity_timeout: 30m
# Period to check for node updates within the tailnet. A value too low will severely affect
# CPU consumption of Headscale. A value too high (over 60s) will cause problems
# for the nodes, as they won't get updates or keep alive messages frequently enough.
# In case of doubts, do not touch the default 10s.
node_update_check_interval: 10s
# SQLite config
db_type: {{ headscale_server_config_db_type }}
# For production:
db_path: {{ headscale_server_config_db_path }}
# db_path: /var/lib/headscale/db.sqlite
# # Postgres config
# If using a Unix socket to connect to Postgres, set the socket path in the 'host' field and leave 'port' blank.
# db_type: postgres
# db_host: localhost
# db_port: 5432
# db_name: headscale
# db_user: foo
# db_pass: bar
# If other 'sslmode' is required instead of 'require(true)' and 'disabled(false)', set the 'sslmode' you need
# in the 'db_ssl' field. Refer to https://www.postgresql.org/docs/current/libpq-ssl.html Table 34.1.
# db_ssl: false
### TLS configuration
#
## Let's encrypt / ACME
#
# headscale supports automatically requesting and setting up
# TLS for a domain with Let's Encrypt.
#
# URL to ACME directory
acme_url: https://acme-v02.api.letsencrypt.org/directory
# Email to register with ACME provider
acme_email: ""
# Domain name to request a TLS certificate for:
tls_letsencrypt_hostname: ""
# Path to store certificates and metadata needed by
# letsencrypt
# For production:
tls_letsencrypt_cache_dir: /var/lib/headscale/cache
# Type of ACME challenge to use, currently supported types:
# HTTP-01 or TLS-ALPN-01
# See [docs/tls.md](docs/tls.md) for more information
tls_letsencrypt_challenge_type: HTTP-01
# When HTTP-01 challenge is chosen, letsencrypt must set up a
# verification endpoint, and it will be listening on:
# :http = port 80
tls_letsencrypt_listen: ":http"
## Use already defined certificates:
tls_cert_path: ""
tls_key_path: ""
log:
# Output formatting for logs: text or json
format: text
level: info
# Path to a file containing ACL policies.
# ACLs can be defined as YAML or HUJSON.
# https://tailscale.com/kb/1018/acls/
acl_policy_path: ""
## DNS
#
# headscale supports Tailscale's DNS configuration and MagicDNS.
# Please have a look to their KB to better understand the concepts:
#
# - https://tailscale.com/kb/1054/dns/
# - https://tailscale.com/kb/1081/magicdns/
# - https://tailscale.com/blog/2021-09-private-dns-with-magicdns/
#
dns_config:
# Whether to prefer using Headscale provided DNS or use local.
override_local_dns: true
# List of DNS servers to expose to clients.
nameservers:
- 1.1.1.1
# NextDNS (see https://tailscale.com/kb/1218/nextdns/).
# "abc123" is example NextDNS ID, replace with yours.
#
# With metadata sharing:
# nameservers:
# - https://dns.nextdns.io/abc123
#
# Without metadata sharing:
# nameservers:
# - 2a07:a8c0::ab:c123
# - 2a07:a8c1::ab:c123
# Split DNS (see https://tailscale.com/kb/1054/dns/),
# list of search domains and the DNS to query for each one.
#
# restricted_nameservers:
# foo.bar.com:
# - 1.1.1.1
# darp.headscale.net:
# - 1.1.1.1
# - 8.8.8.8
# Search domains to inject.
domains: []
# Extra DNS records
# so far only A-records are supported (on the tailscale side)
# See https://github.com/juanfont/headscale/blob/main/docs/dns-records.md#Limitations
# extra_records:
# - name: "grafana.myvpn.example.com"
# type: "A"
# value: "100.64.0.3"
#
# # you can also put it in one line
# - { name: "prometheus.myvpn.example.com", type: "A", value: "100.64.0.3" }
# Whether to use [MagicDNS](https://tailscale.com/kb/1081/magicdns/).
# Only works if there is at least a nameserver defined.
magic_dns: true
# Defines the base domain to create the hostnames for MagicDNS.
# `base_domain` must be an FQDN, without the trailing dot.
# The FQDN of the hosts will be
# `hostname.user.base_domain` (e.g., _myhost.myuser.example.com_).
base_domain: example.com
# Unix socket used for the CLI to connect without authentication
# Note: for production you will want to set this to something like:
unix_socket: /var/run/headscale/headscale.sock
unix_socket_permission: "0770"
#
# headscale supports experimental OpenID connect support,
# it is still being tested and might have some bugs, please
# help us test it.
# OpenID Connect
# oidc:
# only_start_if_oidc_is_available: true
# issuer: "https://your-oidc.issuer.com/path"
# client_id: "your-oidc-client-id"
# client_secret: "your-oidc-client-secret"
# # Alternatively, set `client_secret_path` to read the secret from the file.
# # It resolves environment variables, making integration to systemd's
# # `LoadCredential` straightforward:
# client_secret_path: "${CREDENTIALS_DIRECTORY}/oidc_client_secret"
# # client_secret and client_secret_path are mutually exclusive.
#
# # The amount of time from when a node is authenticated with OpenID until it
# # expires and needs to reauthenticate.
# # Setting the value to "0" will mean no expiry.
# expiry: 180d
#
# # Use the expiry from the token received from OpenID when the user logged
# # in, this will typically lead to frequent need to reauthenticate and should
# # only be enabled if you know what you are doing.
# # Note: enabling this will cause `oidc.expiry` to be ignored.
# use_expiry_from_token: false
#
# # Customize the scopes used in the OIDC flow (defaults: "openid", "profile" and "email")
# # and add custom query parameters to the Authorize Endpoint request.
#
# scope: ["openid", "profile", "email", "custom"]
# extra_params:
# domain_hint: example.com
#
# # List allowed principal domains and/or users. If an authenticated user's domain is not in this list, the
# # authentication request will be rejected.
#
# allowed_domains:
# - example.com
# # Note: Groups from keycloak have a leading '/'
# allowed_groups:
# - /headscale
# allowed_users:
# - alice@example.com
#
# # If `strip_email_domain` is set to `true`, the domain part of the username email address will be removed.
# # This will transform `first-name.last-name@example.com` to the user `first-name.last-name`
# # If `strip_email_domain` is set to `false`, the domain part will NOT be removed, resulting in the
# # user `first-name.last-name.example.com`
#
# strip_email_domain: true
# Logtail configuration
# Logtail is Tailscale's logging and auditing infrastructure; it allows the control panel
# to instruct tailscale nodes to log their activity to a remote server.
logtail:
# Enable logtail for this headscale instance's clients.
# As there is currently no support for overriding the log server in headscale, this is
# disabled by default. Enabling this will make your clients send logs to Tailscale Inc.
enabled: false
# Enabling this option makes devices prefer a random port for WireGuard traffic over the
# default static port 41641. This option is intended as a workaround for some buggy
# firewall devices. See https://tailscale.com/kb/1181/firewalls/ for more information.
randomize_client_port: false
# --- END ANSIBLE BLOCK --- #

View file

@@ -1,33 +0,0 @@
# Managed by Ansible
# --- BEGIN ANSIBLE BLOCK --- #
version: '3.5'
services:
{% if headscale_watchtower_enable == true %}
  watchtower:
    image: containrrr/watchtower
    container_name: watchtower
    restart: unless-stopped
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
{% endif %}
  headscale:
    image: headscale/headscale:latest
    container_name: headscale
    volumes:
      - {{ headscale_server_base_dir }}/{{ headscale_server_config_dir }}/:/etc/headscale/
      - {{ headscale_server_base_dir }}/{{ headscale_server_data_dir }}/:/data/
      - {{ headscale_server_base_dir }}/{{ headscale_server_keys_dir }}/:/keys/
    ports:
      - {{ headscale_server_port }}:8080
      - {{ headscale_server_metrics_port }}:9090
    command: headscale serve
    restart: unless-stopped
  headscale-ui:
    image: ghcr.io/gurucomputing/headscale-ui:latest
    restart: unless-stopped
    container_name: headscale-ui
    ports:
      - {{ headscale_web_port_http }}:80
# --- END ANSIBLE BLOCK --- #

View file

@@ -1,20 +1,3 @@
---
# roles/headscale/tasks/main.yml
- name: Enable IPv4 packet forwarding
  ansible.posix.sysctl:
    name: net.ipv4.ip_forward
    value: '1'
    sysctl_set: true
    reload: true
- name: Enable IPv6 packet forwarding
  ansible.posix.sysctl:
    name: net.ipv6.conf.all.forwarding
    value: '1'
    sysctl_set: true
    reload: true
- name: Make sure headscale's directories and files exist
  ansible.builtin.file:
    path: '{{ headscale_dirs }}'
@@ -44,6 +27,4 @@
    dest: '{{ headscale_server_base_dir }}/docker-compose.yml'
    owner: '{{ headscale_user }}'
    group: '{{ headscale_group }}'
    mode: '0750'
  # notify: docker compose up
  # notify is broken
    mode: '0750'
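The removed comments note that notify was broken with the command-based handler deleted earlier in this diff; a handler built on community.docker would be one way to restore it. A sketch, assuming the community.docker collection is available (it is not listed in this diff's requirements):

# roles/headscale/handlers/main.yml (hypothetical replacement)
- name: docker compose up
  community.docker.docker_compose_v2:
    project_src: '{{ headscale_server_base_dir }}'
    state: present
    recreate: always  # mirrors the old down/up pair by forcing fresh containers

with notify: docker compose up restored on the template task above.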

View file

@@ -0,0 +1,21 @@
version: '3'
services:
{% if conduit_watchtower_enable == true %}
  watchtower:
    image: containrrr/watchtower
    container_name: watchtower
    restart: unless-stopped
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
{% endif %}
  mumble-server:
    image: mumblevoip/mumble-server:{{ conduit_mumble_image_version }}
    container_name: mumble-server
    hostname: mumble-server
    restart: unless-stopped
    ports:
      - 64738:64738
      - 64738:64738/udp
    volumes:
      - ./mumble:/etc/mumble