[c] migrate to gitpot.org

Sangelo 2024-04-08 15:19:55 +02:00
parent 7fbd76bb16
commit 04e9440630
22 changed files with 178 additions and 534 deletions

View file

@@ -4,8 +4,8 @@ collections:
- name: community.general
- name: ansible.posix
- name: https://gitpot.dev/lunivity/ansible-core.git
- name: https://gitpot.org/lunivity/ansible-core.git
type: git
- name: https://gitpot.dev/lunivity/ansible-common.git
type: git
- name: https://gitpot.org/lunivity/ansible-common.git
type: git

View file

@@ -1,28 +0,0 @@
proxmox_id: 1005
common_firewall_enable: false
core_groups:
- name: "headscale"
state: present
core_users:
- name: "sangelo"
password: "{{ sec_headscale_mgmt_pass }}"
groups: ["sudo", "mgmt"]
state: present
authorized_keys:
- "sangelo"
- "sangelo-access"
- name: "headscale"
password: "{{ sec_headscale_pass }}"
groups: ['docker', 'headscale']
state: present
authorized_keys:
- "sangelo"
- "sangelo-access"
# Headscale
headscale_server_config_server_url: https://vpn.lunivity.com
headscale_server_config_ip_prefixes: '{{ sec_headscale_server_config_ip_prefixes }}'
headscale_server_config_disable_check_updates: true

View file

@@ -0,0 +1,26 @@
proxmox_id: 2010
common_firewall_enable: false
core_groups:
- name: "hedgedoc"
state: present
- name: "mgmt"
state: present
core_users:
- name: "sangelo"
password: "{{ sec_hedgedoc_mgmt_pass }}"
groups: ["sudo", "mgmt"]
state: present
authorized_keys:
- "sangelo"
- "sangelo-access"
- name: "hedgedoc"
password: "{{ sec_hedgedoc_pass }}"
groups: ["docker", "hedgedoc"]
state: present
# authorized_keys:
# - "sangelo"
# - "sangelo-access"

View file

@@ -0,0 +1,66 @@
proxmox_id: 2007
common_firewall_enable: false
core_groups:
- name: "mgmt"
state: present
- name: "mumble"
state: present
core_users:
- name: "sangelo"
password: "{{ sec_mumble_mgmt_pass }}"
groups: ["sudo", "mgmt"]
state: present
authorized_keys:
- "sangelo"
- "sangelo-access"
- name: "mumble"
password: "{{ sec_mumble_pass }}"
groups: ["docker", "mumble"]
state: present
authorized_keys:
- "sangelo"
- "sangelo-access"
# mumble config
mumble_base_dir: "/var/mumble"
mumble_config_dir: "config"
mumble_user: mumble
mumble_group: mumble
# # run compose task?
# core_docker_compose: true
# # compose user and group
# core_docker_compose_user: '{{ mumble_user }}'
# core_docker_compose_group: '{{ mumble_group }}'
# # the docker compose service's name (cosmetic)
# core_docker_compose_name: "Mumble"
# # create directories
# core_docker_compose_mkdir: true
# # directories to create
# core_docker_compose_dirs:
# - "{{ mumble_base_dir }}"
# - "{{ mumble_base_dir }}/{{ mumble_config_dir }}"
# # chown directories with this value
# core_docker_compose_dirs_mode: '0750'
# # copy templates
# core_docker_compose_cp_templates: false
# # chown templates with this value
# core_docker_compose_templates_mode: '0650'
# # chown the main docker compose with this value
# core_docker_compose_mode: '0650'
# # docker-compose.yml template source
# core_docker_compose_file:
# src: "{{ playbook_dir }}/templates/mumble/docker-compose.yml.j2"
# dest: "{{ mumble_base_dir }}/docker-compose.yml"

View file

@@ -1,26 +1,24 @@
proxmox_id: 1015
proxmox_id: 1005
common_firewall_enable: false
core_groups:
- name: "headscale"
- name: "wormhole"
state: present
- name: "mgmt"
core_users:
- name: "sangelo"
password: "{{ sec_headscale_mgmt_pass }}"
password: "{{ sec_wormhole_mgmt_pass }}"
groups: ["sudo", "mgmt"]
state: present
authorized_keys:
- "sangelo"
- "sangelo-access"
- name: "headscale"
password: "{{ sec_headscale_pass }}"
groups: ['headscale']
- name: "wormhole"
password: "{{ sec_wormhole_pass }}"
groups: ['docker', 'wormhole']
state: present
authorized_keys:
- "sangelo"
- "sangelo-access"
# Install Tailscale CLI?
tailscale_install_cli: false

View file

@@ -1,9 +1,9 @@
# Headscale Nodes
[headscale]
# VPN Nodes
[wormhole]
10.1.0.5
[headscale_exit_nodes]
10.1.0.15
[mumble]
10.2.0.7
[outline]
10.2.0.8
@@ -20,3 +20,7 @@
[gitpot_runners]
10.5.1.1
; 10.5.1.2
[hedgedoc]
10.2.0.10

View file

@@ -1,16 +0,0 @@
- name: Headscale VPN Server
hosts: headscale_exit_nodes
remote_user: root
roles:
- lunivity.common.all
- lunivity.core.users
- headscale
tasks:
- name: Install Tailscale CLI
ansible.builtin.shell: |
curl -fsSL https://tailscale.com/install.sh | sh
args:
executable: /bin/bash
when: tailscale_install_cli
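The deleted play above piped the upstream install script straight to the shell; for reference, a minimal sketch of the same step guarded for idempotence (the /usr/bin/tailscale path is an assumption, not taken from this repo):

- name: Tailscale CLI (idempotent variant)
  hosts: headscale_exit_nodes
  remote_user: root
  tasks:
    - name: Install Tailscale CLI only when the binary is missing
      ansible.builtin.shell: |
        curl -fsSL https://tailscale.com/install.sh | sh
      args:
        executable: /bin/bash
        creates: /usr/bin/tailscale   # assumed install path; task is skipped when this file exists
      when: tailscale_install_cli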

View file

@@ -1,9 +1,8 @@
- name: Headscale VPN Server
hosts: headscale
- name: Wireguard VPN Server
hosts: wormhole
remote_user: root
roles:
- lunivity.common.all
- lunivity.core.docker
- lunivity.core.users
- headscale

View file

@@ -0,0 +1,9 @@
---
- name: Install Hedgedoc
hosts: hedgedoc
remote_user: root
roles:
- lunivity.common.all
- lunivity.core.docker
- lunivity.core.users

View file

@@ -0,0 +1,10 @@
---
- name: Install Mumble
hosts: mumble
remote_user: root
roles:
- lunivity.common.all
- lunivity.core.users
- lunivity.core.docker
- mumble

View file

@@ -1,5 +1,5 @@
gitpot_runner_version: 3.3.0
gitpot_runner_instance: https://gitpot.dev
gitpot_runner_instance: https://gitpot.org
gitpot_runner_dind_port: 2376
# Create Shared Secret with `openssl rand -hex 20`. A token from the Forgejo Web Interface cannot be used here.
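A minimal sketch of generating that shared secret once on the control node, assuming it is then stored in a vaulted variable (the play and variable names here are illustrative, not taken from this repo):

- name: Generate gitpot runner shared secret
  hosts: localhost
  gather_facts: false
  tasks:
    - name: Create a 40-character hex token with openssl
      ansible.builtin.command: openssl rand -hex 20
      register: runner_shared_secret
      changed_when: false

    - name: Print the token so it can be vaulted (e.g. with ansible-vault encrypt_string)
      ansible.builtin.debug:
        msg: "{{ runner_shared_secret.stdout }}"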

View file

@@ -45,7 +45,8 @@ services:
done ;
forgejo-runner generate-config > config.yml ;
sed -i -e "s|network: .*|network: host|" config.yml ;
sed -i -e "s|labels: \[\]|labels: \[\"docker:docker://alpine:3.18\"\]|" config.yml ;
sed -i -e "s|labels: \[\]|labels: \[\"docker:docker://alpine:3.18\", \"ubuntu-latest:docker://ghcr.io/catthehacker/ubuntu:act-latest\", \"ubuntu-22.04:docker://ghcr.io/catthehacker/ubuntu:act-22.04\"\]|" config.yml ;
sed -i -e "/valid_volumes: \[\]/ {s/valid_volumes: \[\]/valid_volumes:\n - \"\*\*\"/}" config.yml ;
chown -R 1000:1000 /data
'
# restart: always
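For context, a sketch of the runner config.yml fragment those sed edits are expected to produce (based on the default forgejo-runner config layout; not verified output from this setup):

runner:
  labels: ["docker:docker://alpine:3.18", "ubuntu-latest:docker://ghcr.io/catthehacker/ubuntu:act-latest", "ubuntu-22.04:docker://ghcr.io/catthehacker/ubuntu:act-22.04"]

container:
  network: host
  valid_volumes:
    - "**"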

View file

View file

@@ -0,0 +1,12 @@
version: "3"
services:
grafana:
image: grafana/grafana:latest
ports:
- "3000:3000"
env_file:
- grafana.env
volumes:
- grafana_data:/var/lib/grafana
volumes:
grafana_data:

View file

@@ -0,0 +1,10 @@
GF_AUTH_GENERIC_OAUTH_ENABLED=true
GF_AUTH_GENERIC_OAUTH_ALLOW_SIGN_UP=true
GF_AUTH_GENERIC_OAUTH_NAME=Authentik
GF_AUTH_GENERIC_OAUTH_SCOPES=openid profile email
GF_AUTH_GENERIC_OAUTH_AUTH_URL=https://auth.lunivity.com/application/o/authorize/
GF_AUTH_GENERIC_OAUTH_TOKEN_URL=https://auth.lunivity.com/application/o/token/
GF_AUTH_GENERIC_OAUTH_API_URL=https://auth.lunivity.com/application/o/userinfo/
GF_AUTH_GENERIC_OAUTH_CLIENT_ID=w0ox6ckj0hOohUFY8xHuqfwt5onN1aFfe2VwiExu
GF_AUTH_GENERIC_OAUTH_CLIENT_SECRET=p2ELUPvMFrR8ytYHp4Q1adlJbZGgFxqp136Q5WyAnwV3IdE4YzqWJbs7tSeFIGKnviYV1uio9Rg1BnL0uCM2ZbgFnHQSJNRacHt4iA2ko0EdVQyWqdnUUoDJ6eidkQPl
GF_AUTH_GENERIC_OAUTH_TLS_SKIP_VERIFY_INSECURE=true

View file

@@ -1,66 +0,0 @@
# Which user will run the headscale docker stack? The user must exist and be in the docker group.
headscale_user: headscale
headscale_group: headscale
# Enable automatic updates with watchtower
headscale_watchtower_enable: true
# Headscale's base directory for configs, data, etc.
headscale_server_base_dir: /etc/headscale
# Data and Config directories, inside the base dir specified above
headscale_server_config_dir: config
headscale_server_data_dir: data
headscale_server_keys_dir: keys
# Headscale's exposed port
# headscale_server_port: 27896
headscale_server_port: 8080
headscale_server_metrics_port: 9090
# Headscale UI's exposed port
headscale_web_port_http: 9480
# Config
# General (headscale container config)
# Change to your hostname or host IP
headscale_server_config_server_url: https://vpn.example.com
# Listen Addresses
headscale_server_config_listen_addr: 0.0.0.0:8080
headscale_server_config_metrics_listen_addr: 0.0.0.0:9090
# The default /var/lib/headscale path is not writable in the container
headscale_server_config_private_key_path: /keys/private.key
# The default /var/lib/headscale path is not writable in the container
headscale_server_config_noise_private_key_path: /keys/noise_private.key
# The default /var/lib/headscale path is not writable in the container
headscale_server_config_db_type: sqlite3
headscale_server_config_db_path: /data/db.sqlite
# headscale_server_config_grpc_listen_addr: 127.0.0.1:50443
# headscale_server_config_grpc_allow_insecure: false
# IP Prefixes
# headscale_server_config_ip_prefixes:
# - ''
headscale_server_config_disable_check_updates: false
# headscale_server_config_ephemeral_node_inactivity_timeout: 30m
# Derp
# headscale_server_config_derp_server_enabled: false
# headscale_server_config_derp_server_region_id: 999
# headscale_server_config_derp_server_region_code: "headscale"
# headscale_server_config_derp_server_region_name: "Headscale Embedded DERP"
# headscale_server_config_derp_server_stun_listen_addr: "0.0.0.0:3478"
# headscale_server_config_derp_server_private_key_path: /var/lib/headscale/derp_server_private.key
# headscale_server_config_derp_urls:
# - https://controlplane.tailscale.com/derpmap/default
# headscale_server_config_derp_paths: []
# headscale_server_config_derp_auto_update_enabled: true
# headscale_server_config_derp_update_frequency: 24h
# to be continued if necessary

View file

@@ -1,17 +0,0 @@
---
# roles/headscale/handlers/main.yml
- name: docker compose up
block:
- name: remove existing containers
ansible.builtin.command:
cmd: docker compose -f '{{ headscale_server_base_dir }}/docker-compose.yml' down
args:
chdir: '{{ headscale_server_base_dir }}'
- name: create containers
ansible.builtin.command:
cmd: docker compose -f '{{ headscale_server_base_dir }}/docker-compose.yml' up -d
args:
chdir: '{{ headscale_server_base_dir }}'
register: output

View file

@@ -1,333 +0,0 @@
# Managed by Ansible
# --- BEGIN ANSIBLE BLOCK --- #
---
# headscale will look for a configuration file named `config.yaml` (or `config.json`) in the following order:
#
# - `/etc/headscale`
# - `~/.headscale`
# - current working directory
# The url clients will connect to.
# Typically this will be a domain like:
#
# https://myheadscale.example.com:443
#
server_url: {{ headscale_server_config_server_url }}
# Address to listen to / bind to on the server
#
# For production:
# listen_addr: 0.0.0.0:8080
listen_addr: {{ headscale_server_config_listen_addr }}
# Address to listen to /metrics, you may want
# to keep this endpoint private to your internal
# network
#
metrics_listen_addr: {{ headscale_server_config_metrics_listen_addr }}
# Address to listen for gRPC.
# gRPC is used for controlling a headscale server
# remotely with the CLI
# Note: Remote access _only_ works if you have
# valid certificates.
#
# For production:
# grpc_listen_addr: 0.0.0.0:50443
grpc_listen_addr: 127.0.0.1:50443
# Allow the gRPC admin interface to run in INSECURE
# mode. This is not recommended as the traffic will
# be unencrypted. Only enable if you know what you
# are doing.
grpc_allow_insecure: false
private_key_path: {{ headscale_server_config_private_key_path }}
# The Noise section includes specific configuration for the
# TS2021 Noise protocol
noise:
# The Noise private key is used to encrypt the
# traffic between headscale and Tailscale clients when
# using the new Noise-based protocol.
private_key_path: {{ headscale_server_config_noise_private_key_path }}
# private_key_path: /var/lib/headscale/noise_private.key
# List of IP prefixes to allocate tailaddresses from.
# Each prefix consists of either an IPv4 or IPv6 address,
# and the associated prefix length, delimited by a slash.
# It must be within IP ranges supported by the Tailscale
# client - i.e., subnets of 100.64.0.0/10 and fd7a:115c:a1e0::/48.
# See below:
# IPv6: https://github.com/tailscale/tailscale/blob/22ebb25e833264f58d7c3f534a8b166894a89536/net/tsaddr/tsaddr.go#LL81C52-L81C71
# IPv4: https://github.com/tailscale/tailscale/blob/22ebb25e833264f58d7c3f534a8b166894a89536/net/tsaddr/tsaddr.go#L33
# Any other range is NOT supported, and it will cause unexpected issues.
ip_prefixes: {{ headscale_server_config_ip_prefixes }}
# DERP is a relay system that Tailscale uses when a direct
# connection cannot be established.
# https://tailscale.com/blog/how-tailscale-works/#encrypted-tcp-relays-derp
#
# headscale needs a list of DERP servers that can be presented
# to the clients.
derp:
server:
# If enabled, runs the embedded DERP server and merges it into the rest of the DERP config
# The Headscale server_url defined above MUST be using https, DERP requires TLS to be in place
enabled: false
# Region ID to use for the embedded DERP server.
# The local DERP prevails if the region ID collides with other region ID coming from
# the regular DERP config.
region_id: 999
# Region code and name are displayed in the Tailscale UI to identify a DERP region
region_code: "headscale"
region_name: "Headscale Embedded DERP"
# Listens over UDP at the configured address for STUN connections - to help with NAT traversal.
# When the embedded DERP server is enabled stun_listen_addr MUST be defined.
#
# For more details on how this works, check this great article: https://tailscale.com/blog/how-tailscale-works/
stun_listen_addr: "0.0.0.0:3478"
# Private key used to encrypt the traffic between headscale DERP
# and Tailscale clients.
# The private key file will be autogenerated if it's missing.
#
private_key_path: /var/lib/headscale/derp_server_private.key
# List of externally available DERP maps encoded in JSON
urls:
- https://controlplane.tailscale.com/derpmap/default
# Locally available DERP map files encoded in YAML
#
# This option is mostly interesting for people hosting
# their own DERP servers:
# https://tailscale.com/kb/1118/custom-derp-servers/
#
# paths:
# - /etc/headscale/derp-example.yaml
paths: []
# If enabled, a worker will be set up to periodically
# refresh the given sources and update the derpmap.
auto_update_enabled: true
# How often should we check for DERP updates?
update_frequency: 24h
# Disables the automatic check for headscale updates on startup
disable_check_updates: false
# Time before an inactive ephemeral node is deleted?
ephemeral_node_inactivity_timeout: 30m
# Period to check for node updates within the tailnet. A value too low will severely affect
# CPU consumption of Headscale. A value too high (over 60s) will cause problems
# for the nodes, as they won't get updates or keep alive messages frequently enough.
# In case of doubts, do not touch the default 10s.
node_update_check_interval: 10s
# SQLite config
db_type: {{ headscale_server_config_db_type }}
# For production:
db_path: {{ headscale_server_config_db_path }}
# db_path: /var/lib/headscale/db.sqlite
# # Postgres config
# If using a Unix socket to connect to Postgres, set the socket path in the 'host' field and leave 'port' blank.
# db_type: postgres
# db_host: localhost
# db_port: 5432
# db_name: headscale
# db_user: foo
# db_pass: bar
# If other 'sslmode' is required instead of 'require(true)' and 'disabled(false)', set the 'sslmode' you need
# in the 'db_ssl' field. Refers to https://www.postgresql.org/docs/current/libpq-ssl.html Table 34.1.
# db_ssl: false
### TLS configuration
#
## Let's encrypt / ACME
#
# headscale supports automatically requesting and setting up
# TLS for a domain with Let's Encrypt.
#
# URL to ACME directory
acme_url: https://acme-v02.api.letsencrypt.org/directory
# Email to register with ACME provider
acme_email: ""
# Domain name to request a TLS certificate for:
tls_letsencrypt_hostname: ""
# Path to store certificates and metadata needed by
# letsencrypt
# For production:
tls_letsencrypt_cache_dir: /var/lib/headscale/cache
# Type of ACME challenge to use, currently supported types:
# HTTP-01 or TLS-ALPN-01
# See [docs/tls.md](docs/tls.md) for more information
tls_letsencrypt_challenge_type: HTTP-01
# When HTTP-01 challenge is chosen, letsencrypt must set up a
# verification endpoint, and it will be listening on:
# :http = port 80
tls_letsencrypt_listen: ":http"
## Use already defined certificates:
tls_cert_path: ""
tls_key_path: ""
log:
# Output formatting for logs: text or json
format: text
level: info
# Path to a file containing ACL policies.
# ACLs can be defined as YAML or HUJSON.
# https://tailscale.com/kb/1018/acls/
acl_policy_path: ""
## DNS
#
# headscale supports Tailscale's DNS configuration and MagicDNS.
# Please have a look to their KB to better understand the concepts:
#
# - https://tailscale.com/kb/1054/dns/
# - https://tailscale.com/kb/1081/magicdns/
# - https://tailscale.com/blog/2021-09-private-dns-with-magicdns/
#
dns_config:
# Whether to prefer using Headscale provided DNS or use local.
override_local_dns: true
# List of DNS servers to expose to clients.
nameservers:
- 1.1.1.1
# NextDNS (see https://tailscale.com/kb/1218/nextdns/).
# "abc123" is example NextDNS ID, replace with yours.
#
# With metadata sharing:
# nameservers:
# - https://dns.nextdns.io/abc123
#
# Without metadata sharing:
# nameservers:
# - 2a07:a8c0::ab:c123
# - 2a07:a8c1::ab:c123
# Split DNS (see https://tailscale.com/kb/1054/dns/),
# list of search domains and the DNS to query for each one.
#
# restricted_nameservers:
# foo.bar.com:
# - 1.1.1.1
# darp.headscale.net:
# - 1.1.1.1
# - 8.8.8.8
# Search domains to inject.
domains: []
# Extra DNS records
# so far only A-records are supported (on the tailscale side)
# See https://github.com/juanfont/headscale/blob/main/docs/dns-records.md#Limitations
# extra_records:
# - name: "grafana.myvpn.example.com"
# type: "A"
# value: "100.64.0.3"
#
# # you can also put it in one line
# - { name: "prometheus.myvpn.example.com", type: "A", value: "100.64.0.3" }
# Whether to use [MagicDNS](https://tailscale.com/kb/1081/magicdns/).
# Only works if there is at least a nameserver defined.
magic_dns: true
# Defines the base domain to create the hostnames for MagicDNS.
# `base_domain` must be a FQDNs, without the trailing dot.
# The FQDN of the hosts will be
# `hostname.user.base_domain` (e.g., _myhost.myuser.example.com_).
base_domain: example.com
# Unix socket used for the CLI to connect without authentication
# Note: for production you will want to set this to something like:
unix_socket: /var/run/headscale/headscale.sock
unix_socket_permission: "0770"
#
# headscale supports experimental OpenID connect support,
# it is still being tested and might have some bugs, please
# help us test it.
# OpenID Connect
# oidc:
# only_start_if_oidc_is_available: true
# issuer: "https://your-oidc.issuer.com/path"
# client_id: "your-oidc-client-id"
# client_secret: "your-oidc-client-secret"
# # Alternatively, set `client_secret_path` to read the secret from the file.
# # It resolves environment variables, making integration to systemd's
# # `LoadCredential` straightforward:
# client_secret_path: "${CREDENTIALS_DIRECTORY}/oidc_client_secret"
# # client_secret and client_secret_path are mutually exclusive.
#
# # The amount of time from a node is authenticated with OpenID until it
# # expires and needs to reauthenticate.
# # Setting the value to "0" will mean no expiry.
# expiry: 180d
#
# # Use the expiry from the token received from OpenID when the user logged
# # in, this will typically lead to frequent need to reauthenticate and should
# # only been enabled if you know what you are doing.
# # Note: enabling this will cause `oidc.expiry` to be ignored.
# use_expiry_from_token: false
#
# # Customize the scopes used in the OIDC flow, defaults to "openid", "profile" and "email" and add custom query
# # parameters to the Authorize Endpoint request. Scopes default to "openid", "profile" and "email".
#
# scope: ["openid", "profile", "email", "custom"]
# extra_params:
# domain_hint: example.com
#
# # List allowed principal domains and/or users. If an authenticated user's domain is not in this list, the
# # authentication request will be rejected.
#
# allowed_domains:
# - example.com
# # Note: Groups from keycloak have a leading '/'
# allowed_groups:
# - /headscale
# allowed_users:
# - alice@example.com
#
# # If `strip_email_domain` is set to `true`, the domain part of the username email address will be removed.
# # This will transform `first-name.last-name@example.com` to the user `first-name.last-name`
# # If `strip_email_domain` is set to `false` the domain part will NOT be removed resulting to the following
# user: `first-name.last-name.example.com`
#
# strip_email_domain: true
# Logtail configuration
# Logtail is Tailscale's logging and auditing infrastructure; it allows the control panel
# to instruct tailscale nodes to log their activity to a remote server.
logtail:
# Enable logtail for this headscale instance's clients.
# As there is currently no support for overriding the log server in headscale, this is
# disabled by default. Enabling this will make your clients send logs to Tailscale Inc.
enabled: false
# Enabling this option makes devices prefer a random port for WireGuard traffic over the
# default static port 41641. This option is intended as a workaround for some buggy
# firewall devices. See https://tailscale.com/kb/1181/firewalls/ for more information.
randomize_client_port: false
# --- END ANSIBLE BLOCK --- #

View file

@@ -1,33 +0,0 @@
# Managed by Ansible
# --- BEGIN ANSIBLE BLOCK --- #
version: '3.5'
services:
{% if headscale_watchtower_enable == true %}
watchtower:
image: containrrr/watchtower
container_name: watchtower
restart: unless-stopped
volumes:
- /var/run/docker.sock:/var/run/docker.sock
{% endif %}
headscale:
image: headscale/headscale:latest
container_name: headscale
volumes:
- {{ headscale_server_base_dir }}/{{ headscale_server_config_dir }}/:/etc/headscale/
- {{ headscale_server_base_dir }}/{{ headscale_server_data_dir }}/:/data/
- {{ headscale_server_base_dir }}/{{ headscale_server_keys_dir }}/:/keys/
ports:
- {{ headscale_server_port }}:8080
- {{ headscale_server_metrics_port }}:9090
command: headscale serve
restart: unless-stopped
headscale-ui:
image: ghcr.io/gurucomputing/headscale-ui:latest
restart: unless-stopped
container_name: headscale-ui
ports:
- {{ headscale_web_port_http }}:80
# --- END ANSIBLE BLOCK --- #

View file

@@ -1,20 +1,3 @@
---
# roles/headscale/tasks/main.yml
- name: Enable IPv4 packet forwarding
ansible.posix.sysctl:
name: net.ipv4.ip_forward
value: '1'
sysctl_set: true
reload: true
- name: Enable IPv6 packet forwarding
ansible.posix.sysctl:
name: net.ipv6.conf.all.forwarding
value: '1'
sysctl_set: true
reload: true
- name: Make sure headscale's directories and files exist
ansible.builtin.file:
path: '{{ headscale_dirs }}'
@@ -44,6 +27,4 @@
dest: '{{ headscale_server_base_dir }}/docker-compose.yml'
owner: '{{ headscale_user }}'
group: '{{ headscale_group }}'
mode: '0750'
# notify: docker compose up
# notify is broken
mode: '0750'

View file

@@ -0,0 +1,21 @@
version: '3'
services:
{% if conduit_watchtower_enable == true %}
watchtower:
image: containrrr/watchtower
container_name: watchtower
restart: unless-stopped
volumes:
- /var/run/docker.sock:/var/run/docker.sock
{% endif %}
mumble-server:
image: mumblevoip/mumble-server:{{ conduit_mumble_image_version }}
container_name: mumble-server
hostname: mumble-server
restart: unless-stopped
ports:
- 64738:64738
- 64738:64738/udp
volumes:
- ./mumble:/etc/mumble