feat: initial commit

This commit is contained in:
amy 2025-04-01 17:40:03 +00:00
commit 38f495e3f4
457 changed files with 40577 additions and 0 deletions

View file

@ -0,0 +1,72 @@
services:
redis:
image: docker.io/library/redis:alpine
command: --save 60 1 --loglevel warning
restart: unless-stopped
healthcheck:
test: ["CMD-SHELL", "redis-cli ping | grep PONG"]
start_period: 20s
interval: 30s
retries: 5
timeout: 3s
volumes:
- redis:/data
server:
image: ${AUTHENTIK_IMAGE:-ghcr.io/goauthentik/server}:${AUTHENTIK_TAG:-2025.2.3}
restart: unless-stopped
command: server
environment:
AUTHENTIK_REDIS__HOST: redis
AUTHENTIK_POSTGRESQL__HOST: ${PG_HOST:-postgresql.cluster}
AUTHENTIK_POSTGRESQL__USER: ${PG_USER:-authentik}
AUTHENTIK_POSTGRESQL__NAME: ${PG_DB:-authentik}
AUTHENTIK_POSTGRESQL__PASSWORD: ${PG_PASS}
volumes:
- ./media:/media
- ./custom-templates:/templates
env_file:
- .env
ports:
- "${COMPOSE_PORT_HTTP:-9000}:9000"
- "${COMPOSE_PORT_HTTPS:-9443}:9443"
depends_on:
redis:
condition: service_healthy
rac:
image: ghcr.io/goauthentik/rac:${AUTHENTIK_TAG:-2025.2.3}
restart: unless-stopped
env_file:
- .env
depends_on:
- server
worker:
image: ${AUTHENTIK_IMAGE:-ghcr.io/goauthentik/server}:${AUTHENTIK_TAG:-2025.2.3}
restart: unless-stopped
command: worker
environment:
AUTHENTIK_REDIS__HOST: redis
AUTHENTIK_POSTGRESQL__HOST: ${PG_HOST:-postgresql.cluster}
AUTHENTIK_POSTGRESQL__USER: ${PG_USER:-authentik}
AUTHENTIK_POSTGRESQL__NAME: ${PG_DB:-authentik}
AUTHENTIK_POSTGRESQL__PASSWORD: ${PG_PASS}
# `user: root` and the docker socket volume are optional.
# See more for the docker socket integration here:
# https://goauthentik.io/docs/outposts/integrations/docker
# Removing `user: root` also prevents the worker from fixing the permissions
# on the mounted folders, so when removing this make sure the folders have the correct UID/GID
# (1000:1000 by default)
user: root
volumes:
- /var/run/docker.sock:/var/run/docker.sock
- ./media:/media
- ./certs:/certs
- ./custom-templates:/templates
env_file:
- .env
depends_on:
redis:
condition: service_healthy
volumes:
redis:
driver: local

View file

@ -0,0 +1,30 @@
# Deploys the authentik identity provider as a docker-compose stack.
class authentik {
include docker
contain authentik::install
}
# Installs docker-compose, stages the compose and environment files under
# /opt/authentik, and brings the stack up.
class authentik::install {
package { 'docker-compose':
ensure => installed
}
file { '/opt/authentik':
ensure => directory
}
# Static compose definition shipped from the module's files/ directory.
file { '/opt/authentik/compose.yml':
ensure => file,
source => 'puppet:///modules/authentik/compose.yml'
}
# Rendered from a template so secrets (PG_PASS, AUTHENTIK_SECRET_KEY, ...)
# can be injected from node data.
file { '/opt/authentik/.env':
ensure => file,
content => template('authentik/.env.erb')
}
# NOTE(review): docker_compose is not guaranteed to autorequire the staged
# files; consider an explicit require on the compose.yml/.env resources.
docker_compose { 'authentik':
compose_files => ['/opt/authentik/compose.yml'],
ensure => present,
}
}

View file

@ -0,0 +1,9 @@
AUTHENTIK_HOST=<%= @authentik_host %>
AUTHENTIK_INSECURE=false
PG_HOST=<%= @authentik_pg_host %>
PG_USER=<%= @authentik_pg_user %>
PG_PASS=<%= @authentik_pg_pass %>
AUTHENTIK_SECRET_KEY=<%= @authentik_secret_key %>
AUTHENTIK_TOKEN=<%= @authentik_rac_token %>

View file

@ -0,0 +1,9 @@
# Manages /etc/resolv.conf from a template; nameservers are supplied via
# the template's @nameservers variable (see dns/resolv.conf.erb).
class dns {
file { '/etc/resolv.conf':
ensure => file,
owner => 'root',
group => 'root',
mode => '0644',
content => template('dns/resolv.conf.erb'),
}
}

View file

@ -0,0 +1,8 @@
# !! Managed by Puppet !!
domain cluster
search cluster
<% [@nameservers].flatten.each do |ns| -%>
nameserver <%= ns %>
<% end -%>

View file

@ -0,0 +1,88 @@
# ======================== Elasticsearch Configuration =========================
#
# NOTE: Elasticsearch comes with reasonable defaults for most settings.
# Before you set out to tweak and tune the configuration, make sure you
# understand what are you trying to accomplish and the consequences.
#
# The primary way of configuring a node is via this file. This template lists
# the most important settings you may want to configure for a production cluster.
#
# Please consult the documentation for further information on configuration options:
# https://www.elastic.co/guide/en/elasticsearch/reference/index.html
#
# ---------------------------------- Cluster -----------------------------------
#
# Use a descriptive name for your cluster:
#
#cluster.name: my-application
#
# ------------------------------------ Node ------------------------------------
#
# Use a descriptive name for the node:
#
node.name: es-1
#
# Add custom attributes to the node:
#
#node.attr.rack: r1
#
# ----------------------------------- Paths ------------------------------------
#
# Path to directory where to store the data (separate multiple locations by comma):
#
path.data: /var/lib/elasticsearch
#
# Path to log files:
#
path.logs: /var/log/elasticsearch
#
# ----------------------------------- Memory -----------------------------------
#
# Lock the memory on startup:
#
#bootstrap.memory_lock: true
#
# Make sure that the heap size is set to about half the memory available
# on the system and that the owner of the process is allowed to use this
# limit.
#
# Elasticsearch performs poorly when the system is swapping the memory.
#
# ---------------------------------- Network -----------------------------------
#
# Set the bind address to a specific IP (IPv4 or IPv6):
#
network.host: 0.0.0.0
#
# Set a custom port for HTTP:
#
#http.port: 9200
#
# For more information, consult the network module documentation.
#
# --------------------------------- Discovery ----------------------------------
#
# Pass an initial list of hosts to perform discovery when this node is started:
# The default list of hosts is ["127.0.0.1", "[::1]"]
#
#discovery.seed_hosts: ["host1", "host2"]
#
# Bootstrap the cluster using an initial set of master-eligible nodes:
#
cluster.initial_master_nodes: ["es-1"]
#
# For more information, consult the discovery and cluster formation module documentation.
#
# ---------------------------------- Gateway -----------------------------------
#
# Block initial recovery after a full cluster restart until N nodes are started:
#
#gateway.recover_after_nodes: 3
#
# For more information, consult the gateway module documentation.
#
# ---------------------------------- Various -----------------------------------
#
# Require explicit names when deleting indices:
#
#action.destructive_requires_name: true

View file

@ -0,0 +1,18 @@
# Installs Filebeat 7.10.2 from the upstream .deb (no apt repository).
class elastic::filebeat {
contain elastic::filebeat::install
}
class elastic::filebeat::install {
file { '/opt/filebeat':
ensure => directory
}
# Download the release artifact; the notify arrow (~>) re-runs the dpkg
# install if the staged .deb ever changes.
file { '/opt/filebeat/filebeat.deb':
ensure => file,
source => 'https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-7.10.2-amd64.deb'
} ~>
package { 'filebeat':
provider => dpkg,
source => "/opt/filebeat/filebeat.deb"
}
}

View file

@ -0,0 +1,3 @@
# Empty namespace anchor for the elastic module; the real profiles are
# elastic::search, elastic::kibana and elastic::filebeat.
class elastic {
}

View file

@ -0,0 +1,35 @@
# Installs, configures and runs Kibana 7.10.2 from the upstream .deb.
class elastic::kibana {
contain elastic::kibana::install
contain elastic::kibana::config
contain elastic::kibana::service
}
class elastic::kibana::install {
file { '/opt/kibana':
ensure => directory
}
# The notify arrow (~>) triggers the dpkg install when the staged .deb
# changes (e.g. on a version bump of the source URL).
file { '/opt/kibana/kibana.deb':
ensure => file,
source => 'https://artifacts.elastic.co/downloads/kibana/kibana-7.10.2-amd64.deb'
} ~>
package { 'kibana':
provider => dpkg,
source => "/opt/kibana/kibana.deb"
}
}
# Renders kibana.yml (encryption key comes from template data) and
# restarts the service on change.
class elastic::kibana::config {
file { '/etc/kibana/kibana.yml':
ensure => file,
content => template('elastic/kibana.yml.erb'),
notify => Service['kibana']
}
}
class elastic::kibana::service {
service { 'kibana':
ensure => running,
enable => true
}
}

View file

@ -0,0 +1,35 @@
# Installs, configures and runs a single-node Elasticsearch 7.10.2
# from the upstream .deb (no apt repository involved).
class elastic::search {
  contain elastic::search::install
  contain elastic::search::config
  contain elastic::search::service
}

# Downloads the elasticsearch .deb to /opt/search and installs it via dpkg.
class elastic::search::install {
  file { '/opt/search':
    ensure => directory
  }
  file { '/opt/search/search.deb':
    ensure => file,
    source => 'https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-7.10.2-amd64.deb'
  } ~>
  # The dpkg provider checks installation state by the package's real name.
  # The archive installs "elasticsearch", so the resource must be titled
  # accordingly — titling it 'search' never matches an installed package
  # and re-installs the .deb on every agent run.
  package { 'elasticsearch':
    provider => dpkg,
    source   => '/opt/search/search.deb'
  }
}

# Ships the static elasticsearch.yml and restarts the service on change.
class elastic::search::config {
  file { '/etc/elasticsearch/elasticsearch.yml':
    ensure => file,
    source => 'puppet:///modules/elastic/elastic.yml',
    notify => Service['elasticsearch']
  }
}

# Keeps the elasticsearch service enabled and running.
class elastic::search::service {
  service { 'elasticsearch':
    ensure => running,
    enable => true
  }
}

View file

@ -0,0 +1,111 @@
# Kibana is served by a back end server. This setting specifies the port to use.
server.port: 5601
# Specifies the address to which the Kibana server will bind. IP addresses and host names are both valid values.
# The default is 'localhost', which usually means remote machines will not be able to connect.
# To allow connections from remote users, set this parameter to a non-loopback address.
server.host: "0.0.0.0"
# Enables you to specify a path to mount Kibana at if you are running behind a proxy.
# Use the `server.rewriteBasePath` setting to tell Kibana if it should remove the basePath
# from requests it receives, and to prevent a deprecation warning at startup.
# This setting cannot end in a slash.
#server.basePath: ""
# Specifies whether Kibana should rewrite requests that are prefixed with
# `server.basePath` or require that they are rewritten by your reverse proxy.
# This setting was effectively always `false` before Kibana 6.3 and will
# default to `true` starting in Kibana 7.0.
#server.rewriteBasePath: false
# The maximum payload size in bytes for incoming server requests.
#server.maxPayloadBytes: 1048576
# The Kibana server's name. This is used for display purposes.
server.name: "Kibana"
# The URLs of the Elasticsearch instances to use for all your queries.
elasticsearch.hosts: [ "http://elasticsearch.cluster:9200" ]
# Kibana uses an index in Elasticsearch to store saved searches, visualizations and
# dashboards. Kibana creates a new index if the index doesn't already exist.
#kibana.index: ".kibana"
# The default application to load.
#kibana.defaultAppId: "home"
# If your Elasticsearch is protected with basic authentication, these settings provide
# the username and password that the Kibana server uses to perform maintenance on the Kibana
# index at startup. Your Kibana users still need to authenticate with Elasticsearch, which
# is proxied through the Kibana server.
#elasticsearch.username: "kibana_system"
#elasticsearch.password: "pass"
# Enables SSL and paths to the PEM-format SSL certificate and SSL key files, respectively.
# These settings enable SSL for outgoing requests from the Kibana server to the browser.
#server.ssl.enabled: false
#server.ssl.certificate: /path/to/your/server.crt
#server.ssl.key: /path/to/your/server.key
# Optional settings that provide the paths to the PEM-format SSL certificate and key files.
# These files are used to verify the identity of Kibana to Elasticsearch and are required when
# xpack.security.http.ssl.client_authentication in Elasticsearch is set to required.
#elasticsearch.ssl.certificate: /path/to/your/client.crt
#elasticsearch.ssl.key: /path/to/your/client.key
# Optional setting that enables you to specify a path to the PEM file for the certificate
# authority for your Elasticsearch instance.
#elasticsearch.ssl.certificateAuthorities: [ "/path/to/your/CA.pem" ]
# To disregard the validity of SSL certificates, change this setting's value to 'none'.
#elasticsearch.ssl.verificationMode: full
# Time in milliseconds to wait for Elasticsearch to respond to pings. Defaults to the value of
# the elasticsearch.requestTimeout setting.
#elasticsearch.pingTimeout: 1500
# Time in milliseconds to wait for responses from the back end or Elasticsearch. This value
# must be a positive integer.
#elasticsearch.requestTimeout: 30000
# List of Kibana client-side headers to send to Elasticsearch. To send *no* client-side
# headers, set this value to [] (an empty list).
#elasticsearch.requestHeadersWhitelist: [ authorization ]
# Header names and values that are sent to Elasticsearch. Any custom headers cannot be overwritten
# by client-side headers, regardless of the elasticsearch.requestHeadersWhitelist configuration.
#elasticsearch.customHeaders: {}
# Time in milliseconds for Elasticsearch to wait for responses from shards. Set to 0 to disable.
#elasticsearch.shardTimeout: 30000
# Logs queries sent to Elasticsearch. Requires logging.verbose set to true.
#elasticsearch.logQueries: false
# Specifies the path where Kibana creates the process ID file.
#pid.file: /var/run/kibana.pid
# Enables you to specify a file where Kibana stores log output.
#logging.dest: stdout
# Set the value of this setting to true to suppress all logging output.
#logging.silent: false
# Set the value of this setting to true to suppress all logging output other than error messages.
#logging.quiet: false
# Set the value of this setting to true to log all events, including system usage information
# and all requests.
#logging.verbose: false
# Set the interval in milliseconds to sample system and process performance
# metrics. Minimum is 100ms. Defaults to 5000.
#ops.interval: 5000
# Specifies locale to be used for all localizable strings, dates and number formats.
# Supported languages are the following: English - en , by default , Chinese - zh-CN .
#i18n.locale: "en"
xpack:
encryptedSavedObjects:
encryptionKey: '<%= @kibana_encryption_key %>'

View file

@ -0,0 +1,15 @@
[Unit]
Description=Forgejo
Wants=basic.target
After=basic.target network.target
[Service]
WorkingDirectory=/opt/forgejo
ExecStart=/opt/forgejo/forgejo
User=forgejo
KillMode=process
Restart=on-failure
RestartSec=30s
[Install]
WantedBy=multi-user.target

View file

@ -0,0 +1,41 @@
# Installs the Forgejo git forge from an upstream release binary and runs
# it as a systemd service under a dedicated user.
class forgejo {
contain forgejo::install
contain forgejo::service
}
class forgejo::install {
user { 'forgejo':
ensure => 'present',
}
file { '/opt/forgejo':
ensure => directory,
owner => 'forgejo',
}
# NOTE(review): a changed binary is not wired to restart the service
# (no notify => Service['forgejo']) — confirm upgrades are handled manually.
file { '/opt/forgejo/forgejo':
source => 'https://codeberg.org/forgejo/forgejo/releases/download/v10.0.3/forgejo-10.0.3-linux-amd64',
ensure => file,
owner => 'forgejo',
mode => '0744'
}
}
class forgejo::service {
# Unit file changes notify the service directly, and (via ~>) trigger a
# one-shot daemon-reload so systemd picks up the new unit definition.
file { '/lib/systemd/system/forgejo.service':
ensure => file,
source => 'puppet:///modules/forgejo/forgejo.service',
notify => Service['forgejo']
}~>
exec { 'forgejo-systemd-reload':
command => 'systemctl daemon-reload',
path => [ '/usr/bin', '/bin', '/usr/sbin' ],
refreshonly => true,
}
service { 'forgejo':
ensure => running,
enable => true,
}
}

View file

@ -0,0 +1,48 @@
# Deploys the Garage S3-compatible object store plus its web UI.
class garage {
  contain garage::install
  contain garage::config
  contain garage::service
  contain garage::webui
}

# Stages the garage release binary under /opt/garage.
class garage::install {
  file { '/opt/garage':
    ensure => directory,
    # A directory needs the execute (search) bit to be traversable; the
    # previous mode 0640 made /opt/garage unenterable even by its owner.
    mode   => '0750'
  }
  file { '/opt/garage/garage':
    ensure => file,
    source => 'https://garagehq.deuxfleurs.fr/_releases/v1.1.0/x86_64-unknown-linux-musl/garage',
    mode   => '0740'
  }
}

# Renders garage.toml (RPC secret / admin tokens come from template data)
# and restarts both the daemon and the web UI when it changes.
class garage::config {
  file { '/opt/garage/garage.toml':
    ensure  => file,
    content => template('garage/conf.toml.erb'),
    mode    => '0644',
    notify  => [Service['garage'], Service['garage-webui']]
  }
}

# Installs the systemd unit (daemon-reload on change) and keeps the
# garage service enabled and running.
class garage::service {
  file { '/lib/systemd/system/garage.service':
    mode    => '0644',
    owner   => 'root',
    group   => 'root',
    content => template('garage/garage.service.erb'),
  } ~>
  exec { 'garage-systemd-reload':
    command     => 'systemctl daemon-reload',
    path        => [ '/usr/bin', '/bin', '/usr/sbin' ],
    refreshonly => true,
  }
  service { 'garage':
    ensure => running,
    enable => true,
  }
}

View file

@ -0,0 +1,35 @@
# Installs and runs the third-party Garage web UI alongside the daemon.
class garage::webui {
contain garage::webui::install
contain garage::webui::service
}
class garage::webui::install {
file { '/opt/garage-webui':
ensure => directory
}
file { '/opt/garage-webui/webui':
source => 'https://github.com/khairul169/garage-webui/releases/download/1.0.8/garage-webui-v1.0.8-linux-amd64',
ensure => 'file',
mode => '0740',
}
}
class garage::webui::service {
# Unit changes trigger a one-shot daemon-reload via the notify arrow (~>);
# the unit template points CONFIG_PATH at /opt/garage/garage.toml.
file { '/lib/systemd/system/garage-webui.service':
mode => '0644',
owner => 'root',
group => 'root',
content => template('garage/garage-webui.service.erb'),
}~>
exec { 'garage-webui-systemd-reload':
command => 'systemctl daemon-reload',
path => [ '/usr/bin', '/bin', '/usr/sbin' ],
refreshonly => true,
}
service { 'garage-webui':
ensure => running,
enable => true,
}
}

View file

@ -0,0 +1,28 @@
metadata_dir = "/opt/garage/meta"
data_dir = "/opt/garage/data"
db_engine = "sqlite"
replication_factor = 1
rpc_bind_addr = "[::]:3901"
rpc_public_addr = "127.0.0.1:3901"
rpc_secret = "<%= @garage_rpc_secret %>"
[s3_api]
s3_region = "garage"
api_bind_addr = "[::]:3900"
root_domain = ".s3.amy.mov"
[s3_web]
bind_addr = "[::]:3902"
root_domain = ".s3.amy.mov"
index = "index.html"
[k2v_api]
api_bind_addr = "[::]:3904"
[admin]
api_bind_addr = "[::]:3903"
admin_token = "<%= @garage_admin_token %>"
metrics_token = "<%= @garage_metrics_token %>"

View file

@ -0,0 +1,15 @@
[Unit]
Description=GarageHQ WebUI
Wants=basic.target
After=basic.target network.target
[Service]
WorkingDirectory=/opt/garage-webui
Environment="CONFIG_PATH=/opt/garage/garage.toml"
ExecStart=/opt/garage-webui/webui
KillMode=process
Restart=on-failure
RestartSec=30s
[Install]
WantedBy=multi-user.target

View file

@ -0,0 +1,14 @@
[Unit]
Description=GarageHQ
Wants=basic.target
After=basic.target network.target
[Service]
WorkingDirectory=/opt/garage
ExecStart=/opt/garage/garage -c /opt/garage/garage.toml server
KillMode=process
Restart=on-failure
RestartSec=30s
[Install]
WantedBy=multi-user.target

View file

@ -0,0 +1,9 @@
# Manages /etc/hosts from a template (adds the node's .cluster alias and
# static internal entries — see hosts/hosts.erb).
class hosts {
file { '/etc/hosts':
ensure => file,
owner => 'root',
group => 'root',
mode => '0644',
content => template('hosts/hosts.erb'),
}
}

View file

@ -0,0 +1,12 @@
# !! Managed by Puppet !!
127.0.0.1 localhost
::1 localhost ip6-localhost ip6-loopback
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
# --- BEGIN PVE ---
127.0.1.1 <%= @networking['hostname'] %>.cluster <%= @networking['hostname'] %>
192.168.1.200 internal-s3.amy.mov
# --- END PVE ---

View file

@ -0,0 +1,36 @@
services:
backend:
container_name: infisical-backend
restart: unless-stopped
depends_on:
redis:
condition: service_started
image: infisical/infisical:latest-postgres
pull_policy: always
env_file: .env
ports:
- 8080:8080
environment:
- NODE_ENV=production
networks:
- infisical
redis:
image: redis
container_name: infisical-dev-redis
env_file: .env
restart: always
environment:
- ALLOW_EMPTY_PASSWORD=yes
ports:
- 6379:6379
networks:
- infisical
volumes:
- redis_data:/data
volumes:
redis_data:
driver: local
networks:
infisical:

View file

@ -0,0 +1,18 @@
# Installs the Infisical CLI from the upstream .deb release.
class infisical::cli {
  contain infisical::cli::install
}

class infisical::cli::install {
  file { '/opt/infisical':
    ensure => directory
  }
  file { '/opt/infisical/infisical-cli.deb':
    ensure => file,
    source => 'https://github.com/Infisical/infisical/releases/download/infisical-cli%2Fv0.36.22/infisical_0.36.22_linux_amd64.deb'
  } ~>
  # The dpkg provider matches installation state against the package's real
  # name ("infisical"). Titling the resource with the .deb *path* never
  # matches an installed package, so it re-installed on every agent run.
  package { 'infisical':
    provider => dpkg,
    source   => '/opt/infisical/infisical-cli.deb'
  }
}

View file

@ -0,0 +1,29 @@
# Deploys the Infisical secrets manager as a docker-compose stack.
class infisical {
include docker
contain infisical::install
}
class infisical::install {
package { 'docker-compose':
ensure => installed
}
file { '/opt/infisical':
ensure => directory
}
# Static compose definition from the module's files/ directory.
file { '/opt/infisical/compose.yml':
ensure => file,
source => 'puppet:///modules/infisical/compose.yml'
}
# NOTE(review): .env is shipped as a static file here, while the authentik
# module renders its .env from a template — confirm secrets in this file
# are not committed in the clear.
file { '/opt/infisical/.env':
ensure => file,
source => 'puppet:///modules/infisical/.env'
}
docker_compose { 'infisical':
compose_files => ['/opt/infisical/compose.yml'],
ensure => present,
}
}

View file

@ -0,0 +1,124 @@
# Keys
# Required key for platform encryption/decryption ops
# THIS IS A SAMPLE ENCRYPTION KEY AND SHOULD NEVER BE USED FOR PRODUCTION
ENCRYPTION_KEY="<%= @infisical_encryption_key %>"
# JWT
# Required secrets to sign JWT tokens
# THIS IS A SAMPLE AUTH_SECRET KEY AND SHOULD NEVER BE USED FOR PRODUCTION
AUTH_SECRET="<%= @infisical_auth_secret %>"
# Postgres creds
PG_HOST="<%= @infisical_pg_host %>"
PG_USER="<%= @infisical_pg_user %>"
PG_PASS="<%= @infisical_pg_pass %>"
PG_DB="<%= @infisical_pg_db %>"
# Required
DB_CONNECTION_URI=postgres://${PG_USER}:${PG_PASS}@${PG_HOST}:5432/${PG_DB}
# Redis
REDIS_URL=redis://redis:6379
# Website URL
# Required
SITE_URL=http://localhost:8080
# Mail/SMTP
SMTP_HOST=
SMTP_PORT=
SMTP_FROM_ADDRESS=
SMTP_FROM_NAME=
SMTP_USERNAME=
SMTP_PASSWORD=
# Integration
# Optional only if integration is used
CLIENT_ID_HEROKU=
CLIENT_ID_VERCEL=
CLIENT_ID_NETLIFY=
CLIENT_ID_GITHUB=
CLIENT_ID_GITHUB_APP=
CLIENT_SLUG_GITHUB_APP=
CLIENT_ID_GITLAB=
CLIENT_ID_BITBUCKET=
CLIENT_SECRET_HEROKU=
CLIENT_SECRET_VERCEL=
CLIENT_SECRET_NETLIFY=
CLIENT_SECRET_GITHUB=
CLIENT_SECRET_GITHUB_APP=
CLIENT_SECRET_GITLAB=
CLIENT_SECRET_BITBUCKET=
CLIENT_SLUG_VERCEL=
CLIENT_PRIVATE_KEY_GITHUB_APP=
CLIENT_APP_ID_GITHUB_APP=
# Sentry (optional) for monitoring errors
SENTRY_DSN=
# Infisical Cloud-specific configs
# Ignore - Not applicable for self-hosted version
POSTHOG_HOST=
POSTHOG_PROJECT_API_KEY=
# SSO-specific variables
CLIENT_ID_GOOGLE_LOGIN=
CLIENT_SECRET_GOOGLE_LOGIN=
CLIENT_ID_GITHUB_LOGIN=
CLIENT_SECRET_GITHUB_LOGIN=
CLIENT_ID_GITLAB_LOGIN=
CLIENT_SECRET_GITLAB_LOGIN=
CAPTCHA_SECRET=
NEXT_PUBLIC_CAPTCHA_SITE_KEY=
OTEL_TELEMETRY_COLLECTION_ENABLED=false
OTEL_EXPORT_TYPE=prometheus
OTEL_EXPORT_OTLP_ENDPOINT=
OTEL_OTLP_PUSH_INTERVAL=
OTEL_COLLECTOR_BASIC_AUTH_USERNAME=
OTEL_COLLECTOR_BASIC_AUTH_PASSWORD=
PLAIN_API_KEY=
PLAIN_WISH_LABEL_IDS=
SSL_CLIENT_CERTIFICATE_HEADER_KEY=
ENABLE_MSSQL_SECRET_ROTATION_ENCRYPT=true
# App Connections
# aws assume-role connection
INF_APP_CONNECTION_AWS_ACCESS_KEY_ID=
INF_APP_CONNECTION_AWS_SECRET_ACCESS_KEY=
# github oauth connection
INF_APP_CONNECTION_GITHUB_OAUTH_CLIENT_ID=
INF_APP_CONNECTION_GITHUB_OAUTH_CLIENT_SECRET=
#github app connection
INF_APP_CONNECTION_GITHUB_APP_CLIENT_ID=
INF_APP_CONNECTION_GITHUB_APP_CLIENT_SECRET=
INF_APP_CONNECTION_GITHUB_APP_PRIVATE_KEY=
INF_APP_CONNECTION_GITHUB_APP_SLUG=
INF_APP_CONNECTION_GITHUB_APP_ID=
#gcp app connection
INF_APP_CONNECTION_GCP_SERVICE_ACCOUNT_CREDENTIAL=
# azure app connection
INF_APP_CONNECTION_AZURE_CLIENT_ID=
INF_APP_CONNECTION_AZURE_CLIENT_SECRET=
# datadog
SHOULD_USE_DATADOG_TRACER=
DATADOG_PROFILING_ENABLED=
DATADOG_ENV=
DATADOG_SERVICE=
DATADOG_HOSTNAME=

View file

@ -0,0 +1,2 @@
ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBK0MxN/ReEZsnPUWJz+UEq8okZIri+hDXClO/EUsaSFeQtuf5unr5zZ9ErMGmPTbyBloBEh7ZauFVmpwn6y9n9M= root@puppet-server
ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBK3ukunRoN0+GupDQwujcN3htQGERmEB+Sd5f33PqNhNXsR2EfBwg463lYiCRZo9CQ/hDjrYv5A9TLg8us1B5iA= amy@nixon

View file

@ -0,0 +1,8 @@
# Installs root's authorized_keys from the module's bundled public keys
# (puppet-push.pub), replacing any manually added keys.
class keys {
file { '/root/.ssh/authorized_keys':
source => 'puppet:///modules/keys/puppet-push.pub',
owner => 'root',
group => 'root',
mode => '0640',
}
}

View file

@ -0,0 +1,43 @@
class owncloud {
contain owncloud::install
contain owncloud::config
contain owncloud::service
}
class owncloud::install {
file { '/opt/owncloud':
ensure => directory
}
file { '/opt/owncloud/ocis':
ensure => file,
source => 'https://github.com/owncloud/ocis/releases/download/v7.1.2/ocis-7.1.2-linux-amd64',
mode => '0744'
}
}
class owncloud::config {
file { '/opt/owncloud/ocis.yaml':
ensure => file,
source => 'puppet:///modules/owncloud/ocis.yaml',
notify => Service['owncloud']
}
}
class owncloud::service {
file { '/lib/systemd/system/owncloud.service':
ensure => file,
content => template('owncloud/owncloud.service.erb'),
notify => Service['owncloud']
}~>
exec { 'owncloud-systemd-reload':
command => 'systemctl daemon-reload',
path => [ '/usr/bin', '/bin', '/usr/sbin' ],
refreshonly => true,
}
service { 'owncloud':
ensure => running,
enable => true,
}
}

View file

@ -0,0 +1,32 @@
[Unit]
Description=OwnCloud
Wants=basic.target
After=basic.target network.target
[Service]
WorkingDirectory=/opt/owncloud
ExecStart=/opt/owncloud/ocis server
Environment="PROXY_HTTP_ADDR=0.0.0.0:9200"
Environment="OCIS_URL=https://cloud.amy.mov"
Environment="OCIS_BASE_DATA_PATH=/opt/owncloud/"
Environment="OCIS_CONFIG_DIR=/opt/owncloud/"
Environment="OCIS_INSECURE=true"
Environment="STORAGE_USERS_DRIVER=s3ng"
Environment="STORAGE_HOME_DRIVER=s3ng"
Environment="STORAGE_METADATA_DRIVER=ocis"
Environment="STORAGE_USERS_S3NG_REGION=garage"
Environment="STORAGE_USERS_S3NG_ENDPOINT=https://internal-s3.amy.mov"
Environment="STORAGE_USERS_S3NG_SECRET_KEY=<%= @oc_s3_secret_key %>"
Environment="STORAGE_USERS_S3NG_ACCESS_KEY=<%= @oc_s3_access_key %>"
Environment="STORAGE_USERS_S3NG_BUCKET=cloud"
KillMode=process
Restart=on-failure
RestartSec=30s
[Install]
WantedBy=multi-user.target

View file

@ -0,0 +1,34 @@
# Installs PostgreSQL 16 from the PGDG apt repository plus pgAdmin.
class postgresql {
  contain postgresql::install
  contain postgresql::pgadmin
  # Without this containment, postgresql::service is never evaluated and
  # the postgresql service goes unmanaged (every other module in this repo
  # contains its service class).
  contain postgresql::service
}

class postgresql::install {
  # https://www.postgresql.org/download/linux/debian/
  # Chain: install pgdg tooling -> add PGDG repo -> apt update -> install
  # server + client. Each ~> keeps the downstream step refresh-driven.
  package { 'postgresql-common':
    ensure => installed
  } ~>
  exec { 'postgresql-install':
    command     => '/usr/share/postgresql-common/pgdg/apt.postgresql.org.sh -y',
    refreshonly => true,
  } ~>
  exec { 'postgresql-apt-update':
    command     => 'apt update',
    path        => ['/usr/bin'],
    refreshonly => true,
  } ~>
  package { 'postgresql-16':
    ensure => installed
  } ~>
  package { 'postgresql-client-16':
    ensure => installed
  }
}

# Keeps the postgresql service enabled and running.
class postgresql::service {
  service { 'postgresql':
    ensure => running,
    enable => true
  }
}

View file

@ -0,0 +1,42 @@
# Installs pgAdmin 4 (web mode) from the upstream apt repository and wires
# it up for OAuth2 login via config_local.py.
class postgresql::pgadmin {
  contain postgresql::pgadmin::install
  contain postgresql::pgadmin::config
}

class postgresql::pgadmin::install {
  apt::source { 'pgadmin':
    comment  => 'PGAdmin Repo',
    location => 'https://ftp.postgresql.org/pub/pgadmin/pgadmin4/apt/bookworm',
    release  => 'pgadmin4',
    repos    => 'main',
    key      => {
      'name'   => 'pgadmin.pub',
      'source' => 'https://www.pgadmin.org/static/packages_pgadmin_org.pub',
    },
    include  => {
      'deb' => true,
    },
  }
  package { 'pgadmin4-web':
    ensure => installed
  }
}

class postgresql::pgadmin::config {
  # One-shot initial setup of the web frontend.
  exec { 'pgadmin-setup':
    command     => '/usr/pgadmin4/bin/setup-web.sh --yes',
    environment => [
      # NOTE(review): these read top-scope variables; confirm they are set
      # (ENC/hiera) — an undef here becomes an empty string.
      "PGADMIN_SETUP_EMAIL=${pg_setup_email}",
      "PGADMIN_SETUP_PASSWORD=${pg_setup_password}"
    ],
    path        => ['/usr/bin'],
    # Both paths are directories, so the previous `test -f` guards could
    # never succeed and setup-web.sh re-ran on every agent run. With an
    # array, *all* commands must return 0 for the exec to be skipped.
    unless      => ['test -d /var/lib/pgadmin', 'test -d /var/log/pgadmin']
  }
  file { '/usr/pgadmin4/web/config_local.py':
    ensure  => file,
    content => template('postgresql/config_local.py.erb')
  }
}

View file

@ -0,0 +1,17 @@
AUTHENTICATION_SOURCES = ['oauth2', 'internal']
OAUTH2_AUTO_CREATE_USER = True
OAUTH2_CONFIG = [{
'OAUTH2_NAME': '<%= @pg_oauth2_name %>',
'OAUTH2_DISPLAY_NAME': '<%= @pg_oauth2_display_name %>',
'OAUTH2_CLIENT_ID': '<%= @pg_oauth2_client_id %>',
'OAUTH2_CLIENT_SECRET': '<%= @pg_oauth2_client_secret %>',
'OAUTH2_TOKEN_URL': '<%= @pg_oauth2_token_url %>',
'OAUTH2_AUTHORIZATION_URL': '<%= @pg_oauth2_authorization_url %>',
'OAUTH2_API_BASE_URL': '<%= @pg_oauth2_api_base_url %>',
'OAUTH2_USERINFO_ENDPOINT': '<%= @pg_oauth2_userinfo_endpoint %>',
'OAUTH2_SERVER_METADATA_URL': '<%= @pg_oauth2_server_metadata_url %>',
'OAUTH2_SCOPE': '<%= @pg_oauth2_scope %>',
'OAUTH2_ICON': '<%= @pg_oauth2_icon %>'
}]
WTF_CSRF_HEADERS = ['X-CSRF']

View file

@ -0,0 +1 @@
cert/

View file

@ -0,0 +1,137 @@
map $http_upgrade $connection_upgrade_keepalive {
default upgrade;
'' '';
}
server {
listen 443 ssl;
server_name auth.amy.mov;
resolver 192.168.1.155;
location / {
proxy_http_version 1.1;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Host $http_host;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade_keepalive;
proxy_pass http://authentik.cluster:9000;
}
}
server {
listen 443 ssl;
server_name garage.amy.mov;
proxy_buffers 8 16k;
proxy_buffer_size 32k;
location / {
# Where should the authenticated requests go
proxy_pass http://garage.cluster:3909;
proxy_set_header Host $host;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade_keepalive;
auth_request /outpost.goauthentik.io/auth/nginx;
error_page 401 = @goauthentik_proxy_signin;
auth_request_set $auth_cookie $upstream_http_set_cookie;
add_header Set-Cookie $auth_cookie;
auth_request_set $authentik_username $upstream_http_x_authentik_username;
auth_request_set $authentik_groups $upstream_http_x_authentik_groups;
auth_request_set $authentik_entitlements $upstream_http_x_authentik_entitlements;
auth_request_set $authentik_email $upstream_http_x_authentik_email;
auth_request_set $authentik_name $upstream_http_x_authentik_name;
auth_request_set $authentik_uid $upstream_http_x_authentik_uid;
proxy_set_header X-authentik-username $authentik_username;
proxy_set_header X-authentik-groups $authentik_groups;
proxy_set_header X-authentik-entitlements $authentik_entitlements;
proxy_set_header X-authentik-email $authentik_email;
proxy_set_header X-authentik-name $authentik_name;
proxy_set_header X-authentik-uid $authentik_uid;
}
# All requests to /outpost.goauthentik.io must be accessible without authentication
location /outpost.goauthentik.io {
proxy_pass http://authentik.cluster:9000/outpost.goauthentik.io;
# Note: ensure the Host header matches your external authentik URL:
proxy_set_header Host $host;
proxy_set_header X-Original-URL $scheme://$http_host$request_uri;
add_header Set-Cookie $auth_cookie;
auth_request_set $auth_cookie $upstream_http_set_cookie;
proxy_pass_request_body off;
proxy_set_header Content-Length "";
}
location @goauthentik_proxy_signin {
internal;
add_header Set-Cookie $auth_cookie;
return 302 /outpost.goauthentik.io/start?rd=$scheme://$http_host$request_uri;
}
}
server {
listen 443 ssl;
server_name calibre.amy.mov;
proxy_buffers 8 16k;
proxy_buffer_size 32k;
location / {
# Where should the authenticated requests go
proxy_pass http://calibre.cluster:8080;
proxy_set_header Host $host;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade_keepalive;
auth_request /outpost.goauthentik.io/auth/nginx;
error_page 401 = @goauthentik_proxy_signin;
auth_request_set $auth_cookie $upstream_http_set_cookie;
add_header Set-Cookie $auth_cookie;
auth_request_set $authentik_username $upstream_http_x_authentik_username;
auth_request_set $authentik_groups $upstream_http_x_authentik_groups;
auth_request_set $authentik_entitlements $upstream_http_x_authentik_entitlements;
auth_request_set $authentik_email $upstream_http_x_authentik_email;
auth_request_set $authentik_name $upstream_http_x_authentik_name;
auth_request_set $authentik_uid $upstream_http_x_authentik_uid;
proxy_set_header X-authentik-username $authentik_username;
proxy_set_header X-authentik-groups $authentik_groups;
proxy_set_header X-authentik-entitlements $authentik_entitlements;
proxy_set_header X-authentik-email $authentik_email;
proxy_set_header X-authentik-name $authentik_name;
proxy_set_header X-authentik-uid $authentik_uid;
# Since we're overwriting the Basic Auth headers
auth_request_set $authentik_auth $upstream_http_authorization;
proxy_set_header Authorization $authentik_auth;
}
# All requests to /outpost.goauthentik.io must be accessible without authentication
location /outpost.goauthentik.io {
proxy_pass http://authentik.cluster:9000/outpost.goauthentik.io;
# Note: ensure the Host header matches your external authentik URL:
proxy_set_header Host $host;
proxy_set_header X-Original-URL $scheme://$http_host$request_uri;
add_header Set-Cookie $auth_cookie;
auth_request_set $auth_cookie $upstream_http_set_cookie;
proxy_pass_request_body off;
proxy_set_header Content-Length "";
}
location @goauthentik_proxy_signin {
internal;
add_header Set-Cookie $auth_cookie;
return 302 /outpost.goauthentik.io/start?rd=$scheme://$http_host$request_uri;
}
}

View file

@ -0,0 +1,13 @@
server {
listen 443 ssl;
server_name blog.amy.mov;
location / {
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header Host $http_host;
proxy_pass http://192.168.1.210:2368;
}
}

View file

@ -0,0 +1,13 @@
# !! Managed by Puppet !!
server {
listen 443 ssl;
server_name cloud.amy.mov;
location / {
proxy_pass https://owncloud.cluster:9200;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
}
}

View file

@ -0,0 +1,13 @@
# !! Managed by Puppet !!
server {
listen 443 ssl;
server_name fedi.amy.mov;
location / {
proxy_pass http://192.168.1.201:3000;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
}
}

View file

@ -0,0 +1,14 @@
# !! Managed by Puppet !!
server {
listen 443 ssl;
server_name forge.amy.mov;
resolver 192.168.1.155;
location / {
proxy_pass http://forgejo.cluster:3000;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
}
}

View file

@ -0,0 +1,11 @@
# internal-s3.amy.mov — TLS termination in front of the Garage S3 API (port 3900).
server {
listen 443 ssl;
server_name internal-s3.amy.mov;
# NOTE(review): a literal proxy_pass hostname is resolved once at startup; the
# resolver directive only takes effect when variables are used — confirm intent.
resolver 192.168.1.155;
location / {
# Pin the Host header so Garage serves this as the internal-s3 vhost.
proxy_set_header Host internal-s3.amy.mov;
proxy_pass http://garage.cluster:3900;
}
}

View file

@ -0,0 +1,13 @@
# pg.amy.mov — TLS termination in front of an HTTP service on postgresql.cluster.
server {
listen 443 ssl;
server_name pg.amy.mov;
location / {
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header Host $http_host;
# NOTE(review): this proxies HTTP (port 80), not the Postgres wire protocol —
# presumably a web UI runs on postgresql.cluster; confirm the intended target.
proxy_pass http://postgresql.cluster;
}
}

View file

@ -0,0 +1,15 @@
# s3.amy.mov — public Garage endpoint (port 3902), rewriting path-style
# bucket requests into virtual-host-style requests.
server {
listen 443 ssl;
server_name s3.amy.mov;
resolver 192.168.1.155;
# Rewriting path based buckets to vhost buckets, ie:
# s3.amy.mov/test-bucket => test-bucket.s3.amy.mov
# $1 = bucket name (first path segment), $2 = remainder of the path.
# NOTE(review): a request for exactly "/" matches no location (the regex needs
# at least one non-slash character) and falls through to a 404 — confirm intent.
location ~ ^/([^/]+)(.*)$ {
# Set the host so Garage thinks we are coming from a vhost
proxy_set_header Host $1.s3.amy.mov;
# But pass the path ($2) to the real proxy
proxy_pass http://garage.cluster:3902$2;
}
}

View file

@ -0,0 +1,13 @@
server {
listen 443 ssl;
server_name secrets.amy.mov;
location / {
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header Host $http_host;
proxy_pass http://infisical.cluster:8080;
}
}

View file

@ -0,0 +1,31 @@
# !! Managed by Puppet !!
# Main nginx configuration for the reverse proxy host. Per-site server blocks
# live in /etc/nginx/conf.d/* and are deployed by the reverse_proxy module.
user www-data;
worker_processes auto;
pid /run/nginx.pid;

events {
    worker_connections 768;
}

http {
    access_log /var/log/nginx/access.log;
    error_log /var/log/nginx/error.log;

    sendfile on;
    tcp_nopush on;
    types_hash_max_size 2048;
    # Allow moderately large uploads (e.g. media) through the proxy.
    client_max_body_size 15M;

    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    # TLSv1 and TLSv1.1 are formally deprecated (RFC 8996) and weaken every
    # vhost behind this proxy; permit only TLSv1.2 and TLSv1.3.
    ssl_protocols TLSv1.2 TLSv1.3;
    ssl_prefer_server_ciphers on;
    # Origin certificate pair shared by all vhosts.
    ssl_certificate /etc/nginx/cert/cf.pem;
    ssl_certificate_key /etc/nginx/cert/cf.key;

    include /etc/nginx/conf.d/*;
}

View file

@ -0,0 +1,56 @@
# Entry point for the reverse proxy role: install nginx, deploy its
# configuration, and keep the service running.
class reverse_proxy {
  contain reverse_proxy::install
  contain reverse_proxy::config
  contain reverse_proxy::service

  # `contain` alone declares no ordering between the sub-classes; enforce the
  # standard install -> config -> service pattern, and refresh (restart) the
  # service when the configuration changes.
  Class['reverse_proxy::install']
  -> Class['reverse_proxy::config']
  ~> Class['reverse_proxy::service']
}
# Installs the nginx package itself.
class reverse_proxy::install {
  package { 'nginx':
    ensure => 'installed',
  }
}
# Deploys one nginx configuration artifact (file or directory entry) from the
# module fileserver into the destination tree, reloading nginx on change.
#
# @param dest_base   Root of the destination tree (e.g. /etc/nginx).
# @param source_base Fileserver root to copy "${name}" from.
# @param group       Group owner of the deployed file.
# @param owner       User owner of the deployed file.
# @param mode        File mode (config may hold credentials, hence 0640).
define reverse_proxy::conf_file (
  $dest_base,
  $source_base,
  $group = 'www-data',
  $owner = 'www-data',
  $mode  = '0640',
) {
  file { "${dest_base}/${name}":
    ensure => 'present',
    source => "${source_base}/${name}",
    owner  => $owner,
    group  => $group,
    mode   => $mode,
    # Any change to deployed config triggers an nginx reload.
    notify => Service['nginx'],
  }
}
# Deploys every nginx configuration artifact this module manages.
# Each list entry becomes /etc/nginx/<name>, copied from the module's files/.
class reverse_proxy::config {
$config_files = [
# Make our dirs first
'conf.d', 'cert',
# Then we can populate them
'conf.d/fedi.amy.mov', 'conf.d/s3.amy.mov', 'conf.d/blog.amy.mov', 'conf.d/auth.amy.mov', 'conf.d/pg.amy.mov',
'conf.d/forge.amy.mov', 'conf.d/cloud.amy.mov', 'conf.d/secrets.amy.mov',
'conf.d/internal-s3.amy.mov',
'cert/cf.key', 'cert/cf.pem',
'nginx.conf'
]
# One conf_file resource per entry (array of resource titles).
reverse_proxy::conf_file { $config_files:
source_base => 'puppet:///modules/reverse_proxy',
dest_base => '/etc/nginx'
}
}
# Keeps nginx running and enabled at boot.
class reverse_proxy::service {
  service { 'nginx':
    ensure => 'running',
    enable => true,
  }
}

View file

@ -0,0 +1,74 @@
# Entry point for the UniFi controller role: install the package stack and
# keep the mongod/unifi services running.
class unifi {
  contain unifi::install
  contain unifi::service

  # `contain` declares no ordering on its own; make sure the packages exist
  # before Puppet tries to manage their services.
  Class['unifi::install'] -> Class['unifi::service']
}
# APT repositories required by the UniFi controller and its MongoDB backend.
class unifi::repos {
apt::source { 'unifi':
comment => 'UniFi Repo',
location => 'https://www.ui.com/downloads/unifi/debian',
release => 'stable',
repos => 'ubiquiti',
key => {
'name' => 'unifi.gpg',
'source' => 'https://dl.ui.com/unifi/unifi-repo.gpg',
},
include => {
'deb' => true,
},
}
# NOTE(review): MongoDB 4.4 on buster is end-of-life and the repo location is
# plain http:// (packages are still GPG-verified) — consider a newer series.
apt::source { 'mongodb':
comment => 'MongoDB',
location => 'http://repo.mongodb.org/apt/debian',
release => 'buster/mongodb-org/4.4',
repos => 'main',
key => {
'name' => 'mongodb.asc',
'source' => 'https://www.mongodb.org/static/pgp/server-4.4.asc',
},
include => {
'deb' => true,
},
}
}
# Ships libssl1.1 (no longer in current Debian/Ubuntu) as a module-bundled
# .deb and installs it via dpkg — presumably required by the UniFi/MongoDB
# stack; confirm against the controller's dependencies.
# From https://archive.ubuntu.com/ubuntu/pool/main/o/openssl/
class unifi::libssl {
  file { '/opt/libssl1.1.deb':
    ensure => file,
    source => 'puppet:///modules/unifi/libssl1.1.deb',
  }
  # Refresh-install the package whenever the staged .deb changes.
  ~> package { 'libssl1.1':
    provider => dpkg,
    source   => '/opt/libssl1.1.deb',
  }
}
# Installs the MongoDB backend, a Java runtime, and the UniFi controller.
class unifi::install {
  contain unifi::repos
  contain unifi::libssl

  # NOTE(review): there is no explicit ordering between unifi::repos and these
  # packages — verify the apt module's auto-ordering covers the first run.
  package { ['mongodb-org']:
    ensure => installed,
  }
  package { ['openjdk-17-jre-headless', 'java-common', 'unifi']:
    ensure => installed,
  }
}
# Keeps the MongoDB backend and the UniFi controller running and boot-enabled.
class unifi::service {
  service { ['mongod', 'unifi']:
    ensure => running,
    enable => true,
  }
}

View file

@ -0,0 +1,5 @@
# Baseline command-line utilities installed on every node.
class utils {
  package { ['git', 'net-tools', 'curl', 'sudo']:
    ensure => 'installed',
  }
}