feat: initial commit

amy 2025-04-01 17:40:03 +00:00
commit 38f495e3f4
457 changed files with 40577 additions and 0 deletions

.gitignore
@@ -0,0 +1,4 @@
secrets.pp
.env
.infisical.json
ocis.yaml

.vscode/settings.json
@@ -0,0 +1,4 @@
{
"deno.enable": true,
"deno.path": "./secrets/deno-src/deno"
}

README.md
@@ -0,0 +1,22 @@
# fruit-bowl automation
## agent bootstrap
```console
apt update && apt install curl -y && curl -sSL https://s3.amy.mov/cluster/agent-bootstrap.sh | sh
```
(then, on the Puppet server)
```console
puppetserver ca sign --certname <host>.cluster
```
## system bootstrap
### deps
- Deno (for running scripts)
- g10k (for pulling Forge modules)
### process
- generate a sample secrets file (the git-ignored `secrets.pp`) that will be migrated into Infisical once it is up (see the sketch below)
- create databases/users in accordance with `config.pp` so services can come up
- set values in `environments/production/config.pp`
- add certs into the reverse proxy (`files/cert/cf.{key,pem}` in the reverse_proxy module)
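
As a rough illustration of that first step, a sample `secrets.pp` could define the sensitive top-scope variables the templates in this commit expect (for example `@authentik_pg_pass`, `@garage_rpc_secret`, `@kibana_encryption_key`). This is a hedged sketch with placeholder values only, not a file from this commit; how the manifest is loaded alongside `config.pp` is assumed to follow the same pattern.

```puppet
# secrets.pp -- git-ignored sample; every value below is a placeholder.
$authentik_pg_pass        = 'changeme'
$authentik_secret_key     = 'changeme'
$authentik_rac_token      = 'changeme'
$infisical_encryption_key = 'changeme'
$infisical_auth_secret    = 'changeme'
$garage_rpc_secret        = 'changeme'
$garage_admin_token       = 'changeme'
$garage_metrics_token     = 'changeme'
$kibana_encryption_key    = 'changeme'
$oc_s3_access_key         = 'changeme'
$oc_s3_secret_key         = 'changeme'
$pg_oauth2_client_id      = 'changeme'
$pg_oauth2_client_secret  = 'changeme'
$pg_setup_email           = 'changeme@example.com'
$pg_setup_password        = 'changeme'
```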

@@ -0,0 +1,5 @@
moduledir 'thirdparty'
mod 'puppetlabs-apt', '10.0.1'
mod 'puppetlabs-stdlib', '9.7.0'
mod 'puppetlabs-docker', '10.2.0'

@@ -0,0 +1 @@
modulepath = ./thirdparty:./modules:$basemodulepath

@@ -0,0 +1,20 @@
$authentik_host = 'https://auth.amy.mov/'
$authentik_pg_host = 'postgresql.cluster'
$authentik_pg_db = 'authentik'
$authentik_pg_user = 'authentik'
$infisical_pg_host = 'postgresql.cluster'
$infisical_pg_pass = 'infisical'
$infisical_pg_user = 'infisical'
$infisical_pg_db = 'infisical'
$pg_oauth2_name = 'authentik'
$pg_oauth2_display_name = 'authentik'
$pg_oauth2_token_url = 'https://auth.amy.mov/application/o/token/'
$pg_oauth2_authorization_url = 'https://auth.amy.mov/application/o/authorize/'
$pg_oauth2_api_base_url = 'https://auth.amy.mov/'
$pg_oauth2_userinfo_endpoint = 'https://auth.amy.mov/application/o/userinfo/'
$pg_oauth2_server_metadata_url = 'https://auth.amy.mov/application/o/pgadmin/.well-known/openid-configuration'
$pg_oauth2_scope = 'openid email profile'
$pg_oauth2_icon = 'fa-key'

@@ -0,0 +1,63 @@
$nameservers = ['192.168.1.155', '1.1.1.1']
include stdlib
include apt
include hosts
include dns
include utils
include keys
# For nodes that haven't got the service enabled
service { 'puppet':
ensure => 'running',
enable => true
}
# This server itself
node 'puppet-server.cluster' {
include infisical::cli
}
node 'nginx.cluster' {
include elastic::filebeat
include reverse_proxy
}
node 'garage.cluster' {
include garage
}
node 'postgresql.cluster' {
include postgresql
}
node 'unifi.cluster' {
include unifi
}
node 'authentik.cluster' {
include authentik
}
node 'forgejo.cluster' {
include forgejo
}
node 'kibana.cluster' {
include elastic::kibana
}
node 'elasticsearch.cluster' {
include elastic::search
}
node 'owncloud.cluster' {
include owncloud
}
node 'infisical.cluster' {
include infisical
}
node default {}
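
For illustration only (the host and module below are hypothetical, not part of this commit), bringing another VM under management follows the same pattern: bootstrap the agent as described in the README, then give the host its own node block here.

```puppet
# Hypothetical example: a new VM picks up the cluster-wide baseline above
# (hosts, dns, keys, utils, the puppet service) plus its role module.
node 'metrics.cluster' {
  include elastic::filebeat   # ship its logs, as nginx.cluster does
  include metrics             # role module that would live under ./modules
}
```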

@@ -0,0 +1,72 @@
services:
  redis:
    image: docker.io/library/redis:alpine
    command: --save 60 1 --loglevel warning
    restart: unless-stopped
    healthcheck:
      test: ["CMD-SHELL", "redis-cli ping | grep PONG"]
      start_period: 20s
      interval: 30s
      retries: 5
      timeout: 3s
    volumes:
      - redis:/data
  server:
    image: ${AUTHENTIK_IMAGE:-ghcr.io/goauthentik/server}:${AUTHENTIK_TAG:-2025.2.3}
    restart: unless-stopped
    command: server
    environment:
      AUTHENTIK_REDIS__HOST: redis
      AUTHENTIK_POSTGRESQL__HOST: ${PG_HOST:-postgresql.cluster}
      AUTHENTIK_POSTGRESQL__USER: ${PG_USER:-authentik}
      AUTHENTIK_POSTGRESQL__NAME: ${PG_DB:-authentik}
      AUTHENTIK_POSTGRESQL__PASSWORD: ${PG_PASS}
    volumes:
      - ./media:/media
      - ./custom-templates:/templates
    env_file:
      - .env
    ports:
      - "${COMPOSE_PORT_HTTP:-9000}:9000"
      - "${COMPOSE_PORT_HTTPS:-9443}:9443"
    depends_on:
      redis:
        condition: service_healthy
  rac:
    image: ghcr.io/goauthentik/rac:${AUTHENTIK_TAG:-2025.2.3}
    restart: unless-stopped
    env_file:
      - .env
    depends_on:
      - server
  worker:
    image: ${AUTHENTIK_IMAGE:-ghcr.io/goauthentik/server}:${AUTHENTIK_TAG:-2025.2.3}
    restart: unless-stopped
    command: worker
    environment:
      AUTHENTIK_REDIS__HOST: redis
      AUTHENTIK_POSTGRESQL__HOST: ${PG_HOST:-postgresql.cluster}
      AUTHENTIK_POSTGRESQL__USER: ${PG_USER:-authentik}
      AUTHENTIK_POSTGRESQL__NAME: ${PG_DB:-authentik}
      AUTHENTIK_POSTGRESQL__PASSWORD: ${PG_PASS}
    # `user: root` and the docker socket volume are optional.
    # See more for the docker socket integration here:
    # https://goauthentik.io/docs/outposts/integrations/docker
    # Removing `user: root` also prevents the worker from fixing the permissions
    # on the mounted folders, so when removing this make sure the folders have the correct UID/GID
    # (1000:1000 by default)
    user: root
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
      - ./media:/media
      - ./certs:/certs
      - ./custom-templates:/templates
    env_file:
      - .env
    depends_on:
      redis:
        condition: service_healthy
volumes:
  redis:
    driver: local

@@ -0,0 +1,30 @@
class authentik {
include docker
contain authentik::install
}
class authentik::install {
package { 'docker-compose':
ensure => installed
}
file { '/opt/authentik':
ensure => directory
}
file { '/opt/authentik/compose.yml':
ensure => file,
source => 'puppet:///modules/authentik/compose.yml'
}
file { '/opt/authentik/.env':
ensure => file,
content => template('authentik/.env.erb')
}
docker_compose { 'authentik':
compose_files => ['/opt/authentik/compose.yml'],
ensure => present,
}
}

@@ -0,0 +1,9 @@
AUTHENTIK_HOST=<%= @authentik_host %>
AUTHENTIK_INSECURE=false
PG_HOST=<%= @authentik_pg_host %>
PG_USER=<%= @authentik_pg_user %>
PG_PASS=<%= @authentik_pg_pass %>
AUTHENTIK_SECRET_KEY=<%= @authentik_secret_key %>
AUTHENTIK_TOKEN=<%= @authentik_rac_token %>

@@ -0,0 +1,9 @@
class dns {
file { '/etc/resolv.conf':
ensure => file,
owner => 'root',
group => 'root',
mode => '0644',
content => template('dns/resolv.conf.erb'),
}
}

@@ -0,0 +1,8 @@
# !! Managed by Puppet !!
domain cluster
search cluster
<% [@nameservers].flatten.each do |ns| -%>
nameserver <%= ns %>
<% end -%>

@@ -0,0 +1,88 @@
# ======================== Elasticsearch Configuration =========================
#
# NOTE: Elasticsearch comes with reasonable defaults for most settings.
# Before you set out to tweak and tune the configuration, make sure you
# understand what are you trying to accomplish and the consequences.
#
# The primary way of configuring a node is via this file. This template lists
# the most important settings you may want to configure for a production cluster.
#
# Please consult the documentation for further information on configuration options:
# https://www.elastic.co/guide/en/elasticsearch/reference/index.html
#
# ---------------------------------- Cluster -----------------------------------
#
# Use a descriptive name for your cluster:
#
#cluster.name: my-application
#
# ------------------------------------ Node ------------------------------------
#
# Use a descriptive name for the node:
#
node.name: es-1
#
# Add custom attributes to the node:
#
#node.attr.rack: r1
#
# ----------------------------------- Paths ------------------------------------
#
# Path to directory where to store the data (separate multiple locations by comma):
#
path.data: /var/lib/elasticsearch
#
# Path to log files:
#
path.logs: /var/log/elasticsearch
#
# ----------------------------------- Memory -----------------------------------
#
# Lock the memory on startup:
#
#bootstrap.memory_lock: true
#
# Make sure that the heap size is set to about half the memory available
# on the system and that the owner of the process is allowed to use this
# limit.
#
# Elasticsearch performs poorly when the system is swapping the memory.
#
# ---------------------------------- Network -----------------------------------
#
# Set the bind address to a specific IP (IPv4 or IPv6):
#
network.host: 0.0.0.0
#
# Set a custom port for HTTP:
#
#http.port: 9200
#
# For more information, consult the network module documentation.
#
# --------------------------------- Discovery ----------------------------------
#
# Pass an initial list of hosts to perform discovery when this node is started:
# The default list of hosts is ["127.0.0.1", "[::1]"]
#
#discovery.seed_hosts: ["host1", "host2"]
#
# Bootstrap the cluster using an initial set of master-eligible nodes:
#
cluster.initial_master_nodes: ["es-1"]
#
# For more information, consult the discovery and cluster formation module documentation.
#
# ---------------------------------- Gateway -----------------------------------
#
# Block initial recovery after a full cluster restart until N nodes are started:
#
#gateway.recover_after_nodes: 3
#
# For more information, consult the gateway module documentation.
#
# ---------------------------------- Various -----------------------------------
#
# Require explicit names when deleting indices:
#
#action.destructive_requires_name: true

@@ -0,0 +1,18 @@
class elastic::filebeat {
contain elastic::filebeat::install
}
class elastic::filebeat::install {
file { '/opt/filebeat':
ensure => directory
}
file { '/opt/filebeat/filebeat.deb':
ensure => file,
source => 'https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-7.10.2-amd64.deb'
} ~>
package { 'filebeat':
provider => dpkg,
source => "/opt/filebeat/filebeat.deb"
}
}
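
elastic::filebeat only installs the package; unlike elastic::kibana and elastic::search there is no config or service class, so filebeat is never pointed at Elasticsearch or enabled. A minimal sketch of what the missing pieces could look like, assuming a filebeat.yml.erb template were added to the module (both class names and the template are hypothetical, not part of this commit):

```puppet
# Sketch only -- mirrors the layout of elastic::kibana; not part of this commit.
class elastic::filebeat::config {
  file { '/etc/filebeat/filebeat.yml':
    ensure  => file,
    content => template('elastic/filebeat.yml.erb'), # hypothetical template
    notify  => Service['filebeat'],
  }
}

class elastic::filebeat::service {
  service { 'filebeat':
    ensure => running,
    enable => true,
  }
}
```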

@@ -0,0 +1,3 @@
class elastic {
}

@@ -0,0 +1,35 @@
class elastic::kibana {
contain elastic::kibana::install
contain elastic::kibana::config
contain elastic::kibana::service
}
class elastic::kibana::install {
file { '/opt/kibana':
ensure => directory
}
file { '/opt/kibana/kibana.deb':
ensure => file,
source => 'https://artifacts.elastic.co/downloads/kibana/kibana-7.10.2-amd64.deb'
} ~>
package { 'kibana':
provider => dpkg,
source => "/opt/kibana/kibana.deb"
}
}
class elastic::kibana::config {
file { '/etc/kibana/kibana.yml':
ensure => file,
content => template('elastic/kibana.yml.erb'),
notify => Service['kibana']
}
}
class elastic::kibana::service {
service { 'kibana':
ensure => running,
enable => true
}
}

@@ -0,0 +1,35 @@
class elastic::search {
contain elastic::search::install
contain elastic::search::config
contain elastic::search::service
}
class elastic::search::install {
file { '/opt/search':
ensure => directory
}
file { '/opt/search/search.deb':
ensure => file,
source => 'https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-7.10.2-amd64.deb'
} ~>
# title must match the package name inside the .deb so dpkg can see it as installed
package { 'elasticsearch':
provider => dpkg,
source => '/opt/search/search.deb'
}
}
class elastic::search::config {
file { '/etc/elasticsearch/elasticsearch.yml':
ensure => file,
source => 'puppet:///modules/elastic/elastic.yml',
notify => Service['elasticsearch']
}
}
class elastic::search::service {
service { 'elasticsearch':
ensure => running,
enable => true
}
}

@@ -0,0 +1,111 @@
# Kibana is served by a back end server. This setting specifies the port to use.
server.port: 5601
# Specifies the address to which the Kibana server will bind. IP addresses and host names are both valid values.
# The default is 'localhost', which usually means remote machines will not be able to connect.
# To allow connections from remote users, set this parameter to a non-loopback address.
server.host: "0.0.0.0"
# Enables you to specify a path to mount Kibana at if you are running behind a proxy.
# Use the `server.rewriteBasePath` setting to tell Kibana if it should remove the basePath
# from requests it receives, and to prevent a deprecation warning at startup.
# This setting cannot end in a slash.
#server.basePath: ""
# Specifies whether Kibana should rewrite requests that are prefixed with
# `server.basePath` or require that they are rewritten by your reverse proxy.
# This setting was effectively always `false` before Kibana 6.3 and will
# default to `true` starting in Kibana 7.0.
#server.rewriteBasePath: false
# The maximum payload size in bytes for incoming server requests.
#server.maxPayloadBytes: 1048576
# The Kibana server's name. This is used for display purposes.
server.name: "Kibana"
# The URLs of the Elasticsearch instances to use for all your queries.
elasticsearch.hosts: [ "http://elasticsearch.cluster:9200" ]
# Kibana uses an index in Elasticsearch to store saved searches, visualizations and
# dashboards. Kibana creates a new index if the index doesn't already exist.
#kibana.index: ".kibana"
# The default application to load.
#kibana.defaultAppId: "home"
# If your Elasticsearch is protected with basic authentication, these settings provide
# the username and password that the Kibana server uses to perform maintenance on the Kibana
# index at startup. Your Kibana users still need to authenticate with Elasticsearch, which
# is proxied through the Kibana server.
#elasticsearch.username: "kibana_system"
#elasticsearch.password: "pass"
# Enables SSL and paths to the PEM-format SSL certificate and SSL key files, respectively.
# These settings enable SSL for outgoing requests from the Kibana server to the browser.
#server.ssl.enabled: false
#server.ssl.certificate: /path/to/your/server.crt
#server.ssl.key: /path/to/your/server.key
# Optional settings that provide the paths to the PEM-format SSL certificate and key files.
# These files are used to verify the identity of Kibana to Elasticsearch and are required when
# xpack.security.http.ssl.client_authentication in Elasticsearch is set to required.
#elasticsearch.ssl.certificate: /path/to/your/client.crt
#elasticsearch.ssl.key: /path/to/your/client.key
# Optional setting that enables you to specify a path to the PEM file for the certificate
# authority for your Elasticsearch instance.
#elasticsearch.ssl.certificateAuthorities: [ "/path/to/your/CA.pem" ]
# To disregard the validity of SSL certificates, change this setting's value to 'none'.
#elasticsearch.ssl.verificationMode: full
# Time in milliseconds to wait for Elasticsearch to respond to pings. Defaults to the value of
# the elasticsearch.requestTimeout setting.
#elasticsearch.pingTimeout: 1500
# Time in milliseconds to wait for responses from the back end or Elasticsearch. This value
# must be a positive integer.
#elasticsearch.requestTimeout: 30000
# List of Kibana client-side headers to send to Elasticsearch. To send *no* client-side
# headers, set this value to [] (an empty list).
#elasticsearch.requestHeadersWhitelist: [ authorization ]
# Header names and values that are sent to Elasticsearch. Any custom headers cannot be overwritten
# by client-side headers, regardless of the elasticsearch.requestHeadersWhitelist configuration.
#elasticsearch.customHeaders: {}
# Time in milliseconds for Elasticsearch to wait for responses from shards. Set to 0 to disable.
#elasticsearch.shardTimeout: 30000
# Logs queries sent to Elasticsearch. Requires logging.verbose set to true.
#elasticsearch.logQueries: false
# Specifies the path where Kibana creates the process ID file.
#pid.file: /var/run/kibana.pid
# Enables you to specify a file where Kibana stores log output.
#logging.dest: stdout
# Set the value of this setting to true to suppress all logging output.
#logging.silent: false
# Set the value of this setting to true to suppress all logging output other than error messages.
#logging.quiet: false
# Set the value of this setting to true to log all events, including system usage information
# and all requests.
#logging.verbose: false
# Set the interval in milliseconds to sample system and process performance
# metrics. Minimum is 100ms. Defaults to 5000.
#ops.interval: 5000
# Specifies locale to be used for all localizable strings, dates and number formats.
# Supported languages are the following: English - en , by default , Chinese - zh-CN .
#i18n.locale: "en"
xpack:
  encryptedSavedObjects:
    encryptionKey: '<%= @kibana_encryption_key %>'

@@ -0,0 +1,15 @@
[Unit]
Description=Forgejo
Wants=basic.target
After=basic.target network.target
[Service]
WorkingDirectory=/opt/forgejo
ExecStart=/opt/forgejo/forgejo
User=forgejo
KillMode=process
Restart=on-failure
RestartSec=30s
[Install]
WantedBy=multi-user.target

@@ -0,0 +1,41 @@
class forgejo {
contain forgejo::install
contain forgejo::service
}
class forgejo::install {
user { 'forgejo':
ensure => 'present',
}
file { '/opt/forgejo':
ensure => directory,
owner => 'forgejo',
}
file { '/opt/forgejo/forgejo':
source => 'https://codeberg.org/forgejo/forgejo/releases/download/v10.0.3/forgejo-10.0.3-linux-amd64',
ensure => file,
owner => 'forgejo',
mode => '0744'
}
}
class forgejo::service {
file { '/lib/systemd/system/forgejo.service':
ensure => file,
source => 'puppet:///modules/forgejo/forgejo.service',
notify => Service['forgejo']
}~>
exec { 'forgejo-systemd-reload':
command => 'systemctl daemon-reload',
path => [ '/usr/bin', '/bin', '/usr/sbin' ],
refreshonly => true,
}
service { 'forgejo':
ensure => running,
enable => true,
}
}
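
The unit-file / daemon-reload / service chain above is repeated almost verbatim in the garage, garage-webui and owncloud modules. A hedged sketch of how it could be factored into one defined type (the type and its parameter are hypothetical, not part of this commit):

```puppet
# Hypothetical helper wrapping the repeated "manage unit file, reload systemd,
# run the service" chain used by forgejo, garage, garage-webui and owncloud.
define systemd_service_unit (
  String $unit_source,
) {
  file { "/lib/systemd/system/${name}.service":
    ensure => file,
    source => $unit_source,
    notify => Exec["${name}-systemd-reload"],
  }
  exec { "${name}-systemd-reload":
    command     => 'systemctl daemon-reload',
    path        => ['/usr/bin', '/bin', '/usr/sbin'],
    refreshonly => true,
    notify      => Service[$name],
  }
  service { $name:
    ensure => running,
    enable => true,
  }
}
```

Each module would then declare something like `systemd_service_unit { 'forgejo': unit_source => 'puppet:///modules/forgejo/forgejo.service' }`.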

@@ -0,0 +1,48 @@
class garage {
contain garage::install
contain garage::config
contain garage::service
contain garage::webui
}
class garage::install {
file { '/opt/garage':
ensure => directory,
mode => '0640'
}
file { '/opt/garage/garage':
ensure => file,
source => 'https://garagehq.deuxfleurs.fr/_releases/v1.1.0/x86_64-unknown-linux-musl/garage',
mode => '0740'
}
}
class garage::config {
file { '/opt/garage/garage.toml':
content => template('garage/conf.toml.erb'),
ensure => file,
mode => '0644',
notify => [Service['garage'], Service['garage-webui']]
}
}
class garage::service {
file { '/lib/systemd/system/garage.service':
mode => '0644',
owner => 'root',
group => 'root',
content => template('garage/garage.service.erb'),
}~>
exec { 'garage-systemd-reload':
command => 'systemctl daemon-reload',
path => [ '/usr/bin', '/bin', '/usr/sbin' ],
refreshonly => true,
}
service { 'garage':
ensure => running,
enable => true,
}
}

@@ -0,0 +1,35 @@
class garage::webui {
contain garage::webui::install
contain garage::webui::service
}
class garage::webui::install {
file { '/opt/garage-webui':
ensure => directory
}
file { '/opt/garage-webui/webui':
source => 'https://github.com/khairul169/garage-webui/releases/download/1.0.8/garage-webui-v1.0.8-linux-amd64',
ensure => 'file',
mode => '0740',
}
}
class garage::webui::service {
file { '/lib/systemd/system/garage-webui.service':
mode => '0644',
owner => 'root',
group => 'root',
content => template('garage/garage-webui.service.erb'),
}~>
exec { 'garage-webui-systemd-reload':
command => 'systemctl daemon-reload',
path => [ '/usr/bin', '/bin', '/usr/sbin' ],
refreshonly => true,
}
service { 'garage-webui':
ensure => running,
enable => true,
}
}

@@ -0,0 +1,28 @@
metadata_dir = "/opt/garage/meta"
data_dir = "/opt/garage/data"
db_engine = "sqlite"
replication_factor = 1
rpc_bind_addr = "[::]:3901"
rpc_public_addr = "127.0.0.1:3901"
rpc_secret = "<%= @garage_rpc_secret %>"
[s3_api]
s3_region = "garage"
api_bind_addr = "[::]:3900"
root_domain = ".s3.amy.mov"
[s3_web]
bind_addr = "[::]:3902"
root_domain = ".s3.amy.mov"
index = "index.html"
[k2v_api]
api_bind_addr = "[::]:3904"
[admin]
api_bind_addr = "[::]:3903"
admin_token = "<%= @garage_admin_token %>"
metrics_token = "<%= @garage_metrics_token %>"

@@ -0,0 +1,15 @@
[Unit]
Description=GarageHQ WebUI
Wants=basic.target
After=basic.target network.target
[Service]
WorkingDirectory=/opt/garage-webui
Environment="CONFIG_PATH=/opt/garage/garage.toml"
ExecStart=/opt/garage-webui/webui
KillMode=process
Restart=on-failure
RestartSec=30s
[Install]
WantedBy=multi-user.target

@@ -0,0 +1,14 @@
[Unit]
Description=GarageHQ
Wants=basic.target
After=basic.target network.target
[Service]
WorkingDirectory=/opt/garage
ExecStart=/opt/garage/garage -c /opt/garage/garage.toml server
KillMode=process
Restart=on-failure
RestartSec=30s
[Install]
WantedBy=multi-user.target

@@ -0,0 +1,9 @@
class hosts {
file { '/etc/hosts':
ensure => file,
owner => 'root',
group => 'root',
mode => '0644',
content => template('hosts/hosts.erb'),
}
}

@@ -0,0 +1,12 @@
# !! Managed by Puppet !!
127.0.0.1 localhost
::1 localhost ip6-localhost ip6-loopback
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
# --- BEGIN PVE ---
127.0.1.1 <%= @networking['hostname'] %>.cluster <%= @networking['hostname'] %>
192.168.1.200 internal-s3.amy.mov
# --- END PVE ---

@@ -0,0 +1,36 @@
services:
  backend:
    container_name: infisical-backend
    restart: unless-stopped
    depends_on:
      redis:
        condition: service_started
    image: infisical/infisical:latest-postgres
    pull_policy: always
    env_file: .env
    ports:
      - 8080:8080
    environment:
      - NODE_ENV=production
    networks:
      - infisical
  redis:
    image: redis
    container_name: infisical-dev-redis
    env_file: .env
    restart: always
    environment:
      - ALLOW_EMPTY_PASSWORD=yes
    ports:
      - 6379:6379
    networks:
      - infisical
    volumes:
      - redis_data:/data
volumes:
  redis_data:
    driver: local
networks:
  infisical:

@@ -0,0 +1,18 @@
class infisical::cli {
contain infisical::cli::install
}
class infisical::cli::install {
file { '/opt/infisical':
ensure => directory
}
file { '/opt/infisical/infisical-cli.deb':
ensure => file,
source => 'https://github.com/Infisical/infisical/releases/download/infisical-cli%2Fv0.36.22/infisical_0.36.22_linux_amd64.deb'
} ~>
# title should be the package name shipped in the .deb (infisical) so dpkg can detect it
package { 'infisical':
provider => dpkg,
source => '/opt/infisical/infisical-cli.deb'
}
}

@@ -0,0 +1,29 @@
class infisical {
include docker
contain infisical::install
}
class infisical::install {
package { 'docker-compose':
ensure => installed
}
file { '/opt/infisical':
ensure => directory
}
file { '/opt/infisical/compose.yml':
ensure => file,
source => 'puppet:///modules/infisical/compose.yml'
}
file { '/opt/infisical/.env':
ensure => file,
source => 'puppet:///modules/infisical/.env'
}
docker_compose { 'infisical':
compose_files => ['/opt/infisical/compose.yml'],
ensure => present,
}
}

@@ -0,0 +1,124 @@
# Keys
# Required key for platform encryption/decryption ops
# THIS IS A SAMPLE ENCRYPTION KEY AND SHOULD NEVER BE USED FOR PRODUCTION
ENCRYPTION_KEY="<%= @infisical_encryption_key %>"
# JWT
# Required secrets to sign JWT tokens
# THIS IS A SAMPLE AUTH_SECRET KEY AND SHOULD NEVER BE USED FOR PRODUCTION
AUTH_SECRET="<%= @infisical_auth_secret %>"
# Postgres creds
PG_HOST="<%= @infisical_pg_host %>"
PG_USER="<%= @infisical_pg_user %>"
PG_PASS="<%= @infisical_pg_pass %>"
PG_DB="<%= @infisical_pg_db %>"
# Required
DB_CONNECTION_URI=postgres://${PG_USER}:${PG_PASS}@${PG_HOST}:5432/${PG_DB}
# Redis
REDIS_URL=redis://redis:6379
# Website URL
# Required
SITE_URL=http://localhost:8080
# Mail/SMTP
SMTP_HOST=
SMTP_PORT=
SMTP_FROM_ADDRESS=
SMTP_FROM_NAME=
SMTP_USERNAME=
SMTP_PASSWORD=
# Integration
# Optional only if integration is used
CLIENT_ID_HEROKU=
CLIENT_ID_VERCEL=
CLIENT_ID_NETLIFY=
CLIENT_ID_GITHUB=
CLIENT_ID_GITHUB_APP=
CLIENT_SLUG_GITHUB_APP=
CLIENT_ID_GITLAB=
CLIENT_ID_BITBUCKET=
CLIENT_SECRET_HEROKU=
CLIENT_SECRET_VERCEL=
CLIENT_SECRET_NETLIFY=
CLIENT_SECRET_GITHUB=
CLIENT_SECRET_GITHUB_APP=
CLIENT_SECRET_GITLAB=
CLIENT_SECRET_BITBUCKET=
CLIENT_SLUG_VERCEL=
CLIENT_PRIVATE_KEY_GITHUB_APP=
CLIENT_APP_ID_GITHUB_APP=
# Sentry (optional) for monitoring errors
SENTRY_DSN=
# Infisical Cloud-specific configs
# Ignore - Not applicable for self-hosted version
POSTHOG_HOST=
POSTHOG_PROJECT_API_KEY=
# SSO-specific variables
CLIENT_ID_GOOGLE_LOGIN=
CLIENT_SECRET_GOOGLE_LOGIN=
CLIENT_ID_GITHUB_LOGIN=
CLIENT_SECRET_GITHUB_LOGIN=
CLIENT_ID_GITLAB_LOGIN=
CLIENT_SECRET_GITLAB_LOGIN=
CAPTCHA_SECRET=
NEXT_PUBLIC_CAPTCHA_SITE_KEY=
OTEL_TELEMETRY_COLLECTION_ENABLED=false
OTEL_EXPORT_TYPE=prometheus
OTEL_EXPORT_OTLP_ENDPOINT=
OTEL_OTLP_PUSH_INTERVAL=
OTEL_COLLECTOR_BASIC_AUTH_USERNAME=
OTEL_COLLECTOR_BASIC_AUTH_PASSWORD=
PLAIN_API_KEY=
PLAIN_WISH_LABEL_IDS=
SSL_CLIENT_CERTIFICATE_HEADER_KEY=
ENABLE_MSSQL_SECRET_ROTATION_ENCRYPT=true
# App Connections
# aws assume-role connection
INF_APP_CONNECTION_AWS_ACCESS_KEY_ID=
INF_APP_CONNECTION_AWS_SECRET_ACCESS_KEY=
# github oauth connection
INF_APP_CONNECTION_GITHUB_OAUTH_CLIENT_ID=
INF_APP_CONNECTION_GITHUB_OAUTH_CLIENT_SECRET=
#github app connection
INF_APP_CONNECTION_GITHUB_APP_CLIENT_ID=
INF_APP_CONNECTION_GITHUB_APP_CLIENT_SECRET=
INF_APP_CONNECTION_GITHUB_APP_PRIVATE_KEY=
INF_APP_CONNECTION_GITHUB_APP_SLUG=
INF_APP_CONNECTION_GITHUB_APP_ID=
#gcp app connection
INF_APP_CONNECTION_GCP_SERVICE_ACCOUNT_CREDENTIAL=
# azure app connection
INF_APP_CONNECTION_AZURE_CLIENT_ID=
INF_APP_CONNECTION_AZURE_CLIENT_SECRET=
# datadog
SHOULD_USE_DATADOG_TRACER=
DATADOG_PROFILING_ENABLED=
DATADOG_ENV=
DATADOG_SERVICE=
DATADOG_HOSTNAME=

@@ -0,0 +1,2 @@
ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBK0MxN/ReEZsnPUWJz+UEq8okZIri+hDXClO/EUsaSFeQtuf5unr5zZ9ErMGmPTbyBloBEh7ZauFVmpwn6y9n9M= root@puppet-server
ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBK3ukunRoN0+GupDQwujcN3htQGERmEB+Sd5f33PqNhNXsR2EfBwg463lYiCRZo9CQ/hDjrYv5A9TLg8us1B5iA= amy@nixon

@@ -0,0 +1,8 @@
class keys {
file { '/root/.ssh/authorized_keys':
source => 'puppet:///modules/keys/puppet-push.pub',
owner => 'root',
group => 'root',
mode => '0640',
}
}
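
One assumption worth making explicit: the class manages /root/.ssh/authorized_keys but not its parent directory, which a freshly provisioned VM may lack. A small sketch (not part of this commit):

```puppet
# Sketch: ensure the directory exists, with the usual strict permissions,
# before the authorized_keys file is dropped in.
file { '/root/.ssh':
  ensure => directory,
  owner  => 'root',
  group  => 'root',
  mode   => '0700',
}
```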

@@ -0,0 +1,43 @@
class owncloud {
contain owncloud::install
contain owncloud::config
contain owncloud::service
}
class owncloud::install {
file { '/opt/owncloud':
ensure => directory
}
file { '/opt/owncloud/ocis':
ensure => file,
source => 'https://github.com/owncloud/ocis/releases/download/v7.1.2/ocis-7.1.2-linux-amd64',
mode => '0744'
}
}
class owncloud::config {
file { '/opt/owncloud/ocis.yaml':
ensure => file,
source => 'puppet:///modules/owncloud/ocis.yaml',
notify => Service['owncloud']
}
}
class owncloud::service {
file { '/lib/systemd/system/owncloud.service':
ensure => file,
content => template('owncloud/owncloud.service.erb'),
notify => Service['owncloud']
}~>
exec { 'owncloud-systemd-reload':
command => 'systemctl daemon-reload',
path => [ '/usr/bin', '/bin', '/usr/sbin' ],
refreshonly => true,
}
service { 'owncloud':
ensure => running,
enable => true,
}
}

@@ -0,0 +1,32 @@
[Unit]
Description=OwnCloud
Wants=basic.target
After=basic.target network.target
[Service]
WorkingDirectory=/opt/owncloud
ExecStart=/opt/owncloud/ocis server
Environment="PROXY_HTTP_ADDR=0.0.0.0:9200"
Environment="OCIS_URL=https://cloud.amy.mov"
Environment="OCIS_BASE_DATA_PATH=/opt/owncloud/"
Environment="OCIS_CONFIG_DIR=/opt/owncloud/"
Environment="OCIS_INSECURE=true"
Environment="STORAGE_USERS_DRIVER=s3ng"
Environment="STORAGE_HOME_DRIVER=s3ng"
Environment="STORAGE_METADATA_DRIVER=ocis"
Environment="STORAGE_USERS_S3NG_REGION=garage"
Environment="STORAGE_USERS_S3NG_ENDPOINT=https://internal-s3.amy.mov"
Environment="STORAGE_USERS_S3NG_SECRET_KEY="<%= @oc_s3_secret_key %>"
Environment="STORAGE_USERS_S3NG_ACCESS_KEY="<%= @oc_s3_access_key %>"
Environment="STORAGE_USERS_S3NG_BUCKET=cloud"
KillMode=process
Restart=on-failure
RestartSec=30s
[Install]
WantedBy=multi-user.target

@@ -0,0 +1,34 @@
class postgresql {
contain postgresql::install
contain postgresql::service
contain postgresql::pgadmin
}
class postgresql::install {
# https://www.postgresql.org/download/linux/debian/
package { 'postgresql-common':
ensure => installed
} ~>
exec { 'postgresql-install':
command => '/usr/share/postgresql-common/pgdg/apt.postgresql.org.sh -y',
refreshonly => true,
} ~>
exec { 'postgresql-apt-update':
command => 'apt update',
path => ['/usr/bin'],
refreshonly => true,
} ~>
package { 'postgresql-16':
ensure => installed
} ~>
package { 'postgresql-client-16':
ensure => installed
}
}
class postgresql::service {
service { 'postgresql':
ensure => running,
enable => true
}
}
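
The README leaves database and role creation as a manual step ("create databases/users in accordance with config.pp"). A hedged sketch of how that could be expressed as idempotent execs, using the authentik pair as an example (resource names are hypothetical and the password is assumed to come from the secrets manifest):

```puppet
# Hypothetical sketch -- not part of this commit. Creates the role and database
# the authentik stack expects, skipping the work once they already exist.
exec { 'create-role-authentik':
  command  => "psql -c \"CREATE ROLE authentik LOGIN PASSWORD '${authentik_pg_pass}'\"",
  unless   => "psql -tAc \"SELECT 1 FROM pg_roles WHERE rolname = 'authentik'\" | grep -q 1",
  user     => 'postgres',
  path     => ['/usr/bin', '/bin'],
  provider => shell,
  require  => Package['postgresql-16'],
}
exec { 'create-db-authentik':
  command  => 'createdb --owner authentik authentik',
  unless   => "psql -tAc \"SELECT 1 FROM pg_database WHERE datname = 'authentik'\" | grep -q 1",
  user     => 'postgres',
  path     => ['/usr/bin', '/bin'],
  provider => shell,
  require  => Exec['create-role-authentik'],
}
```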

@@ -0,0 +1,42 @@
class postgresql::pgadmin {
contain postgresql::pgadmin::install
contain postgresql::pgadmin::config
}
class postgresql::pgadmin::install {
apt::source { 'pgadmin':
comment => 'PGAdmin Repo',
location => 'https://ftp.postgresql.org/pub/pgadmin/pgadmin4/apt/bookworm',
release => 'pgadmin4',
repos => 'main',
key => {
'name' => 'pgadmin.pub',
'source' => 'https://www.pgadmin.org/static/packages_pgadmin_org.pub',
},
include => {
'deb' => true,
},
}
package { 'pgadmin4-web':
ensure => installed
}
}
class postgresql::pgadmin::config {
exec { 'pgadmin-setup':
command => '/usr/pgadmin4/bin/setup-web.sh --yes',
environment => [
"PGADMIN_SETUP_EMAIL=$pg_setup_email",
"PGADMIN_SETUP_PASSWORD=$pg_setup_password"
],
path => ['/usr/bin'],
unless => ['test -d /var/lib/pgadmin', 'test -d /var/log/pgadmin']
}
file { '/usr/pgadmin4/web/config_local.py':
ensure => file,
content => template('postgresql/config_local.py.erb')
}
}

@@ -0,0 +1,17 @@
AUTHENTICATION_SOURCES = ['oauth2', 'internal']
OAUTH2_AUTO_CREATE_USER = True
OAUTH2_CONFIG = [{
'OAUTH2_NAME': '<%= @pg_oauth2_name %>',
'OAUTH2_DISPLAY_NAME': '<%= @pg_oauth2_display_name %>',
'OAUTH2_CLIENT_ID': '<%= @pg_oauth2_client_id %>',
'OAUTH2_CLIENT_SECRET': '<%= @pg_oauth2_client_secret %>',
'OAUTH2_TOKEN_URL': '<%= @pg_oauth2_token_url %>',
'OAUTH2_AUTHORIZATION_URL': '<%= @pg_oauth2_authorization_url %>',
'OAUTH2_API_BASE_URL': '<%= @pg_oauth2_api_base_url %>',
'OAUTH2_USERINFO_ENDPOINT': '<%= @pg_oauth2_userinfo_endpoint %>',
'OAUTH2_SERVER_METADATA_URL': '<%= @pg_oauth2_server_metadata_url %>',
'OAUTH2_SCOPE': '<%= @pg_oauth2_scope %>',
'OAUTH2_ICON': '<%= @pg_oauth2_icon %>'
}]
WTF_CSRF_HEADERS = ['X-CSRF']

@@ -0,0 +1 @@
cert/

@@ -0,0 +1,137 @@
map $http_upgrade $connection_upgrade_keepalive {
default upgrade;
'' '';
}
server {
listen 443 ssl;
server_name auth.amy.mov;
resolver 192.168.1.155;
location / {
proxy_http_version 1.1;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Host $http_host;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade_keepalive;
proxy_pass http://authentik.cluster:9000;
}
}
server {
listen 443 ssl;
server_name garage.amy.mov;
proxy_buffers 8 16k;
proxy_buffer_size 32k;
location / {
# Where should the authenticated requests go
proxy_pass http://garage.cluster:3909;
proxy_set_header Host $host;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade_keepalive;
auth_request /outpost.goauthentik.io/auth/nginx;
error_page 401 = @goauthentik_proxy_signin;
auth_request_set $auth_cookie $upstream_http_set_cookie;
add_header Set-Cookie $auth_cookie;
auth_request_set $authentik_username $upstream_http_x_authentik_username;
auth_request_set $authentik_groups $upstream_http_x_authentik_groups;
auth_request_set $authentik_entitlements $upstream_http_x_authentik_entitlements;
auth_request_set $authentik_email $upstream_http_x_authentik_email;
auth_request_set $authentik_name $upstream_http_x_authentik_name;
auth_request_set $authentik_uid $upstream_http_x_authentik_uid;
proxy_set_header X-authentik-username $authentik_username;
proxy_set_header X-authentik-groups $authentik_groups;
proxy_set_header X-authentik-entitlements $authentik_entitlements;
proxy_set_header X-authentik-email $authentik_email;
proxy_set_header X-authentik-name $authentik_name;
proxy_set_header X-authentik-uid $authentik_uid;
}
# All requests to /outpost.goauthentik.io must be accessible without authentication
location /outpost.goauthentik.io {
proxy_pass http://authentik.cluster:9000/outpost.goauthentik.io;
# Note: ensure the Host header matches your external authentik URL:
proxy_set_header Host $host;
proxy_set_header X-Original-URL $scheme://$http_host$request_uri;
add_header Set-Cookie $auth_cookie;
auth_request_set $auth_cookie $upstream_http_set_cookie;
proxy_pass_request_body off;
proxy_set_header Content-Length "";
}
location @goauthentik_proxy_signin {
internal;
add_header Set-Cookie $auth_cookie;
return 302 /outpost.goauthentik.io/start?rd=$scheme://$http_host$request_uri;
}
}
server {
listen 443 ssl;
server_name calibre.amy.mov;
proxy_buffers 8 16k;
proxy_buffer_size 32k;
location / {
# Where should the authenticated requests go
proxy_pass http://calibre.cluster:8080;
proxy_set_header Host $host;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade_keepalive;
auth_request /outpost.goauthentik.io/auth/nginx;
error_page 401 = @goauthentik_proxy_signin;
auth_request_set $auth_cookie $upstream_http_set_cookie;
add_header Set-Cookie $auth_cookie;
auth_request_set $authentik_username $upstream_http_x_authentik_username;
auth_request_set $authentik_groups $upstream_http_x_authentik_groups;
auth_request_set $authentik_entitlements $upstream_http_x_authentik_entitlements;
auth_request_set $authentik_email $upstream_http_x_authentik_email;
auth_request_set $authentik_name $upstream_http_x_authentik_name;
auth_request_set $authentik_uid $upstream_http_x_authentik_uid;
proxy_set_header X-authentik-username $authentik_username;
proxy_set_header X-authentik-groups $authentik_groups;
proxy_set_header X-authentik-entitlements $authentik_entitlements;
proxy_set_header X-authentik-email $authentik_email;
proxy_set_header X-authentik-name $authentik_name;
proxy_set_header X-authentik-uid $authentik_uid;
# Since we're overwriting the Basic Auth headers
auth_request_set $authentik_auth $upstream_http_authorization;
proxy_set_header Authorization $authentik_auth;
}
# All requests to /outpost.goauthentik.io must be accessible without authentication
location /outpost.goauthentik.io {
proxy_pass http://authentik.cluster:9000/outpost.goauthentik.io;
# Note: ensure the Host header matches your external authentik URL:
proxy_set_header Host $host;
proxy_set_header X-Original-URL $scheme://$http_host$request_uri;
add_header Set-Cookie $auth_cookie;
auth_request_set $auth_cookie $upstream_http_set_cookie;
proxy_pass_request_body off;
proxy_set_header Content-Length "";
}
location @goauthentik_proxy_signin {
internal;
add_header Set-Cookie $auth_cookie;
return 302 /outpost.goauthentik.io/start?rd=$scheme://$http_host$request_uri;
}
}

@@ -0,0 +1,13 @@
server {
listen 443 ssl;
server_name blog.amy.mov;
location / {
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header Host $http_host;
proxy_pass http://192.168.1.210:2368;
}
}

@@ -0,0 +1,13 @@
# !! Managed by Puppet !!
server {
listen 443 ssl;
server_name cloud.amy.mov;
location / {
proxy_pass https://owncloud.cluster:9200;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
}
}

@@ -0,0 +1,13 @@
# !! Managed by Puppet !!
server {
listen 443 ssl;
server_name fedi.amy.mov;
location / {
proxy_pass http://192.168.1.201:3000;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
}
}

@@ -0,0 +1,14 @@
# !! Managed by Puppet !!
server {
listen 443 ssl;
server_name forge.amy.mov;
resolver 192.168.1.155;
location / {
proxy_pass http://forgejo.cluster:3000;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
}
}

@@ -0,0 +1,11 @@
server {
listen 443 ssl;
server_name internal-s3.amy.mov;
resolver 192.168.1.155;
location / {
proxy_set_header Host internal-s3.amy.mov;
proxy_pass http://garage.cluster:3900;
}
}

@@ -0,0 +1,13 @@
server {
listen 443 ssl;
server_name pg.amy.mov;
location / {
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header Host $http_host;
proxy_pass http://postgresql.cluster;
}
}

@@ -0,0 +1,15 @@
server {
listen 443 ssl;
server_name s3.amy.mov;
resolver 192.168.1.155;
# Rewrite path-based buckets to vhost buckets, i.e.:
# s3.amy.mov/test-bucket => test-bucket.s3.amy.mov
location ~ ^/([^/]+)(.*)$ {
# Set the host so Garage thinks we are coming from a vhost
proxy_set_header Host $1.s3.amy.mov;
# But pass the path ($2) to the real proxy
proxy_pass http://garage.cluster:3902$2;
}
}

@@ -0,0 +1,13 @@
server {
listen 443 ssl;
server_name secrets.amy.mov;
location / {
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header Host $http_host;
proxy_pass http://infisical.cluster:8080;
}
}

@@ -0,0 +1,31 @@
# !! Managed by Puppet !!
user www-data;
worker_processes auto;
pid /run/nginx.pid;
events {
worker_connections 768;
}
http {
access_log /var/log/nginx/access.log;
error_log /var/log/nginx/error.log;
sendfile on;
tcp_nopush on;
types_hash_max_size 2048;
client_max_body_size 15M;
include /etc/nginx/mime.types;
default_type application/octet-stream;
ssl_protocols TLSv1 TLSv1.1 TLSv1.2 TLSv1.3;
ssl_prefer_server_ciphers on;
ssl_certificate /etc/nginx/cert/cf.pem;
ssl_certificate_key /etc/nginx/cert/cf.key;
include /etc/nginx/conf.d/*;
}

@@ -0,0 +1,56 @@
class reverse_proxy {
contain reverse_proxy::install
contain reverse_proxy::config
contain reverse_proxy::service
}
class reverse_proxy::install {
package { 'nginx':
ensure => installed,
}
}
define reverse_proxy::conf_file (
$dest_base,
$source_base,
$group = 'www-data',
$owner = 'www-data',
$mode = '0640',
) {
file { "${dest_base}/${name}":
source => "${source_base}/${name}",
ensure => 'present',
group => $group,
owner => $owner,
mode => $mode,
notify => Service['nginx'],
}
}
class reverse_proxy::config {
$config_files = [
# Make our dirs first
'conf.d', 'cert',
# Then we can populate them
'conf.d/fedi.amy.mov', 'conf.d/s3.amy.mov', 'conf.d/blog.amy.mov', 'conf.d/auth.amy.mov', 'conf.d/pg.amy.mov',
'conf.d/forge.amy.mov', 'conf.d/cloud.amy.mov', 'conf.d/secrets.amy.mov',
'conf.d/internal-s3.amy.mov',
'cert/cf.key', 'cert/cf.pem',
'nginx.conf'
]
reverse_proxy::conf_file { $config_files:
source_base => 'puppet:///modules/reverse_proxy',
dest_base => '/etc/nginx'
}
}
class reverse_proxy::service {
service { 'nginx':
ensure => running,
enable => true,
}
}
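
For what it's worth, the defined type also makes one-off additions easy: a new vhost just needs its server block under files/reverse_proxy/conf.d plus a declaration like the one below (the wiki.amy.mov name is made up for illustration).

```puppet
# Hypothetical extra vhost, assuming files/reverse_proxy/conf.d/wiki.amy.mov exists.
reverse_proxy::conf_file { 'conf.d/wiki.amy.mov':
  source_base => 'puppet:///modules/reverse_proxy',
  dest_base   => '/etc/nginx',
}
```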

@@ -0,0 +1,74 @@
class unifi {
contain unifi::install
contain unifi::service
}
class unifi::repos {
apt::source { 'unifi':
comment => 'UniFi Repo',
location => 'https://www.ui.com/downloads/unifi/debian',
release => 'stable',
repos => 'ubiquiti',
key => {
'name' => 'unifi.gpg',
'source' => 'https://dl.ui.com/unifi/unifi-repo.gpg',
},
include => {
'deb' => true,
},
}
apt::source { 'mongodb':
comment => 'MongoDB',
location => 'http://repo.mongodb.org/apt/debian',
release => 'buster/mongodb-org/4.4',
repos => 'main',
key => {
'name' => 'mongodb.asc',
'source' => 'https://www.mongodb.org/static/pgp/server-4.4.asc',
},
include => {
'deb' => true,
},
}
}
class unifi::libssl {
# From https://archive.ubuntu.com/ubuntu/pool/main/o/openssl/
file { '/opt/libssl1.1.deb':
ensure => file,
source => 'puppet:///modules/unifi/libssl1.1.deb'
} ~>
package { 'libssl1.1':
provider => dpkg,
source => "/opt/libssl1.1.deb"
}
}
class unifi::install {
contain unifi::repos
contain unifi::libssl
$mongo = [ 'mongodb-org' ]
package { $mongo:
ensure => installed
}
$unifi = ['openjdk-17-jre-headless', 'java-common', 'unifi']
package { $unifi:
ensure => installed
}
}
class unifi::service {
service { 'mongod':
ensure => running,
enable => true
}
service { 'unifi':
ensure => running,
enable => true
}
}

@@ -0,0 +1,5 @@
class utils {
$pkgs = ['git', 'net-tools', 'curl', 'sudo']
package { $pkgs: ensure => 'installed' }
}

File diff suppressed because it is too large.

@@ -0,0 +1,2 @@
# Setting ownership to the modules team
* @puppetlabs/modules @bastelfreak @smortex

@@ -0,0 +1,3 @@
# Contributing to Puppet modules
Check out our [Contributing to Supported Modules Blog Post](https://puppetlabs.github.io/iac/docs/contributing_to_a_module.html) to find all the information that you will need.

@@ -0,0 +1,692 @@
## 5.0.1
[Full Changelog](https://github.com/puppetlabs/puppetlabs-apt/compare/5.0.0...5.0.1)
### Fixed
- \(MODULES-7540\) - add apt-transport-https with https [\#775](https://github.com/puppetlabs/puppetlabs-apt/pull/775) ([tphoney](https://github.com/tphoney))
## [5.0.0](https://github.com/puppetlabs/puppetlabs-apt/tree/5.0.0) (2018-07-18)
[Full Changelog](https://github.com/puppetlabs/puppetlabs-apt/compare/4.5.1...5.0.0)
### Changed
- \[FM-6956\] Removal of unsupported Debian 7 from apt [\#760](https://github.com/puppetlabs/puppetlabs-apt/pull/760) ([david22swan](https://github.com/david22swan))
### Added
- \(MODULES-7467\) Update apt to support Ubuntu 18.04 [\#769](https://github.com/puppetlabs/puppetlabs-apt/pull/769) ([david22swan](https://github.com/david22swan))
- Support managing login configurations in /etc/apt/auth.conf [\#752](https://github.com/puppetlabs/puppetlabs-apt/pull/752) ([antaflos](https://github.com/antaflos))
### Fixed
- \(MODULES-7327\) - Update README with supported OS [\#767](https://github.com/puppetlabs/puppetlabs-apt/pull/767) ([pmcmaw](https://github.com/pmcmaw))
- \(bugfix\) Dont run ftp tests in travis [\#766](https://github.com/puppetlabs/puppetlabs-apt/pull/766) ([tphoney](https://github.com/tphoney))
- \(maint\) make apt testing more stable, cleanup [\#764](https://github.com/puppetlabs/puppetlabs-apt/pull/764) ([tphoney](https://github.com/tphoney))
- Remove .length from variable $pin\_release in app [\#754](https://github.com/puppetlabs/puppetlabs-apt/pull/754) ([paladox](https://github.com/paladox))
- Replace UTF-8 whitespace in comment [\#748](https://github.com/puppetlabs/puppetlabs-apt/pull/748) ([bernhardschmidt](https://github.com/bernhardschmidt))
- Fix "E: Unable to locate package -y" [\#747](https://github.com/puppetlabs/puppetlabs-apt/pull/747) ([aboks](https://github.com/aboks))
- Fix automatic coercion warning [\#743](https://github.com/puppetlabs/puppetlabs-apt/pull/743) ([smortex](https://github.com/smortex))
## Supported Release [4.5.1]
### Summary
This release fixes CVE-2018-6508 which is a potential arbitrary code execution via tasks.
### Fixed
- Fix init task for arbitrary remote code
## Supported Release [4.5.0]
### Summary
This release uses the PDK convert functionality which in return makes the module PDK compliant. It also includes a roll up of maintenance changes.
### Added
- PDK convert apt ([MODULES-6452](https://tickets.puppet.com/browse/MODULES-6452)).
- Testing on Travis using rvm 2.4.1.
- Modulesync updates.
### Fixed
- Changes to address additional Rubocop failures.
- (maint) Addressing puppet-lint doc warnings.
### Removed
- `gem update bundler` command in .travis.yml due to ([MODULES-6339](https://tickets.puppet.com/browse/MODULES-6339)).
## Supported Release [4.4.1]
### Summary
This release is to update the formatting of the module, Rubocop having been run for all ruby files and been set to run automatically on all future commits.
### Changed
- Rubocop has been implemented.
## Supported Release [4.4.0]
### Summary
This release is a rollup of new features and fixes.
#### Added
- Install `apt-transport-https` if using Debian 7, 8, 9 or Ubuntu 14.04, 16.04.
- Adds a boolean option `direct` to proxy settings to bypass `https_proxy` if not set.
- Adds facter facts for `dist-upgrade` apt updates.
#### Changed
- Update class is now private.
- Some tidyup of ruby code from Rubocop.
- Fixed circular dependency for package dirmngr.
- Debian updates are no longer treated as security updates.
- Legacy functions have been removed.
- Updates to tests.
#### Fixed
- [(MODULES-4265)](https://tickets.puppetlabs.com/browse/MODULES-4265) Detect security updates from multiple sources.
## Supported Release [4.3.0]
### Summary
This release is adding Tasks to the apt module.
#### Added
- Add a task that allows apt-get update and upgrade
## Supported Release [4.2.0]
### Summary
This release is primarily to fix an error around GPG keys in Debian 9, but includes some other small features and fixes as well.
#### Added
- `apt_package_security_updates` fact
- The ability to modify the loglevel of `Exec['apt_update'}`
- Puppet 5 support
#### Changed
- Ubuntu 16.04 now uses `software-priorities-common`
#### Removed
- Debian 6, Ubuntu 10.04 and 12.04 support. Existing compatibility remains intact but bugs will not be prioritized for these OSes.
#### Fixed
- **[(MODULES-4686)](https://tickets.puppetlabs.com/browse/MODULES-4686) an error that was causing GPG keyserver imports to fail on Debian 9**
## Supported Release 4.1.0
### Summary
This release removes Data in Modules due to current compatibility issues and reinstates the params.pp file. Also includes a couple of bug fixes.
#### Features
- (MODULES-4973) Data in Modules which was introduced in the last release has now been reverted due to compatibility issues.
#### Bugfixes
- Now apt_key only sends the auth basic header when userinfo can be parsed from the URL.
- Reverted the removal of Evolving Web's attribution in NOTICE file.
- Test added to ensure empty string allowed for $release in apt::source.
## Supported Release 3.0.0 and 4.0.0
### Summary
This release adds new Puppet 4 features: data in modules, EPP templates, the $facts hash, and data types. This release is fully backwards compatible to existing Puppet 4 configurations and provides you with deprecation warnings for every argument that will not work as expected with the final 4.0.0 release. See the stdlib docs here for an in-depth discussion of this: https://github.com/puppetlabs/puppetlabs-stdlib#validate_legacy
If you want to learn more about the new features used or you wish to upgrade a module yourself, have a look at the NTP: A Puppet 4 language update blog post.
If you're still running Puppet 3, remain on the latest puppetlabs-apt 2.x release for now, and see the documentation to upgrade to Puppet 4.
#### Changes
Data in modules: Moves all distribution and OS-dependent defaults into YAML files in data/, alleviating the need for a params class. Note that while this feature is currently still classed as experimental, the final implementation will support the changes here.
EPP templating: Uses the Puppet language as a base for templates to create simpler and safer templates. No need for Ruby anymore!
The $facts hash: Makes facts visibly distinct from other variables for more readable and maintainable code. This helps eliminate confusion if you use a local variable whose name happens to match that of a common fact.
Data types for validation: Helps you find and replace deprecated code in existing validate functions with stricter, more readable data type notation. First upgrade to the 3.0.0 release of this module, and address all deprecation warnings before upgrading to the final 4.0.0 release. Please see the stdlib docs for an in-depth discussion of this process.
#### Bugfixes
- Fix apt::source epp template regression introduced in 3.0.0 for the architecture parameter
## Supported Release 2.4.0
### Summary
A release that includes only a couple of additional features, but includes several cleanups and bugfixes around existing issues.
#### Features
- Tests updated to check for idempotency.
- (MODULES-4224) Implementation of beaker-module_install_helper.
- Deprecation warnings are now handled by the deprecation function in stdlib.
#### Bugfixes
- Now http and https sources fixed for apt_key and can take a userinfo.
- GPG key update.
- Notify_update param now defaults to true to avoid validation errors.
- Implement retry on tests which pull key from a key server which sometimes times out (transient error).
- String comparison error now compensated for in update.pp.
- (MODULES-4104) Removal of the port number from repository location in order to get the host name of the repository.
- Puppet lint warnings addressed.
- A few small readme issues addressed.
## Supported Release 2.3.0
### Summary
A release containing many bugfixes with additional features.
#### Features
- Apt_updates facts now use /usr/bin/apt-get.
- Addition of notify update to apt::source.
- Update to newest modulesync_configs.
- Installs software-properties-common for Xenial.
- Modulesync updates.
- Add ability to specify a hash of apt::conf defines.
#### Bugfixes
- A clean up of spec/defines/key_compat_specs, also now runs under STRICT_VARIABLES.
- Apt::setting expects priority to be an integer, set defaults accordingly.
- Fixed version check for Ubuntu on 16.04.
- Now uses hkps.pool.sks-keyservers.net instead of pgp.mit.edu.
- Updates and fixes to tests. General cleanup.
- Fixed regexp for $ensure params.
- Apt/params: Remove unused LSB facts.
- Replaced `-s` with `-f` in ppa rspec tests - After the repository is added, the "${::apt::sources_list_d}/${sources_list_d_filename}" file is created as an empty file. The unless condition of Exec["add-apt-repository-${name}"] calls test -s, which returns 1 if the file is empty. Because the file is empty, the unless condition is never true and the repository is added on every execution. This change replaces the -s test condition with -f, which is true if the file exists or false otherwise.
- Limit non-strict parsing to pre-3.5.0 only - Puppet 3.5.0 introduced strict variables and the module handles strict variables by using the defined() function. This does not work on prior versions of puppet so we now gate based on that version. Puppet 4 series has a new setting `strict` that may be set to enforce strict variables while `strict_variables` remains unset (see PUP-6358) which causes the conditional in manifests/params.pp to erroneously use non-strict 3.5-era parsing and fail. This new conditional corrects the cases such that strict variable behavior happens on versions 3.5.0 and later.
## Supported Release 2.2.2
### Summary
Several bug fixes and the addition of support updates to Debian 8 and Ubuntu Wily.
#### Bugfixes
- Small fixes to descriptions within the readme and the addition of some examples.
- Updates to run on Ubuntu Wily.
- Fixed apt_key tempfile race condition.
- Run stages limitation added to the documentation.
- Remove unneeded whitespace in source.list template.
- Handle PPA names that contain a plus character.
- Update to current msync configs.
- Avoid duplicate package resources when package_manage => true.
- Avoid multiple package resource declarations.
- Ensure PPAs in tests have valid form.
- Look for correct sources.list.d file for apt::ppa.
- Debian 8 support addition to metadata.
## Supported Release 2.2.1
### Summary
Small release for support of newer PE versions. This increments the version of PE in the metadata.json file.
## 2015-09-29 - Supported Release 2.2.0
### Summary
This release includes a few bugfixes.
#### Features
- Adds an `ensure` parameter for user control of proxy presence.
- Adds ability to set `notify_update` to `apt::conf` (MODULES-2269).
- Apt pins no longer trigger an `apt-get update` run.
- Adds support for creating pins from main class.
#### Bugfixes
- Updates to use the official Debian mirrors.
- Fixes path to `preferences` and `preferences.d`
- Fixes pinning for backports (MODULES-2446).
- Fixes the name/extension of the preferences files.
## 2015-07-28 - Supported Release 2.1.1
### Summary
This release includes a few bugfixes.
#### Bugfixes
- Fix incorrect use of anchoring (MODULES-2190)
- Use correct comment type for apt.conf files
- Test fixes
- Documentation fixes
## 2015-06-16 - Supported Release 2.1.0
### Summary
This release largely makes `apt::key` and `apt::source` API-compatible with the 1.8.x versions for ease in upgrading, and also addresses some compatibility issues with older versions of Puppet.
#### Features
- Add API compatibility to `apt::key` and `apt::source`
- Added `apt_reboot_required` fact
#### Bugfixes
- Fix compatibility with Puppet versions 3.0-3.4
- Work around future parser bug PUP-4133
## 2015-04-28 - Supported Release 2.0.1
### Summary
This bug fixes a few compatibility issues that came up with the 2.0.0 release, and includes test and documentation updates.
#### Bugfixes
- Fix incompatibility with keyrings containing multiple keys
- Fix bugs preventing the module from working with Puppet < 3.5.0
## 2015-04-07 - Supported Release 2.0.0
### Summary
This is a major rewrite of the apt module. Many classes and defines were removed, but all existing functionality should still work. Please carefully review documentation before upgrading.
#### Backwards-incompatible changes
As this is a major rewrite of the module there are a great number of backwards incompatible changes. Please review this and the updated README carefully before upgrading.
##### `apt_key`
- `keyserver_options` parameter renamed to `options`
##### `apt::backports`
- This no longer works out of the box on Linux Mint. If using this on mint, you must specify the `location`, `release`, `repos`, and `key` parameters. [Example](examples/backports.pp)
##### `apt::builddep`
- This define was removed. Functionality can be matched passing 'build-dep' to `install_options` in the package resource. [Example](examples/builddep.pp)
##### `apt::debian::testing`
- This class was removed. Manually add an `apt::source` instead. [Example](examples/debian_testing.pp)
##### `apt::debian::unstable`
- This class was removed. Manually add an `apt::source` instead. [Example](examples/debian_unstable.pp)
##### `apt::force`
- This define was removed. Functionality can be matched by setting `install_options` in the package resource. See [here](examples/force.pp) for how to set the options.
##### `apt::hold`
- This define was removed. Simply use an `apt::pin` with `priority => 1001` for the same functionality.
##### `apt`
- `always_apt_update` - This parameter was removed. Use `update => { 'frequency' => 'always' }` instead.
- `apt_update_frequency` - This parameter was removed. Use `update => { 'frequency' => <frequency> }` instead.
- `disable_keys` - This parameter was removed. See this [example](examples/disable_keys.pp) if you need this functionality.
- `proxy_host` - This parameter was removed. Use `proxy => { 'host' => <host> }` instead.
- `proxy_port` - This parameter was removed. Use `proxy => { 'port' => <port> }` instead.
- `purge_sources_list` - This parameter was removed. Use `purge => { 'sources.list' => <bool> }` instead.
- `purge_sources_list_d` - This parameter was removed. Use `purge => { 'sources.list.d' => <bool> }` instead.
- `purge_preferences` - This parameter was removed. Use `purge => { 'preferences' => <bool> }` instead.
- `purge_preferences_d` - This parameter was removed. Use `purge => { 'preferences.d' => <bool> }` instead.
- `update_timeout` - This parameter was removed. Use `update => { 'timeout' => <timeout> }` instead.
- `update_tries` - This parameter was removed. Use `update => { 'tries' => <tries> }` instead.
##### `apt::key`
- `key` - This parameter was renamed to `id`.
- `key_content` - This parameter was renamed to `content`.
- `key_source` - This parameter was renamed to `source`.
- `key_server` - This parameter was renamed to `server`.
- `key_options` - This parameter was renamed to `options`.
##### `apt::release`
- This class was removed. See this [example](examples/release.pp) for how to achieve this functionality.
##### `apt::source`
- `include_src` - This parameter was removed. Use `include => { 'src' => <bool> }` instead. ***NOTE*** This now defaults to false.
- `include_deb` - This parameter was removed. Use `include => { 'deb' => <bool> }` instead.
- `required_packages` - This parameter was removed. Use package resources for these packages if needed.
- `key` - This can either be a key id or a hash including key options. If using a hash, `key => { 'id' => <id> }` must be specified.
- `key_server` - This parameter was removed. Use `key => { 'server' => <server> }` instead.
- `key_content` - This parameter was removed. Use `key => { 'content' => <content> }` instead.
- `key_source` - This parameter was removed. Use `key => { 'source' => <source> }` instead.
- `trusted_source` - This parameter was renamed to `allow_unsigned`.
##### `apt::unattended_upgrades`
- This class was removed and is being republished under the puppet-community namespace. The git repository is available [here](https://github.com/puppet-community/puppet-unattended_upgrades) and it will be published to the forge [here](https://forge.puppetlabs.com/puppet/unattended_upgrades).
#### Changes to default behavior
- By default purge unmanaged files in 'sources.list', 'sources.list.d', 'preferences', and 'preferences.d'.
- Changed default for `package_manage` in `apt::ppa` to `false`. Set to `true` in a single PPA if you need the package to be managed.
- `apt::source` will no longer include the `src` entries by default.
- `pin` in `apt::source` now defaults to `undef` instead of `false`
#### Features
- Added the ability to pass hashes of `apt::key`s, `apt::ppa`s, and `apt::setting`s to `apt`.
- Added 'https' key to `proxy` hash to allow disabling `https_proxy` for the `apt::ppa` environment.
- Added `apt::setting` define to abstract away configuration.
- Added the ability to pass hashes to `pin` and `key` in `apt::backports` and `apt::source`.
#### Bugfixes
- Fixes for strict variables.
## 2015-03-17 - Supported Release 1.8.0
### Summary
This is the last planned feature release of the 1.x series of this module. All new features will be evaluated for puppetlabs-apt 2.x.
This release includes many important features, including support for full fingerprints, and fixes issues where `apt_key` was not supporting user/password and `apt_has_updates` was not properly parsing the `apt-check` output.
#### Changes to default behavior
- The apt module will now throw warnings if you don't use full fingerprints for `apt_key`s
#### Features
- Use gpg to check keys to work around https://bugs.launchpad.net/ubuntu/+source/gnupg2/+bug/1409117 (MODULES-1675)
- Add 'oldstable' to the default update origins for wheezy
- Add utopic, vivid, and cumulus compatibility
- Add support for full fingerprints
- New parameter for `apt::source`
- `trusted_source`
- New parameters for `apt::ppa`
- `package_name`
- `package_manage`
- New parameter for `apt::unattended_upgrades`
- `legacy_origin`
- Separate `apt::pin` from `apt::backports` to allow pin by release instead of origin
#### Bugfixes
- Cleanup lint and future parser issues
- Fix to support username and passwords again for `apt_key` (MODULES-1119)
- Fix issue where `apt::force` `$install_check` didn't work with non-English locales (MODULES-1231)
- Allow 5 digit ports in `apt_key`
- Fix for `ensure => absent` in `apt_key` (MODULES-1661)
- Fix `apt_has_updates` not parsing `apt-check` output correctly
- Fix inconsistent headers across files (MODULES-1200)
- Clean up formatting for 50unattended-upgrades.erb
## 2014-10-28 - Supported Release 1.7.0
### Summary
This release includes several new features, documentation and test improvements, and a few bug fixes.
#### Features
- Updated unit and acceptance tests
- Update module to work with Linux Mint
- Documentation updates
- Future parser / strict variables support
- Improved support for long GPG keys
- New parameters!
- Added `apt_update_frequency` to apt
- Added `cfg_files` and `cfg_missing` parameters to apt::force
- Added `randomsleep` to apt::unattended_upgrades
- Added `apt_update_last_success` fact
- Refactored facts for performance improvements
#### Bugfixes
- Update apt::builddep to require Exec['apt_update'] instead of notifying it
- Clean up lint errors
## 2014-08-20 - Supported Release 1.6.0
### Summary
#### Features
- Allow URL or domain name for key_server parameter
- Allow custom comment for sources list
- Enable auto-update for Debian squeeze LTS
- Add facts showing available updates
- Test refactoring
#### Bugfixes
- Allow dashes in URL or domain for key_server parameter
## 2014-08-13 - Supported Release 1.5.3
### Summary
This is a bugfix release. It addresses a bad regex, failures with Unicode characters, and issues with the `$proxy_host` handling in `apt::ppa`.
#### Features
- Synced files from Modulesync
#### Bugfixes
- Fix regex to follow APT requirements in apt::pin
- Fix for unicode characters
- Fix inconsistent $proxy_host handling in apt and apt::ppa
- Fix typo in README
- Fix broken acceptance tests
## 2014-07-15 - Supported Release 1.5.2
### Summary
This release merely updates metadata.json so the module can be uninstalled and
upgraded via the puppet module command.
## 2014-07-10 - Supported Release 1.5.1
### Summary
This release has added tests to ensure graceful failure on OSX.
## 2014-06-04 - Release 1.5.0
### Summary
This release adds support for Ubuntu 14.04. It also includes many new features
and important bugfixes. One huge change is that `apt::key` was replaced with `apt_key`, which allows you to use `puppet resource apt_key` to inventory keys on your system.
Special thanks to daenney, our intrepid unofficial apt maintainer!
#### Features
- Add support for Ubuntu Trusty!
- Add apt::hold define
- Generate valid *.pref files in apt::pin
- Made pin_priority configurable for apt::backports
- Add apt_key type and provider
- Rename "${apt_conf_d}/proxy" to "${apt_conf_d}/01proxy"
- apt::key rewritten to use apt_key type
- Add support for update_tries to apt::update
#### Bugfixes
- Typo fixes
- Fix unattended upgrades
- Removed bogus line when using purge_preferences
- Fix apt::force to allow packages to be upgraded to the package from the specified release
## 2014-03-04 - Supported Release 1.4.2
### Summary
This is a supported release. It tidies up 1.4.1 and re-enables support for Ubuntu 10.04.
#### Features
#### Bugfixes
- Fix apt::ppa to include the `-y` option that Ubuntu 10.04 requires.
- Documentation changes.
- Test fixups.
#### Known Bugs
* No known issues.
## 2014-02-13 1.4.1
### Summary
This is a bugfix release.
#### Bugfixes
- Fix apt::force being unable to upgrade packages from releases other than its original
- Replaced a few remaining references to aptitude with apt-get for portability
- Removed call to getparam() due to stdlib dependency
- Correct apt::source template when architecture is provided
- Retry package installs if apt is locked
- Use root to exec in apt::ppa
- Updated tests and converted acceptance tests to beaker
## 2013-10-08 - Release 1.4.0
### Summary
Minor bugfix and allow the timeout to be adjusted.
#### Features
- Add an `updates_timeout` to apt::params
#### Bugfixes
- Ensure apt::ppa can read a ppa removed by hand.
## 2013-10-08 - Release 1.3.0
### Summary
The major feature in this release is the new apt::unattended_upgrades class,
allowing you to manage Ubuntu's unattended-upgrades feature. This allows you to select
specific packages to automatically upgrade without any further user
involvement.
In addition, we extend our Wheezy support, add proxy support to apt::ppa, and do
various cleanups and tweaks.
#### Features
- Add apt::unattended_upgrades support for Ubuntu.
- Add wheezy backports support.
- Use the geoDNS http.debian.net instead of the main debian ftp server.
- Add `options` parameter to apt::ppa in order to pass options to apt-add-repository command.
- Add proxy support for apt::ppa (uses proxy_host and proxy_port from apt).
#### Bugfixes
- Fix regsubst() calls to quote single letters (for future parser).
- Fix lint warnings and other misc cleanup.
## 2013-07-03 - Release 1.2.0
#### Features
- Add geppetto `.project` natures
- Add GH auto-release
- Add `apt::key::key_options` parameter
- Add complex pin support using distribution properties for `apt::pin` via new properties:
- `apt::pin::codename`
- `apt::pin::release_version`
- `apt::pin::component`
- `apt::pin::originator`
- `apt::pin::label`
- Add source architecture support to `apt::source::architecture`
#### Bugfixes
- Use apt-get instead of aptitude in apt::force
- Update default backports location
- Add dependency for required packages before apt-get update
## 2013-06-02 - Release 1.1.1
### Summary
This is a bug fix release that resolves a number of issues:
* By changing template variable usage, we remove the deprecation warnings
for Puppet 3.2.x
* Fixed proxy file removal, when proxy absent
Some documentation, style and whitespaces changes were also merged. This
release also introduced proper rspec-puppet unit testing on Travis-CI to help
reduce regression.
Thanks to all the community contributors below that made this patch possible.
#### Detail Changes
* fix minor comment type (Chris Rutter)
* whitespace fixes (Michael Moll)
* Update travis config file (William Van Hevelingen)
* Build all branches on travis (William Van Hevelingen)
* Standardize travis.yml on pattern introduced in stdlib (William Van Hevelingen)
* Updated content to conform to README best practices template (Lauren Rother)
* Fix apt::release example in readme (Brian Galey)
* add @ to variables in template (Peter Hoeg)
* Remove deprecation warnings for pin.pref.erb as well (Ken Barber)
* Update travis.yml to latest versions of puppet (Ken Barber)
* Fix proxy file removal (Scott Barber)
* Add spec test for removing proxy configuration (Dean Reilly)
* Fix apt::key listing longer than 8 chars (Benjamin Knofe)
## Release 1.1.0
### Summary
This release includes Ubuntu 12.10 (Quantal) support for PPAs.
---
## 2012-05-25 - Puppet Labs <info@puppetlabs.com> - Release 0.0.4
### Summary
* Fix ppa list filename when there is a period in the PPA name
* Add .pref extension to apt preferences files
* Allow preferences to be purged
* Extend pin support
## 2012-05-04 - Puppet Labs <info@puppetlabs.com> - Release 0.0.3
### Summary
* only invoke apt-get update once
* only install python-software-properties if a ppa is added
* support 'ensure => absent' for all defined types
* add apt::conf
* add apt::backports
* fixed Modulefile for module tool dependency resolution
* configure proxy before doing apt-get update
* use apt-get update instead of aptitude for apt::ppa
* add support to pin release
## 2012-03-26 - Puppet Labs <info@puppetlabs.com> - Release 0.0.2
### Summary
* 41cedbb (#13261) Add real examples to smoke tests.
* d159a78 (#13261) Add key.pp smoke test
* 7116c7a (#13261) Replace foo source with puppetlabs source
* 1ead0bf Ignore pkg directory.
* 9c13872 (#13289) Fix some more style violations
* 0ea4ffa (#13289) Change test scaffolding to use a module & manifest dir fixture path
* a758247 (#13289) Clean up style violations and fix corresponding tests
* 99c3fd3 (#13289) Add puppet lint tests to Rakefile
* 5148cbf (#13125) Apt keys should be case insensitive
* b9607a4 Convert apt::key to use anchors
## 2012-03-07 - Puppet Labs <info@puppetlabs.com> - Release 0.0.1
### Summary
* d4fec56 Modify apt::source release parameter test
* 1132a07 (#12917) Add contributors to README
* 8cdaf85 (#12823) Add apt::key defined type and modify apt::source to use it
* 7c0d10b (#12809) $release should use $lsbdistcodename and fall back to manual input
* be2cc3e (#12522) Adjust spec test for splitting purge
* 7dc60ae (#12522) Split purge option to spare sources.list
* 9059c4e Fix source specs to test all key permutations
* 8acb202 Add test for python-software-properties package
* a4af11f Check if python-software-properties is defined before attempting to define it.
* 1dcbf3d Add tests for required_packages change
* f3735d2 Allow duplicate $required_packages
* 74c8371 (#12430) Add tests for changes to apt module
* 97ebb2d Test two sources with the same key
* 1160bcd (#12526) Add ability to reverse apt { disable_keys => true }
* 2842d73 Add Modulefile to puppet-apt
* c657742 Allow the use of the same key in multiple sources
* 8c27963 (#12522) Adding purge option to apt class
* 997c9fd (#12529) Add unit test for apt proxy settings
* 50f3cca (#12529) Add parameter to support setting a proxy for apt
* d522877 (#12094) Replace chained .with_* with a hash
* 8cf1bd0 (#12094) Remove deprecated spec.opts file
* 2d688f4 (#12094) Add rspec-puppet tests for apt
* 0fb5f78 (#12094) Replace name with path in file resources
* f759bc0 (#11953) Apt::force passes $version to aptitude
* f71db53 (#11413) Add spec test for apt::force to verify changes to unless
* 2f5d317 (#11413) Update dpkg query used by apt::force
* cf6caa1 (#10451) Add test coverage to apt::ppa
* 0dd697d include_src parameter in example; Whitespace cleanup
* b662eb8 fix typos in "repositories"
* 1be7457 Fix (#10451) - apt::ppa fails to "apt-get update" when new PPA source is added
* 864302a Set the pin priority before adding the source (Fix #10449)
* 1de4e0a Refactored as per mlitteken
* 1af9a13 Added some crazy bash madness to check if the ppa is installed already. Otherwise the manifest tries to add it on every run!
* 52ca73e (#8720) Replace Apt::Ppa with Apt::Builddep
* 5c05fa0 added builddep command.
* a11af50 added the ability to specify the content of a key
* c42db0f Fixes ppa test.
* 77d2b0d reformatted whitespace to match recommended style of 2 space indentation.
* 27ebdfc ignore swap files.
* 377d58a added smoke tests for module.
* 18f614b reformatted apt::ppa according to recommended style.
* d8a1e4e Created a params class to hold global data.
* 636ae85 Added two params for apt class
* 148fc73 Update LICENSE.
* ed2d19e Support ability to add more than one PPA
* 420d537 Add call to apt-update after add-apt-repository in apt::ppa
* 945be77 Add package definition for python-software-properties
* 71fc425 Abs paths for all commands
* 9d51cd1 Adding LICENSE
* 71796e3 Heading fix in README
* 87777d8 Typo in README
* f848bac First commit
[5.0.1]:https://github.com/puppetlabs/puppetlabs-apt/compare/5.0.0...5.0.1
[5.0.0]:https://github.com/puppetlabs/puppetlabs-apt/compare/4.5.1...5.0.0
[4.5.1]:https://github.com/puppetlabs/puppetlabs-apt/compare/4.5.0...4.5.1
[4.5.0]:https://github.com/puppetlabs/puppetlabs-apt/compare/4.4.1...4.5.0
[4.4.1]:https://github.com/puppetlabs/puppetlabs-apt/compare/4.4.0...4.4.1
[4.4.0]:https://github.com/puppetlabs/puppetlabs-apt/compare/4.3.0...4.4.0
[4.3.0]:https://github.com/puppetlabs/puppetlabs-apt/compare/4.2.0...4.3.0
[4.2.0]:https://github.com/puppetlabs/puppetlabs-apt/compare/4.1.0...4.2.0

View file

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View file

@ -0,0 +1,6 @@
## Maintenance
Maintainers:
- Puppet Forge Modules Team `forge-modules |at| puppet |dot| com`
Tickets: https://tickets.puppet.com/browse/MODULES. Make sure to set component to `apt`.

View file

@ -0,0 +1,37 @@
Puppet Module - puppetlabs-apt
Copyright 2017 Puppet, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Copyright (c) 2011 Evolving Web Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

View file

@ -0,0 +1,401 @@
# apt
#### Table of Contents
1. [Module Description - What the module does and why it is useful](#module-description)
1. [Setup - The basics of getting started with apt](#setup)
* [What apt affects](#what-apt-affects)
* [Beginning with apt](#beginning-with-apt)
1. [Usage - Configuration options and additional functionality](#usage)
* [Add GPG keys](#add-gpg-keys)
* [Prioritize backports](#prioritize-backports)
* [Update the list of packages](#update-the-list-of-packages)
* [Pin a specific release](#pin-a-specific-release)
* [Add a Personal Package Archive repository](#add-a-personal-package-archive-repository)
* [Configure Apt from Hiera](#configure-apt-from-hiera)
* [Replace the default sources.list file](#replace-the-default-sourceslist-file)
1. [Reference - An under-the-hood peek at what the module is doing and how](#reference)
1. [Limitations - OS compatibility, etc.](#limitations)
1. [License](#license)
1. [Development - Guide for contributing to the module](#development)
<a id="module-description"></a>
## Module Description
The apt module lets you use Puppet to manage APT (Advanced Package Tool) sources, keys, and other configuration options.
APT is a package manager available on Debian, Ubuntu, and several other operating systems. The apt module provides a series of classes, defines, types, and facts to help you automate APT package management.
**Note**: Prior to Puppet 7, for this module to correctly autodetect which version of
Debian/Ubuntu (or derivative) you're running, you need to make sure the `lsb-release` package is
installed. With Puppet 7 the `lsb-release` package is not needed.
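If you need to manage that package yourself on older Puppet versions, a minimal sketch:

```puppet
# Only needed on Puppet 6 and older, per the note above
package { 'lsb-release':
  ensure => installed,
}
```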
<a id="setup"></a>
## Setup
<a id="what-apt-affects"></a>
### What apt affects
* Your system's `preferences` file and `preferences.d` directory
* Your system's `sources.list` file and `sources.list.d` directory
* Your system's `apt.conf.d` directory
* System repositories
* Authentication keys
**Note:** This module offers `purge` parameters which, if set to `true`, **destroy** any configuration on the node's `sources.list(.d)`, `preferences(.d)` and `apt.conf.d` that you haven't declared through Puppet. The default for these parameters is `false`.
<a id="beginning-with-apt"></a>
### Beginning with apt
To use the apt module with default parameters, declare the `apt` class.
```puppet
include apt
```
**Note:** The main `apt` class is required by all other classes, types, and defined types in this module. You must declare it whenever you use the module.
<a id="usage"></a>
## Usage
<a id="add-gpg-keys"></a>
### Add GPG keys
You can fetch GPG keys via HTTP, Puppet URI, or local filesystem. The key can be in GPG binary format, or ASCII armored, but the filename should have the appropriate extension (`.gpg` for keys in binary format; or `.asc` for ASCII armored keys).
#### Fetch via HTTP
```puppet
apt::keyring { 'puppetlabs-keyring.gpg':
source => 'https://apt.puppetlabs.com/keyring.gpg',
}
```
#### Fetch via Puppet URI
```puppet
apt::keyring { 'puppetlabs-keyring.gpg':
source => 'puppet:///modules/my_module/local_puppetlabs-keyring.gpg',
}
```
Alternatively `apt::key` can be used.
**Warning:** `apt::key` is deprecated in the latest Debian and Ubuntu releases. Please use `apt::keyring` instead.
**Warning:** Using short key IDs presents a serious security issue, potentially leaving you open to collision attacks. We recommend you always use full fingerprints to identify your GPG keys. This module allows short keys, but issues a security warning if you use them.
Declare the `apt::key` defined type:
```puppet
apt::key { 'puppetlabs':
id => '6F6B15509CF8E59E6E469F327F438280EF8D349F',
server => 'pgp.mit.edu',
options => 'http-proxy="http://proxyuser:proxypass@example.org:3128"',
}
```
<a id="prioritize-backports"></a>
### Prioritize backports
```puppet
class { 'apt::backports':
pin => 500,
}
```
By default, the `apt::backports` class drops a pin file for backports, pinning it to a priority of 200. This is lower than the normal default of 500, so packages with `ensure => latest` don't get upgraded from backports without your explicit permission.
If you raise the priority through the `pin` parameter to 500, normal policy goes into effect and Apt installs or upgrades to the newest version. This means that if a package is available from backports, it and its dependencies are pulled in from backports unless you explicitly set the `ensure` attribute of the `package` resource to `installed`/`present` or a specific version.
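For example (a hedged sketch; the package names are only illustrative), with the pin raised to 500 a package set to `latest` may be satisfied from backports, while `installed`/`present` or a specific version keeps the normal archive's package:

```puppet
package { 'nginx':
  ensure => latest,    # with pin => 500, this may now be pulled from backports
}
package { 'vim':
  ensure => installed, # 'installed'/'present' (or an explicit version) is not upgraded to backports
}
```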
<a id="update-the-list-of-packages"></a>
### Update the list of packages
By default, Puppet runs `apt-get update` on the first Puppet run after you include the `apt` class, and anytime `notify => Exec['apt_update']` occurs; i.e., whenever config files get updated or other relevant changes occur. If you set `update['frequency']` to 'always', the update runs on every Puppet run. You can also set `update['frequency']` to 'hourly', 'daily', 'weekly' or any integer value >= 60:
```puppet
class { 'apt':
update => {
frequency => 'daily',
},
}
```
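The frequency can also be given directly as a number of seconds (any integer value >= 60); a minimal sketch:

```puppet
class { 'apt':
  update => {
    frequency => 86400, # run apt-get update at most once per day
  },
}
```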
When `Exec['apt_update']` is triggered, it generates a `notice`-level message. Because the default [logging level for agents](https://puppet.com/docs/puppet/latest/configuration.html#loglevel) is `notice`, this causes the repository update to appear in agent logs. To silence these updates from the default log output, set the [loglevel](https://puppet.com/docs/puppet/latest/metaparameter.html#loglevel) metaparameter for `Exec['apt_update']` above the agent logging level:
```puppet
class { 'apt':
update => {
frequency => 'daily',
loglevel => 'debug',
},
}
```
> **NOTE:** Every `Exec['apt_update']` run will generate a corrective change, even if the apt caches are not updated. For example, setting an update frequency of `always` can result in every Puppet run resulting in a corrective change. This is a known issue. For details, see [MODULES-10763](https://tickets.puppetlabs.com/browse/MODULES-10763).
<a id="pin-a-specific-release"></a>
### Pin a specific release
```puppet
apt::pin { 'karmic': priority => 700 }
apt::pin { 'karmic-updates': priority => 700 }
apt::pin { 'karmic-security': priority => 700 }
```
You can also specify more complex pins using distribution properties:
```puppet
apt::pin { 'stable':
priority => -10,
originator => 'Debian',
release_version => '3.0',
component => 'main',
label => 'Debian'
}
```
To pin multiple packages, pass them to the `packages` parameter as an array or a space-delimited string.
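A hedged sketch passing the packages as an array (the pin name, package names, and priority are only illustrative):

```puppet
apt::pin { 'backports-tools':
  packages => ['tmux', 'htop'],
  release  => "${facts['os']['distro']['codename']}-backports",
  priority => 600,
}
```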
<a id="add-a-personal-package-archive-repository"></a>
### Add a Personal Package Archive (PPA) repository
```puppet
apt::ppa { 'ppa:drizzle-developers/ppa': }
```
### Add an Apt source to `/etc/apt/sources.list.d/`
```puppet
apt::source { 'debian_unstable':
comment => 'This is the iWeb Debian unstable mirror',
location => 'http://debian.mirror.iweb.ca/debian/',
release => 'unstable',
repos => 'main contrib non-free non-free-firmware',
pin => '-10',
key => {
'id' => 'A1BD8E9D78F7FE5C3E65D8AF8B48AD6246925553',
'server' => 'subkeys.pgp.net',
},
include => {
'src' => true,
'deb' => true,
},
}
```
To use the Puppet Apt repository as a source:
```puppet
apt::source { 'puppetlabs':
location => 'http://apt.puppetlabs.com',
repos => 'main',
key => {
'id' => '6F6B15509CF8E59E6E469F327F438280EF8D349F',
'server' => 'pgp.mit.edu',
},
}
```
### Add `name` and `source` to the `key` parameter of `apt::source` to manage modern apt GPG keyrings
The `name` key of the `key` hash should contain the filename with its extension (such as `puppetlabs.gpg`).
```puppet
apt::source { 'puppetlabs':
comment => 'Puppet8',
location => 'https://apt.puppetlabs.com/',
repos => 'puppet8',
key => {
'name' => 'puppetlabs.gpg',
'source' => 'https://apt.puppetlabs.com/keyring.gpg',
},
}
```
<a id="configure-apt-from-hiera"></a>
### Generating a DEB822 .sources file
You can also generate a DEB822 format .sources file. This example covers most of the available options.
Use the `source_format` parameter to choose between 'list' and 'sources' (DEB822) formats.
```puppet
apt::source { 'debian':
  source_format  => 'sources',
  comment        => 'Official Debian Repository',
  enabled        => true,
  types          => ['deb', 'deb-src'],
  location       => ['http://fr.debian.org/debian', 'http://de.debian.org/debian'],
  release        => ['stable', 'stable-updates', 'stable-backports'],
  repos          => ['main', 'contrib', 'non-free'],
  architecture   => ['amd64', 'i386'],
  allow_unsigned => true,
  keyring        => '/etc/apt/keyrings/debian.gpg',
  notify_update  => false,
}
```
<a id="configure-apt-from-hiera"></a>
### Configure Apt from Hiera
Instead of specifying your sources directly as resources, you can instead just include the `apt` class, which will pick up the values automatically from hiera.
```yaml
apt::sources:
'debian_unstable':
comment: 'This is the iWeb Debian unstable mirror'
location: 'http://debian.mirror.iweb.ca/debian/'
release: 'unstable'
repos: 'main contrib non-free non-free-firmware'
pin: '-10'
key:
id: 'A1BD8E9D78F7FE5C3E65D8AF8B48AD6246925553'
server: 'subkeys.pgp.net'
include:
src: true
deb: true
'puppetlabs':
location: 'http://apt.puppetlabs.com'
repos: 'main'
key:
id: '6F6B15509CF8E59E6E469F327F438280EF8D349F'
server: 'pgp.mit.edu'
```
<a id="replace-the-default-sourceslist-file"></a>
### Replace the default `sources.list` file
The following example replaces the default `/etc/apt/sources.list`. Along with this code, be sure to use the `purge` parameter, or you might get duplicate source warnings when running Apt.
```puppet
apt::source { "archive.ubuntu.com-${facts['os']['distro']['codename']}":
location => 'http://archive.ubuntu.com/ubuntu',
key => '630239CC130E1A7FD81A27B140976EAF437D05B5',
repos => 'main universe multiverse restricted',
}
apt::source { "archive.ubuntu.com-${facts['os']['distro']['codename']}-security":
location => 'http://archive.ubuntu.com/ubuntu',
key => '630239CC130E1A7FD81A27B140976EAF437D05B5',
repos => 'main universe multiverse restricted',
release => "${facts['os']['distro']['codename']}-security"
}
apt::source { "archive.ubuntu.com-${facts['os']['distro']['codename']}-updates":
location => 'http://archive.ubuntu.com/ubuntu',
key => '630239CC130E1A7FD81A27B140976EAF437D05B5',
repos => 'main universe multiverse restricted',
release => "${facts['os']['distro']['codename']}-updates"
}
apt::source { "archive.ubuntu.com-${facts['os']['distro']['codename']}-backports":
location => 'http://archive.ubuntu.com/ubuntu',
key => '630239CC130E1A7FD81A27B140976EAF437D05B5',
repos => 'main universe multiverse restricted',
release => "${facts['os']['distro']['codename']}-backports"
}
```
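A hedged sketch of enabling `purge` alongside the replacement sources, so that any entries not managed by Puppet are removed from `sources.list` and `sources.list.d`:

```puppet
class { 'apt':
  purge => {
    'sources.list'   => true,
    'sources.list.d' => true,
  },
}
```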
### Manage login configuration settings for an APT source or proxy in `/etc/apt/auth.conf`
Starting with APT version 1.5, you can define login configuration settings, such as
username and password, for APT sources or proxies that require authentication
in the `/etc/apt/auth.conf` file. This is preferable to embedding login
information directly in `sources.list` entries, which are usually world-readable.
The `/etc/apt/auth.conf` file follows the format of netrc (used by ftp or
curl) and has restrictive file permissions. See [here](https://manpages.debian.org/testing/apt/apt_auth.conf.5.en.html) for details.
Use the optional `apt::auth_conf_entries` parameter to specify an array of hashes containing login configuration settings. These hashes may only contain the `machine`, `login` and `password` keys.
```puppet
class { 'apt':
auth_conf_entries => [
{
'machine' => 'apt-proxy.example.net',
'login' => 'proxylogin',
'password' => 'proxypassword',
},
{
'machine' => 'apt.example.com/ubuntu',
'login' => 'reader',
'password' => 'supersecret',
},
],
}
```
<a id="reference"></a>
## Reference
### Facts
* `apt_updates`: The number of installed packages with available updates from `upgrade`.
* `apt_dist_updates`: The number of installed packages with available updates from `dist-upgrade`.
* `apt_security_updates`: The number of installed packages with available security updates from `upgrade` (usage is sketched after this list).
* `apt_security_dist_updates`: The number of installed packages with available security updates from `dist-upgrade`.
* `apt_package_updates`: The names of all installed packages with available updates from `upgrade`. In Facter 2.0 and later this data is formatted as an array; in earlier versions it is a comma-delimited string.
* `apt_package_dist_updates`: The names of all installed packages with available updates from `dist-upgrade`. In Facter 2.0 and later this data is formatted as an array; in earlier versions it is a comma-delimited string.
* `apt_update_last_success`: The date, in epochtime, of the most recent successful `apt-get update` run (based on the mtime of /var/lib/apt/periodic/update-success-stamp).
* `apt_reboot_required`: Determines if a reboot is necessary after updates have been installed.
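A minimal sketch of consuming one of these facts in a manifest (the notify message is only illustrative):

```puppet
if $facts['apt_security_updates'] and $facts['apt_security_updates'] > 0 {
  notify { "Pending security updates: ${facts['apt_security_updates']}": }
}
```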
### More Information
See [REFERENCE.md](https://github.com/puppetlabs/puppetlabs-apt/blob/main/REFERENCE.md) for all other reference documentation.
<a id="limitations"></a>
## Limitations
This module is not designed to be split across [run stages](https://docs.puppetlabs.com/puppet/latest/reference/lang_run_stages.html).
For an extensive list of supported operating systems, see [metadata.json](https://github.com/puppetlabs/puppetlabs-apt/blob/main/metadata.json)
### Adding new sources or PPAs
If you are adding a new source or PPA and trying to install packages from the new source or PPA on the same Puppet run, your `package` resource should depend on `Class['apt::update']`, as well as depending on the `Apt::Source` or the `Apt::Ppa`. You can also add [collectors](https://docs.puppetlabs.com/puppet/latest/reference/lang_collectors.html) to ensure that all packages happen after `apt::update`, but this can lead to dependency cycles and has implications for [virtual resources](https://docs.puppetlabs.com/puppet/latest/reference/lang_collectors.html#behavior). Before running the command below, ensure that all packages have the provider set to apt.
```puppet
Class['apt::update'] -> Package <| provider == 'apt' |>
```
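Alternatively, a hedged sketch of the per-package dependency described above (the source name, location, and package are only illustrative):

```puppet
apt::source { 'example':
  location => 'http://apt.example.com/debian',
  repos    => 'main',
}

package { 'example-pkg':
  ensure  => installed,
  require => [Class['apt::update'], Apt::Source['example']],
}
```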
## License
This codebase is licensed under the Apache 2.0 license; however, due to the nature of the codebase, its open source dependencies may also use a combination of [AGPL](https://opensource.org/license/agpl-v3/), [BSD-2](https://opensource.org/license/bsd-2-clause/), [BSD-3](https://opensource.org/license/bsd-3-clause/), [GPL2.0](https://opensource.org/license/gpl-2-0/), [LGPL](https://opensource.org/license/lgpl-3-0/), [MIT](https://opensource.org/license/mit/) and [MPL](https://opensource.org/license/mpl-2-0/) licensing.
## Development
Acceptance tests for this module leverage [puppet_litmus](https://github.com/puppetlabs/puppet_litmus).
To run the acceptance tests follow the instructions [here](https://puppetlabs.github.io/litmus/Running-acceptance-tests.html).
You can also find a tutorial and walkthrough of using Litmus and the PDK on [YouTube](https://www.youtube.com/watch?v=FYfR7ZEGHoE).
If you run into an issue with this module, or if you would like to request a feature, please [file a ticket](https://github.com/puppetlabs/puppetlabs-apt/issues).
Every Monday the Puppet IA Content Team has [office hours](https://puppet.com/community/office-hours) in the [Puppet Community Slack](http://slack.puppet.com/), alternating between an EMEA friendly time (1300 UTC) and an Americas friendly time (0900 Pacific, 1700 UTC).
If you have problems getting this module up and running, please [contact Support](http://puppetlabs.com/services/customer-support).
If you submit a change to this module, be sure to regenerate the reference documentation as follows:
```bash
puppet strings generate --format markdown --out REFERENCE.md
```

File diff suppressed because it is too large

View file

@ -0,0 +1 @@
--- {}

View file

@ -0,0 +1,3 @@
apt::backports:
location: http://deb.debian.org/debian
repos: main contrib non-free

View file

@ -0,0 +1,3 @@
apt::backports:
location: http://deb.debian.org/debian
repos: main contrib non-free non-free-firmware

View file

@ -0,0 +1,7 @@
apt::backports:
location: http://archive.ubuntu.com/ubuntu
repos: main universe multiverse restricted
apt::ppa_options:
- -y
apt::ppa_package: software-properties-common

View file

@ -0,0 +1,7 @@
# Set up a backport for Linux Mint qiana
class { 'apt': }
class { 'apt::backports':
location => 'http://us.archive.ubuntu.com/ubuntu',
release => 'trusty-backports',
repos => 'main universe multiverse restricted',
}

View file

@ -0,0 +1,3 @@
package { 'glusterfs-server':
install_options => 'build-dep',
}

View file

@ -0,0 +1,18 @@
package { 'debian-keyring':
ensure => present,
}
package { 'debian-archive-keyring':
ensure => present,
}
apt::source { 'debian_testing':
location => 'http://debian.mirror.iweb.ca/debian/',
release => 'testing',
repos => 'main contrib non-free',
pin => '-10',
key => {
id => 'A1BD8E9D78F7FE5C3E65D8AF8B48AD6246925553',
server => 'subkeys.pgp.net',
},
}

View file

@ -0,0 +1,18 @@
package { 'debian-keyring':
ensure => present,
}
package { 'debian-archive-keyring':
ensure => present,
}
apt::source { 'debian_unstable':
location => 'http://debian.mirror.iweb.ca/debian/',
release => 'unstable',
repos => 'main contrib non-free',
pin => '-10',
key => {
id => 'A1BD8E9D78F7FE5C3E65D8AF8B48AD6246925553',
server => 'subkeys.pgp.net',
},
}

View file

@ -0,0 +1,5 @@
# Note: This is generally a bad idea. You should not disable verifying repository signatures.
apt::conf { 'unauth':
priority => 99,
content => 'APT::Get::AllowUnauthenticated 1;',
}

View file

@ -0,0 +1,4 @@
apt::conf { 'progressbar':
priority => 99,
content => 'Dpkg::Progress-Fancy "1";',
}

View file

@ -0,0 +1,28 @@
# Pick one value for each variable; the alternatives from the original
# conditional pseudocode are listed as comments.
# If you need to specify a release, use '-t <release>'; otherwise leave this empty.
$rel_string = ''
# If you need to specify a version, set this to '<version>'; otherwise use installed.
$ensure = installed
# How existing configuration files are handled:
#   overwrite existing cfg files:  '-o Dpkg::Options::="--force-confnew"'
#   force use of old files:        '-o Dpkg::Options::="--force-confold"'
#   update only unchanged files:   '-o Dpkg::Options::="--force-confdef"'
$config_files = ''
# To install missing configuration files for the package, use
# '-o Dpkg::Options::="--force-confmiss"'; otherwise leave this empty.
$config_missing = ''
package { '<package>':
  ensure          => $ensure,
  install_options => "${config_files} ${config_missing} ${rel_string}",
}

View file

@ -0,0 +1,5 @@
apt::pin { 'hold-vim':
packages => 'vim',
version => '2:7.4.488-5',
priority => 1001,
}

View file

@ -0,0 +1,6 @@
# Declare Apt key for apt.puppetlabs.com source
apt::key { 'puppetlabs':
id => 'D6811ED3ADEEB8441AF5AA8F4528B6CD9E61EF26',
server => 'keyserver.ubuntu.com',
options => 'http-proxy="http://proxyuser:proxypass@example.org:3128"',
}

View file

@ -0,0 +1,3 @@
apt::mark { 'vim':
setting => 'auto',
}

View file

@ -0,0 +1,5 @@
# pin a release in apt, useful for unstable repositories
apt::pin { 'foo':
packages => '*',
priority => 0,
}

View file

@ -0,0 +1,4 @@
class { 'apt': }
# Example declaration of an Apt PPA
apt::ppa { 'ppa:ubuntuhandbook1/apps': }

View file

@ -0,0 +1,4 @@
apt::conf { 'release':
content => 'APT::Default-Release "karmic";',
priority => '01',
}

View file

@ -0,0 +1,35 @@
# Declare the apt class to manage /etc/apt/sources.list and /etc/apt/sources.list.d
class { 'apt': }
# Install the puppetlabs apt source
# Release is automatically obtained from facts.
apt::source { 'puppetlabs':
location => 'http://apt.puppetlabs.com',
repos => 'main',
key => {
id => '6F6B15509CF8E59E6E469F327F438280EF8D349F',
server => 'keyserver.ubuntu.com',
},
}
# test two sources with the same key
apt::source { 'debian_testing':
location => 'http://debian.mirror.iweb.ca/debian/',
release => 'testing',
repos => 'main contrib non-free non-free-firmware',
key => {
id => 'A1BD8E9D78F7FE5C3E65D8AF8B48AD6246925553',
server => 'keyserver.ubuntu.com',
},
pin => '-10',
}
apt::source { 'debian_unstable':
location => 'http://debian.mirror.iweb.ca/debian/',
release => 'unstable',
repos => 'main contrib non-free non-free-firmware',
key => {
id => 'A1BD8E9D78F7FE5C3E65D8AF8B48AD6246925553',
server => 'keyserver.ubuntu.com',
},
pin => '-10',
}

View file

@ -0,0 +1 @@
# TODO

View file

@ -0,0 +1 @@
APT::Update::Post-Invoke-Success {"touch /var/lib/apt/periodic/update-success-stamp 2>/dev/null || true";};

View file

@ -0,0 +1,21 @@
---
version: 5
defaults: # Used for any hierarchy level that omits these keys.
datadir: data # This path is relative to hiera.yaml's directory.
data_hash: yaml_data # Use the built-in YAML backend.
hierarchy:
- name: "os.family/major release"
paths:
# Used to distinguish between Debian and Ubuntu
- "os/%{facts.os.name}/%{facts.os.release.major}.yaml"
- "os/%{facts.os.family}/%{facts.os.release.major}.yaml"
# Used for Solaris
- "os/%{facts.os.family}/%{facts.kernelrelease}.yaml"
- name: "os.family"
paths:
- "os/%{facts.os.name}.yaml"
- "os/%{facts.os.family}.yaml"
- name: 'common'
path: 'common.yaml'

View file

@ -0,0 +1,9 @@
# frozen_string_literal: true
# apt_reboot_required.rb
Facter.add(:apt_reboot_required) do
confine 'os.family': 'Debian'
setcode do
File.file?('/var/run/reboot-required')
end
end

View file

@ -0,0 +1,13 @@
# frozen_string_literal: true
# This fact lists the .list and .sources filenames that are used by apt.
Facter.add(:apt_sources) do
confine 'os.family': 'Debian'
setcode do
sources = ['sources.list']
Dir.glob('/etc/apt/sources.list.d/*.{list,sources}').each do |file|
sources.push(File.basename(file))
end
sources
end
end

View file

@ -0,0 +1,18 @@
# frozen_string_literal: true
require 'facter'
# This is derived from the file /var/lib/apt/periodic/update-success-stamp.
# That file is generated upon a successful apt-get update run natively in Ubuntu;
# the puppetlabs-apt module deploys the same functionality for other Debian-ish OSes.
Facter.add('apt_update_last_success') do
confine 'os.family': 'Debian'
setcode do
if File.exist?('/var/lib/apt/periodic/update-success-stamp')
# get epoch time
File.mtime('/var/lib/apt/periodic/update-success-stamp').to_i
else
-1
end
end
end

View file

@ -0,0 +1,103 @@
# frozen_string_literal: true
apt_package_updates = nil
apt_dist_updates = nil
# Simulates an upgrade (apt-get -s) and returns the packages with available updates,
# together with the subset that are security updates.
# @param upgrade_option Type of upgrade passed to apt-get, i.e. 'upgrade' or 'dist-upgrade'
def get_updates(upgrade_option)
apt_updates = nil
if File.executable?('/usr/bin/apt-get')
apt_get_result = Facter::Core::Execution.execute("/usr/bin/apt-get -s -o Debug::NoLocking=true #{upgrade_option} 2>&1")
unless apt_get_result.nil?
apt_updates = [[], []]
apt_get_result.each_line do |line|
next unless %r{^Inst\s}.match?(line)
package = line.gsub(%r{^Inst\s([^\s]+)\s.*}, '\1').strip
apt_updates[0].push(package)
security_matches = [
%r{ Debian-Security:},
%r{ Ubuntu[^\s]+-security[, ]},
%r{ gNewSense[^\s]+-security[, ]},
]
re = Regexp.union(security_matches)
apt_updates[1].push(package) if line.match(re)
end
end
end
apt_updates
end
Facter.add('apt_has_updates') do
confine 'os.family': 'Debian'
setcode do
apt_package_updates = get_updates('upgrade')
apt_package_updates != [[], []] if !apt_package_updates.nil? && apt_package_updates.length == 2
end
end
Facter.add('apt_has_dist_updates') do
confine 'os.family': 'Debian'
setcode do
apt_dist_updates = get_updates('dist-upgrade')
apt_dist_updates != [[], []] if !apt_dist_updates.nil? && apt_dist_updates.length == 2
end
end
Facter.add('apt_package_updates') do
confine apt_has_updates: true
setcode do
apt_package_updates[0]
end
end
Facter.add('apt_package_dist_updates') do
confine apt_has_dist_updates: true
setcode do
apt_dist_updates[0]
end
end
Facter.add('apt_package_security_updates') do
confine apt_has_updates: true
setcode do
apt_package_updates[1]
end
end
Facter.add('apt_package_security_dist_updates') do
confine apt_has_dist_updates: true
setcode do
apt_dist_updates[1]
end
end
Facter.add('apt_updates') do
confine apt_has_updates: true
setcode do
Integer(apt_package_updates[0].length)
end
end
Facter.add('apt_dist_updates') do
confine apt_has_dist_updates: true
setcode do
Integer(apt_dist_updates[0].length)
end
end
Facter.add('apt_security_updates') do
confine apt_has_updates: true
setcode do
Integer(apt_package_updates[1].length)
end
end
Facter.add('apt_security_dist_updates') do
confine apt_has_dist_updates: true
setcode do
Integer(apt_dist_updates[1].length)
end
end

View file

@ -0,0 +1,240 @@
# frozen_string_literal: true
require 'open-uri'
begin
require 'net/ftp'
rescue LoadError
# Ruby 3.0 changed net-ftp to a default gem
end
require 'tempfile'
Puppet::Type.type(:apt_key).provide(:apt_key) do
desc 'apt-key provider for apt_key resource'
confine 'os.family': :debian
defaultfor 'os.family': :debian
commands apt_key: 'apt-key'
commands gpg: '/usr/bin/gpg'
def self.instances
key_array = []
cli_args = ['adv', '--no-tty', '--list-keys', '--with-colons', '--fingerprint', '--fixed-list-mode']
key_output = apt_key(cli_args).encode('UTF-8', 'binary', invalid: :replace, undef: :replace, replace: '')
pub_line = nil
fpr_lines = []
sub_lines = []
lines = key_output.split("\n")
lines.each_index do |i|
if lines[i].start_with?('pub')
pub_line = lines[i]
# starting a new public key, so reset fpr_lines and sub_lines
fpr_lines = []
sub_lines = []
elsif lines[i].start_with?('fpr')
fpr_lines << lines[i]
elsif lines[i].start_with?('sub')
sub_lines << lines[i]
end
next unless (pub_line && !fpr_lines.empty?) && (!lines[i + 1] || lines[i + 1].start_with?('pub'))
line_hash = key_line_hash(pub_line, fpr_lines)
expired = line_hash[:key_expired] || subkeys_all_expired(sub_lines)
key_array << new(
name: line_hash[:key_fingerprint],
id: line_hash[:key_long],
fingerprint: line_hash[:key_fingerprint],
short: line_hash[:key_short],
long: line_hash[:key_long],
ensure: :present,
expired: expired,
expiry: line_hash[:key_expiry].nil? ? nil : line_hash[:key_expiry].strftime('%Y-%m-%d'),
size: line_hash[:key_size],
type: line_hash[:key_type],
created: line_hash[:key_created].strftime('%Y-%m-%d'),
)
end
key_array
end
def self.prefetch(resources)
apt_keys = instances
resources.each_key do |name|
case name.length
when 40
provider = apt_keys.find { |key| key.fingerprint == name }
resources[name].provider = provider if provider
when 16
provider = apt_keys.find { |key| key.long == name }
resources[name].provider = provider if provider
when 8
provider = apt_keys.find { |key| key.short == name }
resources[name].provider = provider if provider
end
end
end
def self.subkeys_all_expired(sub_lines)
return false if sub_lines.empty?
sub_lines.each do |line|
return false if line.split(':')[1] == '-'
end
true
end
def self.key_line_hash(pub_line, fpr_lines)
pub_split = pub_line.split(':')
fpr_split = fpr_lines.first.split(':')
fingerprint = fpr_split.last
return_hash = {
key_fingerprint: fingerprint,
key_long: fingerprint[-16..], # last 16 characters of fingerprint
key_short: fingerprint[-8..], # last 8 characters of fingerprint
key_size: pub_split[2],
key_type: nil,
key_created: Time.at(pub_split[5].to_i),
key_expired: pub_split[1] == 'e',
key_expiry: pub_split[6].empty? ? nil : Time.at(pub_split[6].to_i)
}
# set key type based on types defined in /usr/share/doc/gnupg/DETAILS.gz
case pub_split[3]
when '1'
return_hash[:key_type] = :rsa
when '17'
return_hash[:key_type] = :dsa
when '18'
return_hash[:key_type] = :ecc
when '19'
return_hash[:key_type] = :ecdsa
end
return_hash
end
def source_to_file(value)
parsed_value = URI.parse(value)
if parsed_value.scheme.nil?
raise(_('The file %{_value} does not exist') % { _value: value }) unless File.exist?(value)
# Because the tempfile method has to return a live object to prevent GC
# of the underlying file from occuring too early, we also have to return
# a file object here. The caller can still call the #path method on the
# closed file handle to get the path.
f = File.open(value, 'r')
f.close
f
else
exceptions = [OpenURI::HTTPError]
exceptions << Net::FTPPermError if defined?(Net::FTPPermError)
begin
# Only send basic auth if URL contains userinfo
# Some webservers (e.g. Amazon S3) return code 400 if empty basic auth is sent
if parsed_value.userinfo.nil?
key = if parsed_value.scheme == 'https' && resource[:weak_ssl] == true
URI.open(parsed_value, ssl_verify_mode: OpenSSL::SSL::VERIFY_NONE).read
else
parsed_value.read
end
else
user_pass = parsed_value.userinfo.split(':')
parsed_value.userinfo = ''
key = URI.open(parsed_value, http_basic_authentication: user_pass).read
end
rescue *exceptions => e
raise(_('%{_e} for %{_resource}') % { _e: e.message, _resource: resource[:source] })
rescue SocketError
raise(_('could not resolve %{_resource}') % { _resource: resource[:source] })
else
tempfile(key)
end
end
end
# The tempfile method needs to return the tempfile object to the caller, so
# that it doesn't get deleted by the GC immediately after it returns. We
# want the caller to control when it goes out of scope.
def tempfile(content)
file = Tempfile.new('apt_key')
file.write content
file.close
# confirm that the fingerprint from the file, matches the long key that is in the manifest
if name.size == 40
if File.executable? command(:gpg)
extracted_key = execute(["#{command(:gpg)} --no-tty --with-fingerprint --with-colons #{file.path} | awk -F: '/^fpr:/ { print $10 }'"], failonfail: false)
extracted_key = extracted_key.chomp
found_match = false
extracted_key.each_line do |line|
found_match = true if line.chomp == name
end
unless found_match
raise(_('The id in your manifest %{_resource} and the fingerprint from content/source don\'t match. Check for an error in the id and content/source is legitimate.') % { _resource: resource[:name] }) # rubocop:disable Layout/LineLength
end
else
warning('/usr/bin/gpg cannot be found for verification of the id.')
end
end
file
end
def exists?
# report expired keys as non-existing when refresh => true
@property_hash[:ensure] == :present && !(resource[:refresh] && @property_hash[:expired])
end
def create
command = []
if resource[:source].nil? && resource[:content].nil?
# Breaking up the command like this is needed because it blows up
# if --recv-keys isn't the last argument.
command.push('adv', '--no-tty', '--keyserver', resource[:server])
command.push('--keyserver-options', resource[:options]) unless resource[:options].nil?
command.push('--recv-keys', resource[:id])
elsif resource[:content]
key_file = tempfile(resource[:content])
command.push('add', key_file.path)
elsif resource[:source]
key_file = source_to_file(resource[:source])
command.push('add', key_file.path)
# In case we really screwed up, better safe than sorry.
else
raise(_('an unexpected condition occurred while trying to add the key: %{_resource}') % { _resource: resource[:id] })
end
apt_key(command)
@property_hash[:ensure] = :present
end
def destroy
loop do
apt_key('del', resource.provider.short)
r = execute(["#{command(:apt_key)} list | grep '/#{resource.provider.short}\s'"], failonfail: false)
break unless r.exitstatus.zero?
end
@property_hash.clear
end
def read_only(_value)
raise(_('This is a read-only property.'))
end
mk_resource_methods
# Alias the setters of read-only properties
# to the read_only function.
alias_method :created=, :read_only
alias_method :expired=, :read_only
alias_method :expiry=, :read_only
alias_method :size=, :read_only
alias_method :type=, :read_only
end

View file

@ -0,0 +1,146 @@
# frozen_string_literal: true
require 'pathname'
require 'puppet/parameter/boolean'
Puppet::Type.newtype(:apt_key) do
@doc = <<-MANIFEST
@summary This type provides Puppet with the capabilities to manage GPG keys needed
by apt to perform package validation. Apt has its own GPG keyring that can
be manipulated through the `apt-key` command.
@example Basic usage
apt_key { '6F6B15509CF8E59E6E469F327F438280EF8D349F':
source => 'http://apt.puppetlabs.com/pubkey.gpg'
}
**Autorequires**
If Puppet is given the location of a key file which looks like an absolute
path this type will autorequire that file.
@api private
MANIFEST
ensurable
validate do
raise(_('ensure => absent and refresh => true are mutually exclusive')) if self[:refresh] == true && self[:ensure] == :absent
raise(_('The properties content and source are mutually exclusive.')) if self[:content] && self[:source]
warning(_('The id should be a full fingerprint (40 characters), see README.')) if self[:id].length < 40
end
newparam(:id, namevar: true) do
desc 'The ID of the key you want to manage.'
# GPG key IDs should be either 32-bit (short) or 64-bit (long) key IDs
# and may start with the optional 0x, or they can be 40-digit key fingerprints
newvalues(%r{\A(0x)?[0-9a-fA-F]{8}\Z}, %r{\A(0x)?[0-9a-fA-F]{16}\Z}, %r{\A(0x)?[0-9a-fA-F]{40}\Z})
munge do |value|
id = if value.start_with?('0x')
value.partition('0x').last.upcase
else
value.upcase
end
id
end
end
newparam(:content) do
desc 'The content of, or string representing, a GPG key.'
end
newparam(:source) do
desc 'Location of a GPG key file, /path/to/file, ftp://, http:// or https://'
newvalues(%r{\Ahttps?://}, %r{\Aftp://}, %r{\A/\w+})
end
autorequire(:file) do
self[:source] if self[:source] && Pathname.new(self[:source]).absolute?
end
newparam(:server) do
desc 'The key server to fetch the key from based on the ID. It can either be a domain name or URL.'
defaultto :'keyserver.ubuntu.com'
newvalues(%r{\A((hkp|hkps|http|https)://)?([a-z\d])([a-z\d-]{0,61}\.)+[a-z\d]+(:\d{2,5})?(/[a-zA-Z\d\-_.]+)*/?$})
end
newparam(:options) do
desc 'Additional options to pass to apt-key\'s --keyserver-options.'
end
newparam(:refresh, boolean: true, parent: Puppet::Parameter::Boolean) do
desc 'When true, recreate an existing expired key'
defaultto false
end
newparam(:weak_ssl, boolean: true, parent: Puppet::Parameter::Boolean) do
desc 'When true and source uses https, accepts download of keys without SSL verification'
defaultto false
end
newproperty(:fingerprint) do
desc <<-MANIFEST
The 40-digit hexadecimal fingerprint of the specified GPG key.
This property is read-only.
MANIFEST
end
newproperty(:long) do
desc <<-MANIFEST
The 16-digit hexadecimal id of the specified GPG key.
This property is read-only.
MANIFEST
end
newproperty(:short) do
desc <<-MANIFEST
The 8-digit hexadecimal id of the specified GPG key.
This property is read-only.
MANIFEST
end
newproperty(:expired) do
desc <<-MANIFEST
Indicates if the key has expired.
This property is read-only.
MANIFEST
end
newproperty(:expiry) do
desc <<-MANIFEST
The date the key will expire, or nil if it has no expiry date.
This property is read-only.
MANIFEST
end
newproperty(:size) do
desc <<-MANIFEST
The key size, usually a multiple of 1024.
This property is read-only.
MANIFEST
end
newproperty(:type) do
desc <<-MANIFEST
The key type, one of: rsa, dsa, ecc, ecdsa
This property is read-only.
MANIFEST
end
newproperty(:created) do
desc <<-MANIFEST
Date the key was created.
This property is read-only.
MANIFEST
end
end

View file

@ -0,0 +1,119 @@
# @summary Manages backports.
#
# @example Set up a backport source for Ubuntu
# include apt::backports
#
# @param location
# Specifies an Apt repository containing the backports to manage. Valid options: a string containing a URL. Default value for Debian and
# Ubuntu varies:
#
# - Debian: 'http://deb.debian.org/debian'
#
# - Ubuntu: 'http://archive.ubuntu.com/ubuntu'
#
# @param release
# Specifies a distribution of the Apt repository containing the backports to manage. Used in populating the `sources.list` configuration file.
#   Default: on Debian and Ubuntu, `${fact('os.distro.codename')}-backports`. We recommend keeping this default on Debian and Ubuntu; on other
#   operating systems the parameter must be set explicitly.
#
# @param repos
# Specifies a component of the Apt repository containing the backports to manage. Used in populating the `sources.list` configuration file.
# Default value for Debian and Ubuntu varies:
#
# - Debian: 'main contrib non-free non-free-firmware'
#
# - Ubuntu: 'main universe multiverse restricted'
#
# @param key
# Specifies a key to authenticate the backports. Valid options: a string to be passed to the id parameter of the apt::key defined type, or a
# hash of parameter => value pairs to be passed to apt::key's id, server, content, source, and/or options parameters.
#
# @param keyring
# Absolute path to a file containing the PGP keyring used to sign this
# repository. Value is passed to the apt::source and used to set signed-by on
# the source entry.
#
# @param pin
# Specifies a pin priority for the backports. Valid options: a number or string to be passed to the `id` parameter of the `apt::pin` defined
# type, or a hash of `parameter => value` pairs to be passed to `apt::pin`'s corresponding parameters.
#
# @param include
# Specifies whether to include 'deb' or 'src', or both.
#
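# @example Backports with an explicit location and pin priority (a sketch; the
#   values shown are illustrative and not taken from this repository)
#   class { 'apt::backports':
#     location => 'http://deb.debian.org/debian',
#     release  => 'bookworm-backports',
#     repos    => 'main contrib non-free',
#     pin      => 500,
#   }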
class apt::backports (
Optional[Stdlib::HTTPUrl] $location = undef,
Optional[String[1]] $release = undef,
Optional[String[1]] $repos = undef,
Optional[Variant[String[1], Hash]] $key = undef,
Stdlib::AbsolutePath $keyring = "/usr/share/keyrings/${facts['os']['name'].downcase}-archive-keyring.gpg",
Variant[Integer, String[1], Hash] $pin = 200,
Hash $include = {},
) {
include apt
if $location {
$_location = $location
}
if $release {
$_release = $release
}
if $repos {
$_repos = $repos
}
if (!($facts['os']['name'] == 'Debian' or $facts['os']['name'] == 'Ubuntu')) {
unless $location and $release and $repos {
fail('If not on Debian or Ubuntu, you must explicitly pass location, release, and repos')
}
}
unless $location {
$_location = $apt::backports['location']
}
unless $release {
if fact('os.distro.codename') {
$_release = "${fact('os.distro.codename')}-backports"
} else {
fail('os.distro.codename fact not available: release parameter required')
}
}
unless $repos {
$_repos = $apt::backports['repos']
}
$_keyring = if $key {
undef
} else {
$keyring
}
if $pin =~ Hash {
$_pin = $pin
} elsif $pin =~ Numeric or $pin =~ String {
$pin_type = $facts['os']['name'] ? {
'Debian' => 'codename',
'Ubuntu' => 'release',
}
$_pin = {
'priority' => $pin,
$pin_type => $_release,
}
} else {
fail('pin must be either a string, number or hash')
}
apt::source { 'backports':
location => $_location,
release => $_release,
repos => $_repos,
include => $include,
key => $key,
keyring => $_keyring,
pin => $_pin,
}
}

View file

@ -0,0 +1,35 @@
# @summary Specifies a custom Apt configuration file.
#
# @param content
# Required unless `ensure` is set to 'absent'. Directly supplies content for the configuration file.
#
# @param ensure
# Specifies whether the configuration file should exist.
#
# @param priority
# Determines the order in which Apt processes the configuration file. Files with lower priority numbers are loaded first.
# Valid options: a string containing an integer or an integer.
#
# @param notify_update
# Specifies whether to trigger an `apt-get update` run.
#
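# @example Disable installation of recommended packages (a sketch; the resource
#   name and content are illustrative)
#   apt::conf { 'norecommends':
#     priority => 50,
#     content  => 'APT::Install-Recommends "0";',
#   }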
define apt::conf (
Optional[String[1]] $content = undef,
Enum['present', 'absent'] $ensure = present,
Variant[String[1], Integer[0]] $priority = 50,
Optional[Boolean] $notify_update = undef,
) {
unless $ensure == 'absent' {
unless $content {
fail('Need to pass in content parameter')
}
}
$confheadertmp = epp('apt/_conf_header.epp')
apt::setting { "conf-${name}":
ensure => $ensure,
priority => $priority,
content => "${confheadertmp}${content}",
notify_update => $notify_update,
}
}

View file

@ -0,0 +1,457 @@
# @summary Main class, includes all other classes.
#
# @see https://docs.puppetlabs.com/references/latest/function.html#createresources for the create resource function
#
# @param provider
# Specifies the provider that should be used by apt::update.
#
# @param keyserver
# Specifies a keyserver to provide the GPG key. Valid options: a string containing a domain name or a full URL (http://, https://, or
# hkp://).
#
# @param key_options
# Specifies the default options for apt::key resources.
#
# @param ppa_options
# Supplies options to be passed to the `add-apt-repository` command.
#
# @param ppa_package
# Names the package that provides the `apt-add-repository` command.
#
# @param backports
# Specifies some of the default parameters used by apt::backports. Valid options: a hash made up from the following keys:
#
# @option backports [String] :location
# See apt::backports for documentation.
#
# @option backports [String] :repos
# See apt::backports for documentation.
#
# @option backports [String] :key
# See apt::backports for documentation.
#
# @param confs
# Hash of `apt::conf` resources.
#
# @param update
# Configures various update settings. Valid options: a hash made up from the following keys:
#
# @option update [String] :frequency
# Specifies how often to run `apt-get update`. If the exec resource `apt_update` is notified,
# `apt-get update` runs regardless of this value.
# Valid options:
# 'always' (at every Puppet run);
# 'hourly' (if the value of `apt_update_last_success` is less than current epoch time minus 3600);
# 'daily' (if the value of `apt_update_last_success` is less than current epoch time minus 86400);
# 'weekly' (if the value of `apt_update_last_success` is less than current epoch time minus 604800);
# Integer (if the value of `apt_update_last_success` is less than current epoch time minus provided Integer value);
# 'reluctantly' (only if the exec resource `apt_update` is notified).
# Default: 'reluctantly'.
#
# @option update [Integer] :loglevel
# Specifies the log level of logs outputted to the console. Default: undef.
#
# @option update [Integer] :timeout
# Specifies how long to wait for the update to complete before canceling it. Valid options: an integer, in seconds. Default: undef.
#
# @option update [Integer] :tries
# Specifies how many times to retry the update after receiving a DNS or HTTP error. Default: undef.
#
# @param update_defaults
# The default update settings that are combined and merged with the passed `update` value
#
# @param purge
# Specifies whether to purge any existing settings that aren't managed by Puppet. Valid options: a hash made up from the following keys:
#
# @option purge [Boolean] :sources.list
# Specifies whether to purge any unmanaged entries from sources.list. Default false.
#
# @option purge [Boolean] :sources.list.d
# Specifies whether to purge any unmanaged entries from sources.list.d. Default false.
#
# @option purge [Boolean] :preferences
# Specifies whether to purge any unmanaged entries from preferences. Default false.
#
# @option purge [Boolean] :preferences.d.
# Specifies whether to purge any unmanaged entries from preferences.d. Default false.
#
# @param purge_defaults
# The default purge settings that are combined and merged with the passed `purge` value
#
# @param proxy
# Configures Apt to connect to a proxy server. Valid options: a hash matching the locally defined type apt::proxy.
#
# @param proxy_defaults
# The default proxy settings that are combined and merged with the passed `proxy` value
#
# @param sources
# Hash of `apt::source` resources.
#
# @param keys
# Hash of `apt::key` resources.
#
# @param keyrings
# Hash of `apt::keyring` resources.
#
# @param ppas
# Hash of `apt::ppa` resources.
#
# @param pins
# Hash of `apt::pin` resources.
#
# @param settings
# Hash of `apt::setting` resources.
#
# @param manage_auth_conf
# Specifies whether to manage the /etc/apt/auth.conf file. When true, the file will be overwritten with the entries specified in
#   the auth_conf_entries parameter. When false, the file will be ignored (note that this does not set the file to absent).
#
# @param auth_conf_entries
# An optional array of login configuration settings (hashes) that are recorded in the file /etc/apt/auth.conf. This file has a netrc-like
# format (similar to what curl uses) and contains the login configuration for APT sources and proxies that require authentication. See
# https://manpages.debian.org/testing/apt/apt_auth.conf.5.en.html for details. If specified each hash must contain the keys machine, login and
# password and no others. Specifying manage_auth_conf and not specifying this parameter will set /etc/apt/auth.conf to absent.
#
# @param auth_conf_owner
# The owner of the file /etc/apt/auth.conf.
#
# @param root
# Specifies root directory of Apt executable.
#
# @param sources_list
# Specifies the path of the sources_list file to use.
#
# @param sources_list_d
# Specifies the path of the sources_list.d file to use.
#
# @param conf_d
# Specifies the path of the conf.d file to use.
#
# @param preferences
# Specifies the path of the preferences file to use.
#
# @param preferences_d
# Specifies the path of the preferences.d file to use.
#
# @param config_files
# A hash made up of the various configuration files used by Apt.
#
# @param sources_list_force
# Specifies whether to perform force purge or delete.
#
# @param include_defaults
# The package types to include by default.
#
# @param apt_conf_d
#   The path to the `apt.conf.d` directory.
#
# @param source_key_defaults
#   The default `source_key` settings
#
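# @example Non-default update and purge behaviour (a sketch; the values are
#   illustrative and not taken from this repository's profiles)
#   class { 'apt':
#     update => { 'frequency' => 'hourly' },
#     purge  => { 'sources.list.d' => true },
#   }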
class apt (
Hash $update_defaults = {
'frequency' => 'reluctantly',
'loglevel' => undef,
'timeout' => undef,
'tries' => undef,
},
Hash $purge_defaults = {
'sources.list' => false,
'sources.list.d' => false,
'preferences' => false,
'preferences.d' => false,
'apt.conf.d' => false,
},
Hash $proxy_defaults = {
'ensure' => undef,
'host' => undef,
'port' => 8080,
'https' => false,
'https_acng' => false,
'direct' => false,
},
Hash $include_defaults = {
'deb' => true,
'src' => false,
},
Stdlib::Absolutepath $provider = '/usr/bin/apt-get',
Stdlib::Host $keyserver = 'keyserver.ubuntu.com',
Optional[String[1]] $key_options = undef,
Optional[Array[String[1]]] $ppa_options = undef,
Optional[String[1]] $ppa_package = undef,
Optional[Hash] $backports = undef,
Hash $confs = {},
Hash $update = {},
Hash $purge = {},
Apt::Proxy $proxy = {},
Hash $sources = {},
Hash $keys = {},
Hash $keyrings = {},
Hash $ppas = {},
Hash $pins = {},
Hash $settings = {},
Boolean $manage_auth_conf = true,
Array[Apt::Auth_conf_entry] $auth_conf_entries = [],
String[1] $auth_conf_owner = '_apt',
Stdlib::Absolutepath $root = '/etc/apt',
Stdlib::Absolutepath $sources_list = "${root}/sources.list",
Stdlib::Absolutepath $sources_list_d = "${root}/sources.list.d",
Stdlib::Absolutepath $conf_d = "${root}/apt.conf.d",
Stdlib::Absolutepath $preferences = "${root}/preferences",
Stdlib::Absolutepath $preferences_d = "${root}/preferences.d",
Stdlib::Absolutepath $apt_conf_d = "${root}/apt.conf.d",
Hash $config_files = {
'conf' => {
'path' => $conf_d,
'ext' => '',
},
'pref' => {
'path' => $preferences_d,
'ext' => '.pref',
},
'list' => {
'path' => $sources_list_d,
'ext' => '.list',
},
'sources' => {
'path' => $sources_list_d,
'ext' => '.sources',
},
},
Boolean $sources_list_force = false,
Hash $source_key_defaults = {
'server' => $keyserver,
'options' => undef,
'content' => undef,
'source' => undef,
},
) {
if $facts['os']['family'] != 'Debian' {
fail('This module only works on Debian or derivatives like Ubuntu')
}
if $update['frequency'] {
assert_type(
Variant[Enum['always','hourly','daily','weekly','reluctantly'],Integer[60]],
$update['frequency'],
)
}
if $update['timeout'] {
assert_type(Integer, $update['timeout'])
}
if $update['tries'] {
assert_type(Integer, $update['tries'])
}
$_update = $apt::update_defaults + $update
include apt::update
if $purge['sources.list'] {
assert_type(Boolean, $purge['sources.list'])
}
if $purge['sources.list.d'] {
assert_type(Boolean, $purge['sources.list.d'])
}
if $purge['preferences'] {
assert_type(Boolean, $purge['preferences'])
}
if $purge['preferences.d'] {
assert_type(Boolean, $purge['preferences.d'])
}
if $sources_list_force {
assert_type(Boolean, $sources_list_force)
}
if $purge['apt.conf.d'] {
assert_type(Boolean, $purge['apt.conf.d'])
}
$_purge = $apt::purge_defaults + $purge
if $proxy['perhost'] {
$_perhost = $proxy['perhost'].map |$item| {
$_item = $apt::proxy_defaults + $item
$_scheme = $_item['https'] ? {
true => 'https',
default => 'http',
}
$_port = $_item['port'] ? {
Integer => ":${_item['port']}",
default => ''
}
$_target = $_item['direct'] ? {
true => 'DIRECT',
default => "${_scheme}://${_item['host']}${_port}/",
}
$item + { 'scheme' => $_scheme, 'target' => $_target, }
}
} else {
$_perhost = {}
}
$_proxy = $apt::proxy_defaults + $proxy + { 'perhost' => $_perhost }
$confheadertmp = epp('apt/_conf_header.epp')
$proxytmp = epp('apt/proxy.epp', { 'proxies' => $_proxy })
$updatestamptmp = file('apt/15update-stamp')
if $_proxy['ensure'] == 'absent' or $_proxy['host'] {
apt::setting { 'conf-proxy':
ensure => $_proxy['ensure'],
priority => '01',
content => "${confheadertmp}${proxytmp}",
}
}
if $sources_list_force {
$sources_list_ensure = $_purge['sources.list'] ? {
true => absent,
default => file,
}
$sources_list_content = $_purge['sources.list'] ? {
true => nil,
default => undef,
}
} else {
$sources_list_ensure = $_purge['sources.list'] ? {
true => file,
default => file,
}
$sources_list_content = $_purge['sources.list'] ? {
true => "# Repos managed by puppet.\n",
default => undef,
}
}
$preferences_ensure = $_purge['preferences'] ? {
true => absent,
default => file,
}
apt::setting { 'conf-update-stamp':
priority => 15,
content => "${confheadertmp}${updatestamptmp}",
}
file { 'sources.list':
ensure => $sources_list_ensure,
path => $apt::sources_list,
owner => root,
group => root,
content => $sources_list_content,
notify => Class['apt::update'],
}
file { 'sources.list.d':
ensure => directory,
path => $apt::sources_list_d,
owner => root,
group => root,
purge => $_purge['sources.list.d'],
recurse => $_purge['sources.list.d'],
notify => Class['apt::update'],
}
file { 'preferences':
ensure => $preferences_ensure,
path => $apt::preferences,
owner => root,
group => root,
notify => Class['apt::update'],
}
file { 'preferences.d':
ensure => directory,
path => $apt::preferences_d,
owner => root,
group => root,
purge => $_purge['preferences.d'],
recurse => $_purge['preferences.d'],
notify => Class['apt::update'],
}
file { 'apt.conf.d':
ensure => directory,
path => $apt::apt_conf_d,
owner => root,
group => root,
purge => $_purge['apt.conf.d'],
recurse => $_purge['apt.conf.d'],
notify => Class['apt::update'],
}
$confs.each |$key, $value| {
apt::conf { $key:
* => $value,
}
}
$sources.each |$key, $value| {
apt::source { $key:
* => $value,
}
}
$keys.each |$key, $value| {
apt::key { $key:
* => $value,
}
}
$keyrings.each |$key, $data| {
apt::keyring { $key:
* => $data,
}
}
$ppas.each |$key, $value| {
apt::ppa { $key:
* => $value,
}
}
$settings.each |$key, $value| {
apt::setting { $key:
* => $value,
}
}
if $manage_auth_conf {
$auth_conf_ensure = $auth_conf_entries ? {
[] => 'absent',
default => 'present',
}
$auth_conf_tmp = stdlib::deferrable_epp('apt/auth_conf.epp',
{
'auth_conf_entries' => $auth_conf_entries,
},
)
file { '/etc/apt/auth.conf':
ensure => $auth_conf_ensure,
owner => $auth_conf_owner,
group => 'root',
mode => '0600',
content => Sensitive($auth_conf_tmp),
notify => Class['apt::update'],
}
}
$pins.each |$key, $value| {
apt::pin { $key:
* => $value,
}
}
case $facts['os']['name'] {
'Debian': {
stdlib::ensure_packages(['gnupg'])
}
'Ubuntu': {
stdlib::ensure_packages(['gnupg'])
}
default: {
# Nothing in here
}
}
}

View file

@ -0,0 +1,104 @@
# @summary Manages the GPG keys that Apt uses to authenticate packages.
#
# @note
# The apt::key defined type makes use of the apt_key type, but includes extra functionality to help prevent duplicate keys.
#
# @example Declare Apt key for apt.puppetlabs.com source
# apt::key { 'puppetlabs':
# id => '6F6B15509CF8E59E6E469F327F438280EF8D349F',
# server => 'keyserver.ubuntu.com',
# options => 'http-proxy="http://proxyuser:proxypass@example.org:3128"',
# }
#
# @param id
# Specifies a GPG key to authenticate Apt package signatures. Valid options: a string containing a key ID (8 or 16 hexadecimal
# characters, optionally prefixed with "0x") or a full key fingerprint (40 hexadecimal characters).
#
# @param ensure
# Specifies whether the key should exist. Using `refreshed` will make keys
# auto update when they have expired (assuming a new key exists on the key
# server).
#
# @param content
# Supplies the entire GPG key. Useful in case the key can't be fetched from a remote location and using a file resource is inconvenient.
#
# @param source
# Specifies the location of an existing GPG key file to copy. Valid options: a string containing a URL (ftp://, http://, or https://) or
# an absolute path.
#
# @param server
# Specifies a keyserver to provide the GPG key. Valid options: a string containing a domain name or a full URL (http://, https://,
# hkp:// or hkps://). The hkps:// protocol is currently only supported on Ubuntu 18.04.
#
# @param weak_ssl
#   Specifies whether strict SSL verification on an https URL should be disabled.
#
# @param options
# Passes additional options to `apt-key adv --keyserver-options`.
#
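# @example Recreate a key once it expires (a sketch; the id shown is the same
#   Puppet Labs release key used in the example above)
#   apt::key { 'puppetlabs':
#     id     => '6F6B15509CF8E59E6E469F327F438280EF8D349F',
#     ensure => refreshed,
#   }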
define apt::key (
Pattern[/\A(0x)?[0-9a-fA-F]{8}\Z/, /\A(0x)?[0-9a-fA-F]{16}\Z/, /\A(0x)?[0-9a-fA-F]{40}\Z/] $id = $title,
Enum['present', 'absent', 'refreshed'] $ensure = present,
Optional[String[1]] $content = undef,
Optional[Pattern[/\Ahttps?:\/\//, /\Aftp:\/\//, /\A\/\w+/]] $source = undef,
Pattern[/\A((hkp|hkps|http|https):\/\/)?([a-z\d])([a-z\d-]{0,61}\.)+[a-z\d]+(:\d{2,5})?(\/[a-zA-Z\d\-_.]+)*\/?$/] $server = $apt::keyserver,
Boolean $weak_ssl = false,
Optional[String[1]] $options = $apt::key_options,
) {
case $ensure {
/^(refreshed|present)$/: {
if defined(Anchor["apt_key ${id} absent"]) {
fail("key with id ${id} already ensured as absent")
}
if !defined(Anchor["apt_key ${id} present"]) {
apt_key { $title:
ensure => present,
refresh => $ensure == 'refreshed',
id => $id,
source => $source,
content => $content,
server => $server,
weak_ssl => $weak_ssl,
options => $options,
} -> anchor { "apt_key ${id} present": }
case $facts['os']['name'] {
'Debian': {
stdlib::ensure_packages(['gnupg'])
Apt::Key<| title == $title |>
}
'Ubuntu': {
stdlib::ensure_packages(['gnupg'])
Apt::Key<| title == $title |>
}
default: {
# Nothing in here
}
}
}
}
/^absent$/: {
if defined(Anchor["apt_key ${id} present"]) {
fail("key with id ${id} already ensured as present")
}
if !defined(Anchor["apt_key ${id} absent"]) {
apt_key { $title:
ensure => $ensure,
id => $id,
source => $source,
content => $content,
server => $server,
weak_ssl => $weak_ssl,
options => $options,
} -> anchor { "apt_key ${id} absent": }
}
}
default: {
fail("Invalid \'ensure\' value \'${ensure}\' for apt::key")
}
}
}

View file

@ -0,0 +1,72 @@
# @summary Manage GPG keyrings for apt repositories
#
# @example Download the puppetlabs apt keyring
# apt::keyring { 'puppetlabs-keyring.gpg':
# source => 'https://apt.puppetlabs.com/keyring.gpg',
# }
# @example Deploy the apt source and associated keyring file
# apt::source { 'puppet8-release':
# location => 'http://apt.puppetlabs.com',
# repos => 'puppet8',
# key => {
# name => 'puppetlabs-keyring.gpg',
# source => 'https://apt.puppetlabs.com/keyring.gpg'
# }
# }
#
# @param dir
# Path to the directory where the keyring will be stored.
#
# @param filename
#   Optional filename for the keyring. It should include the file extension as part of the name.
#
# @param mode
# File permissions of the keyring.
#
# @param source
# Source of the keyring file. Mutually exclusive with 'content'.
#
# @param content
# Content of the keyring file. Mutually exclusive with 'source'.
#
# @param ensure
# Ensure presence or absence of the resource.
#
define apt::keyring (
Stdlib::Absolutepath $dir = '/etc/apt/keyrings',
String[1] $filename = $name,
Stdlib::Filemode $mode = '0644',
Optional[Stdlib::Filesource] $source = undef,
Optional[String[1]] $content = undef,
Enum['present','absent'] $ensure = 'present',
) {
ensure_resource('file', $dir, { ensure => 'directory', mode => '0755', })
if $source and $content {
fail("Parameters 'source' and 'content' are mutually exclusive")
} elsif $ensure == 'present' and ! $source and ! $content {
fail("One of 'source' or 'content' parameters are required")
}
$file = "${dir}/${filename}"
case $ensure {
'present': {
file { $file:
ensure => 'file',
mode => $mode,
owner => 'root',
group => 'root',
source => $source,
content => $content,
}
}
'absent': {
file { $file:
ensure => $ensure,
}
}
default: {
fail("Invalid 'ensure' value '${ensure}' for apt::keyring")
}
}
}

View file

@ -0,0 +1,37 @@
# @summary Manages apt-mark settings
#
# @param setting
#   Specifies the apt-mark setting for the package: whether it is treated as automatically or manually installed, or held at its current version
# https://manpages.debian.org/stable/apt/apt-mark.8.en.html
#
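# @example Hold a package at its currently installed version (hypothetical
#   package name)
#   apt::mark { 'nginx':
#     setting => 'hold',
#   }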
define apt::mark (
Enum['auto','manual','hold','unhold'] $setting,
) {
if $title !~ /^[a-z0-9][a-z0-9.+\-]+$/ {
fail("Invalid package name: ${title}")
}
if $setting == 'unhold' {
$unless_cmd = undef
} else {
$action = "show${setting}"
# It would be ideal if we could break out this command into an array of args, similar
# to $onlyif_cmd and $command. However, in this case it wouldn't work as expected due
# to the inclusion of a pipe character.
# When passed to the exec function, the posix provider will strip everything to the right of the pipe,
# causing the command to return a full list of packages for the given action.
# The trade off is to use an interpolated string knowing that action is built from an enum value and
# title is pre-validated.
$unless_cmd = ["/usr/bin/apt-mark ${action} ${title} | grep ${title} -q"]
}
$onlyif_cmd = [['/usr/bin/dpkg', '-l', $title]]
$command = ['/usr/bin/apt-mark', $setting, $title]
exec { "apt-mark ${setting} ${title}":
command => $command,
onlyif => $onlyif_cmd,
unless => $unless_cmd,
}
}

View file

@ -0,0 +1,136 @@
# @summary Manages Apt pins. Does not trigger an apt-get update run.
#
# @see https://manpages.debian.org/stable/apt/apt_preferences.5.en.html for context on these parameters
#
# @param ensure
# Specifies whether the pin should exist.
#
# @param explanation
# Supplies a comment to explain the pin. Default: "${caller_module_name}: ${name}".
#
# @param order
# Determines the order in which Apt processes the pin file. Files with lower order numbers are loaded first.
#
# @param packages
# Specifies which package(s) to pin.
#
# @param priority
# Sets the priority of the package. If multiple versions of a given package are available, `apt-get` installs the one with the highest
# priority number (subject to dependency constraints).
#
# @param release
# Tells APT to prefer packages that support the specified release. Typical values include 'stable', 'testing', and 'unstable'.
#
# @param release_version
# Tells APT to prefer packages that support the specified operating system release version (such as Debian release version 7).
#
# @param component
# Names the licensing component associated with the packages in the directory tree of the Release file.
#
# @param originator
# Names the originator of the packages in the directory tree of the Release file.
#
# @param label
# Names the label of the packages in the directory tree of the Release file.
#
# @param origin
# The package origin (the hostname part of the package's sources.list entry)
#
# @param version
# The version of the package
#
# @param codename
# The codename of the release
#
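# @example Pin a single package to a release (a sketch; the package, release,
#   and priority shown are illustrative)
#   apt::pin { 'stable-openssh':
#     packages => 'openssh-server',
#     release  => 'stable',
#     priority => 700,
#   }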
define apt::pin (
Enum['file', 'present', 'absent'] $ensure = present,
Optional[String[1]] $explanation = undef,
Variant[Integer[0]] $order = 50,
Variant[String[1], Array[String[1]]] $packages = '*',
Variant[Integer, String[1]] $priority = 0,
Optional[String[1]] $release = undef, # a=
Optional[String[1]] $origin = undef,
Optional[String[1]] $version = undef,
Optional[String[1]] $codename = undef, # n=
Optional[String[1]] $release_version = undef, # v=
Optional[String[1]] $component = undef, # c=
Optional[String[1]] $originator = undef, # o=
Optional[String[1]] $label = undef, # l=
) {
if $explanation {
$_explanation = $explanation
} else {
if defined('$caller_module_name') { # strict vars check
$_explanation = "${caller_module_name}: ${name}"
} else {
$_explanation = ": ${name}"
}
}
$pin_release_array = [
$release,
$codename,
$release_version,
$component,
$originator,
$label,
]
$pin_release = join($pin_release_array, '')
# Read the manpage 'apt_preferences(5)', especially the chapter
# 'The Effect of APT Preferences' to understand the following logic
# and the difference between specific and general form
if $packages =~ Array {
$packages_string = join($packages, ' ')
} else {
$packages_string = $packages
}
if $packages_string != '*' { # specific form
if ( $pin_release != '' and ( $origin or $version )) or
( $version and ( $pin_release != '' or $origin )) {
fail('parameters release, origin, and version are mutually exclusive')
}
} else { # general form
if $version {
fail('parameter version cannot be used in general form')
}
if ( $pin_release != '' and $origin ) {
fail('parameters release and origin are mutually exclusive')
}
}
# According to man 5 apt_preferences:
# The files have either no or "pref" as filename extension
# and only contain alphanumeric, hyphen (-), underscore (_) and period
# (.) characters. Otherwise APT will print a notice that it has ignored a
# file, unless that file matches a pattern in the
# Dir::Ignore-Files-Silently configuration list - in which case it will
# be silently ignored.
$file_name = regsubst($title, '[^0-9a-z\-_\.]', '_', 'IG')
$headertmp = epp('apt/_header.epp')
$pinpreftmp = epp('apt/pin.pref.epp', {
'name' => $name,
'pin_release' => $pin_release,
'release' => $release,
'codename' => $codename,
'release_version' => $release_version,
'component' => $component,
'originator' => $originator,
'label' => $label,
'version' => $version,
'origin' => $origin,
'explanation' => $_explanation,
'packages_string' => $packages_string,
'priority' => $priority,
})
apt::setting { "pref-${file_name}":
ensure => $ensure,
priority => $order,
content => "${headertmp}${pinpreftmp}",
notify_update => false,
}
}

View file

@ -0,0 +1,128 @@
# @summary Manages PPA repositories using `add-apt-repository`. Not supported on Debian.
#
# @example Declaration of an Apt PPA
# apt::ppa { 'ppa:openstack-ppa/bleeding-edge': }
#
# @param ensure
# Specifies whether the PPA should exist.
#
# @param options
# Supplies options to be passed to the `add-apt-repository` command.
#
# @param release
# Specifies the operating system of your node. Valid options: a string containing a valid LSB distribution codename.
# Optional if `puppet facts show os.distro.codename` returns your correct distribution release codename.
#
# @param dist
# Specifies the distribution of your node. Valid options: a string containing a valid distribution codename.
# Optional if `puppet facts show os.name` returns your correct distribution name.
#
# @param package_name
# Names the package that provides the `apt-add-repository` command.
#
# @param package_manage
# Specifies whether Puppet should manage the package that provides `apt-add-repository`.
#
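# @example PPA with the helper package managed by Puppet (a sketch; the PPA and
#   package names are illustrative)
#   apt::ppa { 'ppa:example-team/stable':
#     package_manage => true,
#     package_name   => 'software-properties-common',
#   }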
define apt::ppa (
Enum['present', 'absent'] $ensure = 'present',
Optional[Array[String[1]]] $options = $apt::ppa_options,
Optional[String[1]] $release = fact('os.distro.codename'),
Optional[String[1]] $dist = $facts['os']['name'],
Optional[String[1]] $package_name = $apt::ppa_package,
Boolean $package_manage = false,
) {
unless $release {
fail('os.distro.codename fact not available: release parameter required')
}
if $dist == 'Debian' {
fail('apt::ppa is not currently supported on Debian.')
}
# Validate the resource name
if $name !~ /^ppa:([a-zA-Z0-9\-_.]+)\/([a-zA-Z0-9\-_.]+)$/ {
fail("Invalid PPA name: ${name}")
}
$distid = downcase($dist)
$dash_filename = regsubst($name, '^ppa:([^/]+)/(.+)$', "\\1-${distid}-\\2")
$underscore_filename = regsubst($name, '^ppa:([^/]+)/(.+)$', "\\1_${distid}_\\2")
$dash_filename_no_slashes = regsubst($dash_filename, '/', '-', 'G')
$dash_filename_no_specialchars = regsubst($dash_filename_no_slashes, '[\.\+]', '_', 'G')
$underscore_filename_no_slashes = regsubst($underscore_filename, '/', '-', 'G')
$underscore_filename_no_specialchars = regsubst($underscore_filename_no_slashes, '[\.\+]', '_', 'G')
$sources_list_d_filename = if versioncmp($facts['os']['release']['full'], '23.10') < 0 {
"${dash_filename_no_specialchars}-${release}.list"
} else {
"${dash_filename_no_specialchars}-${release}.sources"
}
if versioncmp($facts['os']['release']['full'], '21.04') < 0 {
$trusted_gpg_d_filename = "${underscore_filename_no_specialchars}.gpg"
} else {
$trusted_gpg_d_filename = "${dash_filename_no_specialchars}.gpg"
}
# This is the location of our main exec script.
$cache_path = $facts['puppet_vardir']
$script_path = "${cache_path}/add-apt-repository-${dash_filename_no_specialchars}-${release}.sh"
if $ensure == 'present' {
if $package_manage {
stdlib::ensure_packages($package_name)
$_require = [File['sources.list.d'], Package[$package_name]]
} else {
$_require = File['sources.list.d']
}
$_proxy = $apt::_proxy
if $_proxy['host'] {
if $_proxy['https'] {
$_proxy_env = ["http_proxy=http://${$_proxy['host']}:${$_proxy['port']}", "https_proxy=https://${$_proxy['host']}:${$_proxy['port']}"]
} else {
$_proxy_env = ["http_proxy=http://${$_proxy['host']}:${$_proxy['port']}"]
}
} else {
$_proxy_env = []
}
unless $sources_list_d_filename in $facts['apt_sources'] {
$script_content = epp('apt/add-apt-repository.sh.epp', {
command => ['/usr/bin/add-apt-repository', shell_join($options), $name],
sources_list_d_path => $apt::sources_list_d,
sources_list_d_filename => $sources_list_d_filename,
}
)
file { "add-apt-repository-script-${name}":
ensure => 'file',
path => $script_path,
content => $script_content,
mode => '0755',
}
exec { "add-apt-repository-${name}":
environment => $_proxy_env,
command => $script_path,
logoutput => 'on_failure',
notify => Class['apt::update'],
require => $_require,
before => File["${apt::sources_list_d}/${sources_list_d_filename}"],
}
}
file { "${apt::sources_list_d}/${sources_list_d_filename}": }
}
else {
tidy { "remove-apt-repository-script-${name}":
path => $script_path,
}
tidy { "remove-apt-repository-${name}":
path => "${apt::sources_list_d}/${sources_list_d_filename}",
notify => Class['apt::update'],
}
}
}

View file

@ -0,0 +1,77 @@
# @summary Manages Apt configuration files.
#
# @see https://www.puppet.com/docs/puppet/latest/types/file.html#file-attributes for more information on source and content parameters
#
# @param priority
# Determines the order in which Apt processes the configuration file. Files with higher priority numbers are loaded first.
#
# @param ensure
# Specifies whether the file should exist.
#
# @param source
# Required, unless `content` is set. Specifies a source file to supply the content of the configuration file. Cannot be used in combination
# with `content`. Valid options: see link above for Puppet's native file type source attribute.
#
# @param content
# Required, unless `source` is set. Directly supplies content for the configuration file. Cannot be used in combination with `source`. Valid
# options: see link above for Puppet's native file type content attribute.
#
# @param notify_update
# Specifies whether to trigger an `apt-get update` run.
#
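# @example Ship an apt.conf.d fragment (a sketch; the title must be prefixed
#   with 'conf-', 'pref-', 'list-' or 'sources-')
#   apt::setting { 'conf-languages':
#     priority => 20,
#     content  => 'Acquire::Languages "none";',
#   }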
define apt::setting (
Variant[String[1], Integer[0]] $priority = 50,
Enum['file', 'present', 'absent'] $ensure = file,
Optional[String[1]] $source = undef,
Optional[String[1]] $content = undef,
Boolean $notify_update = true,
) {
if $content and $source {
fail('apt::setting cannot have both content and source')
}
if $ensure != 'absent' {
if !$content and !$source {
fail('apt::setting needs either of content or source')
}
}
$title_array = split($title, '-')
$setting_type = $title_array[0]
$base_name = join(delete_at($title_array, 0), '-')
assert_type(Pattern[/\Aconf\z/, /\Apref\z/, /\Alist\z/, /\Asources\z/], $setting_type) |$a, $b| {
fail("apt::setting resource name/title must start with either 'conf-', 'pref-', 'list-', or 'sources-'")
}
if $priority !~ Integer {
# need this to allow zero-padded priority.
assert_type(Pattern[/^\d+$/], $priority) |$a, $b| {
fail('apt::setting priority must be an integer or a zero-padded integer')
}
}
if $setting_type in ['list', 'pref', 'sources'] {
$_priority = ''
} else {
$_priority = $priority
}
$_path = $apt::config_files[$setting_type]['path']
$_ext = $apt::config_files[$setting_type]['ext']
if $notify_update {
$_notify = Class['apt::update']
} else {
$_notify = undef
}
file { "${_path}/${_priority}${base_name}${_ext}":
ensure => $ensure,
owner => 'root',
group => 'root',
content => $content,
source => $source,
notify => $_notify,
}
}

View file

@ -0,0 +1,363 @@
# @summary Manages the Apt sources in /etc/apt/sources.list.d/.
#
# @example Install the puppetlabs apt source
# apt::source { 'puppetlabs':
# location => 'http://apt.puppetlabs.com',
# repos => 'main',
# key => {
# id => '6F6B15509CF8E59E6E469F327F438280EF8D349F',
# server => 'keyserver.ubuntu.com',
# },
# }
#
# @example Download key behaviour for modern apt GPG keyrings. The `name` parameter in the key hash should include the file
#   extension; without one, the keyring file is created with just the name and no extension.
# apt::source { 'puppetlabs':
# location => 'http://apt.puppetlabs.com',
# comment => 'Puppet8',
# key => {
# 'name' => 'puppetlabs.gpg',
# 'source' => 'https://apt.puppetlabs.com/keyring.gpg',
# },
# }
#
# @example Install the puppetlabs apt source (deb822 format)
# apt::source { 'puppetlabs':
#   source_format => 'sources',
# location => ['http://apt.puppetlabs.com'],
# repos => ['puppet8'],
# keyring => '/etc/apt/keyrings/puppetlabs.gpg',
# }
#
# @param source_format
# The file format to use for the apt source. See https://wiki.debian.org/SourcesList
#
# @param location
# Required, unless ensure is set to 'absent'. Specifies an Apt repository. Valid options: a string containing a repository URL.
# DEB822: Supports an array of URL values
#
# @param types
# DEB822: The package types this source manages.
#
# @param enabled
# DEB822: Enable or Disable the APT source.
#
# @param comment
# Supplies a comment for adding to the Apt source file.
#
# @param ensure
# Specifies whether the Apt source file should exist.
#
# @param release
# Specifies a distribution of the Apt repository.
# DEB822: Supports an array of values
#
# @param repos
# Specifies a component of the Apt repository.
# DEB822: Supports an array of values
#
# @param include
# Configures include options. Valid options: a hash of available keys.
#
# @option include [Boolean] :deb
# Specifies whether to request the distribution's compiled binaries.
#
# @option include [Boolean] :src
# Specifies whether to request the distribution's uncompiled source code.
#
# @param key
# Creates an `apt::keyring` in `/etc/apt/keyrings` (or anywhere on disk given `filename`) Valid options:
# * a hash of `parameter => value` pairs to be passed to `file`: `name` (title), `content`, `source`, `filename`
#
# The following inputs are valid for the (deprecated) `apt::key` defined type. Valid options:
# * a string to be passed to the `id` parameter of the `apt::key` defined type
# * a hash of `parameter => value` pairs to be passed to `apt::key`: `id`, `server`, `content`, `source`, `weak_ssl`, `options`
#
# @param keyring
# Absolute path to a file containing the PGP keyring used to sign this repository. Value is used to set signed-by on the source entry.
# This is not necessary if the key is installed with `key` param above.
# See https://wiki.debian.org/DebianRepository/UseThirdParty for details.
#
# @param pin
# Creates a declaration of the apt::pin defined type. Valid options: a number or string to be passed to the `priority` parameter of the
# `apt::pin` defined type, or a hash of `parameter => value` pairs to be passed to `apt::pin`'s corresponding parameters.
#
# @param architecture
# Tells Apt to only download information for specified architectures. Valid options: a string containing one or more architecture names,
# separated by commas (e.g., 'i386' or 'i386,alpha,powerpc').
# (if unspecified, Apt downloads information for all architectures defined in the Apt::Architectures option)
# DEB822: Supports an array of values
#
# @param allow_unsigned
# Specifies whether to authenticate packages from this release, even if the Release file is not signed or the signature can't be checked.
#
# @param allow_insecure
# Specifies whether to allow downloads from insecure repositories.
#
# @param notify_update
# Specifies whether to trigger an `apt-get update` run.
#
# @param check_valid_until
# Specifies whether to check if the package release date is valid.
#
define apt::source (
Enum['list', 'sources'] $source_format = 'list',
Array[Enum['deb','deb-src'], 1, 2] $types = ['deb'],
Optional[Variant[String[1], Array[String[1]]]] $location = undef,
String[1] $comment = $name,
Boolean $enabled = true, # deb822
Enum['present', 'absent'] $ensure = present,
Optional[Variant[String[0], Array[String[0]]]] $release = undef,
Variant[String[1], Array[String[1]]] $repos = 'main',
Hash $include = {},
Optional[Variant[String[1], Hash]] $key = undef,
Optional[Stdlib::AbsolutePath] $keyring = undef,
Optional[Variant[Hash, Integer, String[1]]] $pin = undef,
Optional[Variant[String[1], Array[String[1]]]] $architecture = undef,
Optional[Boolean] $allow_unsigned = undef,
Optional[Boolean] $allow_insecure = undef,
Optional[Boolean] $check_valid_until = undef,
Boolean $notify_update = true,
) {
include apt
$_before = Apt::Setting["list-${title}"]
case $source_format {
'list': {
$_file_suffix = $source_format
if !$release {
if fact('os.distro.codename') {
$_release = fact('os.distro.codename')
} else {
fail('os.distro.codename fact not available: release parameter required')
}
} else {
$_release = $release
}
if $release =~ Pattern[/\/$/] {
$_components = $_release
} elsif $repos =~ Array {
$_components = join([$_release] + $repos, ' ')
} else {
$_components = "${_release} ${repos}"
}
if $ensure == 'present' {
if ! $location {
fail('cannot create a source entry without specifying a location')
}
elsif ($apt::proxy['https_acng']) and ($location =~ /(?i:^https:\/\/)/) {
$_location = regsubst($location, 'https://','http://HTTPS///')
}
else {
$_location = $location
}
} else {
$_location = undef
}
$includes = $apt::include_defaults + $include
if $keyring {
if $key {
fail('parameters key and keyring are mutually exclusive')
} else {
$_list_keyring = $keyring
}
} elsif $key {
if $key =~ Hash {
unless $key['name'] or $key['id'] {
fail('key hash must contain a key name (for apt::keyring) or an id (for apt::key)')
}
if $key['id'] {
# defaults like keyserver are only relevant to apt::key
$_key = $apt::source_key_defaults + $key
} else {
$_key = $key
}
} else {
$_key = { 'id' => assert_type(String[1], $key) }
}
if $_key['ensure'] {
$_key_ensure = $_key['ensure']
} else {
$_key_ensure = $ensure
}
# Old keyserver keys handled by apt-key
if $_key =~ Hash and $_key['id'] {
# We do not want to remove keys when the source is absent.
if $ensure == 'present' {
apt::key { "Add key: ${$_key['id']} from Apt::Source ${title}":
ensure => $_key_ensure,
id => $_key['id'],
server => $_key['server'],
content => $_key['content'],
source => $_key['source'],
options => $_key['options'],
weak_ssl => $_key['weak_ssl'],
before => $_before,
}
}
$_list_keyring = undef
}
# Modern apt keyrings
elsif $_key =~ Hash and $_key['name'] {
apt::keyring { $_key['name']:
ensure => $_key_ensure,
content => $_key['content'],
source => $_key['source'],
dir => $_key['dir'],
filename => $_key['filename'],
mode => $_key['mode'],
before => $_before,
}
$_list_keyring = if $_key['dir'] and $_key['filename'] {
"${_key['dir']}${_key['filename']}"
} elsif $_key['filename'] {
"/etc/apt/keyrings/${_key['filename']}"
} elsif $_key['dir'] {
"${_key['dir']}${_key['name']}"
} else {
"/etc/apt/keyrings/${_key['name']}"
}
}
} else {
# No `key` nor `keyring` provided
$_list_keyring = undef
}
$header = epp('apt/_header.epp')
if $architecture {
$_architecture = regsubst($architecture, '\baarch64\b', 'arm64')
} else {
$_architecture = undef
}
$source_content = epp('apt/source.list.epp', {
'comment' => $comment,
'includes' => $includes,
'options' => delete_undef_values({
'arch' => $_architecture,
'trusted' => $allow_unsigned ? { true => 'yes', false => undef, default => undef },
'allow-insecure' => $allow_insecure ? { true => 'yes', false => undef, default => undef },
'signed-by' => $_list_keyring,
'check-valid-until' => $check_valid_until? { true => undef, false => 'false', default => undef },
},
),
'location' => $_location,
'components' => $_components,
}
)
if $pin {
if $pin =~ Hash {
$_pin = $pin + { 'ensure' => $ensure, 'before' => $_before }
} elsif ($pin =~ Numeric or $pin =~ String) {
$url_split = split($location, '[:\/]+')
$host = $url_split[1]
$_pin = {
'ensure' => $ensure,
'priority' => $pin,
'before' => $_before,
'origin' => $host,
}
} else {
fail('Received invalid value for pin parameter')
}
apt::pin { $name:
* => $_pin,
}
}
}
'sources': {
$_file_suffix = $source_format
if $pin {
warning("'pin' parameter is not supported with deb822 format.")
}
if $key {
warning("'key' parameter is not supported with deb822 format.")
}
if $ensure == 'present' {
if ! $location {
fail('cannot create a source entry without specifying a location')
}
}
if $location !~ Array {
warning('For deb822 sources, location must be specified as an array.')
$_location = [$location]
} else {
$_location = $location
}
if !$release {
if fact('os.distro.codename') {
$_release = [fact('os.distro.codename')]
} else {
fail('os.distro.codename fact not available: release parameter required')
}
} elsif $release !~ Array {
warning("For deb822 sources, 'release' must be specified as an array. Converting to array.")
$_release = [$release]
} else {
$_release = $release
}
if $repos !~ Array {
warning("For deb822 sources, 'repos' must be specified as an array. Converting to array.")
$_repos = split($repos, /\s+/)
} else {
$_repos = $repos
}
if $architecture and $architecture !~ Array {
warning("For deb822 sources, 'architecture' must be specified as an array. Converting to array.")
$_architecture = split($architecture, '[,]')
} else {
$_architecture = $architecture
}
case $ensure {
'present': {
$header = epp('apt/_header.epp')
$source_content = epp('apt/source_deb822.epp', delete_undef_values({
'uris' => $_location,
'suites' => $_release,
'components' => $_repos,
'types' => $types,
'comment' => $comment,
'enabled' => $enabled ? { true => 'yes', false => 'no' },
'architectures' => $_architecture,
'allow_insecure' => $allow_insecure ? { true => 'yes', false => 'no', default => undef },
'repo_trusted' => $allow_unsigned ? { true => 'yes', false => 'no', default => undef },
'check_valid_until' => $check_valid_until ? { true => 'yes', false => 'no', default => undef },
'signed_by' => $keyring,
}
)
)
}
'absent': {
$header = undef
$source_content = undef
}
default: {
fail('Unexpected value for $ensure parameter.')
}
}
}
default: {
fail("Unexpected APT source format: ${source_format}")
}
}
apt::setting { "${_file_suffix}-${name}":
ensure => $ensure,
content => "${header}${source_content}",
notify_update => $notify_update,
}
}

View file

@ -0,0 +1,98 @@
# @summary Updates the list of available packages using apt-get update.
#
# @api private
#
class apt::update {
assert_private()
# TODO: consider catching the case where apt_update_last_success has the value
# -1 here. If we opt to do this, an info/warn would likely be all that is
# needed; it is expected on the first run, but if apt-get update has not run in
# a while something is probably broken with apt and we would want to know about it.
case $apt::_update['frequency'] {
'always': {
$_kick_apt = true
}
Integer[60]:{
#compare current date with the apt_update_last_success fact to determine
#if we should kick apt_update.
$int_threshold = (Integer(Timestamp().strftime('%s')) - Integer($apt::_update['frequency']))
if $facts['apt_update_last_success'] {
if $facts['apt_update_last_success'] + 0 < $int_threshold {
$_kick_apt = true
} else {
$_kick_apt = false
}
} else {
#if apt-get update has not successfully run, we should kick apt_update
$_kick_apt = true
}
}
'hourly':{
#compare current date with the apt_update_last_success fact to determine
#if we should kick apt_update.
$hourly_threshold = (Integer(Timestamp().strftime('%s')) - 3600)
if $facts['apt_update_last_success'] {
if $facts['apt_update_last_success'] + 0 < $hourly_threshold {
$_kick_apt = true
} else {
$_kick_apt = false
}
} else {
#if apt-get update has not successfully run, we should kick apt_update
$_kick_apt = true
}
}
'daily': {
#compare current date with the apt_update_last_success fact to determine
#if we should kick apt_update.
$daily_threshold = (Integer(Timestamp().strftime('%s')) - 86400)
if $facts['apt_update_last_success'] {
if $facts['apt_update_last_success'] + 0 < $daily_threshold {
$_kick_apt = true
} else {
$_kick_apt = false
}
} else {
#if apt-get update has not successfully run, we should kick apt_update
$_kick_apt = true
}
}
'weekly':{
#compare current date with the apt_update_last_success fact to determine
#if we should kick apt_update.
$weekly_threshold = (Integer(Timestamp().strftime('%s')) - 604800)
if $facts['apt_update_last_success'] {
if $facts['apt_update_last_success'] + 0 < $weekly_threshold {
$_kick_apt = true
} else {
$_kick_apt = false
}
} else {
#if apt-get update has not successfully run, we should kick apt_update
$_kick_apt = true
}
}
default: {
#catches 'reluctantly', and any other value (which should not occur).
#do nothing.
$_kick_apt = false
}
}
if $_kick_apt {
$_refresh = false
} else {
$_refresh = true
}
exec { 'apt_update':
command => "${apt::provider} update",
loglevel => $apt::_update['loglevel'],
logoutput => 'on_failure',
refreshonly => $_refresh,
timeout => $apt::_update['timeout'],
tries => $apt::_update['tries'],
try_sleep => 1,
}
}

Some files were not shown because too many files have changed in this diff.