...
 
Commits (22)
---
- name: debian bootstrap fact gathering
  hosts: all
  user: root
  # Facts cannot be gathered until python exists on the target, so defer.
  gather_facts: false
  # Install the basics required to gather facts.
  # This shouldn't be run normally, however, can't find a way to
  # conditionally run it so far.
  tasks:
    - name: update apt repository
      raw: apt-get -q -y update

    - name: install python
      raw: apt-get -q -y install python

    # Prints "True" on stdout when python is older than 2.6 (the exit code
    # is 0 either way — the decision is made on the output, not the rc).
    - name: check if python is old enough to need simplejson
      raw: python -c 'import sys; sys.stdout.write("%s" % (sys.version_info<(2,6)))'
      register: need_simplejson

    - name: install simplejson where needed
      raw: apt-get -qy install python-simplejson
      # BUG FIX: raw stdout is a string (possibly with trailing CR/LF over
      # ssh), so the bare `need_simplejson.stdout` condition was unreliable;
      # test for the literal "True" instead.
      when: need_simplejson.stdout is search("True")

    - name: ensure other prereqs installed
      raw: apt-get -qy install python-paramiko python-yaml python-jinja2 python-apt python-docker
# Upgrade installed packages on every host via the apt-upgrade role.
# Run selectively with: ansible-playbook ... --tags update
- name: update packages
  tags:
    - update
  hosts: all
  user: root
  roles:
    - role: apt-upgrade
\ No newline at end of file
---
- name: bootstrap fact gathering
  hosts: all
  user: root
  # No python guaranteed yet, so facts cannot be gathered here.
  gather_facts: false
  # Probe the system for package management type
  tasks:
    - name: check package management
      raw: apt-get
      ignore_errors: true
      register: has_apt

    # For now we don't support other package management systems!
    - name: fail if no apt package management
      fail:
        msg: We currently only support Linux with apt
      # BUG FIX: a registered result is a dict and therefore always truthy,
      # so the previous `not has_apt` condition could never fire. Test the
      # task outcome instead (`failed` is recorded even with ignore_errors).
      when: has_apt is failed

- import_playbook: bootstrap-debian.yml
  when: has_apt is succeeded
# Maybe add these somewhere later.
# # Needs to be included before sshd, since root needs to have a key installed
# # before sshd port changes when bootstrapping
# - role: ssh-key
# ssh_key_user: root
# ssh_key_pubfile: "{{userdefs.root.pubkey}}"
# - role: sshd
# sshd_port: "{{sshd.port}}"
# # ... moves port
......@@ -4,18 +4,13 @@ use warnings;
use FindBin qw($Bin);
my $ansible_root = shift
or die "Please supply a path to the ansible root directory\n";
$ENV{PATH} = "$Bin/../ansible-src/bin:$ENV{PATH}";
$ansible_root =~ s{/+$}{};
my $env_setup = "$Bin/../ansible-src/hacking/env-setup";
my $ansible = "$ansible_root/bin/ansible";
-x $ansible
or die "No ansible executable can be found at $ansible\n";
# Set the environment so that pass will use the referenced
# password-store directory, not ~/.password-store.
$ENV{PATH} = "$ansible_root/bin:$ENV{PATH}";
#$ENV{ANSIBLE_HOSTS} = "$Bin/ansible_hosts";
my $env_setup = "$ansible_root/hacking/env-setup";
$ENV{PASSWORD_STORE_DIR} = "$Bin/../pass";
exec '/bin/bash', '--rcfile', $env_setup;
# apt-update
This role simply runs an `apt update`
---
# Refresh the apt package cache (the role's only job — see README).
- name: update apt
  apt:
    update_cache: true
# apt-upgrade
This role simply runs an `apt update; apt upgrade`
---
dependencies:
- role: apt-update
---
# Perform a safe (non-dist) upgrade. Run asynchronously because a large
# upgrade can exceed the default connection timeout; poll every 5s for up
# to 10 minutes.
- name: update apt safely
  apt:
    upgrade: safe
  async: 600
  poll: 5
---
## Installs docker-CE
# Following guide from here:
# https://docs.docker.com/install/linux/docker-ce/ubuntu/#set-up-the-repository

# The docker apt repo key uri
docker_compose_install_apt_key_uri: https://download.docker.com/linux/ubuntu/gpg

# The docker apt repo config line
docker_compose_install_apt_repo: deb https://download.docker.com/linux/ubuntu bionic stable

# Get this version from https://github.com/docker/compose/releases/
# Check compatibility with docker.
# NOTE(review): the key name contains a typo ("verion"); kept as-is because
# other templates/tasks may reference it by this exact name. Quoted so YAML
# always treats the version as a string.
docker_compose_install_compose_verion: "1.22.0"
---
# Install docker-ce from Docker's own apt repository, then docker-compose
# via pip (there is no docker-compose PPA). Finally ensure the docker
# service is running and enabled at boot.
- name: install prereqs (apt)
  apt:
    update_cache: true
    name:
      - apt-transport-https
      - ca-certificates
      - software-properties-common
      - python-pip
      - virtualenv
      - python-setuptools
      - python-docker

- name: add docker repository key
  apt_key:
    url: "{{ docker_compose_install_apt_key_uri }}"
    state: present

- name: add docker repository
  apt_repository:
    repo: "{{ docker_compose_install_apt_repo }}"
    filename: docker-ce
    state: present
    update_cache: true

- name: install docker-ce
  apt:
    name:
      - docker-ce

# Oddly, there is no docker-compose PPA, the suggested linux install
# method is to download a binary. See:
# https://docs.docker.com/compose/install/#master-builds
- name: install docker-compose
  pip:
    name: docker-compose
    # FIX: pin to the version declared in defaults — it was defined there
    # but never used, leaving installs unpinned and unreproducible.
    version: "{{ docker_compose_install_compose_verion }}"

- name: enable docker
  service:
    name: docker
    state: started
    enabled: true
......@@ -37,5 +37,13 @@ pg_dump_to_s3_encrypt_path: /usr/bin/gpgwrapper
# Options for encrypt
pg_dump_to_s3_encrypt_opts: "-ek {{pg_dump_to_s3_keychain_path}}"
# Path of select-expired-backups executable
pg_dump_to_s3_select_expired_backups_path: /usr/bin/select-expired-backups
# Defines the backup origin date and rotation schedule
# Be careful about quoting! The quotes will stay in the options
# verbatim.
pg_dump_to_s3_select_expired_backups_opts: -c 7/4/3/4/2 -i 2019-01-24 -p dump.pg.%Y-%m-%d-%H%M.gz.gpg
# What to put into the systemd timer section
pg_dump_to_s3_systemd_timer_section: OnCalendar=00:40:00
This diff is collapsed.
---
# Handler: make systemd re-read unit files after they change on disk,
# without restarting any service.
- name: reload systemd services
  systemd:
    daemon_reload: true
......@@ -16,7 +16,7 @@
- gpg # should be v2
- rclone
- name: install pg-dump-to-s3 and gpgwrapper script
- name: install pg-dump-to-s3 and pg-prune-from-s3 scripts
template:
src: "{{item}}.j2"
dest: "/usr/bin/{{item}}"
......@@ -25,7 +25,18 @@
mode: 0755
with_items:
- pg-dump-to-s3
- pg-prune-from-s3
- name: install select-expired-backups and gpgwrapper scripts
copy:
src: "{{item}}"
dest: "/usr/bin/{{item}}"
owner: root
group: root
mode: 0755
with_items:
- gpgwrapper
- select-expired-backups
- name: create paths
file:
......@@ -66,9 +77,11 @@
with_items:
- pg-dump-to-s3.service
- pg-dump-to-s3.timer
notify: reload systemd services
- name: enable pg-dump-to-s3 timed service
systemd:
name: pg-dump-to-s3.timer
state: started
enabled: yes
daemon_reload: yes
......@@ -3,4 +3,5 @@ Description=pg-dump-to-s3 backup
[Service]
Type=oneshot
ExecStart=/usr/bin/pg-prune-from-s3 execute
ExecStart=/usr/bin/pg-dump-to-s3
#!/bin/bash
# Prunes backup data stored on an S3 service
# {{ansible_managed}}
# Utilises a script {{ pg_dump_to_s3_select_expired_backups_path|basename }}
#
# This implements the backup rotation schedule, indicating which files
# have expired and can be deleted. See the inline documentation of
# that script for more information about this. For the purposes of
# this script, all you need to know is that it accepts a list of
# filenames on the input which include dates which can be parsed, one
# file per line, and it outputs the subset of these which can be
# deleted (also one per line). Options to the script define a nominal
# origin date and rotation period for the backup schedule. Backups are
# assumed to be made daily.
#
# If run with no arguments, it merely prints the commands executed to
# query the backups, and shows the commands which would be used to
# prune them, without actually deleting them. To delete, the first
# parameter must be the word 'execute'.
set -o errexit
set -o pipefail
# All paths/options below are baked in by the Ansible template at deploy time.
SELECT_EXPIRED_BACKUPS="{{ pg_dump_to_s3_select_expired_backups_path }}"
# NOTE: holds several whitespace-separated options, so it is deliberately
# expanded UNQUOTED in the pipeline below (word splitting is intended).
SELECT_OPTS="{{ pg_dump_to_s3_select_expired_backups_opts }}"
RCLONE="{{ pg_dump_to_s3_rclone_path }}"
DUMP_STEM="{{ pg_dump_to_s3_archive_path_stem | basename }}"
RCLONE_CONFIG="{{ pg_dump_to_s3_rclone_conf_path}}"
DESTURL="{{ pg_dump_to_s3_desturl }}"
echo "selecting prunable backups ..."
echo "  $RCLONE --max-depth 1 ls $DESTURL | grep -e '$DUMP_STEM' | sed 's/^ *[0-9]* //' | $SELECT_EXPIRED_BACKUPS $SELECT_OPTS"
# rclone reads its config location from this environment variable.
export RCLONE_CONFIG
# List remote files, keep only our dump files, strip the size column rclone
# prints, and let the selector decide which are expired. With `pipefail` any
# stage failing routes control to the error branch below.
expired=$(
"$RCLONE" --max-depth 1 ls "$DESTURL" \
| /bin/grep -e "$DUMP_STEM" \
| /bin/sed 's/^ *[0-9]* //' \
| "$SELECT_EXPIRED_BACKUPS" $SELECT_OPTS
) || {
echo "selecting expired backups failed."
exit 1
}
echo "pruning backups ..."
# An empty/absent $1 is safe inside [[ ]] — no quoting needed there.
if [[ $1 == 'execute' ]]; then
rc=0
# NOTE(review): relies on backup filenames containing no whitespace —
# word splitting of $expired is how the list is iterated. TODO confirm.
for file in $expired; do
path="${DESTURL%/}/$file"
printf "  $RCLONE delete ${DESTURL%/}/%s\n" $file
# Keep going on individual failures, but report overall failure at exit.
"$RCLONE" delete "$path" || {
echo "failed to delete $path"
rc=1
}
done
exit $rc
else
# Dry-run: show what would be deleted, one line per expired file.
[[ -n "$expired" ]] && printf "  $RCLONE delete ${DESTURL%/}/%s\n" $expired
echo "no deletions performed - add the parameter 'execute' to delete"
fi
These are test scripts for select-expired-backups.
Run them (from this directory), and the output should indicate if they
pass or not.
If they don't pass, something is wrong.
package schedules;
# This datastructure is a map of schedule names to schedule tables.
# Tables show how the schedule is calculated, and are used to test
# the scheduling code.
#
# Use this command to regenerate the data:
#
# select-expired-backups --cycles 5/4/3 --initial 2019-01-24 --dump
#
# Columns as follows:
# +------------------ Index number of the backup (zero based)
# v +------------- Cycle counts as 4 irregular base encoded integers
# v +------- Level of backup
# v +--- Lifetime in days
# v +- Retention: . means expired, else base64-style encoded age
# v
{
543 => [split '\n', <<HERE],
0 0,0,0,0 4 60 0
1 0,0,0,1 0 5 10
2 0,0,0,2 0 5 210
3 0,0,0,3 0 5 3210
4 0,0,0,4 0 5 43210
5 0,0,1,0 1 20 543210
6 0,0,1,1 0 5 6.43210
7 0,0,1,2 0 5 7..43210
8 0,0,1,3 0 5 8...43210
9 0,0,1,4 0 5 9....43210
10 0,0,2,0 1 20 a....543210
11 0,0,2,1 0 5 b....6.43210
12 0,0,2,2 0 5 c....7..43210
13 0,0,2,3 0 5 d....8...43210
14 0,0,2,4 0 5 e....9....43210
15 0,0,3,0 1 20 f....a....543210
16 0,0,3,1 0 5 g....b....6.43210
17 0,0,3,2 0 5 h....c....7..43210
18 0,0,3,3 0 5 i....d....8...43210
19 0,0,3,4 0 5 j....e....9....43210
20 0,1,0,0 2 60 k....f....a....543210
21 0,1,0,1 0 5 l....g....b....6.43210
22 0,1,0,2 0 5 m....h....c....7..43210
23 0,1,0,3 0 5 n....i....d....8...43210
24 0,1,0,4 0 5 o....j....e....9....43210
25 0,1,1,0 1 20 p.........f....a....543210
26 0,1,1,1 0 5 q.........g....b....6.43210
27 0,1,1,2 0 5 r.........h....c....7..43210
28 0,1,1,3 0 5 s.........i....d....8...43210
29 0,1,1,4 0 5 t.........j....e....9....43210
30 0,1,2,0 1 20 u..............f....a....543210
31 0,1,2,1 0 5 v..............g....b....6.43210
32 0,1,2,2 0 5 w..............h....c....7..43210
33 0,1,2,3 0 5 x..............i....d....8...43210
34 0,1,2,4 0 5 y..............j....e....9....43210
35 0,1,3,0 1 20 z...................f....a....543210
36 0,1,3,1 0 5 A...................g....b....6.43210
37 0,1,3,2 0 5 B...................h....c....7..43210
38 0,1,3,3 0 5 C...................i....d....8...43210
39 0,1,3,4 0 5 D...................j....e....9....43210
40 0,2,0,0 2 60 E...................k....f....a....543210
41 0,2,0,1 0 5 F...................l....g....b....6.43210
42 0,2,0,2 0 5 G...................m....h....c....7..43210
43 0,2,0,3 0 5 H...................n....i....d....8...43210
44 0,2,0,4 0 5 I...................o....j....e....9....43210
45 0,2,1,0 1 20 J...................p.........f....a....543210
46 0,2,1,1 0 5 K...................q.........g....b....6.43210
47 0,2,1,2 0 5 L...................r.........h....c....7..43210
48 0,2,1,3 0 5 M...................s.........i....d....8...43210
49 0,2,1,4 0 5 N...................t.........j....e....9....43210
50 0,2,2,0 1 20 O...................u..............f....a....543210
51 0,2,2,1 0 5 P...................v..............g....b....6.43210
52 0,2,2,2 0 5 Q...................w..............h....c....7..43210
53 0,2,2,3 0 5 R...................x..............i....d....8...43210
54 0,2,2,4 0 5 S...................y..............j....e....9....43210
55 0,2,3,0 1 20 T...................z...................f....a....543210
56 0,2,3,1 0 5 U...................A...................g....b....6.43210
57 0,2,3,2 0 5 V...................B...................h....c....7..43210
58 0,2,3,3 0 5 W...................C...................i....d....8...43210
59 0,2,3,4 0 5 X...................D...................j....e....9....43210
60 1,0,0,0 3 60 ....................E...................k....f....a....543210
61 1,0,0,1 0 5 ....................F...................l....g....b....6.43210
HERE
};
#!/usr/bin/perl
package Test;
use strict;
use warnings;
use Test::More;
# This is a test script for pg-prune-from-s3.j2
require_ok '../files/select-expired-backups';
my $schedules = require './schedules.pm';
my $scheduler = new Schedule(cycles => [5, 4, 3], initial => '2019-01-01');
isa_ok $scheduler, 'Schedule';
is_deeply [$scheduler->day_cycles], [5, 20, 60], 'day_cycles are correct';
my @debug_out = map { $scheduler->debug_index($_) } 0..61;
#print "$_\n" for @debug_out; # DEBUG
# This tests that the computation does what we want, for a specific case.
is_deeply \@debug_out, $schedules->{543}, 'dump_indexes match the expected values';
# Test some random counter bases (from the documentation)
my @etc = (initial => '2019-01-01');
is_deeply [Schedule->new(cycles => [10,10,10], @etc)->decompose(356)], [0,3,5,6],
"356 in basis 10,10,10 is 0,3,5,6";
is_deeply [Schedule->new(cycles => [2,2,2,2], @etc)->decompose(0xFA)], [15,1,0,1,0],
"0xFA in basis 2,2,2,2 is 15,1,0,1,0";
is_deeply [Schedule->new(cycles => [5,4,3], @etc)->decompose(119)], [1,2,3,4],
"119 in basis 5,4,3 is 1,2,3,4";
done_testing;
#!/usr/bin/perl
use strict;
use warnings;
use Test::More;
use Time::Piece;
use Time::Seconds;
# This is a test script for select-expired-backups
# Redirects the output of anything run in the $block and returns the
# captured standard input/output.
sub capture(&) {
my ($block) = @_;
my $dest = '';
do {
local (*STDOUT, *STDERR);
open STDOUT, '>>', \$dest or die $!;
# open STDERR, '>>', \$dest or die $!;
eval { $block->() };
};
return $dest;
}
require_ok '../files/select-expired-backups';
# Alias fully qualified names into our namespace, for convenience
{
no strict 'refs';
*{$_} = \&{$SelectExpiredBackups::{$_}}
for qw(parse_args trim);
}
######################################################################
# Test ancillary functions
for my $str ('trimmed ham', 'trimmed ham ', 'trimmed ham', ' trimmed ham ') {
is trim($str), 'trimmed ham', "trimming '$str' correctly";
}
for my $str (qw(x2019-02-01y x2019-2-1y)) {
my $time = Schedule::_parse_time('something', $str, 'x%Y-%m-%dy', 'querty');
isa_ok $time, 'Time::Piece';
is $time->datetime, '2019-02-01T00:00:00', "parsed $str correctly";
}
for my $str (qw(1 2019 2019-02 19-02-01 2019-02-01-01 2019_02_01 2019-o1-o2 2019-02-01x x2019-02-01)) {
eval {
Schedule::_parse_time('something', $str, '%Y-%m-%d', 'querty');
};
ok !@$, "parsing $str fails correctly";
}
######################################################################
# Test feeding params to parse_args
# This is the backup schedule start date. Just an arbitrary date.
my $initial = Time::Piece->strptime('2019-2-1', '%Y-%m-%d');
#use Data::Dumper; print Dumper \@dates;
# This describes the schedule in a visual way. Expiries are marked
# with an underscore. It describes the first 62 days of a 5/4/3 nested
# rotation schedule.
my $schedules = require './schedules.pm';
my @expected_schedule_543 = @{ $schedules->{543} };
ok @expected_schedule_543 > 1, "more than one test schedule item exists";
# Strip all data but the final schedule part
s/^.* // for @expected_schedule_543;
# Compute a list of date strings, one for every day of the schedule
# period.
my @dates = map { ($initial + ONE_DAY*$_)->ymd } 0..$#expected_schedule_543;
# Check that every day of the backup matches our expected schedule.
for my $ix (0..$#expected_schedule_543) {
diag "schedule $expected_schedule_543[$ix] for day $ix";
my %expiry = map {
$dates[$_] => substr $expected_schedule_543[$ix], $_, 1
} 0..length($expected_schedule_543[$ix])-1;
# Calculate the dates we expect to be marked expired
# from the expiry schedule string
my @expected_expired = grep { $expiry{$_} && $expiry{$_} eq '.' } sort @dates;
my @expected_unexpired = grep { !$expiry{$_} || $expiry{$_} ne '.' } sort @dates;
# Get the output of parse_args for the full list of @dates (passed as params)
my $out = capture {
parse_args(
'--initial', $initial->ymd,
'--now', ($initial + $ix*ONE_DAY)->ymd,
qw(--cycles 5/4/3), @dates
);
};
is $out, join("\n", @expected_expired, ''), @expected_expired." expiries for day $ix";
# Get the output of parse_args for the full list of @dates (passed as params)
# inverted output
$out = capture {
parse_args(
'--initial', $initial->ymd,
'--now', ($initial + $ix*ONE_DAY)->ymd,
'--invert',
qw(--cycles 5/4/3), @dates
);
};
is $out, join("\n", @expected_unexpired, ''), @expected_unexpired." preserved for day $ix";
# Get the output of parse_args for the full list of @dates (passed on STDIN)
$out = capture {
local *STDIN;
my $stream = join "\n", @dates;
open STDIN, "<", \$stream
or die $!;
parse_args(
'--initial', $initial->ymd,
'--now', ($initial + $ix*ONE_DAY)->ymd,
qw(--cycles 5/4/3)
);
};
is $out, join("\n", @expected_expired, ''), @expected_expired." expiries for day $ix";
# Get the output of parse_args for the full list of @dates (passed
# as params) however with prefixes and suffixes. NOTE:
# Time::Piece::strptime seems to happily ignore missing suffixes
# if one is specified (but warns if they are present when not
# specified). Seems to be a potential bug, but for our purposes
# this is probably ok to ignore.
$out = capture {
parse_args(
'--initial', $initial->ymd,
'--now', ($initial + $ix*ONE_DAY)->ymd,
qw(--cycles 5/4/3 --pattern foo.%Y-%m-%d.bak),
map "foo.$_.bak", @dates
);
};
is $out, join("\n", map("foo.$_.bak", @expected_expired), ''), @expected_expired." expiries for day $ix";
}
done_testing;
---
# This is the user name to check out from.
sauce_git_user: deployment
# This is the token to check out with. To not keep the default. It
# should allow only read access. This avoids needing to mess around
# with ssh-agent and ssh keys and needing to worry about a commit
# access token being stolen.
sauce_git_token: Change me!
# This is where to clone the sauce repo from. You should not normally
# need to alter this.
#
# The default includes a username/token pair issued for
# deployment. e.g. (the default value embeds the user/token pair
# defined above).
#
# https://<uri-encoded username>:<uri-encoded token>@git.coop/social.coop/tech/sauce.git
#
sauce_git_uri: https://{{ sauce_git_user | urlencode }}:{{ sauce_git_token | urlencode }}@git.coop/social.coop/tech/sauce.git
# This is the branch, tag, or commit ID to clone
sauce_version: rebuild
# Where the docker-compose binary is (assumed executable)
sauce_docker_compose: /usr/local/bin/docker-compose
# The base dir to install everything
sauce_base_dir: /opt/social.coop
# HTTP host names for the mastodon server.
sauce_http_server_names: runko.social.coop social.coop
# Root directory for serving HTTP (should be no need to change; only
# really used for certbot)
# Needs to correlate with paths in docker-compose.yml
sauce_http_server_root: "{{ sauce_base_dir }}/var/www/html"
# HTTPS host names for the mastodon server
sauce_https_server_names: social.coop
# Root directory for serving HTTPS (should be no need to change)
# Needs to correlate with paths in docker-compose.yml
sauce_https_server_root: "{{ sauce_base_dir }}/var/www/mastodon"
# SSL cert/key kept here (whether obtained by letsencrypt or otherwise)
# Needs to correlate with paths in docker-compose.yml
sauce_ssl_cert_path: "{{ sauce_base_dir }}/etc/letsencrypt/live/social.coop/fullchain.pem"
sauce_ssl_key_path: "{{ sauce_base_dir }}/etc/letsencrypt/live/social.coop/privkey.pem"
self-signed-ssl.key and .crt generated with:
openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout self-signed-ssl.key -out self-signed-ssl.crt
(Requires interactive input)
-----BEGIN CERTIFICATE-----
MIIDazCCAlOgAwIBAgIUCUoH9tjDhBBEXFOeBT3jTFKLR4QwDQYJKoZIhvcNAQEL
BQAwRTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM
GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDAeFw0xOTA5MTQxODEwNDBaFw0yMDA5
MTMxODEwNDBaMEUxCzAJBgNVBAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEw
HwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQwggEiMA0GCSqGSIb3DQEB
AQUAA4IBDwAwggEKAoIBAQCks+iwbcnG1xNI5nTsQ0FRF8XvoSHs5Kb8K34ZeTEq
t7v4ubxu4UvAGp0oLAS/Ki/X/qqjD/HQuyXp2LmMM9LMewmT1URoLaQ1lCI/4eSA
R9ocwtHFmocEYas60SjVYVAmxJq/qv4oUaCqn30uooc5Sik7DuBUOtPYW6l8EWEX
TwZxco8IJFpiH8odXg/nLMTz+EJgg1l1oRffrQCDBGMz98Q6bWX96oAnyTTnOsF9
W7pFBfWGNxBIW8+MW95qylyBgKqggMXdf8ViQH/TCZZO8shNx4pxkOXzLAhC9yqx
wCvtStRPxU6FGY9R+p2uiGIQwc+qjPBR4ibAKWvt8bHnAgMBAAGjUzBRMB0GA1Ud
DgQWBBTB6B8xudWA0bdAFWJJmMvTBPnI6jAfBgNVHSMEGDAWgBTB6B8xudWA0bdA
FWJJmMvTBPnI6jAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQCb
qfTLoLtq63vcXXQ5PwFqaUSVNPAb6EDrsCY/ZFXfB8rPyxjlSKcahz6YaRSTeRww
CJjub7a9rIXU1sg3qZB5vUPSlASU1eB8wVKm17TRxCARJQTgkrglz/bk+1Z+jwOC
uY0TEiiqKy9coyj5ltOeP6JiWw9wz+ffqVHg8RNkf2FKzY9Cecxl+wUDtzHyKaBR
j3I3XYWXC5gVN9qohe1rL4DHk68mrxX4br0/XrGZz2r/Npf/+crjOCilo0rnG6lS
A1TuTE+0qmsG2u/OcWTc0CHJsrVmcYjjjS3dtZ+U7yCgiiubf9w0hmbBotyrRly3
zVUfMzhjAx/Hq3BFNYqX
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCks+iwbcnG1xNI
5nTsQ0FRF8XvoSHs5Kb8K34ZeTEqt7v4ubxu4UvAGp0oLAS/Ki/X/qqjD/HQuyXp
2LmMM9LMewmT1URoLaQ1lCI/4eSAR9ocwtHFmocEYas60SjVYVAmxJq/qv4oUaCq
n30uooc5Sik7DuBUOtPYW6l8EWEXTwZxco8IJFpiH8odXg/nLMTz+EJgg1l1oRff
rQCDBGMz98Q6bWX96oAnyTTnOsF9W7pFBfWGNxBIW8+MW95qylyBgKqggMXdf8Vi
QH/TCZZO8shNx4pxkOXzLAhC9yqxwCvtStRPxU6FGY9R+p2uiGIQwc+qjPBR4ibA
KWvt8bHnAgMBAAECggEARB7tJNuj0wvtu/8laPrALywn/96jS3me3zmV5C9N+J8Z
rdwUZb/lNsVLLuxV4kMumqWIKf1URMLDAZ9Tzvs4Or7AzVYt/vCxtD9akxV5Dt2p
BrBFVvd81IEY8zDDNM4U+pPidVfh2wc37pociodKapWTzAtxGSVjrV7sEJaXk62/
3tP60p+/ogXU9DehLjQAyQfEUuUlGUf140hWtsgFLRtuFqVGfC33NjM5KqQ90k6g
/ONsd+LuEVHx41Bzs87/s52YfZcZGy8q03vG2PYczvFHN7ETRRw9JVYW3DBf3/oz
Dxyuy5D8lMdvY6g2kBgrCdcYfICtCnorPSGB8KWZ4QKBgQDNYrSEAQCpZeyM5q43
1JNyI9rCBepzedQ8PMPQf3zdRIZz7b7xEpieq7wxfPaBU7SLhD0ek3j0kvrRzp62
hoxYhlIFK+EMIfj+3oTumDTBASZeVsbXCdH0C2/LHUfkLvQAojrfaV9Y7ilDEj7S
3f2HLKcK9SMzAo1LnuUIq6/dLwKBgQDNSp8Mwox76Oad7IFJ75sNStl4UeUAXgMT
oUPyxASuSkEZH+bxRPY88U71zCxB6ggDaGZg3KX814LFl3xgYrI5X8C7OSDJQac+
xgIAI05FgyiW/JJ5vGj/moZsO//kawQNImDW4iGn8J9gaXGdI3nrg1rWDWevVxVV
6e9NbAF4yQKBgFaZRMXX6mCCsk/iwBiYUCczSPzAI8/W0Y8oi+PwrURarVpaJ0Vc
cu+MN8iiCQoUsi6VbnPi+IAfLRSjyzXo22pEEHdPvg8wj91fE6vKj4vFoVjru+FV
qan93IBG81YaGhXeUXZ75pEHsmwuWEDvCwQg5rnvDI1oiQ0WjTmvsnJPAoGBALCv
mcYspyZ6JAolpHzS+Pruwc7ZhSPUxgceE9IcPP8Yr4CFQYnD1b5L3a6VZD3yw6TT
NNIG9eFwRn/zSjvWlhflPoaku4Abvmq7lrsxp8sSAFw604Js012yTCFZXnrkVB9N
yFUJWPgmk7Ux63PYNbWqXwK9lNCHPbis16Yl3/ppAoGATrqP0JirpumOeX4/VePO
9fwqHYagUf/QAqQgnsQMVcxmHGkg+quKag2Q3GyskPX7TKAX5bcDOnMquyDXVc2l
nf1BsjzsuOp0NJyV+eiN2Q8zZwqneeZmWeY0Yy3+5icy5D9FMH1gIfKeDdcj0tNe
tsQcPURbHS7htLlC+7ANRmI=
-----END PRIVATE KEY-----
---
- name: restart docker compose services
docker_service:
restarted: yes
project_src: "{{ sauce_base_dir }}/docker"
build: no
- name: restart systemd services
systemd:
name: social.coop-mastodon.service
state: restarted
- name: restart systemd media timer
systemd:
name: social.coop-media-remove.timer
state: restarted
---
## This playbook creates the directory structure
- name: make directories
file:
state: directory
path: '{{ sauce_base_dir }}/{{ item }}'
owner: root
group: root
mode: 0775
loop:
- docker
- var
- var/lib
- var/lib/postgresql
- var/lib/postgresql/data
- var/lib/redis
- var/lib/redis/data
- var/log
- var/www
- var/www/mastodon
- var/www/mastodon/system
- etc/nginx
# In case you are wondering what user/group IDs 991:991 are, these are
# hard-coded into the docker image, so we need to accomodate them.
- name: set permissions
file:
state: directory
path: "{{ sauce_base_dir }}/var/www/mastodon"
recurse: yes
owner: 991
group: 991
---
- include_tasks: dirs.yml
# Install nginx configs (requires letsencrypt or self-signed-ssl)
- include_tasks: nginx.yml
notify:
- restart systemd services
- restart nginx services
# FIXME replace with letsencrypt for production
- include_tasks: self-signed-ssl.yml
# Deploy Mastodon docker container
# (Needs to come after config creation or it will create empty dirs)
- include_tasks: mastodon-docker.yml
notify: restart systemd services
# Enable systemd service
- include_tasks: systemd.yml
notify:
- restart systemd services
---
# Prerequisites:
# Remote server should have docker-compose installed.
# And sauce checked out.
# In order to make mastodon-owned files appear as a name, rather than
# just the number used in the docker image, and to ensure this UID
# isn't inadvertently used, we add a `mastodon` user with UID/GID 991,
# and a docker-postgres user with UID/GID 70/70
- name: create a mastodon group with GID 991
group:
name: mastodon
state: present
gid: 991
- name: create a mastodon user with UID 991
user:
name: mastodon
state: present
shell: /usr/sbin/nologin
uid: 991
group: mastodon
home: "{{ sauce_base_dir }}/var/www/mastodon"
create_home: no
system: yes
- name: create a postgres group with GID 70
group:
name: docker-postgres
state: present
gid: 70
- name: create a postgres user with UID 70
user:
name: docker-postgres
state: present
shell: /usr/sbin/nologin
uid: 70
group: mastodon
home: "{{ sauce_base_dir }}/var/lib/postgresql"
create_home: no
system: yes
- name: copy a custom favicon into place
copy:
dest: "{{ sauce_base_dir }}/var/www/mastodon/favicon.ico"
src: social-coop.ico
mode: '0660'
owner: mastodon
group: mastodon
- name: write mastodon docker compose config
template:
dest: "{{ sauce_base_dir }}/docker/docker-compose.yml"
src: docker-compose.yml
mode: '0655'
owner: root
group: root
# This creates the mastodon secrets config, which includes various
# passwords and keys taken from the password repository. The password
# repository is assumed to have been set up on the local machine
# running ansible, such that the pass lookup extension can be used.
- name: write mastodon secrets config
copy:
dest: "{{ sauce_base_dir }}/docker/.env.production"
content: "{{lookup('passwordstore', 'deployment/social.coop.env.production returnall=true')}}"
mode: '0600'
owner: root
group: root
- name: create and start the mastodon docker-compose services
docker_service:
state: present
project_src: "{{ sauce_base_dir }}/docker"
build: yes
register: output
- debug:
var: output
# Note, creates the database if absent.
# Note, if the database exists and is populated, this command will refuse to execute
# and fail. A bit ugly, but this is what we want.
- name: initialise the mastodon database
command:
argv: [docker, container, exec,
'-e', SAFETY_ASSURED=1, '-e', RAILS_ENV=production,
docker_web_1, rails, 'db:setup']
ignore_errors: yes
register: rails
notify:
- restart docker compose services
# Check we got the result we want
- assert:
that:
- rails.rc == 0 or rails.stderr_lines[0] == "Database 'mastodon-live' already exists"
# restart the containers if the database changed
---
# nginx runs in a docker container, so we just populate the configs here.
- name: install social.coop nginx config
template:
src: "nginx/{{ item }}.j2"
dest: "{{ sauce_base_dir }}/etc/nginx/{{item}}"
owner: root
group: root
mode: "0755"
with_items:
- social-coop.conf
- websocket-proxying.conf
notify: restart docker compose services
---
- name: create target directories for SSL cert for nginx
file:
state: directory
path: "{{ item }}"
mode: 0700
owner: root
group: root
with_items:
- "{{ sauce_ssl_cert_path | dirname }}"
- "{{ sauce_ssl_key_path | dirname }}"
- name: install self-signed SSL cert for nginx
copy:
src: self-signed-ssl.crt
dest: "{{ sauce_ssl_cert_path }}"
owner: root
group: root
mode: 0600
- name: install self-signed SSL key for nginx
copy:
src: self-signed-ssl.key
dest: "{{ sauce_ssl_key_path }}"
owner: root
group: root
mode: 0600
---
- name: add social.coop systemd files
copy:
template:
dest: "/etc/systemd/system/{{item}}"
src: "files/{{item}}"
src: "systemd/{{item}}.j2"
owner: root
group: root
mode: 0644
......@@ -13,7 +13,7 @@
- name: enable social.coop-mastodon service
systemd:
name: social.coop-mastodon
name: social.coop-mastodon.service
state: started
enabled: yes
daemon_reload: yes
......@@ -23,4 +23,5 @@
name: social.coop-media-remove.timer
state: started
enabled: yes
daemon_reload: yes
\ No newline at end of file
daemon_reload: yes
---
version: '3'
services:
db:
restart: always
image: postgres:9.6-alpine
networks:
- internal_network
- external_network
### Uncomment to enable DB persistance
volumes:
- ../var/lib/postgresql/data:/var/lib/postgresql/data
# Expose postgresql's port to allow dumping to back-up
ports:
- "127.0.0.1:5432:5432"
redis:
restart: always
image: redis:4.0-alpine
networks:
- internal_network
### Uncomment to enable REDIS persistance
volumes:
- ../var/lib/redis/data:/data
# es:
# restart: always
# image: docker.elastic.co/elasticsearch/elasticsearch-oss:6.1.3
# environment:
# - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
# networks:
# - internal_network
#### Uncomment to enable ES persistance
## volumes:
## - ./elasticsearch:/usr/share/elasticsearch/data
nginx:
image: nginx:1.17.6
restart: always
networks:
- internal_network
- external_network
ports:
# - "127.0.0.1:8000:80"
# - "127.0.0.1:4430:443"
- 80:80
- 443:443
depends_on:
- web
- streaming
# FIXME sidekiq?
volumes:
- ../etc/letsencrypt/live/social.coop/fullchain.pem:/etc/ssl/certs/fullchain.pem # SSL cert
- ../etc/letsencrypt/live/social.coop/privkey.pem:/etc/ssl/certs/privkey.pem # SSL private key
- ../var/www/html:/usr/share/nginx/html # http root
- ../var/www/mastodon:/usr/share/nginx/mastodon # https root
- ../etc/nginx/social-coop.conf:/etc/nginx/conf.d/default.conf # site config
- ../etc/nginx/websocket-proxying.conf:/etc/nginx/conf.d/websocket-proxying.conf # websocket config
- ../var/log/nginx:/var/log/nginx # logs
web:
# build: .
image: tootsuite/mastodon:v2.6.4
restart: always
env_file: .env.production
command: bash -c "rm -f /mastodon/tmp/pids/server.pid; bundle exec rails s -p 3000 -b '0.0.0.0'"
networks:
- internal_network
depends_on:
- db
- redis
# - es
volumes:
- ../var/www/mastodon/favicon.ico:/mastodon/public/favicon.ico
streaming:
# build: .
image: tootsuite/mastodon:v2.6.4
restart: always
env_file: .env.production
command: yarn start
networks:
- internal_network
depends_on:
- db
- redis
sidekiq:
# build: .
image: tootsuite/mastodon:v2.6.4
restart: always
env_file: .env.production
environment:
- DB_POOL=5
command: bundle exec sidekiq -q default -q mailers
depends_on:
- db
- redis
networks:
- maybe_external_network
- internal_network
sidekiq-default-q:
# build: .
image: tootsuite/mastodon:v2.6.4
restart: always
env_file: .env.production
environment:
- DB_POOL=5
command: bundle exec sidekiq -q default
depends_on:
- db
- redis
networks:
- maybe_external_network
- internal_network
sidekiq-pull-q:
# build: .
image: tootsuite/mastodon:v2.6.4
restart: always
env_file: .env.production
environment:
- DB_POOL=5
command: bundle exec sidekiq -q pull
depends_on:
- db
- redis
networks:
- maybe_external_network
- internal_network
sidekiq-push-q:
# build: .
image: tootsuite/mastodon:v2.6.4
restart: always
env_file: .env.production
environment:
- DB_POOL=5
command: bundle exec sidekiq -q push
depends_on:
- db
- redis
networks:
- maybe_external_network
- internal_network
## Uncomment to enable federation with tor instances along with adding the following ENV variables
## http_proxy=http://privoxy:8118
## ALLOW_ACCESS_TO_HIDDEN_SERVICE=true
# tor:
# build: https://github.com/usbsnowcrash/docker-tor.git
# networks:
# - maybe_external_network
# - internal_network
#
# privoxy:
# build: https://github.com/usbsnowcrash/docker-privoxy.git
# command: /opt/sbin/privoxy --no-daemon --user privoxy.privoxy /opt/config
# volumes:
# - ./priv-config:/opt/config
# networks:
# - maybe_external_network
# - internal_network
networks:
external_network:
internal: false
maybe_external_network:
# This clause isolates mastodon from federating with the outside world.
internal: true
# ipam:
# config:
# - subnet: 172.16.238.0/24
internal_network:
internal: true
# redirect all sites from http to https
server {
listen 80;
listen [::]:80;
server_name {{ sauce_http_server_names }};
root /usr/share/nginx/html;
# Useful for Let's Encrypt
location /.well-known/acme-challenge/ {
allow all;
try_files $uri $uri/;
}
location / {
return 301 https://$host$request_uri;
}
}
# social.coop site
server {
listen 443 ssl http2;
listen [::]:443 ssl http2;
server_name {{ sauce_https_server_names }};
ssl_protocols TLSv1.2;
ssl_ciphers HIGH:!MEDIUM:!LOW:!aNULL:!NULL:!SHA;
ssl_prefer_server_ciphers on;
ssl_session_cache shared:SSL:10m;
ssl_certificate /etc/ssl/certs/fullchain.pem;
ssl_certificate_key /etc/ssl/certs/privkey.pem;
keepalive_timeout 70;
sendfile on;
client_max_body_size 80m;
root /var/share/nginx/mastodon;
gzip on;
gzip_disable "msie6";
gzip_vary on;
gzip_proxied any;
gzip_comp_level 6;
gzip_buffers 16 8k;
gzip_http_version 1.1;
gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript;
add_header Strict-Transport-Security "max-age=31536000";
# Legacy custom /bylaws page now uses standard mastodon /terms page
rewrite ^/bylaws$ /terms permanent;
location / {
try_files $uri @proxy;
}
location ~ ^/(emoji|packs|system/accounts/avatars|system/media_attachments/files) {
add_header Cache-Control "public, max-age=31536000, immutable";
try_files $uri @proxy;
}
location /sw.js {
add_header Cache-Control "public, max-age=0";
try_files $uri @proxy;
}
location @proxy {
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto https;
proxy_set_header Proxy "";
proxy_pass_header Server;
proxy_pass http://docker_web_1:3000;
proxy_buffering off;
proxy_redirect off;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
tcp_nodelay on;
}
location /api/v1/streaming {
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto https;
proxy_set_header Proxy "";
proxy_pass http://docker_streaming_1:4000;
proxy_buffering off;
proxy_redirect off;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
tcp_nodelay on;
}
error_page 500 501 502 503 504 /500.html;
}
# Enable proxying of websocket upgrades
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
}
......@@ -6,10 +6,10 @@ After=docker.service
[Service]
Type=oneshot
RemainAfterExit=yes
WorkingDirectory=/opt/social.coop/sauce/docker
ExecStart=/usr/local/bin/docker-compose up -d
ExecReload=/usr/local/bin/docker-compose up -d
ExecStop=/usr/local/bin/docker-compose stop
WorkingDirectory={{ sauce_base_dir }}/docker
ExecStart={{ sauce_docker_compose }} up -d
ExecReload={{ sauce_docker_compose }} up -d
ExecStop={{ sauce_docker_compose }} stop
TimeoutStartSec=0
[Install]
......
......@@ -5,8 +5,8 @@ After=docker.service
[Service]
Type=oneshot
WorkingDirectory=/opt/social.coop/sauce/docker
ExecStart=/usr/local/bin/docker-compose run \
WorkingDirectory={{ sauce_base_dir }}/sauce/docker
ExecStart={{ sauce_docker_compose }} run \
--rm \
--no-deps \
web \
......
......@@ -9,7 +9,7 @@
s3_secret_access_key: "{{lookup('passwordstore', 'deployment/backupninja/s3sec')}}"
roles:
- role: server
- role: social.coop
- role: social-coop
- role: logcheck-custom
tags: logcheck-custom
......