Compare commits
2 Commits: 6ba433d53c ... 0c5a7ef7f3

| Author | SHA1 | Date |
|---|---|---|
| | 0c5a7ef7f3 | |
| | 7ae2cd6588 | |
Taskfile.yml (10 changes)
@@ -12,9 +12,12 @@ tasks:
  tf/apply:
    - $TF apply -var-file=secret.tfvars
    - $TF output -json > secrets.tf.json
  tf/import:
    - $TF import -var-file=secret.tfvars {{.CLI_ARGS}}

  build: ansible-playbook playbooks/build.yml
  deploy: ansible-playbook playbooks/deploy.yml
  restore: ansible-playbook playbooks/restore.yml
  run:
    - task: build
    - task: deploy
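The new `tf/apply` task chains the Terraform run with an export of its outputs, which the playbooks later consume as a vars file. A minimal sketch of what it expands to, assuming `$TF` resolves to the `terraform` binary:

```sh
terraform apply -var-file=secret.tfvars
terraform output -json > secrets.tf.json   # loaded via vars_files in the playbooks below
```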
@@ -26,3 +29,10 @@ tasks:
    AWS_REGION: { sh: jq -r .aws_region < secrets/gitea.json }
    AWS_ACCESS_KEY_ID: { sh: jq -r .aws_access_key < secrets/gitea.json }
    AWS_SECRET_ACCESS_KEY: { sh: jq -r .aws_secret_key < secrets/gitea.json }

  prune:
    - docker system prune -af
    - docker image prune -af
    - docker system prune -af --volumes
    - docker volume prune -af
    - docker system df
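The new `prune` task tears down Docker build caches, images, and volumes in increasing severity, then finishes with `docker system df` as a disk-usage report:

```sh
task prune   # runs the prune commands above in order, then reports reclaimed space
```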
@@ -15,6 +15,7 @@ services:
    ports:
      - 80:80
      - 443:443
      - 3000:3000
      - 2222:2222
    labels:
      - docker-volume-backup.stop-during-backup=true
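The added label opts this service into offen/docker-volume-backup's stop-during-backup behaviour, so the data volume is quiesced while the archive is taken. One way to confirm the label on a running container (the container name `server` is an assumption here):

```sh
docker inspect --format \
  '{{ index .Config.Labels "docker-volume-backup.stop-during-backup" }}' server
```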
@@ -3,3 +3,7 @@ FROM gitea/gitea:latest-rootless
ADD --chown=git:git config /etc/gitea
ADD --chown=git:git custom /etc/gitea-custom
ENV GITEA_CUSTOM /etc/gitea-custom

WORKDIR /etc/gitea-custom

RUN gitea cert --host localhost --ca
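`gitea cert` writes a self-signed `cert.pem`/`key.pem` pair into the current directory, which is why `WORKDIR` is set to the custom directory first; those names match the `CERT_FILE`/`KEY_FILE` entries in `app.ini` further down. A quick sanity check, assuming the image is tagged `gitea-custom`:

```sh
docker build -t gitea-custom .
docker run --rm --entrypoint ls gitea-custom /etc/gitea-custom   # expect cert.pem and key.pem
```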
@@ -6,3 +6,7 @@ ENV GITEA_CUSTOM /etc/gitea-custom

RUN rm /etc/gitea/app.ini
RUN mv /etc/gitea/dev.app.ini /etc/gitea/app.ini

WORKDIR /etc/gitea-custom

RUN gitea cert --host code.maximhutz.com --ca
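This variant swaps in `dev.app.ini` and issues the certificate for the public hostname instead of `localhost`. If `openssl` is present in the image, the baked-in certificate's subject can be checked (same hypothetical tag as above):

```sh
docker run --rm --entrypoint openssl gitea-custom \
  x509 -in /etc/gitea-custom/cert.pem -noout -subject   # should show CN=code.maximhutz.com
```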
@@ -23,13 +23,32 @@ OFFLINE_MODE = true
DISABLE_SSH = false
START_SSH_SERVER = true
SSH_PORT = 22
SSH_LISTEN_PORT = 2222
SSH_DOMAIN = maximhutz.com
SSH_LISTEN_PORT = 22
SSH_DOMAIN = code.maximhutz.com
BUILTIN_SSH_SERVER_USER = git

DOMAIN = code.maximhutz.com
; --- Signed SSL ---
; PROTOCOL=https
; ENABLE_ACME=true
; ACME_ACCEPTTOS=true
; ACME_DIRECTORY=https
; ACME_EMAIL=proxy@maximhutz.com
; DOMAIN = code.maximhutz.com
; ROOT_URL = https://code.maximhutz.com/
; HTTP_PORT = 443

; --- No SSL ---
; DOMAIN = code.maximhutz.com
; ROOT_URL = http://code.maximhutz.com/
; HTTP_PORT = 80

; --- Self-Signed SSL ---
PROTOCOL = https
ROOT_URL = https://code.maximhutz.com/
HTTP_PORT = 80
DOMAIN = code.maximhutz.com
HTTP_PORT = 443
CERT_FILE = cert.pem
KEY_FILE = key.pem

[database]
DB_TYPE = sqlite3
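With `PROTOCOL = https` and a self-signed pair, clients will reject the certificate unless told otherwise. A quick smoke test after deploy (`-k` skips verification; the SSH check assumes a key is already registered):

```sh
curl -kI https://code.maximhutz.com/   # expect a 200 or a redirect
ssh -T git@code.maximhutz.com          # built-in SSH server, user "git", port 22
```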
@@ -17,18 +17,29 @@ TEMP_PATH = /tmp/gitea/uploads

[server]
APP_DATA_PATH = /var/lib/gitea
LFS_START_SERVER = true
OFFLINE_MODE = true
LFS_JWT_SECRET = x-----------------------------------------x

DISABLE_SSH = false
START_SSH_SERVER = true
SSH_PORT = 2222
SSH_LISTEN_PORT = 2222
BUILTIN_SSH_SERVER_USER = git
LFS_START_SERVER = true
OFFLINE_MODE = true
SSH_DOMAIN = localhost
BUILTIN_SSH_SERVER_USER = git

; --- No SSL ---
; DOMAIN = localhost
; ROOT_URL = http://localhost:80/
; HTTP_PORT = 80

; --- Self-Signed Certificate ---
PROTOCOL = https
ROOT_URL = https://localhost:443/
DOMAIN = localhost
ROOT_URL = http://localhost:80/
HTTP_PORT = 80
LFS_JWT_SECRET = x-----------------------------------------x
HTTP_PORT = 443
CERT_FILE = cert.pem
KEY_FILE = key.pem

[database]
DB_TYPE = sqlite3
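The `LFS_JWT_SECRET` values are redacted placeholders in both files; Gitea can mint real ones, and the deploy play below also injects them through environment variables, so nothing sensitive needs to live in the image:

```sh
gitea generate secret LFS_JWT_SECRET   # prints a fresh secret suitable for app.ini
```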
@@ -5,7 +5,7 @@
- name: Build image.
  community.docker.docker_image_build:
    name: "{{ image_name }}"
    path: ../image
    path: ../gitea
    nocache: true
    rebuild: always
    pull: true
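`docker_image_build` with `nocache`, `rebuild: always`, and `pull: true` is roughly the module form of the CLI below, with the build context now pointing at the renamed `gitea/` directory (`$IMAGE_NAME` stands in for `{{ image_name }}`):

```sh
docker build --pull --no-cache -t "$IMAGE_NAME" ../gitea
```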
@@ -20,7 +20,7 @@
    mode: get
    bucket: "{{ image_bucket }}"
    object: "{{ image_key }}"
    dest: /root/image.tar.xz
    dest: /root/image.tar.gz

    region: "{{ aws_region }}"
    access_key: "{{ aws_access_key }}"
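This fetch, together with the load task in the next hunk, amounts to the two commands below. The artifact extension changed from `.tar.xz` to `.tar.gz`; `docker load` handles either transparently (bucket and key are placeholders for the play's variables):

```sh
aws s3 cp "s3://$IMAGE_BUCKET/$IMAGE_KEY" /root/image.tar.gz
docker load -i /root/image.tar.gz
```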
@@ -28,9 +28,13 @@

- name: Load image.
  community.docker.docker_image_load:
    path: /root/image.tar.xz
    path: /root/image.tar.gz
  register: image

- name: Create a volume.
  community.docker.docker_volume:
    name: data

- name: Run image.
  community.docker.docker_container:
    name: server
@@ -40,16 +44,32 @@
    restart_policy: unless-stopped
    memory: 425m
    memory_swap: 900m
    ports: [80:80, 2222:2222]
    ports: [80:80, 2222:2222, 443:443, "22:22"]
    env:
      GITEA__security__INTERNAL_TOKEN: "{{ internal_secret }}"
      GITEA__server__LFS_JWT_SECRET: "{{ lfs_secret }}"
      GITEA__oauth2__JWT_SECRET: "{{ jwt_secret }}"
    labels:
      docker-volume-backup.stop-during-backup: "true"
    volumes:
      - /home/ssm-user/data:/var/lib/gitea
      - /etc/timezone:/etc/timezone:ro
      - /etc/localtime:/etc/localtime:ro
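The `GITEA__<section>__<KEY>` environment convention overrides `app.ini` values at start-up, which is how the redacted secrets in the config files receive real values without an image rebuild. To confirm they landed in the running container:

```sh
docker exec server env | grep '^GITEA__'
```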
- name: Run backup.
  community.docker.docker_container:
    name: backup
    image: offen/docker-volume-backup:v2
    state: started
    recreate: true
    restart_policy: unless-stopped
    volumes:
      - /home/ssm-user/data:/backup/my-app-backup:ro
      - /var/run/docker.sock:/var/run/docker.sock:ro
    env:
      AWS_S3_BUCKET_NAME: "{{ boot_bucket }}"
      AWS_S3_PATH: "{{ boot_key }}"
      AWS_REGION: "{{ boot_region.value }}"
      AWS_ACCESS_KEY_ID: "{{ boot_id.value }}"
      AWS_SECRET_ACCESS_KEY: "{{ boot_secret.value }}"
      BOOT_URI: "s3://{{ boot_bucket }}/{{ boot_key }}"
    volumes:
      - /root/boot:/var/lib/gitea
      - /etc/timezone:/etc/timezone:ro
      - /etc/localtime:/etc/localtime:ro
      BACKUP_CRON_EXPRESSION: "0 0 * * *"
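offen/docker-volume-backup fires on the cron expression (daily at midnight here) and can also be triggered by hand, which is a useful check of the S3 credentials wired in above:

```sh
docker exec backup backup   # ad-hoc run of the container's backup command
```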
playbooks/restore.yml (new file, 51 lines)
@@ -0,0 +1,51 @@
- name: Deploy artifact to instance.
  hosts: localhost
  become: true
  vars_files:
    - ../secrets/gitea.json
    - ../secrets.tf.json
  vars:
    ansible_connection: aws_ssm
    ansible_python_interpreter: /usr/bin/python3
    ansible_aws_ssm_plugin: "{{ ssm_plugin }}"
    ansible_aws_ssm_bucket_name: "{{ image_bucket }}"
    ansible_aws_ssm_instance_id: "{{ instance_id.value }}"

    ansible_aws_ssm_region: "{{ aws_region }}"
    ansible_aws_ssm_access_key_id: "{{ aws_access_key }}"
    ansible_aws_ssm_secret_access_key: "{{ aws_secret_key }}"
  tasks:
    - name: Stop server.
      community.docker.docker_container:
        name: "{{ item }}"
        state: stopped
      loop: [server, backup]

    - name: Copy backup from S3.
      environment:
        region: "{{ boot_region.value }}"
        access_key: "{{ boot_id.value }}"
        secret_key: "{{ boot_secret.value }}"
      amazon.aws.s3_object:
        bucket: "{{ boot_bucket }}"
        object: "{{ boot_key }}"
        dest: /home/ssm-user/backup.tar.xz
        mode: get

    - name: Ensure backup directory exists.
      ansible.builtin.file:
        path: /home/ssm-user/data
        state: directory
        mode: '0777'

    - name: Extract backup.
      ansible.builtin.unarchive:
        src: /home/ssm-user/backup.tar.xz
        dest: /home/ssm-user/data
        remote_src: true

    - name: Restart containers.
      community.docker.docker_container:
        name: "{{ item }}"
        state: started
      loop: [server, backup]
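The playbook is wired to the new `restore` entry in the Taskfile, so the whole stop/fetch/extract/restart cycle is a single command:

```sh
task restore   # wraps: ansible-playbook playbooks/restore.yml
```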
@@ -6,7 +6,10 @@ data "aws_iam_policy_document" "boot" {
  statement {
    effect  = "Allow"
    actions = ["s3:*", "s3-object-lambda:*"]
    resources = ["${data.aws_s3_bucket.storage_bucket.arn}/${var.boot_key}"]
    resources = [
      "${data.aws_s3_bucket.storage_bucket.arn}/${var.boot_key}",
      "${data.aws_s3_bucket.storage_bucket.arn}/${var.boot_key}/*",
    ]
  }
}
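The extra `/*` resource matters because `boot_key` acts as a prefix: the policy must cover both the key itself and the objects beneath it. Once applied, a listing with the boot credentials should succeed (bucket and key are placeholders):

```sh
aws s3 ls "s3://$BOOT_BUCKET/$BOOT_KEY/"
```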
@@ -1,5 +1,6 @@
#!/bin/sh

rpm --rebuilddb
amazon-linux-extras install docker ansible2 python3.8 -y

# Make Docker work.
@@ -10,7 +11,7 @@ sudo usermod -aG docker ssm-user
# Set up the correct version of Python (for Ansible).
ln -sf /usr/bin/python3.8 /usr/bin/python3
ln -sf /usr/bin/pip3.8 /usr/bin/pip3
pip3 install botocore boto3 requests
pip3 install botocore boto3 requests packaging
python3 -m pip install -U pip

# Add some swap space.
@@ -20,9 +21,8 @@ mkswap /swapfile
swapon /swapfile

# Stop SSH (because we have SSM).
sudo service sshd stop
service sshd stop

# Install Docker Compose.
curl -L "https://github.com/docker/compose/releases/latest/download/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
chmod +x /usr/local/bin/docker-compose
docker-compose version
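Since the instance sets `user_data_replace_on_change = true` (next hunk), edits to this script recreate the instance. Whether it ran cleanly on boot can be checked in the cloud-init log on Amazon Linux:

```sh
sudo tail -n 50 /var/log/cloud-init-output.log
```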
@@ -1,3 +1,9 @@
# An elastic IP, so if the reverse proxy is modified, the route tables won't be.
resource "aws_eip" "public" {
  instance = aws_instance.gitea.id
  domain   = "vpc"
}

data "aws_iam_instance_profile" "ssm" {
  name = "SSMInstanceProfile"
}
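The elastic IP keeps the public address stable across instance replacement, so the DNS record added later never needs touching. The allocated address can be confirmed with:

```sh
aws ec2 describe-addresses --query 'Addresses[].PublicIp' --output text
```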
@@ -7,12 +13,13 @@ resource "aws_instance" "gitea" {
  # ami           = data.aws_ami.amazon-linux-2.id
  ami           = "ami-0adec96dc0cdc7bca"
  instance_type = "t4g.nano"
  subnet_id     = data.aws_subnet.subnet.id
  subnet_id     = module.vpc.public_subnets[0]

  user_data                   = file("install.sh")
  user_data_replace_on_change = true

  iam_instance_profile   = data.aws_iam_instance_profile.ssm.name
  vpc_security_group_ids = [aws_security_group.public_access.id]

  root_block_device {
    volume_type = "gp3"
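Switching `subnet_id` to the module's public subnet, together with the user-data change above, forces instance replacement, so it is worth previewing first:

```sh
terraform plan -var-file=secret.tfvars   # expect the aws_instance to be replaced
```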
@@ -1,6 +1,55 @@
data "aws_subnet" "subnet" {
  tags = {
    SubnetType = "Private"
    SubnetOf = "Main"
  }
}

locals {
  # The IP block for the VPC.
  vpc_cidr = "10.0.0.0/16"

  # The domain name for DNS records.
  domain_name = "maximhutz.com"
}

data "aws_availability_zones" "all" {}

# The main VPC.
module "vpc" {
  source = "terraform-aws-modules/vpc/aws"

  name = "Main"
  cidr = local.vpc_cidr

  azs             = [data.aws_availability_zones.all.names[0]]
  private_subnets = [cidrsubnet(local.vpc_cidr, 8, 0)]
  public_subnets  = [cidrsubnet(local.vpc_cidr, 8, 4)]

  private_subnet_tags = { SubnetOf = "Main", SubnetType = "Private" }
  public_subnet_tags  = { SubnetOf = "Main", SubnetType = "Public" }

  map_public_ip_on_launch = true
  enable_dns_hostnames    = true
  enable_dns_support      = true
}
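`cidrsubnet` carves /24 networks out of the /16: index 0 becomes the private subnet and index 4 the public one. The arithmetic is easy to check interactively:

```sh
echo 'cidrsubnet("10.0.0.0/16", 8, 4)' | terraform console   # => "10.0.4.0/24"
```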
# Only allow HTTP(S) and SSH traffic. Allow full access to the internet.
resource "aws_security_group" "public_access" {
  vpc_id = module.vpc.vpc_id
}

resource "aws_vpc_security_group_ingress_rule" "ingress" {
  for_each = toset(["80", "443", "22", "2222", "81", "8080", "4321", "1234"])

  security_group_id = aws_security_group.public_access.id

  from_port   = each.value
  to_port     = each.value
  ip_protocol = "tcp"
  cidr_ipv4   = "0.0.0.0/0"
}

resource "aws_vpc_security_group_egress_rule" "egress" {
  for_each = toset(["-1"])

  security_group_id = aws_security_group.public_access.id

  from_port   = each.value
  to_port     = each.value
  ip_protocol = "-1"
  cidr_ipv4   = "0.0.0.0/0"
}
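Besides 80/443/22/2222, the ingress set opens several extra ports (81, 8080, 4321, 1234), presumably for testing. A quick external probe of what actually answers (nmap is illustrative, not part of the repo):

```sh
nmap -p 22,80,443,2222,8080 code.maximhutz.com
```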
terraform/routing.tf (new file, 13 lines)
@@ -0,0 +1,13 @@
# The Route53 DNS zone.
data "aws_route53_zone" "main" {
  name = local.domain_name
}

# Push all domain traffic through the reverse proxy.
resource "aws_route53_record" "domain" {
  zone_id = data.aws_route53_zone.main.zone_id
  name    = "code.${data.aws_route53_zone.main.name}"
  type    = "A"
  ttl     = "60"
  records = [aws_eip.public.public_ip]
}
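The A record points `code.<zone>` at the elastic IP with a short 60-second TTL, so cut-overs propagate quickly. Once live:

```sh
dig +short A code.maximhutz.com   # should return the elastic IP
```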