4 Commits

Author SHA1 Message Date
Max
b6b4978e68 feat: stuff 2025-12-22 10:14:58 -05:00
Max
b41d9f0e82 feat: basic routing to caddy 2025-06-22 04:02:13 -04:00
Max
06646e7ec7 feat: added config edit to taskfile 2025-06-22 03:47:54 -04:00
Max
80270b9a93 feat: created vaults, added to config 2025-06-22 03:44:41 -04:00
18 changed files with 116 additions and 408 deletions

View File

@@ -1,21 +1,13 @@
version: 3
includes:
tf: { taskfile: terraform, dir: terraform }
tasks:
dev:
- docker compose -f compose.dev.yml rm -fsv
- docker compose -f compose.dev.yml up --build --force-recreate --no-deps
deploy:fast: ansible-playbook playbooks/fast.yml
deploy:slow: ansible-playbook playbooks/slow.yml
deploy:slow: ansible-playbook playbooks/slow.yml {{.CLI_ARGS}}
deploy:restore: ansible-playbook playbooks/restore.yml -e "restore_bucket={{.BUCKET}} restore_key={{.KEY}}"
enter:
cmd: aws ssm start-session --target $INSTANCE_ID
env:
INSTANCE_ID: { sh: jq -r .instance_id.value < config/infrastructure.secret.json }
AWS_REGION: { sh: jq -r .aws_region < config/ansible.secret.json }
AWS_ACCESS_KEY_ID: { sh: jq -r .aws_access_key < config/ansible.secret.json }
AWS_SECRET_ACCESS_KEY: { sh: jq -r .aws_secret_key < config/ansible.secret.json }
vault: ansible-vault edit vault.yml
inventory: ansible-vault edit inventory.ini

View File

@@ -1,6 +1,14 @@
[defaults]
# Emit per-task timing via the profile_tasks callback plugin.
callbacks_enabled = profile_tasks
# Silence the "implicit localhost" warning for localhost-only plays.
localhost_warning = False
inventory = inventory.ini
# NOTE(review): host key checking disabled — convenient for ephemeral cloud
# hosts, but removes SSH MITM protection; confirm this trade-off is intended.
host_key_checking = False
interpreter_python = /usr/bin/python3
# Vault password is read from this file; keep it out of version control.
vault_password_file = secret.key
[inventory]
# Don't warn on unparsed inventory sources (inventory.ini is vault-encrypted,
# so non-vault-aware inventory plugins cannot read it).
inventory_unparsed_warning = False
[ssh_connection]
# Multiplex SSH connections (ControlMaster/ControlPersist) and forward the
# local agent; IdentityAgent=none skips the default agent socket for
# identity lookup — presumably to force the forwarded agent; confirm.
ssh_args = -o ControlMaster=auto -o ControlPersist=60s -o ForwardAgent=yes -o IdentityAgent=none
pipelining = True
retries = 256

26
inventory.ini Normal file
View File

@@ -0,0 +1,26 @@
$ANSIBLE_VAULT;1.1;AES256
36326131353430646433363636333433313266666434623134633136373566306534323231373637
6532386232643232343464393964623065326639643866640a353461626332623134613530663136
35643737623066313565633035623161366631663630663664313736613063303333373634353064
3333376338656539640a313561666239643466616161383561613833323765356238393034663865
36643538346263653263646334343063326464656264633461363136383530393931393764356534
38663963303737666632363239613836386235343730383530363536386165616339376435326639
37333866323262336637383431323538393334393136623838343766636634316338633566343366
32306461396134373161633437373730383933343865326363326435393232646163663461666437
36613664633633306264656230363862306661363930376666616630363036396639643639343336
65653162303435663166383934343936313935643936656235383930616539393239643634323237
38613032323336333764633339396163306665666430333762343631383430613463666339323361
63333964313832366532363334623236626232633132653639333231386663333865663665343530
33613364386531633561373537353432643332663735663833663532373763383237316331306366
64616462323739623833303661353764623537313432646137336230383830643761646131386263
33663034303064373066363731653737363033373163386466663734643065613039336330313664
37666332623264346132343638623332323661363338623335366230323737333961613035646366
63316164316135633136326337363464373036383433333830646131363533363338383262623261
63353737343236356561323738396631333133366338366538356232663834316230333265626562
32363862396362376332383131316665383166386631336631656231636130323339623032386535
38383339636339633961393632393063613261653061623465356238306330346464333039393134
37356232626434646566346464636131396339646663383333393963336332313931656436353334
36303039643837663130336362656636393737633962396531326231383862646631613061323737
32346166396139383231663233356233646634633361346564356366343834313835343332363565
66303933353231386331326462366239336361386638383861326662613732373661306330616334
6634

View File

@@ -52,7 +52,7 @@
state: started
recreate: true
restart_policy: unless-stopped
memory: 300m
memory: 425m
memory_swap: 900m
ports: [80:80, 2222:2222, 443:443, "22:22"]
env:

19
playbooks/route.yml Normal file
View File

@@ -0,0 +1,19 @@
---
# Splice this service's Caddy vhost snippet into the edge router's base
# Caddyfile and reload Caddy when the block changes.
# FIX: indentation was flattened (the file was not valid YAML as rendered);
# restored the canonical play/task structure.
- name: "Create route to instance."
  hosts: router
  gather_facts: false
  tasks:
    - name: Modify base Caddyfile.
      ansible.builtin.blockinfile:
        # `dest` is an accepted alias of `path` for blockinfile.
        dest: ~/app/caddy/etc/Caddyfile
        # Marker comments delimit the managed block, keeping reruns idempotent.
        marker: '# GITEA {mark}'
        content: "{{ lookup('file', '../router/Caddyfile') }}"
      notify:
        # Must match the handler name below exactly (including the period).
        - Restart Caddy.
  handlers:
    - name: Restart Caddy.
      ansible.builtin.systemd_service:
        name: container-caddy
        state: restarted
        enabled: true
        # User-scoped systemd unit (rootless container service — confirm).
        scope: user

View File

@@ -2,6 +2,8 @@
hosts: localhost
vars_files: ../config/ansible.secret.json
gather_facts: false
vars:
image_name: "service/gitea"
tasks:
- name: Build image.
community.docker.docker_image_build:
@@ -11,6 +13,12 @@
rebuild: always
pull: true
- name: Create build directory.
ansible.builtin.file:
path: ../dist
state: directory
mode: '0777'
- name: Push image to archive.
community.docker.docker_image:
name: "{{ image_name }}"
@@ -21,101 +29,56 @@
register: compress_image
community.general.archive:
path: ../dist/image.tar
dest: ../dist/image.tar.xz
format: xz
dest: ../dist/image.tar.gz
format: gz
mode: "0644"
- name: Push artifact to S3.
amazon.aws.s3_object:
bucket: "{{ image_bucket }}"
object: "{{ image_key }}"
src: ../dist/image.tar.xz
mode: put
region: "{{ aws_region }}"
access_key: "{{ aws_access_key }}"
secret_key: "{{ aws_secret_key }}"
- name: Deploy artifact to instance.
hosts: localhost
become: true
hosts: compute
gather_facts: false
vars_files:
- ../config/ansible.secret.json
- ../config/infrastructure.secret.json
vars:
ansible_connection: aws_ssm
ansible_python_interpreter: /usr/bin/python3
ansible_aws_ssm_plugin: "{{ ssm_plugin }}"
ansible_aws_ssm_bucket_name: "{{ image_bucket }}"
ansible_aws_ssm_instance_id: "{{ instance_id.value }}"
ansible_aws_ssm_region: "{{ aws_region }}"
ansible_aws_ssm_access_key_id: "{{ aws_access_key }}"
ansible_aws_ssm_secret_access_key: "{{ aws_secret_key }}"
tasks:
- name: Fetch image.
amazon.aws.s3_object:
mode: get
bucket: "{{ image_bucket }}"
object: "{{ image_key }}"
dest: /root/image.tar.gz
region: "{{ aws_region }}"
access_key: "{{ aws_access_key }}"
secret_key: "{{ aws_secret_key }}"
- name: Create data directory.
ansible.builtin.file:
path: /home/ssm-user/data
path: "{{ item }}"
state: directory
mode: '0777'
loop:
- ~/app
- ~/app/gitea
- name: Pull image to remote.
ansible.posix.synchronize:
src: ../dist/image.tar.gz
dest: ~/app/gitea/image.tar.gz
- name: Load image.
community.docker.docker_image_load:
path: /root/image.tar.gz
containers.podman.podman_load:
path: ~/app/gitea/image.tar.gz
register: image
- name: Run image.
community.docker.docker_container:
name: server
image: "{{ image.image_names[0] }}"
state: started
recreate: true
restart_policy: unless-stopped
memory: 300m
memory_swap: 900m
ports: [80:80, 2222:2222, 443:443, "22:22"]
env:
GITEA__security__INTERNAL_TOKEN: "{{ internal_secret }}"
GITEA__server__LFS_JWT_SECRET: "{{ lfs_secret }}"
GITEA__oauth2__JWT_SECRET: "{{ jwt_secret }}"
GITEA__server__ACME_EMAIL: "{{ email }}"
GITEA__server__SSH_DOMAIN: "{{ full_domain.value }}"
GITEA__server__DOMAIN: "{{ full_domain.value }}"
GITEA__server__ROOT_URL: "https://{{ full_domain.value }}/"
GITEA__storage__MINIO_ACCESS_KEY_ID: "{{ minio_access_key }}"
GITEA__storage__MINIO_SECRET_ACCESS_KEY: "{{ minio_secret_key }}"
labels:
docker-volume-backup.stop-during-backup: "true"
volumes:
- /home/ssm-user/data:/var/lib/gitea
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
- name: Run backup.
community.docker.docker_container:
name: backup
image: offen/docker-volume-backup:v2
state: started
recreate: true
restart_policy: unless-stopped
volumes:
- /home/ssm-user/data:/backup/my-app-backup:ro
- /var/run/docker.sock:/var/run/docker.sock:ro
env:
AWS_S3_BUCKET_NAME: "{{ boot_bucket }}"
AWS_S3_PATH: "{{ boot_key }}"
AWS_REGION: "{{ boot_region.value }}"
AWS_ACCESS_KEY_ID: "{{ boot_id.value }}"
AWS_SECRET_ACCESS_KEY: "{{ boot_secret.value }}"
BACKUP_CRON_EXPRESSION: "0 0 * * *"
# - name: Run image.
# community.docker.docker_container:
# name: server
# image: "{{ image.image_names[0] }}"
# state: started
# recreate: true
# restart_policy: unless-stopped
# memory: 425m
# memory_swap: 900m
# ports: [80:80, 2222:2222, 443:443, "22:22"]
# env:
# GITEA__security__INTERNAL_TOKEN: "{{ internal_secret }}"
# GITEA__server__LFS_JWT_SECRET: "{{ lfs_secret }}"
# GITEA__oauth2__JWT_SECRET: "{{ jwt_secret }}"
# GITEA__server__ACME_EMAIL: "{{ email }}"
# GITEA__server__SSH_DOMAIN: "{{ full_domain.value }}"
# GITEA__server__DOMAIN: "{{ full_domain.value }}"
# GITEA__server__ROOT_URL: "https://{{ full_domain.value }}/"
# GITEA__storage__MINIO_ACCESS_KEY_ID: "{{ minio_access_key }}"
# GITEA__storage__MINIO_SECRET_ACCESS_KEY: "{{ minio_secret_key }}"
# labels:
# docker-volume-backup.stop-during-backup: "true"
# volumes:
# - /home/ssm-user/data:/var/lib/gitea
# - /etc/timezone:/etc/timezone:ro
# - /etc/localtime:/etc/localtime:ro

3
router/Caddyfile Normal file
View File

@@ -0,0 +1,3 @@
# Placeholder vhost for the Gitea subdomain: reply with a static stub body.
# NOTE(review): presumably to be replaced by a reverse_proxy to the Gitea
# container once routing is finished — confirm.
code.maximhutz.com {
respond "WIP!"
}

View File

@@ -1,24 +0,0 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/aws" {
version = "5.83.1"
hashes = [
"h1:Yy3K7R7881H72rQDzG6qjZVkrWA6DGJzfE21TionY7w=",
"zh:0313253c78f195973752c4d1f62bfdd345a9c99c1bc7a612a8c1f1e27d51e49e",
"zh:108523f3e9ebc93f7d900c51681f6edbd3f3a56b8a62b0afc31d8214892f91e0",
"zh:175b9bf2a00bea6ac1c73796ad77b0e00dcbbde166235017c49377d7763861d8",
"zh:1c8bf55b8548bbad683cd6d7bdb03e8840a00b2422dc1529ffb9892820657130",
"zh:22338f09bae62d5ff646de00182417f992548da534fee7d98c5d0136d4bd5d7a",
"zh:92de1107ec43de60612be5f6255616f16a9cf82d88df1af1c0471b81f3a82c16",
"zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425",
"zh:9c7bfb7afea330e6d90e1466125a8cba3db1ed4043c5da52f737459c89290a6e",
"zh:ba59b374d477e5610674b70f5abfe0408e8f809390347372751384151440d3d0",
"zh:bd1c433966002f586d63cb1e3e16326991f238bc6beeb2352be36ec651917b0b",
"zh:ca2b4d1d02651c15261fffa4b142e45def9a22c6069353f0f663fd2046e268f8",
"zh:d8ed98c748f7a3f1a72277cfee9afe346aca39ab319d17402277852551d8f14a",
"zh:ed3d8bc89de5f35f3c5f4802ff7c749fda2e2be267f9af4a850694f099960a72",
"zh:f698732a4391c3f4d7079b4aaa52389da2a460cac5eed438ed688f147d603689",
"zh:f9f51b17f2978394954e9f6ab9ef293b8e11f1443117294ccf87f7f8212b3439",
]
}

View File

@@ -1,18 +0,0 @@
---
# Task runner targets for the Terraform workspace.
# FIX: indentation was flattened (not valid YAML as rendered) — restored.
# FIX: `version: 3` parsed as an integer; Taskfile's schema expects the
# string '3'.
version: '3'
silent: true
vars:
  # Secret configuration lives outside the repo tree, under ../config.
  BACKEND: ../config/backend.secret.json
  VARIABLES: ../config/variables.secret.json
  OUTPUT: ../config/infrastructure.secret.json
tasks:
  init: terraform init -backend-config={{.BACKEND}}
  plan: terraform plan -var-file={{.VARIABLES}}
  # NOTE(review): destroy has no -var-file; Terraform will prompt for any
  # required variables — confirm this is intended.
  destroy: terraform destroy
  format: terraform fmt -recursive
  # Export stack outputs as JSON for the Ansible side to consume.
  out: terraform output -json > {{.OUTPUT}}
  apply:
    - terraform apply -var-file={{.VARIABLES}}
    - task: out
  # Extra arguments (resource address + ID) come from the CLI.
  import: terraform import -var-file={{.VARIABLES}} {{.CLI_ARGS}}

View File

@@ -1,31 +0,0 @@
# Look up the existing bucket that holds the Gitea backup ("boot") data.
data "aws_s3_bucket" "storage_bucket" {
bucket = var.boot_bucket
}
# Allow full S3 access, limited to the boot key and everything beneath it.
# NOTE(review): "s3:*" / "s3-object-lambda:*" is broad even when scoped to a
# prefix — consider narrowing to the Get/Put/List actions actually used.
data "aws_iam_policy_document" "boot" {
statement {
effect = "Allow"
actions = ["s3:*", "s3-object-lambda:*"]
resources = [
"${data.aws_s3_bucket.storage_bucket.arn}/${var.boot_key}",
"${data.aws_s3_bucket.storage_bucket.arn}/${var.boot_key}/*",
]
}
}
resource "aws_iam_policy" "boot" {
name = "${var.boot_role}Policy"
description = "The policy that manages the Gitea Boot."
policy = data.aws_iam_policy_document.boot.json
}
# Dedicated IAM user with no console login (create_iam_user_login_profile is
# false); its access keys are exported via the boot_id / boot_secret outputs.
module "boot_user" {
source = "terraform-aws-modules/iam/aws//modules/iam-user"
version = "5.52.2"
create_iam_user_login_profile = false
name = "${var.boot_role}User"
password_reset_required = false
policy_arns = [aws_iam_policy.boot.arn]
}

View File

@@ -1,31 +0,0 @@
#!/bin/sh
# EC2 user-data bootstrap: install Docker, Ansible tooling and Python deps,
# add swap, and disable SSH (SSM is the access path).

## Install extras.
rpm --rebuilddb
amazon-linux-extras install docker ansible2 python3.8 -y

# Make Docker work.
systemctl enable docker
systemctl start docker

# Set up the correct version of Python (for Ansible).
ln -sf /usr/bin/python3.8 /usr/bin/python3
ln -sf /usr/bin/pip3.8 /usr/bin/pip3
# FIX: the previous `pip3 install ... --user ssm-user` passed "ssm-user" as
# an extra *package name* (`--user` takes no argument), and a root-owned
# --user install lands in /root/.local, invisible to other accounts.
# Install system-wide so every user (including ssm-user) can import them.
pip3 install botocore boto3 requests packaging
python3 -m pip install -U pip

# Add some swap space (8 x 128M = 1 GiB).
dd if=/dev/zero of=/swapfile bs=128M count=8
chmod 600 /swapfile
mkswap /swapfile
swapon /swapfile

# Stop SSH (because we have SSM.)
service sshd stop

# Install Docker Compose.
curl -L "https://github.com/docker/compose/releases/latest/download/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
chmod +x /usr/local/bin/docker-compose

# ERROR: SSM User not created yet.
sudo usermod -aG docker ssm-user

View File

@@ -1,43 +0,0 @@
# An elastic IP, so if the reverse proxy is modified, the route tables won't.
resource "aws_eip" "public" {
instance = aws_instance.this.id
domain = "vpc"
}
# An instance profile for access via AWS SSM.
# Reuses the role created by the SSM "Quick Setup" flow.
resource "aws_iam_instance_profile" "ssm" {
name = "SSMInstanceProfile"
role = "AmazonSSMRoleForInstancesQuickSetup"
}
# The Gitea instance.
resource "aws_instance" "this" {
# ami = data.aws_ami.amazon-linux-2.id
# NOTE(review): AMI pinned by region-specific ID instead of the commented
# data-source lookup above — confirm this is intentional.
ami = "ami-0adec96dc0cdc7bca"
instance_type = "t4g.nano"
subnet_id = module.vpc.public_subnets[0]
# Bootstrap script; replace-on-change is off so edits to install.sh do not
# recreate the instance.
user_data = file("install.sh")
user_data_replace_on_change = false
iam_instance_profile = aws_iam_instance_profile.ssm.name
vpc_security_group_ids = [aws_security_group.public_access.id]
# Require session tokens for instance metadata (IMDSv2).
metadata_options {
http_tokens = "required"
}
root_block_device {
volume_type = "gp3"
volume_size = 8
}
tags = {
Name = "Codebase: Gitea"
}
}
# Keep the instance powered on; Terraform enforces the running state.
resource "aws_ec2_instance_state" "this" {
instance_id = aws_instance.this.id
state = "running"
}

View File

@@ -1,66 +0,0 @@
locals {
  # The IP block for the VPC.
  vpc_cidr = "10.0.0.0/16"
}

data "aws_availability_zones" "all" {}

# The main VPC: one private and one public /24, both in the first AZ.
module "vpc" {
  source = "terraform-aws-modules/vpc/aws"

  name = "Main"
  cidr = local.vpc_cidr
  azs  = [data.aws_availability_zones.all.names[0]]

  private_subnets = [cidrsubnet(local.vpc_cidr, 8, 0)]
  public_subnets  = [cidrsubnet(local.vpc_cidr, 8, 4)]

  private_subnet_tags = { SubnetOf = "Main", SubnetType = "Private" }
  public_subnet_tags  = { SubnetOf = "Main", SubnetType = "Public" }

  map_public_ip_on_launch = true
  enable_dns_hostnames    = true
  enable_dns_support      = true

  # FIX: the private route table was tagged TableType = "Public", which
  # contradicts the Private/Public tag scheme used for the subnets above.
  private_route_table_tags = { TableOf = "Main", TableType = "Private" }
}

# Only allow HTTP(s) and SSH traffic. Allow full access to internet.
resource "aws_security_group" "public_access" {
  vpc_id = module.vpc.vpc_id
  tags   = { GroupOf = "Main", GroupType = "Public" }
}

# Inbound TCP: HTTP (80), HTTPS (443), SSH (22), and 51821
# (presumably a WireGuard/VPN management port — confirm).
resource "aws_vpc_security_group_ingress_rule" "tcp" {
  for_each          = toset(["80", "443", "22", "51821"])
  security_group_id = aws_security_group.public_access.id
  from_port         = each.value
  to_port           = each.value
  ip_protocol       = "tcp"
  cidr_ipv4         = "0.0.0.0/0"
}

# Inbound UDP: 51820 (WireGuard's default port) and DNS (53).
resource "aws_vpc_security_group_ingress_rule" "udp" {
  for_each          = toset(["51820", "53"])
  security_group_id = aws_security_group.public_access.id
  from_port         = each.value
  to_port           = each.value
  ip_protocol       = "udp"
  cidr_ipv4         = "0.0.0.0/0"
}

# Allow all egress. The single-element for_each looks redundant, but it is
# kept so the resource's state address (...egress["-1"]) is unchanged.
resource "aws_vpc_security_group_egress_rule" "egress" {
  for_each          = toset(["-1"])
  security_group_id = aws_security_group.public_access.id
  from_port         = each.value
  to_port           = each.value
  ip_protocol       = "-1"
  cidr_ipv4         = "0.0.0.0/0"
}

View File

@@ -1,33 +0,0 @@
# Stack outputs, exported as JSON (`terraform output -json`) into
# config/infrastructure.secret.json for the Ansible side to consume.
output "instance_id" {
value = aws_instance.this.id
description = "The instance ID of the Gitea instance."
}
output "ip_address" {
# NOTE(review): this exports the *private* IP; the public address lives on
# the Elastic IP — confirm which one consumers expect.
value = aws_instance.this.private_ip
description = "The Gitea IP address."
}
# Credentials for the backup ("boot") IAM user; sensitive so they are not
# printed in plan/apply output.
output "boot_region" {
value = var.aws_region
description = "The region to manipulate the codebase repository boot."
sensitive = true
}
output "boot_id" {
value = module.boot_user.iam_access_key_id
description = "The access id to manipulate the codebase repository boot."
sensitive = true
}
output "boot_secret" {
value = module.boot_user.iam_access_key_secret
description = "The access secret to manipulate the codebase repository boot."
sensitive = true
}
output "full_domain" {
value = "${var.subdomain}.${var.domain}"
description = "The domain of the Gitea instance."
sensitive = true
}

View File

@@ -1,11 +0,0 @@
terraform {
# The backend is stored in an S3 bucket.
# The block is intentionally empty: backend settings are injected at init
# time via -backend-config (see the workspace Taskfile's init target).
backend "s3" {}
}
# Access AWS through the IaC roles.
provider "aws" {
region = var.aws_region
access_key = var.aws_access
secret_key = var.aws_secret
}

View File

@@ -1,13 +0,0 @@
# The Route53 DNS zone.
data "aws_route53_zone" "main" {
name = var.domain
}
# Push all domain traffic through the reverse proxy.
resource "aws_route53_record" "domain" {
zone_id = data.aws_route53_zone.main.zone_id
name = "${var.subdomain}.${data.aws_route53_zone.main.name}"
type = "A"
# Short TTL so re-pointing the record propagates quickly.
ttl = "60"
# Targets the instance's Elastic IP, so the record survives instance
# replacement as long as the EIP is retained.
records = [aws_eip.public.public_ip]
}

View File

@@ -1,39 +0,0 @@
variable "aws_region" {
  type        = string
  description = "The AWS region things are created in."
}

variable "aws_access" {
  type        = string
  description = "The access key to generate the Gitea instance."
  # FIX: credentials were not marked sensitive, so they could be echoed in
  # plan/apply output and logs.
  sensitive   = true
}

variable "aws_secret" {
  type        = string
  description = "The access secret to generate the Gitea instance."
  sensitive   = true
}

variable "boot_bucket" {
  type        = string
  description = "The name of the bucket to store the boot in."
}

variable "boot_key" {
  type        = string
  description = "The path that will hold the boot data."
}

variable "boot_role" {
  type        = string
  description = "The name of the role for boot access."
}

variable "domain" {
  type        = string
  description = "The name of the domain."
}

variable "subdomain" {
  type        = string
  description = "The name of the subdomain."
}

6
vault.yml Normal file
View File

@@ -0,0 +1,6 @@
$ANSIBLE_VAULT;1.1;AES256
66386561623561353461656662653566353665646466336635626561303031383735636666623063
3336313938313562323336383765323932666638373232390a653163323131323564306438363864
31333632326135353966656363633962616165623036373764646433353966616364376162636234
3837333961383333640a383732346534346465353062653531353638663465306432396266663534
3731