I lost my data. (#18)
All checks were successful
🔧 Pipeline / 🪨 Terraform (push) Successful in 7s

## 🔍 Motivation & Context

My Gitea data was lost, so the whole deployment needs to be rebuilt.

## 🔖 Related Issue

None.

## Changes

I accidentally overwrote my Gitea data, so this rebuilds the deployment from scratch: a new Terraform layout (VPC with a NAT instance, reverse proxy, Gitea and runner instances), Ansible playbooks that build and deploy the containers over AWS SSM, a Taskfile replacing the old Makefile and deploy scripts, and commit tooling (husky, commitlint, commitizen).

Reviewed-on: https://code.maximhutz.com/Infrastructure/Codebase/pulls/18
Co-authored-by: Max <git@maximhutz.me>
Co-committed-by: Max <git@maximhutz.me>
Committed by Maxim Hutz on 2025-01-08 23:20:08 +00:00
commit ea7e38e12d (parent 1a9bbe797c)
43 changed files with 3584 additions and 232 deletions

.commitlintrc.yml (new file, +2)

@@ -0,0 +1,2 @@
extends:
- "@commitlint/config-conventional"


@@ -0,0 +1,17 @@
name: 🔧 Pipeline
on:
pull_request:
branches: [main]
push:
branches: [main]
jobs:
tf-lint:
name: 🪨 Terraform
runs-on: ubuntu-latest
steps:
- name: 🔍️ Checkout
uses: actions/checkout@main
- name: 🎨 Lint Terraform
uses: actionshub/terraform-lint@main

.gitignore (vendored, 8 changed lines)

@@ -40,8 +40,14 @@ terraform.rc
boot
ssh.pem
.DS_Store
*.secret
*secret*
*.tfbackend
*.env
*.tar.gz
*.tar.xz
*.tar
*.pem
.venv
.vscode
tmp
node_modules

.husky/commit-msg (new executable file, +4)

@@ -0,0 +1,4 @@
#!/usr/bin/env sh
. "$(dirname -- "$0")/_/husky.sh"
npx --no -- commitlint --edit "$1"

.husky/pre-commit (new executable file, +5)

@@ -0,0 +1,5 @@
#!/usr/bin/env sh
. "$(dirname -- "$0")/_/husky.sh"
terraform -chdir=terraform validate
terraform -chdir=terraform fmt -recursive

.husky/prepare-commit-msg (new executable file, +4)

@@ -0,0 +1,4 @@
#!/usr/bin/env sh
. "$(dirname -- "$0")/_/husky.sh"
exec < /dev/tty && node_modules/.bin/cz --hook || true

Codebase (new submodule added at 1a9bbe797c, +1)


@@ -1,20 +0,0 @@
dev:
@ docker-compose -f compose.dev.yml up --build --force-recreate
prod:
@ docker-compose -f compose.yml up --build
init:
@ terraform -chdir=terraform init -backend-config=a2a.tfvars
plan:
@ terraform -chdir=terraform plan -var-file=a2a.tfvars
provision:
@ terraform -chdir=terraform apply -var-file=a2a.tfvars
deploy:
@ ./scripts/deploy.sh
destroy:
@ terraform -chdir=terraform destroy -var-file=a2a.tfvars

Taskfile.yml (new file, +33)

@@ -0,0 +1,33 @@
version: 3
env: { TF: terraform -chdir=terraform }
silent: true
tasks:
tf/init: $TF init -backend-config=../secrets/backend.json
tf/plan: $TF plan -var-file=../secrets/general.json
tf/destroy: $TF destroy -var-file=../secrets/general.json
tf/format: $TF fmt -recursive
tf/apply:
- $TF apply -var-file=../secrets/general.json
- $TF output -json > secrets/terraform.json
proxy/build: ansible-playbook playbooks/proxy/build.yml
proxy/deploy: ansible-playbook playbooks/proxy/deployment.yml
proxy/enter: ./ssm/proxy.sh
proxy:
# - task: proxy/build
- task: proxy/deploy
gitea/build: ansible-playbook playbooks/gitea/build.yml
gitea/deploy: ansible-playbook playbooks/gitea/deployment.yml
gitea/enter: ./ssm/gitea.sh
gitea:
- task: gitea/build
- task: gitea/deploy
runner: ansible-playbook playbooks/runner/deployment.yml
repository/copy: ansible-playbook playbooks/repository.yml
dev: docker compose -f compose.dev.yml up
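Assuming go-task is installed, a typical run through these targets might look like this (all names come from the Taskfile above; the secrets files are expected to exist already):

task tf/init     # terraform init against the S3 backend settings in secrets/backend.json
task tf/apply    # terraform apply, then dump the outputs to secrets/terraform.json for Ansible
task gitea       # build the Gitea image and deploy it onto the private instance over SSM
task runner      # register and start the act_runner instance
task dev         # bring up the local compose.dev.yml stack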

ansible.cfg (new file, +6)

@@ -0,0 +1,6 @@
[defaults]
callbacks_enabled = profile_tasks
localhost_warning = False
[inventory]
inventory_unparsed_warning = False


@@ -1,19 +1,34 @@
name: codebase-dev
services:
website:
container_name: codebase-dev-website
image: gitea/gitea:latest-rootless
restart: always
volumes:
# Data.
- ./boot:/var/lib/gitea
# Static configuration.
- ./config:/etc/gitea:ro
- ./custom:/var/lib/gitea/custom:ro
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
ports:
- 80:80
- 2222:2222
# proxy:
# container_name: codebase-dev-proxy
# build:
# context: proxy
# dockerfile: Dockerfile
# environment:
# GITEA_IP: http://httpforever.com/
# ports:
# - 80:80
# gitea:
# container_name: codebase-dev-gitea
# build: gitea
# volumes:
# - ./gitea/boot:/var/lib/gitea
# - /etc/timezone:/etc/timezone:ro
# - /etc/localtime:/etc/localtime:ro
# ports:
# - 80:80
# - 443:443
# - 2222:2222
# environment:
# GITEA_APP_INI: /etc/gitea/dev.app.ini
runner:
container_name: codebase-dev-runner
image: gitea/act_runner
environment:
GITEA_APP_INI: /etc/gitea/dev.app.ini
GITEA_INSTANCE_URL: "https://code.maximhutz.com/"
GITEA_RUNNER_REGISTRATION_TOKEN: "bgM1Ux9do7EWj6JwniXjdfs8fmjuzWgMeeNF5vhd"
volumes:
- /var/run/docker.sock:/var/run/docker.sock


@@ -1,20 +0,0 @@
name: codebase
services:
website:
container_name: codebase-website
image: gitea/gitea:latest-rootless
restart: always
volumes:
# Data.
- ~/boot:/var/lib/gitea
# Static configuration.
- ./config:/etc/gitea:ro
- ./custom:/var/lib/gitea/custom:ro
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
ports:
- "80:80"
- "443:443"
- "2222:2222"
environment:
GITEA_APP_INI: /etc/gitea/app.ini

gitea/Dockerfile (new file, +5)

@@ -0,0 +1,5 @@
FROM gitea/gitea:latest-rootless
ADD --chown=git:git config /etc/gitea
ADD --chown=git:git custom /etc/gitea-custom
ENV GITEA_CUSTOM /etc/gitea-custom
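A local smoke build of this image (the tag name is illustrative) is just:

docker build -t codebase-gitea ./gitea   # the same context the build playbook below points at (../../gitea)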


@@ -1,4 +1,4 @@
APP_NAME = """Max`s Code"""
APP_NAME = """Max's Code"""
RUN_USER = git
RUN_MODE = prod
WORK_PATH = /var/lib/gitea
@@ -17,23 +17,19 @@ TEMP_PATH = /tmp/gitea/uploads
[server]
APP_DATA_PATH = /var/lib/gitea
DISABLE_SSH = false
START_SSH_SERVER = true
SSH_PORT = 2222
SSH_LISTEN_PORT = 2222
BUILTIN_SSH_SERVER_USER = git
LFS_START_SERVER = true
OFFLINE_MODE = true
SSH_DOMAIN = git1.maximhutz.com
DOMAIN = git1.maximhutz.com
HTTP_PORT = 443
ROOT_URL = https://git1.maximhutz.com/
PROTOCOL = https
ENABLE_ACME = true
ACME_ACCEPTTOS = true
ACME_DIRECTORY = https
ACME_EMAIL = whois@maximhutz.me
LFS_JWT_SECRET_URI = file:/etc/gitea/lfs.secret
DISABLE_SSH = false
START_SSH_SERVER = true
SSH_PORT = 22
SSH_LISTEN_PORT = 2222
SSH_DOMAIN = maximhutz.com
BUILTIN_SSH_SERVER_USER = git
DOMAIN = code.maximhutz.com
ROOT_URL = https://code.maximhutz.com/
HTTP_PORT = 80
[database]
DB_TYPE = sqlite3
@@ -60,7 +56,6 @@ SECRET_KEY =
REVERSE_PROXY_LIMIT = 1
REVERSE_PROXY_TRUSTED_PROXIES = *
PASSWORD_HASH_ALGO = pbkdf2
INTERNAL_TOKEN_URI = file:/etc/gitea/internal.secret
[service]
DISABLE_REGISTRATION = true
@@ -92,6 +87,3 @@ DEFAULT_MERGE_STYLE = merge
[repository.signing]
DEFAULT_TRUST_MODEL = committer
[oauth2]
JWT_SECRET_URI = file:/etc/gitea/jwt.secret


@@ -28,7 +28,7 @@ SSH_DOMAIN = localhost
DOMAIN = localhost
ROOT_URL = http://localhost:80/
HTTP_PORT = 80
LFS_JWT_SECRET_URI = file:/etc/gitea/lfs.secret
LFS_JWT_SECRET = x-----------------------------------------x
[database]
DB_TYPE = sqlite3
@@ -55,7 +55,7 @@ SECRET_KEY =
REVERSE_PROXY_LIMIT = 1
REVERSE_PROXY_TRUSTED_PROXIES = *
PASSWORD_HASH_ALGO = pbkdf2
INTERNAL_TOKEN_URI = file:/etc/gitea/internal.secret
INTERNAL_TOKEN = x-----------------------------------------x
[service]
DISABLE_REGISTRATION = true
@@ -89,4 +89,4 @@ DEFAULT_MERGE_STYLE = merge
DEFAULT_TRUST_MODEL = committer
[oauth2]
JWT_SECRET_URI = file:/etc/gitea/jwt.secret
JWT_SECRET = x-----------------------------------------x

Five binary image files changed (three at 2.3 KiB, two at 819 B; sizes unchanged).

package-lock.json (generated, new file, +2891): diff suppressed because it is too large.

package.json (new file, +25)

@@ -0,0 +1,25 @@
{
"name": "codebase",
"version": "1.0.0",
"description": "This is the [Gitea](https://about.gitea.com/) instance that stores all repositories seen on [this site](https://git1.maximhutz.com)!",
"main": "index.js",
"scripts": {
"test": "echo \"Error: no test specified\" && exit 1",
"prepare": "husky install",
"cz": "cz"
},
"keywords": [],
"author": "",
"license": "ISC",
"devDependencies": {
"@commitlint/cli": "19.4.0",
"@commitlint/config-conventional": "19.2.2",
"cz-conventional-changelog": "^3.3.0",
"husky": "^8.0.0"
},
"config": {
"commitizen": {
"path": "./node_modules/cz-conventional-changelog"
}
}
}
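Since npm runs the `prepare` script after an install, wiring up the hooks locally is a plain install; the `cz` script then gives the interactive Conventional Commits prompt (a sketch, assuming Node and npm are present):

npm install    # runs "husky install" via the prepare script, enabling the hooks above
npm run cz     # commitizen prompt; a plain `git commit` also triggers it through prepare-commit-msg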

playbooks/gitea/build.yml (new file, +41)

@@ -0,0 +1,41 @@
- name: Make build artifact.
hosts: localhost
vars_files: ../../secrets/gitea.json
tasks:
- name: Build image.
community.docker.docker_image_build:
name: "{{ image_name }}"
path: ../../gitea
nocache: true
rebuild: always
pull: true
- name: Make temp file.
ansible.builtin.tempfile:
suffix: .tar
register: tar_file
- name: Push image to archive.
community.docker.docker_image:
name: "{{ image_name }}"
archive_path: "{{ tar_file.path }}"
source: local
- name: Compress archive to artifact.
register: compress_image
community.general.archive:
path: "{{ tar_file.path }}"
dest: "{{ tar_file.path }}.xz"
format: xz
mode: "0644"
- name: Push artifact to S3.
amazon.aws.s3_object:
bucket: "{{ image_bucket }}"
object: "{{ image_key }}"
src: "{{ tar_file.path }}.xz"
mode: put
region: "{{ aws_region }}"
access_key: "{{ aws_access_key }}"
secret_key: "{{ aws_secret_key }}"


@@ -0,0 +1,68 @@
- name: Deploy artifact to instance.
hosts: localhost
become: true
vars_files:
- ../../secrets/gitea.json
- ../../secrets/terraform.json
vars:
ansible_connection: aws_ssm
ansible_python_interpreter: /usr/bin/python3
ansible_aws_ssm_plugin: "{{ ssm_plugin }}"
ansible_aws_ssm_bucket_name: "{{ image_bucket }}"
ansible_aws_ssm_instance_id: "{{ private_instance_id.value }}"
ansible_aws_ssm_region: "{{ aws_region }}"
ansible_aws_ssm_access_key_id: "{{ aws_access_key }}"
ansible_aws_ssm_secret_access_key: "{{ aws_secret_key }}"
tasks:
- name: Fetch image.
amazon.aws.s3_object:
mode: get
bucket: "{{ image_bucket }}"
object: "{{ image_key }}"
dest: ~/image.tar.xz
region: "{{ aws_region }}"
access_key: "{{ aws_access_key }}"
secret_key: "{{ aws_secret_key }}"
- name: Load image.
community.docker.docker_image_load:
path: ~/image.tar.xz
register: image
- name: Fetch repository.
amazon.aws.s3_object:
mode: get
bucket: "{{ boot_bucket }}"
object: "{{ boot_key }}"
dest: ~/boot.tar.xz
region: "{{ aws_region }}"
access_key: "{{ aws_access_key }}"
secret_key: "{{ aws_secret_key }}"
- name: Unarchive image.
ansible.builtin.unarchive:
src: ~/boot.tar.xz
remote_src: true
dest: "~"
group: 1000
owner: 1000
- name: Run image.
community.docker.docker_container:
name: server
image: "{{ image.image_names[0] }}"
state: started
recreate: true
restart_policy: always
ports: [80:80, 2222:2222]
env:
GITEA__security__INTERNAL_TOKEN: "{{ internal_secret }}"
GITEA__server__LFS_JWT_SECRET: "{{ lfs_secret }}"
GITEA__oauth2__JWT_SECRET: "{{ jwt_secret }}"
volumes:
- ~/boot:/var/lib/gitea
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
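These playbooks lean on a few collections: `amazon.aws` for the S3 objects, `community.docker` for images and containers, `community.general` for the archive step, and `community.aws` for the `aws_ssm` connection plugin (plus the Session Manager plugin binary referenced by `ssm_plugin`). A rough install sketch:

ansible-galaxy collection install amazon.aws community.aws community.docker community.general
pip install boto3 botocore   # the AWS modules and the aws_ssm connection need the SDK on the control node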


@@ -0,0 +1,46 @@
- name: Deploy artifact to instance.
hosts: localhost
vars_files:
- ../../secrets/proxy.json
- ../../secrets/terraform.json
vars:
ansible_connection: aws_ssm
ansible_python_interpreter: /usr/bin/python3
ansible_aws_ssm_plugin: "{{ ssm_plugin }}"
ansible_aws_ssm_bucket_name: "{{ image_bucket }}"
ansible_aws_ssm_instance_id: "{{ public_instance_id.value }}"
ansible_aws_ssm_region: "{{ aws_region }}"
ansible_aws_ssm_access_key_id: "{{ aws_access_key }}"
ansible_aws_ssm_secret_access_key: "{{ aws_secret_key }}"
tasks:
# - name: Fetch image.
# amazon.aws.s3_object:
# mode: get
# bucket: "{{ image_bucket }}"
# object: "{{ image_key }}"
# dest: ~/image.tar.xz
# region: "{{ aws_region }}"
# access_key: "{{ aws_access_key }}"
# secret_key: "{{ aws_secret_key }}"
# - name: Load image.
# community.docker.docker_image_load:
# path: ~/image.tar.xz
# register: image
- name: Run image.
community.docker.docker_container:
name: server
image: "jc21/nginx-proxy-manager:latest"
state: started
recreate: true
restart_policy: always
ports: ["80:80", "443:443", "81:81", "22:22"]
volumes:
- ./data:/data
- ./letsencrypt:/etc/letsencrypt
env:
INITIAL_ADMIN_EMAIL: "proxy@maximhutz.com"
INITIAL_ADMIN_PASSWORD: "expensive-giraffe"

playbooks/repository.yml (new file, +27)

@@ -0,0 +1,27 @@
---
- name: Pull Gitea data.
hosts: localhost
vars_files:
../secrets/gitea.json
tasks:
- name: Temp file.
ansible.builtin.tempfile:
suffix: .tar.xz
register: file
- name: Fetch from S3.
amazon.aws.s3_object:
bucket: "acer-saccharum"
object: "codebase/gitea/boot"
dest: "{{ file.path }}"
mode: get
region: "{{ aws_region }}"
access_key: "{{ aws_access_key }}"
secret_key: "{{ aws_secret_key }}"
- name: Unarchive image.
ansible.builtin.unarchive:
src: "{{ file.path }}"
dest: ../gitea


@@ -0,0 +1,51 @@
- name: Get registration token.
hosts: localhost
vars_files:
- ../../secrets/proxy.json
- ../../secrets/terraform.json
vars:
ansible_connection: aws_ssm
ansible_python_interpreter: /usr/bin/python3
ansible_aws_ssm_plugin: "{{ ssm_plugin }}"
ansible_aws_ssm_bucket_name: "{{ image_bucket }}"
ansible_aws_ssm_instance_id: "{{ private_instance_id.value }}"
ansible_aws_ssm_region: "{{ aws_region }}"
ansible_aws_ssm_access_key_id: "{{ aws_access_key }}"
ansible_aws_ssm_secret_access_key: "{{ aws_secret_key }}"
tasks:
- name: Generate registration token.
community.docker.docker_container_exec:
container: server
command: gitea actions grt
register: token
- name: Deploy artifact to instance.
hosts: localhost
vars_files:
- ../../secrets/proxy.json
- ../../secrets/terraform.json
vars:
ansible_connection: aws_ssm
ansible_python_interpreter: /usr/bin/python3
ansible_aws_ssm_plugin: "{{ ssm_plugin }}"
ansible_aws_ssm_bucket_name: "{{ image_bucket }}"
ansible_aws_ssm_instance_id: "{{ runner_instance_id.value }}"
ansible_aws_ssm_region: "{{ aws_region }}"
ansible_aws_ssm_access_key_id: "{{ aws_access_key }}"
ansible_aws_ssm_secret_access_key: "{{ aws_secret_key }}"
tasks:
- name: Run image.
community.docker.docker_container:
name: server
image: gitea/act_runner
state: started
recreate: true
restart_policy: always
ports: [80:80, 443:443]
volumes:
- /var/run/docker.sock:/var/run/docker.sock
env:
GITEA_INSTANCE_URL: https://code.maximhutz.com/
GITEA_RUNNER_REGISTRATION_TOKEN: "{{ token.stdout }}"


@@ -1,24 +0,0 @@
#!/bin/bash
set -e
# Get variables from A2A credential file.
AWS_REGION="$(jq -r ".region" < credentials/a2a.json.secret)"
AWS_ACCESS_KEY_ID="$(jq -r ".access_key" < credentials/a2a.json.secret)"
AWS_SECRET_ACCESS_KEY="$(jq -r ".secret_key" < credentials/a2a.json.secret)"
export AWS_REGION
export AWS_ACCESS_KEY_ID
export AWS_SECRET_ACCESS_KEY
AWS_SSH_PEM="$(aws ssm get-parameter --name "/codebase/main" --with-decryption | jq -r ".Parameter.Value")"
# Arhive stuff.
git archive --format tar.gz -o archive.tar.gz HEAD
# SSH into it and copy needed files.
ssh-add - <<< "$AWS_SSH_PEM"
scp -O -ro StrictHostKeyChecking=no archive.tar.gz ec2-user@git1.maximhutz.com:~/archive.tar.gz
# Run.
ssh -o StrictHostKeyChecking=no ec2-user@git1.maximhutz.com < scripts/start.sh


@@ -1,11 +0,0 @@
#!/bin/bash
set -e
# Install docker compose.
curl -L "https://github.com/docker/compose/releases/latest/download/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
chmod +x /usr/local/bin/docker-compose
ln -s /usr/local/bin/docker-compose /usr/bin/docker-compose
# Install git.
sudo yum install git -y


@@ -1,18 +0,0 @@
#!/bin/bash
set -e
# shellcheck disable=SC2046
docker-compose down
# Create and accessify correct folders.
mkdir -p boot
sudo chown 1000:1000 -R config
sudo chown 1000:1000 -R boot
sudo chown 1000:1000 -R custom
# Update contents.
sudo tar xvfz archive.tar.gz -C ./
# Run.
docker-compose -f compose.yml up -d --build

ssm/gitea.sh (new executable file, +19)

@@ -0,0 +1,19 @@
#!/bin/sh
set -e
# ---------------------------------------------------------------------------- #
AWS_REGION="us-east-1"
AWS_ACCESS_KEY_ID="$(jq -r '.aws_access_key' < secrets/gitea.json)"
AWS_SECRET_ACCESS_KEY="$(jq -r '.aws_secret_key' < secrets/gitea.json)"
export AWS_REGION
export AWS_ACCESS_KEY_ID
export AWS_SECRET_ACCESS_KEY
# ---------------------------------------------------------------------------- #
INSTANCE_ID="$(jq -r '.private_instance_id.value' < secrets/terraform.json)"
aws ssm start-session --target "$INSTANCE_ID"

ssm/proxy.sh (new executable file, +19)

@@ -0,0 +1,19 @@
#!/bin/sh
set -e
# ---------------------------------------------------------------------------- #
AWS_REGION="us-east-1"
AWS_ACCESS_KEY_ID="$(jq -r '.aws_access_key' < secrets/proxy.json)"
AWS_SECRET_ACCESS_KEY="$(jq -r '.aws_secret_key' < secrets/proxy.json)"
export AWS_REGION
export AWS_ACCESS_KEY_ID
export AWS_SECRET_ACCESS_KEY
# ---------------------------------------------------------------------------- #
INSTANCE_ID="$(jq -r '.public_instance_id.value' < secrets/terraform.json)"
aws ssm start-session --target "$INSTANCE_ID"


@@ -22,22 +22,3 @@ provider "registry.terraform.io/hashicorp/aws" {
"zh:ffb40a66b4d000a8ee4c54227eeb998f887ad867419c3af7d3981587788de074",
]
}
provider "registry.terraform.io/hashicorp/tls" {
version = "4.0.5"
hashes = [
"h1:zeG5RmggBZW/8JWIVrdaeSJa0OG62uFX5HY1eE8SjzY=",
"zh:01cfb11cb74654c003f6d4e32bbef8f5969ee2856394a96d127da4949c65153e",
"zh:0472ea1574026aa1e8ca82bb6df2c40cd0478e9336b7a8a64e652119a2fa4f32",
"zh:1a8ddba2b1550c5d02003ea5d6cdda2eef6870ece86c5619f33edd699c9dc14b",
"zh:1e3bb505c000adb12cdf60af5b08f0ed68bc3955b0d4d4a126db5ca4d429eb4a",
"zh:6636401b2463c25e03e68a6b786acf91a311c78444b1dc4f97c539f9f78de22a",
"zh:76858f9d8b460e7b2a338c477671d07286b0d287fd2d2e3214030ae8f61dd56e",
"zh:a13b69fb43cb8746793b3069c4d897bb18f454290b496f19d03c3387d1c9a2dc",
"zh:a90ca81bb9bb509063b736842250ecff0f886a91baae8de65c8430168001dad9",
"zh:c4de401395936e41234f1956ebadbd2ed9f414e6908f27d578614aaa529870d4",
"zh:c657e121af8fde19964482997f0de2d5173217274f6997e16389e7707ed8ece8",
"zh:d68b07a67fbd604c38ec9733069fbf23441436fecf554de6c75c032f82e1ef19",
"zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
]
}

terraform/compute.tf (new file, +92)

@@ -0,0 +1,92 @@
# A `t4g.nano`-compatible AMI for Amazon Linux 2.
data "aws_ami" "amazon-linux-2" {
most_recent = true
filter {
name = "owner-alias"
values = ["amazon"]
}
filter {
name = "architecture"
values = ["arm64"]
}
filter {
name = "name"
values = ["amzn2-ami-hvm*"]
}
}
# An instance profile for access via AWS SSM.
resource "aws_iam_instance_profile" "ssm" {
name = "SSMInstanceProfile"
role = "AmazonSSMRoleForInstancesQuickSetup"
}
# An Elastic IP, so that replacing the reverse proxy doesn't force the DNS records or route tables to change.
resource "aws_eip" "public" {
instance = aws_instance.public.id
domain = "vpc"
}
# The reverse proxy.
resource "aws_instance" "public" {
# ami = data.aws_ami.amazon-linux-2.id
ami = "ami-0adec96dc0cdc7bca"
instance_type = "t4g.nano"
subnet_id = module.vpc.public_subnets[0]
vpc_security_group_ids = [aws_security_group.public_access.id]
user_data = file("install.sh")
iam_instance_profile = aws_iam_instance_profile.ssm.name
root_block_device {
volume_type = "gp3"
volume_size = 8
}
tags = {
Name = "Codebase: Reverse Proxy"
}
}
# The Gitea instance.
resource "aws_instance" "private" {
# ami = data.aws_ami.amazon-linux-2.id
ami = "ami-0adec96dc0cdc7bca"
instance_type = "t4g.nano"
subnet_id = module.vpc.private_subnets[0]
user_data = file("install.sh")
iam_instance_profile = aws_iam_instance_profile.ssm.name
root_block_device {
volume_type = "gp3"
volume_size = 8
}
tags = {
Name = "Codebase: Gitea"
}
}
# The Gitea Runner instance.
resource "aws_instance" "runner" {
# ami = data.aws_ami.amazon-linux-2.id
ami = "ami-0adec96dc0cdc7bca"
instance_type = "t4g.nano"
subnet_id = module.vpc.private_subnets[0]
user_data = file("install.sh")
iam_instance_profile = aws_iam_instance_profile.ssm.name
root_block_device {
volume_type = "gp3"
volume_size = 8
}
tags = {
Name = "Codebase: Runner"
}
}
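The `aws_ami` data source is left commented out in favour of a pinned AMI ID. One way to check what those filters would resolve to today (AWS CLI; region taken from the rest of the repo, output shape is illustrative):

aws ec2 describe-images --owners amazon --region us-east-1 \
  --filters "Name=name,Values=amzn2-ami-hvm*" "Name=architecture,Values=arm64" \
  --query 'sort_by(Images, &CreationDate)[-1].[ImageId,Name]' --output text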

terraform/install.sh (new executable file, +10)

@@ -0,0 +1,10 @@
#!/bin/sh
amazon-linux-extras install docker ansible2 python3.8 -y
systemctl enable docker
systemctl start docker
usermod -a -G docker ssm-user
ln -sf /usr/bin/python3.8 /usr/bin/python3
pip install botocore boto3 requests


@@ -1,64 +1,7 @@
locals {
domain = "maximhutz.com"
subdomain = "git1"
instance_name = "codebase-main"
ssh_key_name = "codebase-main-key"
ssh_key_parameter = "/codebase/main"
}
/*----------------------------------------------------------------------------*/
resource "aws_key_pair" "tf_key" {
key_name = local.ssh_key_name
public_key = tls_private_key.rsa.public_key_openssh
}
resource "tls_private_key" "rsa" {
algorithm = "RSA"
rsa_bits = 4096
}
resource "aws_ssm_parameter" "secret" {
name = local.ssh_key_parameter
type = "SecureString"
value = tls_private_key.rsa.private_key_pem
}
/*----------------------------------------------------------------------------*/
data "aws_ami" "amazon_linux" {
most_recent = true
owners = ["amazon"]
filter {
name = "name"
values = ["amzn-ami-*"]
}
}
resource "aws_instance" "server" {
ami = data.aws_ami.amazon_linux.id
instance_type = "t3a.nano"
user_data = file("../scripts/prepare.sh")
key_name = local.ssh_key_name
tags = {
Name = local.instance_name
}
depends_on = [aws_key_pair.tf_key]
}
/*----------------------------------------------------------------------------*/
data "aws_route53_zone" "domain" {
name = local.domain
}
resource "aws_route53_record" "subdomain" {
zone_id = data.aws_route53_zone.domain.zone_id
name = "${local.subdomain}.${local.domain}"
type = "A"
ttl = "300"
records = [aws_instance.server.public_ip]
# The IP block for the VPC.
vpc_cidr = "10.0.0.0/16"
# The domain name that DNS records are created for.
domain_name = "maximhutz.com"
}

terraform/network.tf (new file, +102)

@@ -0,0 +1,102 @@
data "aws_availability_zones" "all" {}
# The main VPC.
module "vpc" {
source = "terraform-aws-modules/vpc/aws"
name = "Main"
cidr = local.vpc_cidr
azs = [data.aws_availability_zones.all.names[0]]
private_subnets = [cidrsubnet(local.vpc_cidr, 8, 0)]
public_subnets = [cidrsubnet(local.vpc_cidr, 8, 4)]
map_public_ip_on_launch = true
enable_dns_hostnames = true
enable_dns_support = true
}
# Allow inbound HTTP(S), SSH, and the app-specific ports below; allow unrestricted egress to the internet.
resource "aws_security_group" "public_access" {
vpc_id = module.vpc.vpc_id
ingress {
from_port = 80
to_port = 80
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
from_port = 443
to_port = 443
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
from_port = 2222
to_port = 2222
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
from_port = 81
to_port = 81
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
from_port = 8080
to_port = 8080
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
from_port = 4321
to_port = 4321
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
from_port = 1234
to_port = 1234
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
}
# Give the private subnet full access to the internet, too.
module "fck-nat" {
source = "RaJiska/fck-nat/aws"
name = "NatInstance"
vpc_id = module.vpc.vpc_id
subnet_id = module.vpc.public_subnets[0]
instance_type = "t4g.nano"
update_route_table = true
route_table_id = module.vpc.private_route_table_ids[0]
tags = {
Name = "Codebase: Nat"
}
}
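For reference, the two `cidrsubnet` calls above carve /24s out of the /16; this can be checked in `terraform console` (after init):

# cidrsubnet("10.0.0.0/16", 8, 0)  ->  "10.0.0.0/24"  (private subnet)
# cidrsubnet("10.0.0.0/16", 8, 4)  ->  "10.0.4.0/24"  (public subnet)
echo 'cidrsubnet("10.0.0.0/16", 8, 4)' | terraform -chdir=terraform console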

terraform/outputs.tf (new file, +19)

@@ -0,0 +1,19 @@
# The instance ID (`i-*****************`) of the reverse proxy.
output "public_instance_id" {
value = aws_instance.public.id
}
# The instance ID of the Gitea instance.
output "private_instance_id" {
value = aws_instance.private.id
}
# The instance ID of the Gitea runner.
output "runner_instance_id" {
value = aws_instance.runner.id
}
# The private IP (not accessible from the internet) of the Gitea instance.
output "private_instance_ip" {
value = aws_instance.private.private_ip
}


@@ -1,15 +1,11 @@
terraform {
backend "s3" {
bucket = "tsuga-sieboldii"
key = "codebase"
region = var.region
access_key = var.access_key
secret_key = var.secret_key
}
# The backend is stored in an S3 bucket.
backend "s3" {}
}
# Access AWS through the IaC roles.
provider "aws" {
region = var.region
access_key = var.access_key
secret_key = var.secret_key
access_key = var.roles["terraform"].access
secret_key = var.roles["terraform"].secret
}

terraform/routing.tf (new file, +22)

@@ -0,0 +1,22 @@
# The Route53 DNS zone.
data "aws_route53_zone" "main" {
name = local.domain_name
}
# Push all domain traffic through the reverse proxy.
resource "aws_route53_record" "main_domain" {
zone_id = data.aws_route53_zone.main.zone_id
name = data.aws_route53_zone.main.name
type = "A"
ttl = "60"
records = [aws_eip.public.public_ip]
}
# Also push all subdomain traffic.
resource "aws_route53_record" "sub_domains" {
zone_id = data.aws_route53_zone.main.zone_id
name = "*.${data.aws_route53_zone.main.name}"
type = "A"
ttl = "60"
records = [aws_eip.public.public_ip]
}


@@ -1,11 +1,14 @@
variable "access_key" {
description = "The IAM public access key."
}
variable "secret_key" {
description = "IAM secret access key."
}
# The AWS region resources are created in.
variable "region" {
type = string
description = "The AWS region things are created in."
}
# The various roles Terraform uses and gives to resources.
variable "roles" {
type = map(object({
access = string
secret = string
}))
description = "The different roles that are used by Terraform."
}
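The Taskfile passes these in with `-var-file=../secrets/general.json`, so that file has to match the `roles` map shape; a sketch with placeholder credentials (every value here is made up):

cat > secrets/general.json <<'EOF'
{
  "region": "us-east-1",
  "roles": {
    "terraform": { "access": "AKIAXXXXXXXXXXXXXXXX", "secret": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" }
  }
}
EOF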