19 commits, 24 changed files with 680 additions and 161 deletions

.DS_Store vendored (binary file not shown)

.gitignore vendored (174 changed lines)

@@ -37,7 +37,179 @@ override.tf.json
.terraformrc
terraform.rc
# ---> Ansible
*.retry
# ---> Python
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# UV
# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
#uv.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/latest/usage/project/#working-with-version-control
.pdm.toml
.pdm-python
.pdm-build/
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
*secret*
.vscode
.env
.DS_Store

Taskfile.yml

@@ -1,28 +1,21 @@
version: 3
env: { TF: terraform -chdir=terraform }
silent: true
includes:
tf: { taskfile: terraform, dir: terraform }
tasks:
dev: docker compose -f compose.dev.yml up --build --force-recreate --no-deps
dev:
- docker compose -f compose.dev.yml rm -fsv
- docker compose -f compose.dev.yml up --build --force-recreate --no-deps
tf/init: $TF init -backend-config=backend.tfvars
tf/plan: $TF plan -var-file=secret.tfvars
tf/destroy: $TF destroy
tf/format: $TF fmt -recursive
tf/apply:
- $TF apply -var-file=secret.tfvars
- $TF output -json > secrets.tf.json
build: ansible-playbook playbooks/build.yml
deploy: ansible-playbook playbooks/deploy.yml
run:
- task: build
- task: deploy
deploy:fast: ansible-playbook playbooks/fast.yml
deploy:slow: ansible-playbook playbooks/slow.yml
deploy:restore: ansible-playbook playbooks/restore.yml -e "restore_bucket={{.BUCKET}} restore_key={{.KEY}}"
enter:
cmd: aws ssm start-session --target $INSTANCE_ID
env:
INSTANCE_ID: { sh: jq -r .instance_id.value < secrets.tf.json }
AWS_REGION: { sh: jq -r .aws_region < secrets/gitea.json }
AWS_ACCESS_KEY_ID: { sh: jq -r .aws_access_key < secrets/gitea.json }
AWS_SECRET_ACCESS_KEY: { sh: jq -r .aws_secret_key < secrets/gitea.json }
INSTANCE_ID: { sh: jq -r .instance_id.value < config/infrastructure.secret.json }
AWS_REGION: { sh: jq -r .aws_region < config/ansible.secret.json }
AWS_ACCESS_KEY_ID: { sh: jq -r .aws_access_key < config/ansible.secret.json }
AWS_SECRET_ACCESS_KEY: { sh: jq -r .aws_secret_key < config/ansible.secret.json }
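
The enter task above shells into the instance over SSM. Expanded by hand, it amounts to the following (a sketch; the jq paths mirror the env block of the Taskfile):

  # Resolve credentials and the target instance the same way `task enter` does.
  export AWS_REGION="$(jq -r .aws_region < config/ansible.secret.json)"
  export AWS_ACCESS_KEY_ID="$(jq -r .aws_access_key < config/ansible.secret.json)"
  export AWS_SECRET_ACCESS_KEY="$(jq -r .aws_secret_key < config/ansible.secret.json)"
  aws ssm start-session --target "$(jq -r .instance_id.value < config/infrastructure.secret.json)"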

compose.dev.yml

@@ -3,8 +3,10 @@ services:
# Gitea itself.
gitea:
container_name: web-git-instance
restart: unless-stopped
depends_on:
- backup
- bucket-script
build:
context: gitea
dockerfile: Dockerfile.dev
@@ -15,6 +17,7 @@ services:
ports:
- 80:80
- 443:443
- 3000:3000
- 2222:2222
labels:
- docker-volume-backup.stop-during-backup=true
@@ -30,7 +33,7 @@ services:
- /var/run/docker.sock:/var/run/docker.sock:ro
environment:
AWS_ENDPOINT: localstack:4566
AWS_S3_BUCKET_NAME: test
AWS_S3_BUCKET_NAME: backup
AWS_ACCESS_KEY_ID: _
AWS_SECRET_ACCESS_KEY: _
BACKUP_CRON_EXPRESSION: "* * * * *"
@@ -58,7 +61,11 @@ services:
AWS_ACCESS_KEY_ID: _
AWS_SECRET_ACCESS_KEY: _
AWS_ENDPOINT_URL: http://localstack:4566
command: '"aws s3api create-bucket --bucket test"'
command: |
"
aws s3api create-bucket --bucket backup
aws s3api create-bucket --bucket storage
"
volumes:
data:
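
The bucket-script service now pre-creates two buckets, backup and storage, instead of the single test bucket. To confirm them from the host (a sketch, assuming LocalStack's port 4566 is published to the host):

  # LocalStack accepts any credentials; the region is an arbitrary placeholder.
  AWS_ACCESS_KEY_ID=_ AWS_SECRET_ACCESS_KEY=_ AWS_DEFAULT_REGION=us-east-1 \
    aws --endpoint-url http://localhost:4566 s3 ls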

gitea/.DS_Store vendored (binary file not shown)

gitea/Dockerfile

@@ -2,4 +2,5 @@ FROM gitea/gitea:latest-rootless
ADD --chown=git:git config /etc/gitea
ADD --chown=git:git custom /etc/gitea-custom
ENV GITEA_CUSTOM /etc/gitea-custom
ENV GITEA_CUSTOM=/etc/gitea-custom

gitea/Dockerfile.dev

@@ -2,7 +2,12 @@ FROM gitea/gitea:latest-rootless
ADD --chown=git:git config /etc/gitea
ADD --chown=git:git custom /etc/gitea-custom
ENV GITEA_CUSTOM /etc/gitea-custom
ENV GITEA_CUSTOM=/etc/gitea-custom
RUN rm /etc/gitea/app.ini
RUN mv /etc/gitea/dev.app.ini /etc/gitea/app.ini
WORKDIR /etc/gitea-custom
RUN gitea cert --host localhost --ca
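
gitea cert --host localhost --ca writes a self-signed cert.pem and key.pem into the working directory, /etc/gitea-custom here, which the dev config below picks up via CERT_FILE and KEY_FILE. To inspect the result (a sketch, assuming openssl is available in the image; the container name comes from the compose file):

  docker exec web-git-instance openssl x509 -in /etc/gitea-custom/cert.pem -noout -subject -dates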

gitea/config/app.ini

@@ -23,13 +23,18 @@ OFFLINE_MODE = true
DISABLE_SSH = false
START_SSH_SERVER = true
SSH_PORT = 22
SSH_LISTEN_PORT = 2222
SSH_DOMAIN = maximhutz.com
SSH_LISTEN_PORT = 22
# SSH_DOMAIN = %(FULL_DOMAIN)s
BUILTIN_SSH_SERVER_USER = git
DOMAIN = code.maximhutz.com
ROOT_URL = https://code.maximhutz.com/
HTTP_PORT = 80
PROTOCOL=https
ENABLE_ACME=true
ACME_ACCEPTTOS=true
ACME_DIRECTORY=https
# ACME_EMAIL=%(EMAIL)s
# DOMAIN = %(FULL_DOMAIN)s
# ROOT_URL = %(ROOT_URL)s
HTTP_PORT = 443
[database]
DB_TYPE = sqlite3
@@ -89,3 +94,10 @@ DEFAULT_MERGE_STYLE = merge
[repository.signing]
DEFAULT_TRUST_MODEL = committer
[storage]
STORAGE_TYPE = minio
MINIO_ENDPOINT = s3.us-east-1.amazonaws.com
MINIO_BUCKET = myrica-faya
MINIO_USE_SSL = true
MINIO_INSECURE_SKIP_VERIFY = false
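
The commented-out %(...)s placeholders are supplied at deploy time through Gitea's environment-to-ini mechanism: a variable named GITEA__section__KEY overrides KEY in [section], which is exactly what the playbooks below pass. An illustrative sketch (values are placeholders, not the real secrets):

  docker run -e GITEA__server__DOMAIN=code.example.com \
             -e GITEA__storage__MINIO_ACCESS_KEY_ID=AKIAEXAMPLE \
             gitea/gitea:latest-rootless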

gitea/config/dev.app.ini

@@ -17,18 +17,23 @@ TEMP_PATH = /tmp/gitea/uploads
[server]
APP_DATA_PATH = /var/lib/gitea
LFS_START_SERVER = true
OFFLINE_MODE = true
LFS_JWT_SECRET = x-----------------------------------------x
DISABLE_SSH = false
START_SSH_SERVER = true
SSH_PORT = 2222
SSH_LISTEN_PORT = 2222
BUILTIN_SSH_SERVER_USER = git
LFS_START_SERVER = true
OFFLINE_MODE = true
SSH_DOMAIN = localhost
BUILTIN_SSH_SERVER_USER = git
PROTOCOL = https
ROOT_URL = https://localhost:443/
DOMAIN = localhost
ROOT_URL = http://localhost:80/
HTTP_PORT = 80
LFS_JWT_SECRET = x-----------------------------------------x
HTTP_PORT = 443
CERT_FILE = /etc/gitea-custom/cert.pem
KEY_FILE = /etc/gitea-custom/key.pem
[database]
DB_TYPE = sqlite3
@@ -90,3 +95,12 @@ DEFAULT_TRUST_MODEL = committer
[oauth2]
JWT_SECRET = x-----------------------------------------x
[storage]
STORAGE_TYPE = minio
MINIO_ENDPOINT = localstack:4566
MINIO_ACCESS_KEY_ID = test
MINIO_SECRET_ACCESS_KEY = test
MINIO_BUCKET = storage
MINIO_USE_SSL = false
MINIO_INSECURE_SKIP_VERIFY = true
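
The dev variant serves HTTPS on 443 with the self-signed certificate baked in above and keeps attachments in the LocalStack storage bucket. A quick smoke test from the host (a sketch, assuming the compose port mappings shown earlier):

  curl -k -I https://localhost/   # -k because the dev certificate is self-signed
  AWS_ACCESS_KEY_ID=test AWS_SECRET_ACCESS_KEY=test AWS_DEFAULT_REGION=us-east-1 \
    aws --endpoint-url http://localhost:4566 s3 ls s3://storage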

gitea/custom/.DS_Store vendored (binary file not shown)

playbooks/build.yml (deleted)

@@ -1,41 +0,0 @@
- name: Make build artifact.
hosts: localhost
vars_files: ../secrets/gitea.json
tasks:
- name: Build image.
community.docker.docker_image_build:
name: "{{ image_name }}"
path: ../image
nocache: true
rebuild: always
pull: true
- name: Make temp file.
ansible.builtin.tempfile:
suffix: .tar
register: tar_file
- name: Push image to archive.
community.docker.docker_image:
name: "{{ image_name }}"
archive_path: "{{ tar_file.path }}"
source: local
- name: Compress archive to artifact.
register: compress_image
community.general.archive:
path: "{{ tar_file.path }}"
dest: "{{ tar_file.path }}.xz"
format: xz
mode: "0644"
- name: Push artifact to S3.
amazon.aws.s3_object:
bucket: "{{ image_bucket }}"
object: "{{ image_key }}"
src: "{{ tar_file.path }}.xz"
mode: put
region: "{{ aws_region }}"
access_key: "{{ aws_access_key }}"
secret_key: "{{ aws_secret_key }}"

playbooks/deploy.yml (deleted)

@@ -1,55 +0,0 @@
- name: Deploy artifact to instance.
hosts: localhost
become: true
vars_files:
- ../secrets/gitea.json
- ../secrets.tf.json
vars:
ansible_connection: aws_ssm
ansible_python_interpreter: /usr/bin/python3
ansible_aws_ssm_plugin: "{{ ssm_plugin }}"
ansible_aws_ssm_bucket_name: "{{ image_bucket }}"
ansible_aws_ssm_instance_id: "{{ instance_id.value }}"
ansible_aws_ssm_region: "{{ aws_region }}"
ansible_aws_ssm_access_key_id: "{{ aws_access_key }}"
ansible_aws_ssm_secret_access_key: "{{ aws_secret_key }}"
tasks:
- name: Fetch image.
amazon.aws.s3_object:
mode: get
bucket: "{{ image_bucket }}"
object: "{{ image_key }}"
dest: /root/image.tar.xz
region: "{{ aws_region }}"
access_key: "{{ aws_access_key }}"
secret_key: "{{ aws_secret_key }}"
- name: Load image.
community.docker.docker_image_load:
path: /root/image.tar.xz
register: image
- name: Run image.
community.docker.docker_container:
name: server
image: "{{ image.image_names[0] }}"
state: started
recreate: true
restart_policy: unless-stopped
memory: 425m
memory_swap: 900m
ports: [80:80, 2222:2222]
env:
GITEA__security__INTERNAL_TOKEN: "{{ internal_secret }}"
GITEA__server__LFS_JWT_SECRET: "{{ lfs_secret }}"
GITEA__oauth2__JWT_SECRET: "{{ jwt_secret }}"
AWS_REGION: "{{ boot_region.value }}"
AWS_ACCESS_KEY_ID: "{{ boot_id.value }}"
AWS_SECRET_ACCESS_KEY: "{{ boot_secret.value }}"
BOOT_URI: "s3://{{ boot_bucket }}/{{ boot_key }}"
volumes:
- /root/boot:/var/lib/gitea
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro

playbooks/fast.yml (new file, 91 lines)

@@ -0,0 +1,91 @@
- name: Make build artifact.
hosts: localhost
vars_files:
- ../config/ansible.secret.json
- ../config/infrastructure.secret.json
gather_facts: false
tasks:
- name: Log into Docker.
community.docker.docker_login:
registry_url: '{{ full_domain.value }}'
username: '{{ username }}'
password: '{{ api_key }}'
reauthorize: true
- name: Build image.
community.docker.docker_image_build:
name: "{{ full_domain.value }}/{{ image_name }}:latest"
path: ../gitea
nocache: true
rebuild: always
pull: true
outputs: [{ type: image, push: true }]
platform:
- linux/amd64
- linux/arm64/v8
- name: Log out of Docker.
community.docker.docker_login:
state: absent
- name: Deploy artifact to instance.
hosts: localhost
become: true
gather_facts: false
vars_files:
- ../config/ansible.secret.json
- ../config/infrastructure.secret.json
vars:
ansible_connection: aws_ssm
ansible_aws_ssm_plugin: "{{ ssm_plugin }}"
ansible_aws_ssm_bucket_name: "{{ image_bucket }}"
ansible_aws_ssm_instance_id: "{{ instance_id.value }}"
ansible_python_interpreter: /usr/bin/python3
ansible_aws_ssm_region: "{{ aws_region }}"
ansible_aws_ssm_access_key_id: "{{ aws_access_key }}"
ansible_aws_ssm_secret_access_key: "{{ aws_secret_key }}"
tasks:
- name: Run image.
community.docker.docker_container:
name: server
image: "{{ full_domain.value }}/{{ image_name }}:latest"
state: started
recreate: true
restart_policy: unless-stopped
memory: 300m
memory_swap: 900m
ports: [80:80, 2222:2222, 443:443, "22:22"]
env:
GITEA__security__INTERNAL_TOKEN: "{{ internal_secret }}"
GITEA__server__LFS_JWT_SECRET: "{{ lfs_secret }}"
GITEA__oauth2__JWT_SECRET: "{{ jwt_secret }}"
GITEA__server__ACME_EMAIL: "{{ email }}"
GITEA__server__SSH_DOMAIN: "{{ full_domain.value }}"
GITEA__server__DOMAIN: "{{ full_domain.value }}"
GITEA__server__ROOT_URL: "https://{{ full_domain.value }}/"
GITEA__storage__MINIO_ACCESS_KEY_ID: "{{ minio_access_key }}"
GITEA__storage__MINIO_SECRET_ACCESS_KEY: "{{ minio_secret_key }}"
labels:
docker-volume-backup.stop-during-backup: "true"
volumes:
- /home/ssm-user/data:/var/lib/gitea
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
- name: Run backup.
community.docker.docker_container:
name: backup
image: offen/docker-volume-backup:v2
state: started
recreate: true
restart_policy: unless-stopped
volumes:
- /home/ssm-user/data:/backup/my-app-backup:ro
- /var/run/docker.sock:/var/run/docker.sock:ro
env:
AWS_S3_BUCKET_NAME: "{{ boot_bucket }}"
AWS_S3_PATH: "{{ boot_key }}"
AWS_REGION: "{{ boot_region.value }}"
AWS_ACCESS_KEY_ID: "{{ boot_id.value }}"
AWS_SECRET_ACCESS_KEY: "{{ boot_secret.value }}"
BACKUP_CRON_EXPRESSION: "0 0 * * *"
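
fast.yml pushes a multi-arch image to the Gitea instance's own container registry and recreates the containers from it; the root Taskfile wires it up as task deploy:fast. The build step is roughly equivalent to the following (domain and image name are illustrative; multi-platform --push needs a docker-container buildx builder):

  docker login code.example.com
  docker buildx build --platform linux/amd64,linux/arm64/v8 \
    -t code.example.com/gitea:latest --push gitea/

Since the registry lives on the very instance being deployed, this path presumably only works once the instance is already up, which is what the slow path below covers.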

playbooks/restore.yml (new file, 66 lines)

@@ -0,0 +1,66 @@
- name: Deploy artifact to instance.
hosts: localhost
become: true
gather_facts: false
vars_files:
- ../config/ansible.secret.json
- ../config/infrastructure.secret.json
vars:
ansible_connection: aws_ssm
ansible_python_interpreter: /usr/bin/python3
ansible_aws_ssm_plugin: "{{ ssm_plugin }}"
ansible_aws_ssm_bucket_name: "{{ image_bucket }}"
ansible_aws_ssm_instance_id: "{{ instance_id.value }}"
ansible_aws_ssm_region: "{{ aws_region }}"
ansible_aws_ssm_access_key_id: "{{ aws_access_key }}"
ansible_aws_ssm_secret_access_key: "{{ aws_secret_key }}"
tasks:
- name: Stop server.
community.docker.docker_container:
name: "{{ item }}"
state: stopped
loop: [server, backup]
- name: Copy backup from S3.
environment:
region: "{{ boot_region.value }}"
access_key: "{{ boot_id.value }}"
secret_key: "{{ boot_secret.value }}"
amazon.aws.s3_object:
bucket: "{{ restore_bucket | mandatory(msg='You must specify the bucket of the data.') }}"
object: "{{ restore_key | mandatory(msg='You must specify the key of the data.') }}"
dest: /home/ssm-user/backup.tar.gz
mode: get
- name: Ensure backup directory exists.
ansible.builtin.file:
path: /home/ssm-user/backup
state: directory
mode: '0777'
- name: Extract backup.
ansible.builtin.unarchive:
src: /home/ssm-user/backup.tar.gz
dest: /home/ssm-user/backup
remote_src: true
- name: Move backup files to data folder.
ansible.builtin.copy:
remote_src: true
src: /home/ssm-user/backup/backup/my-app-backup/
dest: /home/ssm-user/data/
mode: '0777'
- name: Update permissions.
ansible.builtin.file:
path: /home/ssm-user/data
recurse: true
mode: '0777'
owner: 1000
group: 1000
- name: Restart containers.
community.docker.docker_container:
name: "{{ item }}"
state: started
loop: [server, backup]
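
restore.yml refuses to run without the bucket and key of the archive; from the root Taskfile it is invoked as (values illustrative):

  task deploy:restore BUCKET=my-backup-bucket KEY=path/to/backup.tar.gz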

playbooks/slow.yml (new file, 121 lines)

@@ -0,0 +1,121 @@
- name: Make build artifact.
hosts: localhost
vars_files: ../config/ansible.secret.json
gather_facts: false
tasks:
- name: Build image.
community.docker.docker_image_build:
name: "{{ image_name }}"
path: ../gitea
nocache: true
rebuild: always
pull: true
- name: Push image to archive.
community.docker.docker_image:
name: "{{ image_name }}"
archive_path: ../dist/image.tar
source: local
- name: Compress archive to artifact.
register: compress_image
community.general.archive:
path: ../dist/image.tar
dest: ../dist/image.tar.xz
format: xz
mode: "0644"
- name: Push artifact to S3.
amazon.aws.s3_object:
bucket: "{{ image_bucket }}"
object: "{{ image_key }}"
src: ../dist/image.tar.xz
mode: put
region: "{{ aws_region }}"
access_key: "{{ aws_access_key }}"
secret_key: "{{ aws_secret_key }}"
- name: Deploy artifact to instance.
hosts: localhost
become: true
gather_facts: false
vars_files:
- ../config/ansible.secret.json
- ../config/infrastructure.secret.json
vars:
ansible_connection: aws_ssm
ansible_python_interpreter: /usr/bin/python3
ansible_aws_ssm_plugin: "{{ ssm_plugin }}"
ansible_aws_ssm_bucket_name: "{{ image_bucket }}"
ansible_aws_ssm_instance_id: "{{ instance_id.value }}"
ansible_aws_ssm_region: "{{ aws_region }}"
ansible_aws_ssm_access_key_id: "{{ aws_access_key }}"
ansible_aws_ssm_secret_access_key: "{{ aws_secret_key }}"
tasks:
- name: Fetch image.
amazon.aws.s3_object:
mode: get
bucket: "{{ image_bucket }}"
object: "{{ image_key }}"
dest: /root/image.tar.gz
region: "{{ aws_region }}"
access_key: "{{ aws_access_key }}"
secret_key: "{{ aws_secret_key }}"
- name: Create data directory.
ansible.builtin.file:
path: /home/ssm-user/data
state: directory
mode: '0777'
- name: Load image.
community.docker.docker_image_load:
path: /root/image.tar.gz
register: image
- name: Run image.
community.docker.docker_container:
name: server
image: "{{ image.image_names[0] }}"
state: started
recreate: true
restart_policy: unless-stopped
memory: 300m
memory_swap: 900m
ports: [80:80, 2222:2222, 443:443, "22:22"]
env:
GITEA__security__INTERNAL_TOKEN: "{{ internal_secret }}"
GITEA__server__LFS_JWT_SECRET: "{{ lfs_secret }}"
GITEA__oauth2__JWT_SECRET: "{{ jwt_secret }}"
GITEA__server__ACME_EMAIL: "{{ email }}"
GITEA__server__SSH_DOMAIN: "{{ full_domain.value }}"
GITEA__server__DOMAIN: "{{ full_domain.value }}"
GITEA__server__ROOT_URL: "https://{{ full_domain.value }}/"
GITEA__storage__MINIO_ACCESS_KEY_ID: "{{ minio_access_key }}"
GITEA__storage__MINIO_SECRET_ACCESS_KEY: "{{ minio_secret_key }}"
labels:
docker-volume-backup.stop-during-backup: "true"
volumes:
- /home/ssm-user/data:/var/lib/gitea
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
- name: Run backup.
community.docker.docker_container:
name: backup
image: offen/docker-volume-backup:v2
state: started
recreate: true
restart_policy: unless-stopped
volumes:
- /home/ssm-user/data:/backup/my-app-backup:ro
- /var/run/docker.sock:/var/run/docker.sock:ro
env:
AWS_S3_BUCKET_NAME: "{{ boot_bucket }}"
AWS_S3_PATH: "{{ boot_key }}"
AWS_REGION: "{{ boot_region.value }}"
AWS_ACCESS_KEY_ID: "{{ boot_id.value }}"
AWS_SECRET_ACCESS_KEY: "{{ boot_secret.value }}"
BACKUP_CRON_EXPRESSION: "0 0 * * *"
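
slow.yml ships the image as an S3 artifact instead of going through a registry. The archive-and-load pair it automates is essentially (image name illustrative; docker load reads xz-compressed archives directly):

  # Build side: export and compress the image.
  docker save gitea -o dist/image.tar && xz -9 dist/image.tar
  # Instance side, after fetching the artifact from S3:
  docker load -i image.tar.xz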

requirements.txt

@@ -3,6 +3,7 @@ ansible-compat==24.10.0
ansible-core==2.18.1
ansible-lint==24.12.2
attrs==24.3.0
awscli-local==0.22.0
black==24.10.0
boto3==1.35.95
botocore==1.35.95
@@ -19,6 +20,7 @@ Jinja2==3.1.5
jmespath==1.0.1
jsonschema==4.23.0
jsonschema-specifications==2024.10.1
localstack-client==2.7
MarkupSafe==3.0.2
mypy-extensions==1.0.0
packaging==24.2
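
awscli-local supplies the awslocal wrapper used against LocalStack, which saves passing the endpoint flag by hand:

  awslocal s3 ls   # equivalent to: aws --endpoint-url http://localhost:4566 s3 ls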

terraform/Taskfile.yml (new file, 18 lines)

@@ -0,0 +1,18 @@
version: 3
silent: true
vars:
BACKEND: ../config/backend.secret.json
VARIABLES: ../config/variables.secret.json
OUTPUT: ../config/infrastructure.secret.json
tasks:
init: terraform init -backend-config={{.BACKEND}}
plan: terraform plan -var-file={{.VARIABLES}}
destroy: terraform destroy
format: terraform fmt -recursive
out: terraform output -json > {{.OUTPUT}}
apply:
- terraform apply -var-file={{.VARIABLES}}
- task: out
import: terraform import -var-file={{.VARIABLES}} {{.CLI_ARGS}}
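
Through the tf include in the root Taskfile these run namespaced, with paths resolved relative to the terraform directory:

  task tf:init     # terraform init -backend-config=../config/backend.secret.json
  task tf:apply    # apply, then dump outputs to config/infrastructure.secret.json
  task tf:import -- aws_eip.public eipalloc-0123456789abcdef0   # args after -- become CLI_ARGS (id illustrative)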


@@ -4,9 +4,12 @@ data "aws_s3_bucket" "storage_bucket" {
data "aws_iam_policy_document" "boot" {
statement {
effect = "Allow"
actions = ["s3:*", "s3-object-lambda:*"]
resources = ["${data.aws_s3_bucket.storage_bucket.arn}/${var.boot_key}"]
effect = "Allow"
actions = ["s3:*", "s3-object-lambda:*"]
resources = [
"${data.aws_s3_bucket.storage_bucket.arn}/${var.boot_key}",
"${data.aws_s3_bucket.storage_bucket.arn}/${var.boot_key}/*",
]
}
}
@@ -22,7 +25,7 @@ module "boot_user" {
version = "5.52.2"
create_iam_user_login_profile = false
name = "${var.boot_role}User"
password_reset_required = false
policy_arns = [aws_iam_policy.boot.arn]
name = "${var.boot_role}User"
password_reset_required = false
policy_arns = [aws_iam_policy.boot.arn]
}
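
The second resource entry matters because an ARN ending in the bare key prefix matches only that exact object; the /* variant is what covers keys nested beneath it. With only the first form, something like the second call here would be denied (bucket and key names illustrative):

  aws s3api put-object --bucket my-storage-bucket --key boot --body ./marker
  aws s3api put-object --bucket my-storage-bucket --key boot/backup.tar.gz --body ./backup.tar.gz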

terraform/install.sh

@@ -1,16 +1,17 @@
#!/bin/sh
## Install extras.
rpm --rebuilddb
amazon-linux-extras install docker ansible2 python3.8 -y
# Make Docker work.
systemctl enable docker
systemctl start docker
sudo usermod -aG docker ssm-user
# Set up the correct version of Python (for Ansible).
ln -sf /usr/bin/python3.8 /usr/bin/python3
ln -sf /usr/bin/pip3.8 /usr/bin/pip3
pip3 install botocore boto3 requests
pip3 install botocore boto3 requests packaging --user ssm-user
python3 -m pip install -U pip
# Add some swap space.
@@ -20,9 +21,11 @@ mkswap /swapfile
swapon /swapfile
# Stop SSH (because we have SSM.)
sudo service sshd stop
service sshd stop
# Install Docker Compose.
curl -L "https://github.com/docker/compose/releases/latest/download/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
chmod +x /usr/local/bin/docker-compose
docker-compose version
# ERROR: SSM User not created yet.
sudo usermod -aG docker ssm-user
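
The bootstrap can be spot-checked from inside an SSM session once the instance is up (a minimal sketch):

  docker-compose version   # the Compose binary was installed
  swapon --show            # the swap file is active
  id ssm-user              # should list the docker group once the user exists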


@@ -1,18 +1,31 @@
data "aws_iam_instance_profile" "ssm" {
# An elastic IP, so if the reverse proxy is modified, the route tables won't.
resource "aws_eip" "public" {
instance = aws_instance.this.id
domain = "vpc"
}
# An instance profile for access via AWS SSM.
resource "aws_iam_instance_profile" "ssm" {
name = "SSMInstanceProfile"
role = "AmazonSSMRoleForInstancesQuickSetup"
}
# The Gitea instance.
resource "aws_instance" "gitea" {
resource "aws_instance" "this" {
# ami = data.aws_ami.amazon-linux-2.id
ami = "ami-0adec96dc0cdc7bca"
instance_type = "t4g.nano"
subnet_id = data.aws_subnet.subnet.id
subnet_id = module.vpc.public_subnets[0]
user_data = file("install.sh")
user_data_replace_on_change = true
user_data_replace_on_change = false
iam_instance_profile = data.aws_iam_instance_profile.ssm.name
iam_instance_profile = aws_iam_instance_profile.ssm.name
vpc_security_group_ids = [aws_security_group.public_access.id]
metadata_options {
http_tokens = "required"
}
root_block_device {
volume_type = "gp3"
@@ -22,4 +35,9 @@ resource "aws_instance" "gitea" {
tags = {
Name = "Codebase: Gitea"
}
}
}
resource "aws_ec2_instance_state" "this" {
instance_id = aws_instance.this.id
state = "running"
}
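
The elastic IP keeps the public address stable even if the instance is replaced. After an apply, the association can be confirmed with (instance id read from the Terraform outputs):

  aws ec2 describe-addresses \
    --filters Name=instance-id,Values="$(jq -r .instance_id.value < config/infrastructure.secret.json)"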


@@ -1,6 +1,66 @@
data "aws_subnet" "subnet" {
tags = {
SubnetType = "Private"
SubnetOf = "Main"
}
locals {
# The IP block for the VPC.
vpc_cidr = "10.0.0.0/16"
}
data "aws_availability_zones" "all" {}
# The main VPC.
module "vpc" {
source = "terraform-aws-modules/vpc/aws"
name = "Main"
cidr = local.vpc_cidr
azs = [data.aws_availability_zones.all.names[0]]
private_subnets = [cidrsubnet(local.vpc_cidr, 8, 0)]
public_subnets = [cidrsubnet(local.vpc_cidr, 8, 4)]
private_subnet_tags = { SubnetOf = "Main", SubnetType = "Private" }
public_subnet_tags = { SubnetOf = "Main", SubnetType = "Public" }
map_public_ip_on_launch = true
enable_dns_hostnames = true
enable_dns_support = true
private_route_table_tags = { TableOf = "Main", TableType = "Public" }
}
# Only allow HTTP(s) and SSH traffic. Allow full access to internet.
resource "aws_security_group" "public_access" {
vpc_id = module.vpc.vpc_id
tags = { GroupOf = "Main", GroupType = "Public" }
}
resource "aws_vpc_security_group_ingress_rule" "tcp" {
for_each = toset(["80", "443", "22", "51821"])
security_group_id = aws_security_group.public_access.id
from_port = each.value
to_port = each.value
ip_protocol = "tcp"
cidr_ipv4 = "0.0.0.0/0"
}
resource "aws_vpc_security_group_ingress_rule" "udp" {
for_each = toset(["51820", "53"])
security_group_id = aws_security_group.public_access.id
from_port = each.value
to_port = each.value
ip_protocol = "udp"
cidr_ipv4 = "0.0.0.0/0"
}
resource "aws_vpc_security_group_egress_rule" "egress" {
for_each = toset(["-1"])
security_group_id = aws_security_group.public_access.id
from_port = each.value
to_port = each.value
ip_protocol = "-1"
cidr_ipv4 = "0.0.0.0/0"
}
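
Each for_each entry becomes its own rule resource, so the result can be checked rule by rule (group id illustrative):

  aws ec2 describe-security-group-rules \
    --filters Name=group-id,Values=sg-0123456789abcdef0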


@@ -1,27 +1,33 @@
output "instance_id" {
value = aws_instance.gitea.id
value = aws_instance.this.id
description = "The instance ID of the Gitea instance."
}
output "ip_address" {
value = aws_instance.gitea.private_ip
value = aws_instance.this.private_ip
description = "The Gitea IP address."
}
output "boot_region" {
value = var.aws_region
value = var.aws_region
description = "The region to manipulate the codebase repository boot."
sensitive = true
}
output "boot_id" {
value = module.boot_user.iam_access_key_id
value = module.boot_user.iam_access_key_id
description = "The access id to manipulate the codebase repository boot."
sensitive = true
}
output "boot_secret" {
value = module.boot_user.iam_access_key_secret
value = module.boot_user.iam_access_key_secret
description = "The access secret to manipulate the codebase repository boot."
sensitive = true
}
output "full_domain" {
value = "${var.subdomain}.${var.domain}"
description = "The domain of the Gitea instance."
sensitive = true
}
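
task tf:out dumps these as JSON; note that terraform output -json prints sensitive values in the clear, which is why the file lands in config/*.secret.json and is caught by the *secret* pattern in .gitignore:

  task tf:out
  jq -r .full_domain.value < config/infrastructure.secret.json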

terraform/routing.tf (new file, 13 lines)

@@ -0,0 +1,13 @@
# The Route53 DNS zone.
data "aws_route53_zone" "main" {
name = var.domain
}
# Push all domain traffic through the reverse proxy.
resource "aws_route53_record" "domain" {
zone_id = data.aws_route53_zone.main.zone_id
name = "${var.subdomain}.${data.aws_route53_zone.main.name}"
type = "A"
ttl = "60"
records = [aws_eip.public.public_ip]
}
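
Once applied, the record can be verified against the elastic IP (domain illustrative):

  dig +short code.example.com   # should print the EIP's public address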


@@ -14,16 +14,26 @@ variable "aws_secret" {
}
variable "boot_bucket" {
type = string
type = string
description = "The name of the bucket to store the boot in."
}
variable "boot_key" {
type = string
type = string
description = "The path that will hold the boot data."
}
variable "boot_role" {
type = string
type = string
description = "The name of the role for boot access."
}
variable "domain" {
type = string
description = "The name of the domain."
}
variable "subdomain" {
type = string
description = "The name of the subdomain."
}
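
Taken together with the earlier part of the file (not shown in this hunk), these variables imply a config/variables.secret.json of roughly this shape (a sketch covering only the fields visible here, with illustrative values):

  {
    "boot_bucket": "my-storage-bucket",
    "boot_key": "boot",
    "boot_role": "Boot",
    "domain": "example.com",
    "subdomain": "code"
  }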