17 Commits

23 changed files with 479 additions and 152 deletions

.DS_Store (vendored binary file; not shown)

.gitignore (vendored)

@@ -37,7 +37,179 @@ override.tf.json
.terraformrc
terraform.rc
# ---> Ansible
*.retry
# ---> Python
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# UV
# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
#uv.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/latest/usage/project/#working-with-version-control
.pdm.toml
.pdm-python
.pdm-build/
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
*secret*
.vscode
.env
.DS_Store

View File

@@ -1,38 +1,21 @@
version: 3
- env: { TF: terraform -chdir=terraform }
- silent: true
+ includes:
+ tf: { taskfile: terraform, dir: terraform }
tasks:
- dev: docker compose -f compose.dev.yml up --build --force-recreate --no-deps
+ dev:
+ - docker compose -f compose.dev.yml rm -fsv
+ - docker compose -f compose.dev.yml up --build --force-recreate --no-deps
- tf/init: $TF init -backend-config=backend.tfvars
+ deploy:fast: ansible-playbook playbooks/fast.yml
- tf/plan: $TF plan -var-file=secret.tfvars
+ deploy:slow: ansible-playbook playbooks/slow.yml
- tf/destroy: $TF destroy
+ deploy:restore: ansible-playbook playbooks/restore.yml -e "restore_bucket={{.BUCKET}} restore_key={{.KEY}}"
- tf/format: $TF fmt -recursive
- tf/apply:
- - $TF apply -var-file=secret.tfvars
- - $TF output -json > secrets.tf.json
- tf/import:
- - $TF import -var-file=secret.tfvars {{.CLI_ARGS}}
- build: ansible-playbook playbooks/build.yml
- deploy: ansible-playbook playbooks/deploy.yml
- restore: ansible-playbook playbooks/restore.yml
- run:
- - task: build
- - task: deploy
enter:
cmd: aws ssm start-session --target $INSTANCE_ID
env:
- INSTANCE_ID: { sh: jq -r .instance_id.value < secrets.tf.json }
+ INSTANCE_ID: { sh: jq -r .instance_id.value < config/infrastructure.secret.json }
- AWS_REGION: { sh: jq -r .aws_region < secrets/gitea.json }
+ AWS_REGION: { sh: jq -r .aws_region < config/ansible.secret.json }
- AWS_ACCESS_KEY_ID: { sh: jq -r .aws_access_key < secrets/gitea.json }
+ AWS_ACCESS_KEY_ID: { sh: jq -r .aws_access_key < config/ansible.secret.json }
- AWS_SECRET_ACCESS_KEY: { sh: jq -r .aws_secret_key < secrets/gitea.json }
+ AWS_SECRET_ACCESS_KEY: { sh: jq -r .aws_secret_key < config/ansible.secret.json }
- prune:
- - docker system prune -af
- - docker image prune -af
- - docker system prune -af --volumes
- - docker volume prune -af
- - docker system df
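
With the include in place, the Terraform commands live in terraform/Taskfile.yml and go-task exposes them under the tf: namespace, while deployment stays in the root file. A quick sketch of the resulting invocations:

    task tf:init      # terraform init, run inside terraform/
    task tf:apply     # apply, then write the outputs to config/infrastructure.secret.json
    task deploy:fast  # build, push, and restart the container over SSM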

View File

@@ -3,8 +3,10 @@ services:
# Gitea itself.
gitea:
container_name: web-git-instance
+ restart: unless-stopped
depends_on:
- backup
+ - bucket-script
build:
context: gitea
dockerfile: Dockerfile.dev
@@ -31,7 +33,7 @@ services:
- /var/run/docker.sock:/var/run/docker.sock:ro
environment:
AWS_ENDPOINT: localstack:4566
- AWS_S3_BUCKET_NAME: test
+ AWS_S3_BUCKET_NAME: backup
AWS_ACCESS_KEY_ID: _
AWS_SECRET_ACCESS_KEY: _
BACKUP_CRON_EXPRESSION: "* * * * *"
@@ -59,7 +61,11 @@ services:
AWS_ACCESS_KEY_ID: _
AWS_SECRET_ACCESS_KEY: _
AWS_ENDPOINT_URL: http://localstack:4566
- command: '"aws s3api create-bucket --bucket test"'
+ command: |
+ "
+ aws s3api create-bucket --bucket backup
+ aws s3api create-bucket --bucket storage
+ "
volumes:
data:
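
The bucket-script container now creates both buckets in LocalStack, and awscli-local is added to requirements.txt, so the dev stack can be sanity-checked from the host. A sketch, assuming LocalStack's port 4566 is published on localhost (the port mapping is not visible in this hunk):

    aws --endpoint-url=http://localhost:4566 s3 ls
    # or, via the awscli-local wrapper:
    awslocal s3 ls s3://backup
    awslocal s3 ls s3://storage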

gitea/.DS_Store (vendored binary file; not shown)

View File

@@ -2,8 +2,5 @@ FROM gitea/gitea:latest-rootless
ADD --chown=git:git config /etc/gitea
ADD --chown=git:git custom /etc/gitea-custom
- ENV GITEA_CUSTOM /etc/gitea-custom
- WORKDIR /etc/gitea-custom
+ ENV GITEA_CUSTOM=/etc/gitea-custom
- RUN gitea cert --host localhost --ca

View File

@@ -2,11 +2,12 @@ FROM gitea/gitea:latest-rootless
ADD --chown=git:git config /etc/gitea
ADD --chown=git:git custom /etc/gitea-custom
- ENV GITEA_CUSTOM /etc/gitea-custom
+ ENV GITEA_CUSTOM=/etc/gitea-custom
RUN rm /etc/gitea/app.ini
RUN mv /etc/gitea/dev.app.ini /etc/gitea/app.ini
WORKDIR /etc/gitea-custom
- RUN gitea cert --host code.maximhutz.com --ca
+ RUN gitea cert --host localhost --ca

View File

@@ -24,31 +24,17 @@ DISABLE_SSH = false
START_SSH_SERVER = true
SSH_PORT = 22
SSH_LISTEN_PORT = 22
- SSH_DOMAIN = code.maximhutz.com
+ # SSH_DOMAIN = %(FULL_DOMAIN)s
BUILTIN_SSH_SERVER_USER = git
- ; --- Signed SSL ---
- ; PROTOCOL=https
- ; ENABLE_ACME=true
- ; ACME_ACCEPTTOS=true
- ; ACME_DIRECTORY=https
- ; ACME_EMAIL=proxy@maximhutz.com
- ; DOMAIN = code.maximhutz.com
- ; ROOT_URL = https://code.maximhutz.com/
- ; HTTP_PORT = 443
- ; --- No SSL ---
- ; DOMAIN = code.maximhutz.com
- ; ROOT_URL = http://code.maximhutz.com/
- ; HTTP_PORT = 80
- ; --- Self-Signed SSL ---
PROTOCOL=https
- ROOT_URL = https://code.maximhutz.com/
+ ENABLE_ACME=true
- DOMAIN = code.maximhutz.com
+ ACME_ACCEPTTOS=true
+ ACME_DIRECTORY=https
+ # ACME_EMAIL=%(EMAIL)s
+ # DOMAIN = %(FULL_DOMAIN)s
+ # ROOT_URL = %(ROOT_URL)s
HTTP_PORT = 443
- CERT_FILE = cert.pem
- KEY_FILE = key.pem
[database]
DB_TYPE = sqlite3
@@ -108,3 +94,10 @@ DEFAULT_MERGE_STYLE = merge
[repository.signing]
DEFAULT_TRUST_MODEL = committer
[storage]
STORAGE_TYPE = minio
MINIO_ENDPOINT = s3.us-east-1.amazonaws.com
MINIO_BUCKET = myrica-faya
MINIO_USE_SSL = true
MINIO_INSECURE_SKIP_VERIFY = false
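
The commented-out keys above are filled in at deploy time through Gitea's environment-to-ini convention, where GITEA__SECTION__KEY overrides KEY in [section]; this is what the env: blocks in the playbooks rely on. A minimal sketch with hypothetical image name and values:

    docker run -d \
      -e GITEA__server__DOMAIN=code.example.com \
      -e GITEA__server__ROOT_URL=https://code.example.com/ \
      -e GITEA__storage__MINIO_ACCESS_KEY_ID=example-key \
      -e GITEA__storage__MINIO_SECRET_ACCESS_KEY=example-secret \
      example/gitea-image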

View File

@@ -28,18 +28,12 @@ SSH_LISTEN_PORT = 2222
SSH_DOMAIN = localhost
BUILTIN_SSH_SERVER_USER = git
- ; --- No SSL ---
- ; DOMAIN = localhost
- ; ROOT_URL = http://localhost:80/
- ; HTTP_PORT = 80
- ; --- Self-Signed Certificate ---
PROTOCOL = https
ROOT_URL = https://localhost:443/
DOMAIN = localhost
HTTP_PORT = 443
- CERT_FILE = cert.pem
+ CERT_FILE = /etc/gitea-custom/cert.pem
- KEY_FILE = key.pem
+ KEY_FILE = /etc/gitea-custom/key.pem
[database]
DB_TYPE = sqlite3
@@ -101,3 +95,12 @@ DEFAULT_TRUST_MODEL = committer
[oauth2]
JWT_SECRET = x-----------------------------------------x
[storage]
STORAGE_TYPE = minio
MINIO_ENDPOINT = localstack:4566
MINIO_ACCESS_KEY_ID = test
MINIO_SECRET_ACCESS_KEY = test
MINIO_BUCKET = storage
MINIO_USE_SSL = false
MINIO_INSECURE_SKIP_VERIFY = true

gitea/custom/.DS_Store (vendored binary file; not shown)

View File

@@ -1,41 +0,0 @@
- name: Make build artifact.
hosts: localhost
vars_files: ../secrets/gitea.json
tasks:
- name: Build image.
community.docker.docker_image_build:
name: "{{ image_name }}"
path: ../gitea
nocache: true
rebuild: always
pull: true
- name: Make temp file.
ansible.builtin.tempfile:
suffix: .tar
register: tar_file
- name: Push image to archive.
community.docker.docker_image:
name: "{{ image_name }}"
archive_path: "{{ tar_file.path }}"
source: local
- name: Compress archive to artifact.
register: compress_image
community.general.archive:
path: "{{ tar_file.path }}"
dest: "{{ tar_file.path }}.xz"
format: xz
mode: "0644"
- name: Push artifact to S3.
amazon.aws.s3_object:
bucket: "{{ image_bucket }}"
object: "{{ image_key }}"
src: "{{ tar_file.path }}.xz"
mode: put
region: "{{ aws_region }}"
access_key: "{{ aws_access_key }}"
secret_key: "{{ aws_secret_key }}"

playbooks/fast.yml (new file)

@@ -0,0 +1,91 @@
- name: Make build artifact.
hosts: localhost
vars_files:
- ../config/ansible.secret.json
- ../config/infrastructure.secret.json
gather_facts: false
tasks:
- name: Log into Docker.
community.docker.docker_login:
registry_url: '{{ full_domain.value }}'
username: '{{ username }}'
password: '{{ api_key }}'
reauthorize: true
- name: Build image.
community.docker.docker_image_build:
name: "{{ full_domain.value }}/{{ image_name }}:latest"
path: ../gitea
nocache: true
rebuild: always
pull: true
outputs: [{ type: image, push: true }]
platform:
- linux/amd64
- linux/arm64/v8
- name: Log out of Docker.
community.docker.docker_login:
state: absent
- name: Deploy artifact to instance.
hosts: localhost
become: true
gather_facts: false
vars_files:
- ../config/ansible.secret.json
- ../config/infrastructure.secret.json
vars:
ansible_connection: aws_ssm
ansible_aws_ssm_plugin: "{{ ssm_plugin }}"
ansible_aws_ssm_bucket_name: "{{ image_bucket }}"
ansible_aws_ssm_instance_id: "{{ instance_id.value }}"
ansible_python_interpreter: /usr/bin/python3
ansible_aws_ssm_region: "{{ aws_region }}"
ansible_aws_ssm_access_key_id: "{{ aws_access_key }}"
ansible_aws_ssm_secret_access_key: "{{ aws_secret_key }}"
tasks:
- name: Run image.
community.docker.docker_container:
name: server
image: "{{ full_domain.value }}/{{ image_name }}:latest"
state: started
recreate: true
restart_policy: unless-stopped
memory: 300m
memory_swap: 900m
ports: [80:80, 2222:2222, 443:443, "22:22"]
env:
GITEA__security__INTERNAL_TOKEN: "{{ internal_secret }}"
GITEA__server__LFS_JWT_SECRET: "{{ lfs_secret }}"
GITEA__oauth2__JWT_SECRET: "{{ jwt_secret }}"
GITEA__server__ACME_EMAIL: "{{ email }}"
GITEA__server__SSH_DOMAIN: "{{ full_domain.value }}"
GITEA__server__DOMAIN: "{{ full_domain.value }}"
GITEA__server__ROOT_URL: "https://{{ full_domain.value }}/"
GITEA__storage__MINIO_ACCESS_KEY_ID: "{{ minio_access_key }}"
GITEA__storage__MINIO_SECRET_ACCESS_KEY: "{{ minio_secret_key }}"
labels:
docker-volume-backup.stop-during-backup: "true"
volumes:
- /home/ssm-user/data:/var/lib/gitea
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
- name: Run backup.
community.docker.docker_container:
name: backup
image: offen/docker-volume-backup:v2
state: started
recreate: true
restart_policy: unless-stopped
volumes:
- /home/ssm-user/data:/backup/my-app-backup:ro
- /var/run/docker.sock:/var/run/docker.sock:ro
env:
AWS_S3_BUCKET_NAME: "{{ boot_bucket }}"
AWS_S3_PATH: "{{ boot_key }}"
AWS_REGION: "{{ boot_region.value }}"
AWS_ACCESS_KEY_ID: "{{ boot_id.value }}"
AWS_SECRET_ACCESS_KEY: "{{ boot_secret.value }}"
BACKUP_CRON_EXPRESSION: "0 0 * * *"
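
The fast path skips the S3 artifact entirely: it builds a multi-arch image, pushes it to the container registry behind the Gitea domain, and recreates the container on the instance over SSM. The build step is roughly this buildx invocation (domain and image names are placeholders):

    docker login code.example.com
    docker buildx build \
      --platform linux/amd64,linux/arm64/v8 \
      --tag code.example.com/example-owner/gitea:latest \
      --push ./gitea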

View File

@@ -1,16 +1,16 @@
- name: Deploy artifact to instance.
hosts: localhost
become: true
+ gather_facts: false
vars_files:
- - ../secrets/gitea.json
+ - ../config/ansible.secret.json
- - ../secrets.tf.json
+ - ../config/infrastructure.secret.json
vars:
ansible_connection: aws_ssm
ansible_python_interpreter: /usr/bin/python3
ansible_aws_ssm_plugin: "{{ ssm_plugin }}"
ansible_aws_ssm_bucket_name: "{{ image_bucket }}"
ansible_aws_ssm_instance_id: "{{ instance_id.value }}"
ansible_aws_ssm_region: "{{ aws_region }}"
ansible_aws_ssm_access_key_id: "{{ aws_access_key }}"
ansible_aws_ssm_secret_access_key: "{{ aws_secret_key }}"
@@ -27,23 +27,38 @@
access_key: "{{ boot_id.value }}" access_key: "{{ boot_id.value }}"
secret_key: "{{ boot_secret.value }}" secret_key: "{{ boot_secret.value }}"
amazon.aws.s3_object: amazon.aws.s3_object:
bucket: "{{ boot_bucket }}" bucket: "{{ restore_bucket | mandatory(msg='You must specify the bucket of the data.') }}"
object: "{{ boot_key }}" object: "{{ restore_key | mandatory(msg='You must specify the key of the data.') }}"
dest: /home/ssm-user/backup.tar.xz dest: /home/ssm-user/backup.tar.gz
mode: get mode: get
- name: Ensure backup directory exists. - name: Ensure backup directory exists.
ansible.builtin.file: ansible.builtin.file:
path: /home/ssm-user/data path: /home/ssm-user/backup
state: directory state: directory
mode: '0777' mode: '0777'
- name: Extract backup. - name: Extract backup.
ansible.builtin.unarchive: ansible.builtin.unarchive:
src: /home/ssm-user/backup.tar.xz src: /home/ssm-user/backup.tar.gz
dest: /home/ssm-user/data dest: /home/ssm-user/backup
remote_src: true remote_src: true
- name: Move backup files to data folder.
ansible.builtin.copy:
remote_src: true
src: /home/ssm-user/backup/backup/my-app-backup/
dest: /home/ssm-user/data/
mode: '0777'
- name: Update permissions.
ansible.builtin.file:
path: /home/ssm-user/data
recurse: true
mode: '0777'
owner: 1000
group: 1000
- name: Restart containers. - name: Restart containers.
community.docker.docker_container: community.docker.docker_container:
name: "{{ item }}" name: "{{ item }}"

View File

@@ -1,16 +1,54 @@
- name: Make build artifact.
hosts: localhost
vars_files: ../config/ansible.secret.json
gather_facts: false
tasks:
- name: Build image.
community.docker.docker_image_build:
name: "{{ image_name }}"
path: ../gitea
nocache: true
rebuild: always
pull: true
- name: Push image to archive.
community.docker.docker_image:
name: "{{ image_name }}"
archive_path: ../dist/image.tar
source: local
- name: Compress archive to artifact.
register: compress_image
community.general.archive:
path: ../dist/image.tar
dest: ../dist/image.tar.xz
format: xz
mode: "0644"
- name: Push artifact to S3.
amazon.aws.s3_object:
bucket: "{{ image_bucket }}"
object: "{{ image_key }}"
src: ../dist/image.tar.xz
mode: put
region: "{{ aws_region }}"
access_key: "{{ aws_access_key }}"
secret_key: "{{ aws_secret_key }}"
- name: Deploy artifact to instance.
hosts: localhost
become: true
+ gather_facts: false
vars_files:
- - ../secrets/gitea.json
+ - ../config/ansible.secret.json
- - ../secrets.tf.json
+ - ../config/infrastructure.secret.json
vars:
ansible_connection: aws_ssm
ansible_python_interpreter: /usr/bin/python3
ansible_aws_ssm_plugin: "{{ ssm_plugin }}"
ansible_aws_ssm_bucket_name: "{{ image_bucket }}"
ansible_aws_ssm_instance_id: "{{ instance_id.value }}"
ansible_aws_ssm_region: "{{ aws_region }}"
ansible_aws_ssm_access_key_id: "{{ aws_access_key }}"
ansible_aws_ssm_secret_access_key: "{{ aws_secret_key }}"
@@ -26,15 +64,17 @@
access_key: "{{ aws_access_key }}" access_key: "{{ aws_access_key }}"
secret_key: "{{ aws_secret_key }}" secret_key: "{{ aws_secret_key }}"
- name: Create data directory.
ansible.builtin.file:
path: /home/ssm-user/data
state: directory
mode: '0777'
- name: Load image. - name: Load image.
community.docker.docker_image_load: community.docker.docker_image_load:
path: /root/image.tar.gz path: /root/image.tar.gz
register: image register: image
- name: Create a volume.
community.docker.docker_volume:
name: data
- name: Run image. - name: Run image.
community.docker.docker_container: community.docker.docker_container:
name: server name: server
@@ -42,13 +82,19 @@
state: started
recreate: true
restart_policy: unless-stopped
- memory: 425m
+ memory: 300m
memory_swap: 900m
ports: [80:80, 2222:2222, 443:443, "22:22"]
env:
GITEA__security__INTERNAL_TOKEN: "{{ internal_secret }}"
GITEA__server__LFS_JWT_SECRET: "{{ lfs_secret }}"
GITEA__oauth2__JWT_SECRET: "{{ jwt_secret }}"
+ GITEA__server__ACME_EMAIL: "{{ email }}"
+ GITEA__server__SSH_DOMAIN: "{{ full_domain.value }}"
+ GITEA__server__DOMAIN: "{{ full_domain.value }}"
+ GITEA__server__ROOT_URL: "https://{{ full_domain.value }}/"
+ GITEA__storage__MINIO_ACCESS_KEY_ID: "{{ minio_access_key }}"
+ GITEA__storage__MINIO_SECRET_ACCESS_KEY: "{{ minio_secret_key }}"
labels:
docker-volume-backup.stop-during-backup: "true"
volumes:
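
The slow path remains the save-and-ship route: build locally, archive the image, push the archive to S3, then load and run it on the instance. Roughly the equivalent shell steps, with placeholder bucket, key, and image names:

    docker build -t example-gitea ./gitea
    mkdir -p dist
    docker save example-gitea | xz > dist/image.tar.xz
    aws s3 cp dist/image.tar.xz s3://example-image-bucket/example-image-key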

View File

@@ -3,6 +3,7 @@ ansible-compat==24.10.0
ansible-core==2.18.1
ansible-lint==24.12.2
attrs==24.3.0
+ awscli-local==0.22.0
black==24.10.0
boto3==1.35.95
botocore==1.35.95
@@ -19,6 +20,7 @@ Jinja2==3.1.5
jmespath==1.0.1
jsonschema==4.23.0
jsonschema-specifications==2024.10.1
+ localstack-client==2.7
MarkupSafe==3.0.2
mypy-extensions==1.0.0
packaging==24.2

terraform/Taskfile.yml (new file)

@@ -0,0 +1,18 @@
version: 3
silent: true
vars:
BACKEND: ../config/backend.secret.json
VARIABLES: ../config/variables.secret.json
OUTPUT: ../config/infrastructure.secret.json
tasks:
init: terraform init -backend-config={{.BACKEND}}
plan: terraform plan -var-file={{.VARIABLES}}
destroy: terraform destroy
format: terraform fmt -recursive
out: terraform output -json > {{.OUTPUT}}
apply:
- terraform apply -var-file={{.VARIABLES}}
- task: out
import: terraform import -var-file={{.VARIABLES}} {{.CLI_ARGS}}
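
go-task passes everything after -- through {{.CLI_ARGS}}, so an import supplies the resource address and ID there (both are placeholders below):

    task tf:import -- aws_instance.this i-0123456789abcdef0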

View File

@@ -1,17 +1,17 @@
#!/bin/sh
+ ## Install extras.
rpm --rebuilddb
amazon-linux-extras install docker ansible2 python3.8 -y
# Make Docker work.
systemctl enable docker
systemctl start docker
- sudo usermod -aG docker ssm-user
# Set up the correct version of Python (for Ansible).
ln -sf /usr/bin/python3.8 /usr/bin/python3
ln -sf /usr/bin/pip3.8 /usr/bin/pip3
- pip3 install botocore boto3 requests packaging
+ pip3 install botocore boto3 requests packaging --user ssm-user
python3 -m pip install -U pip
# Add some swap space.
@@ -26,3 +26,6 @@ service sshd stop
# Install Docker Compose.
curl -L "https://github.com/docker/compose/releases/latest/download/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
chmod +x /usr/local/bin/docker-compose
+ # ERROR: SSM User not created yet.
+ sudo usermod -aG docker ssm-user

View File

@@ -1,26 +1,32 @@
# An elastic IP, so if the reverse proxy is modified, the route tables won't.
resource "aws_eip" "public" {
- instance = aws_instance.gitea.id
+ instance = aws_instance.this.id
domain = "vpc"
}
- data "aws_iam_instance_profile" "ssm" {
+ # An instance profile for access via AWS SSM.
+ resource "aws_iam_instance_profile" "ssm" {
name = "SSMInstanceProfile"
+ role = "AmazonSSMRoleForInstancesQuickSetup"
}
# The Gitea instance.
- resource "aws_instance" "gitea" {
+ resource "aws_instance" "this" {
# ami = data.aws_ami.amazon-linux-2.id
ami = "ami-0adec96dc0cdc7bca"
instance_type = "t4g.nano"
subnet_id = module.vpc.public_subnets[0]
user_data = file("install.sh")
- user_data_replace_on_change = true
+ user_data_replace_on_change = false
- iam_instance_profile = data.aws_iam_instance_profile.ssm.name
+ iam_instance_profile = aws_iam_instance_profile.ssm.name
vpc_security_group_ids = [aws_security_group.public_access.id]
+ metadata_options {
+ http_tokens = "required"
+ }
root_block_device {
volume_type = "gp3"
volume_size = 8
@@ -30,3 +36,8 @@ resource "aws_instance" "gitea" {
Name = "Codebase: Gitea" Name = "Codebase: Gitea"
} }
} }
resource "aws_ec2_instance_state" "this" {
instance_id = aws_instance.this.id
state = "running"
}
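
Setting http_tokens = "required" enforces IMDSv2, so anything on the instance that reads its own metadata must fetch a session token first; a minimal sketch:

    TOKEN=$(curl -s -X PUT http://169.254.169.254/latest/api/token \
      -H "X-aws-ec2-metadata-token-ttl-seconds: 21600")
    curl -s -H "X-aws-ec2-metadata-token: $TOKEN" \
      http://169.254.169.254/latest/meta-data/instance-id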

View File

@@ -1,9 +1,6 @@
locals {
# The IP block for the VPC.
vpc_cidr = "10.0.0.0/16"
- # Here is the domain name changes.
- domain_name = "maximhutz.com"
}
data "aws_availability_zones" "all" {}
@@ -25,15 +22,18 @@ module "vpc" {
map_public_ip_on_launch = true
enable_dns_hostnames = true
enable_dns_support = true
+ private_route_table_tags = { TableOf = "Main", TableType = "Public" }
}
# Only allow HTTP(s) and SSH traffic. Allow full access to internet.
resource "aws_security_group" "public_access" {
vpc_id = module.vpc.vpc_id
+ tags = { GroupOf = "Main", GroupType = "Public" }
}
- resource "aws_vpc_security_group_ingress_rule" "ingress" {
+ resource "aws_vpc_security_group_ingress_rule" "tcp" {
- for_each = toset(["80", "443", "22", "2222", "81", "8080", "4321", "1234"])
+ for_each = toset(["80", "443", "22", "51821"])
security_group_id = aws_security_group.public_access.id
@@ -43,6 +43,17 @@ resource "aws_vpc_security_group_ingress_rule" "ingress" {
cidr_ipv4 = "0.0.0.0/0"
}
resource "aws_vpc_security_group_ingress_rule" "udp" {
for_each = toset(["51820", "53"])
security_group_id = aws_security_group.public_access.id
from_port = each.value
to_port = each.value
ip_protocol = "udp"
cidr_ipv4 = "0.0.0.0/0"
}
resource "aws_vpc_security_group_egress_rule" "egress" { resource "aws_vpc_security_group_egress_rule" "egress" {
for_each = toset(["-1"]) for_each = toset(["-1"])

View File

@@ -1,10 +1,10 @@
output "instance_id" { output "instance_id" {
value = aws_instance.gitea.id value = aws_instance.this.id
description = "The instance ID of the Gitea instance." description = "The instance ID of the Gitea instance."
} }
output "ip_address" { output "ip_address" {
value = aws_instance.gitea.private_ip value = aws_instance.this.private_ip
description = "The Gitea IP address." description = "The Gitea IP address."
} }
@@ -25,3 +25,9 @@ output "boot_secret" {
description = "The access secret to manipulate the codebase repository boot." description = "The access secret to manipulate the codebase repository boot."
sensitive = true sensitive = true
} }
output "full_domain" {
value = "${var.subdomain}.${var.domain}"
description = "The domain of the Gitea instance."
sensitive = true
}
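
Because the output is marked sensitive it is redacted in normal Terraform output, but it still lands in the JSON that task tf:out writes, which is how the playbooks read full_domain.value. For a quick manual check:

    jq -r .full_domain.value < config/infrastructure.secret.json
    # or straight from Terraform:
    terraform -chdir=terraform output -raw full_domain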

View File

@@ -1,12 +1,12 @@
# The Route53 DNS zone.
data "aws_route53_zone" "main" {
- name = local.domain_name
+ name = var.domain
}
# Push all domain traffic through the reverse proxy.
resource "aws_route53_record" "domain" {
zone_id = data.aws_route53_zone.main.zone_id
- name = "code.${data.aws_route53_zone.main.name}"
+ name = "${var.subdomain}.${data.aws_route53_zone.main.name}"
type = "A"
ttl = "60"
records = [aws_eip.public.public_ip]

View File

@@ -27,3 +27,13 @@ variable "boot_role" {
type = string
description = "The name of the role for boot access."
}
variable "domain" {
type = string
description = "The name of the domain."
}
variable "subdomain" {
type = string
description = "The name of the subdomain."
}
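
These variables presumably come in through the var-file that terraform/Taskfile.yml points at (config/variables.secret.json). A hypothetical sketch of that file's shape, covering only the variables visible in this diff:

    cat > config/variables.secret.json <<'EOF'
    {
      "domain": "example.com",
      "subdomain": "code",
      "boot_role": "example-boot-role"
    }
    EOF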