feat: gitea now standalone
@@ -12,13 +12,17 @@ tasks:
   tf/apply:
     - $TF apply -var-file=secret.tfvars
     - $TF output -json > secrets.tf.json
+  tf/import:
+    - $TF import -var-file=secret.tfvars {{.CLI_ARGS}}
+
   build: ansible-playbook playbooks/build.yml
   deploy: ansible-playbook playbooks/deploy.yml
+  restore: ansible-playbook playbooks/restore.yml
   run:
     - task: build
     - task: deploy


   enter:
     cmd: aws ssm start-session --target $INSTANCE_ID
     env:
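A note on how the new tasks are meant to be driven (a minimal sketch; the resource address and instance ID below are placeholders, not values from this commit):

    # Import an existing resource into the Terraform state via the new task;
    # everything after "--" is passed through as {{.CLI_ARGS}}.
    task tf/import -- aws_instance.gitea i-0123456789abcdef0   # hypothetical ID

    # Rebuild the data volume from the latest S3 backup.
    task restore
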
@@ -27,8 +27,18 @@ SSH_LISTEN_PORT = 2222
 SSH_DOMAIN = maximhutz.com
 BUILTIN_SSH_SERVER_USER = git

+# PROTOCOL=https
+# ENABLE_ACME=true
+# ACME_ACCEPTTOS=true
+# ACME_DIRECTORY=https
+# ACME_EMAIL=proxy@maximhutz.com
+
+# DOMAIN = code.maximhutz.com
+# ROOT_URL = https://code.maximhutz.com/
+# HTTP_PORT = 443
+
 DOMAIN = code.maximhutz.com
-ROOT_URL = https://code.maximhutz.com/
+ROOT_URL = http://code.maximhutz.com/
 HTTP_PORT = 80

 [database]
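The deploy playbook overrides some of these settings through the container environment; Gitea maps GITEA__section__KEY variables onto the INI keys above. A minimal sketch of the same override, assuming these keys live in the [server] section (illustrative values, hypothetical image tag — the commit builds its own image):

    docker run -d --name server \
      -e GITEA__server__DOMAIN=code.maximhutz.com \
      -e GITEA__server__ROOT_URL=http://code.maximhutz.com/ \
      -e GITEA__server__HTTP_PORT=80 \
      gitea/gitea:latest
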
@@ -5,7 +5,7 @@
   - name: Build image.
     community.docker.docker_image_build:
       name: "{{ image_name }}"
-      path: ../image
+      path: ../gitea
       nocache: true
       rebuild: always
       pull: true
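For reference, the docker_image_build task above roughly corresponds to this CLI call (a sketch; IMAGE_NAME stands in for the playbook's image_name variable):

    # A fresh, no-cache build of the Dockerfile now located in ../gitea instead of ../image.
    IMAGE_NAME=my-gitea          # placeholder for {{ image_name }}
    docker build --pull --no-cache -t "$IMAGE_NAME" ../gitea
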
@@ -20,7 +20,7 @@
       mode: get
       bucket: "{{ image_bucket }}"
       object: "{{ image_key }}"
-      dest: /root/image.tar.xz
+      dest: /root/image.tar.gz

       region: "{{ aws_region }}"
       access_key: "{{ aws_access_key }}"
@@ -28,9 +28,13 @@

   - name: Load image.
     community.docker.docker_image_load:
-      path: /root/image.tar.xz
+      path: /root/image.tar.gz
     register: image

+  - name: Create a volume.
+    community.docker.docker_volume:
+      name: data
+
   - name: Run image.
     community.docker.docker_container:
       name: server
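The get/load pair above is roughly equivalent to the following shell steps (a sketch; IMAGE_BUCKET and IMAGE_KEY are placeholders for the playbook variables):

    # Fetch the image tarball deploy now expects (note the .tar.gz rename) and load it.
    aws s3 cp "s3://$IMAGE_BUCKET/$IMAGE_KEY" /root/image.tar.gz
    docker load -i /root/image.tar.gz

    # The named volume created for Gitea's data.
    docker volume create data
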
@@ -40,16 +44,32 @@
       restart_policy: unless-stopped
       memory: 425m
       memory_swap: 900m
-      ports: [80:80, 2222:2222]
+      ports: [80:80, 2222:2222, 443:443]
       env:
         GITEA__security__INTERNAL_TOKEN: "{{ internal_secret }}"
         GITEA__server__LFS_JWT_SECRET: "{{ lfs_secret }}"
         GITEA__oauth2__JWT_SECRET: "{{ jwt_secret }}"
+      labels:
+        docker-volume-backup.stop-during-backup: "true"
+      volumes:
+        - /home/ssm-user/data:/var/lib/gitea
+        - /etc/timezone:/etc/timezone:ro
+        - /etc/localtime:/etc/localtime:ro
+
+  - name: Run backup.
+    community.docker.docker_container:
+      name: backup
+      image: offen/docker-volume-backup:v2
+      state: started
+      recreate: true
+      restart_policy: unless-stopped
+      volumes:
+        - /home/ssm-user/data:/backup/my-app-backup:ro
+        - /var/run/docker.sock:/var/run/docker.sock:ro
+      env:
+        AWS_S3_BUCKET_NAME: "{{ boot_bucket }}"
+        AWS_S3_PATH: "{{ boot_key }}"
         AWS_REGION: "{{ boot_region.value }}"
         AWS_ACCESS_KEY_ID: "{{ boot_id.value }}"
         AWS_SECRET_ACCESS_KEY: "{{ boot_secret.value }}"
-        BOOT_URI: "s3://{{ boot_bucket }}/{{ boot_key }}"
-      volumes:
-        - /root/boot:/var/lib/gitea
-        - /etc/timezone:/etc/timezone:ro
-        - /etc/localtime:/etc/localtime:ro
+        BACKUP_CRON_EXPRESSION: "0 0 * * *"
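offen/docker-volume-backup reads its schedule from BACKUP_CRON_EXPRESSION (daily at midnight here) and stops any container labelled docker-volume-backup.stop-during-backup while it archives the mounted directory. A backup can also be triggered by hand; a minimal sketch, assuming the container name used above and placeholder bucket variables:

    # Run an ad-hoc backup outside the nightly schedule.
    docker exec backup backup

    # Inspect what ended up in the bucket.
    aws s3 ls "s3://$BOOT_BUCKET/$BOOT_KEY"
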
playbooks/restore.yml (new file, 51 lines)
@@ -0,0 +1,51 @@
+- name: Deploy artifact to instance.
+  hosts: localhost
+  become: true
+  vars_files:
+    - ../secrets/gitea.json
+    - ../secrets.tf.json
+  vars:
+    ansible_connection: aws_ssm
+    ansible_python_interpreter: /usr/bin/python3
+    ansible_aws_ssm_plugin: "{{ ssm_plugin }}"
+    ansible_aws_ssm_bucket_name: "{{ image_bucket }}"
+    ansible_aws_ssm_instance_id: "{{ instance_id.value }}"
+
+    ansible_aws_ssm_region: "{{ aws_region }}"
+    ansible_aws_ssm_access_key_id: "{{ aws_access_key }}"
+    ansible_aws_ssm_secret_access_key: "{{ aws_secret_key }}"
+  tasks:
+    - name: Stop server.
+      community.docker.docker_container:
+        name: "{{ item }}"
+        state: stopped
+      loop: [server, backup]
+
+    - name: Copy backup from S3.
+      environment:
+        region: "{{ boot_region.value }}"
+        access_key: "{{ boot_id.value }}"
+        secret_key: "{{ boot_secret.value }}"
+      amazon.aws.s3_object:
+        bucket: "{{ boot_bucket }}"
+        object: "{{ boot_key }}"
+        dest: /home/ssm-user/backup.tar.xz
+        mode: get
+
+    - name: Ensure backup directory exists.
+      ansible.builtin.file:
+        path: /home/ssm-user/data
+        state: directory
+        mode: '0777'
+
+    - name: Extract backup.
+      ansible.builtin.unarchive:
+        src: /home/ssm-user/backup.tar.xz
+        dest: /home/ssm-user/data
+        remote_src: true
+
+    - name: Restart containers.
+      community.docker.docker_container:
+        name: "{{ item }}"
+        state: started
+      loop: [server, backup]
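Run end to end, the playbook above amounts to the following sequence on the instance (a sketch with placeholder bucket variables; the real run goes through the aws_ssm connection plugin):

    # Stop Gitea and the backup sidecar so the data directory is quiescent.
    docker stop server backup

    # Pull the latest archive and unpack it into the bind-mounted data directory.
    aws s3 cp "s3://$BOOT_BUCKET/$BOOT_KEY" /home/ssm-user/backup.tar.xz
    mkdir -p /home/ssm-user/data && chmod 0777 /home/ssm-user/data
    tar -xf /home/ssm-user/backup.tar.xz -C /home/ssm-user/data

    # Bring everything back up.
    docker start server backup
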
@@ -6,7 +6,10 @@ data "aws_iam_policy_document" "boot" {
   statement {
     effect    = "Allow"
     actions   = ["s3:*", "s3-object-lambda:*"]
-    resources = ["${data.aws_s3_bucket.storage_bucket.arn}/${var.boot_key}"]
+    resources = [
+      "${data.aws_s3_bucket.storage_bucket.arn}/${var.boot_key}",
+      "${data.aws_s3_bucket.storage_bucket.arn}/${var.boot_key}/*",
+    ]
   }
 }

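The second ARN presumably matters because the backup container treats the key as a prefix, so both the exact key and objects nested beneath it must be allowed (a sketch; bucket, key, and object name are placeholders):

    # Covered by ".../${var.boot_key}":
    aws s3 cp backup.tar.gz "s3://$BOOT_BUCKET/$BOOT_KEY"
    # Covered only by ".../${var.boot_key}/*":
    aws s3 cp backup.tar.gz "s3://$BOOT_BUCKET/$BOOT_KEY/2024-01-01.tar.gz"   # hypothetical object name
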
@@ -22,7 +25,7 @@ module "boot_user" {
   version = "5.52.2"

   create_iam_user_login_profile = false
   name = "${var.boot_role}User"
   password_reset_required = false
   policy_arns = [aws_iam_policy.boot.arn]
 }
@@ -1,5 +1,6 @@
 #!/bin/sh

+rpm --rebuilddb
 amazon-linux-extras install docker ansible2 python3.8 -y

 # Make Docker work.
@@ -10,7 +11,7 @@ sudo usermod -aG docker ssm-user
 # Set up the correct version of Python (for Ansible).
 ln -sf /usr/bin/python3.8 /usr/bin/python3
 ln -sf /usr/bin/pip3.8 /usr/bin/pip3
-pip3 install botocore boto3 requests
+pip3 install botocore boto3 requests packaging
 python3 -m pip install -U pip

 # Add some swap space.
@@ -20,9 +21,8 @@ mkswap /swapfile
 swapon /swapfile

 # Stop SSH (because we have SSM.)
-sudo service sshd stop
+service sshd stop

 # Install Docker Compose.
 curl -L "https://github.com/docker/compose/releases/latest/download/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
 chmod +x /usr/local/bin/docker-compose
-docker-compose version
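A quick way to sanity-check the user-data changes after boot (a sketch; run inside an SSM session on the instance):

    # Confirm the extra Python dependency and the swap file took effect,
    # and that sshd is no longer running.
    python3 -c "import packaging, boto3, botocore, requests"
    swapon --show
    service sshd status || true
    docker-compose version   # still installed, just no longer printed by install.sh
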
@@ -1,3 +1,9 @@
+# An Elastic IP, so that if the reverse proxy is modified, the route tables won't need to change.
+resource "aws_eip" "public" {
+  instance = aws_instance.gitea.id
+  domain   = "vpc"
+}
+
 data "aws_iam_instance_profile" "ssm" {
   name = "SSMInstanceProfile"
 }
@@ -7,12 +13,13 @@ resource "aws_instance" "gitea" {
   # ami           = data.aws_ami.amazon-linux-2.id
   ami           = "ami-0adec96dc0cdc7bca"
   instance_type = "t4g.nano"
-  subnet_id     = data.aws_subnet.subnet.id
+  subnet_id     = module.vpc.public_subnets[0]

   user_data                   = file("install.sh")
   user_data_replace_on_change = true

   iam_instance_profile   = data.aws_iam_instance_profile.ssm.name
+  vpc_security_group_ids = [aws_security_group.public_access.id]

   root_block_device {
     volume_type = "gp3"
@@ -22,4 +29,4 @@ resource "aws_instance" "gitea" {
   tags = {
     Name = "Codebase: Gitea"
   }
 }
@@ -1,6 +1,55 @@
-data "aws_subnet" "subnet" {
-  tags = {
-    SubnetType = "Private"
-    SubnetOf   = "Main"
-  }
+locals {
+  # The IP block for the VPC.
+  vpc_cidr = "10.0.0.0/16"
+
+  # Here is where the domain name is set.
+  domain_name = "maximhutz.com"
 }
+
+data "aws_availability_zones" "all" {}
+
+# The main VPC.
+module "vpc" {
+  source = "terraform-aws-modules/vpc/aws"
+
+  name = "Main"
+  cidr = local.vpc_cidr
+
+  azs             = [data.aws_availability_zones.all.names[0]]
+  private_subnets = [cidrsubnet(local.vpc_cidr, 8, 0)]
+  public_subnets  = [cidrsubnet(local.vpc_cidr, 8, 4)]
+
+  private_subnet_tags = { SubnetOf = "Main", SubnetType = "Private" }
+  public_subnet_tags  = { SubnetOf = "Main", SubnetType = "Public" }
+
+  map_public_ip_on_launch = true
+  enable_dns_hostnames    = true
+  enable_dns_support      = true
+}
+
+# Only allow HTTP(S) and SSH traffic in; allow full access out to the internet.
+resource "aws_security_group" "public_access" {
+  vpc_id = module.vpc.vpc_id
+}
+
+resource "aws_vpc_security_group_ingress_rule" "ingress" {
+  for_each = toset(["80", "443", "22", "2222", "81", "8080", "4321", "1234"])
+
+  security_group_id = aws_security_group.public_access.id
+
+  from_port   = each.value
+  to_port     = each.value
+  ip_protocol = "tcp"
+  cidr_ipv4   = "0.0.0.0/0"
+}
+
+resource "aws_vpc_security_group_egress_rule" "egress" {
+  for_each = toset(["-1"])
+
+  security_group_id = aws_security_group.public_access.id
+
+  from_port   = each.value
+  to_port     = each.value
+  ip_protocol = "-1"
+  cidr_ipv4   = "0.0.0.0/0"
+}
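The two cidrsubnet() calls carve /24 networks out of the /16; worked out, index 0 and index 4 give the blocks below (easy to confirm in terraform console):

    # terraform console
    > cidrsubnet("10.0.0.0/16", 8, 0)
    "10.0.0.0/24"    # the private subnet
    > cidrsubnet("10.0.0.0/16", 8, 4)
    "10.0.4.0/24"    # the public subnet that hosts the instance
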
terraform/routing.tf (new file, 13 lines)
@@ -0,0 +1,13 @@
+# The Route53 DNS zone.
+data "aws_route53_zone" "main" {
+  name = local.domain_name
+}
+
+# Push all domain traffic through the reverse proxy.
+resource "aws_route53_record" "domain" {
+  zone_id = data.aws_route53_zone.main.zone_id
+  name    = "code.${data.aws_route53_zone.main.name}"
+  type    = "A"
+  ttl     = "60"
+  records = [aws_eip.public.public_ip]
+}
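Once applied, the A record should resolve straight to the Elastic IP attached to the instance; a quick check (a sketch):

    # The answer should match the aws_eip.public address.
    dig +short code.maximhutz.com A

    # Or compare against Terraform's view of the EIP.
    terraform state show aws_eip.public | grep public_ip
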