fix: restore command now separated

@@ -7,9 +7,10 @@ includes:
 tasks:
   dev: docker compose -f compose.dev.yml up --build --force-recreate --no-deps

   build: ansible-playbook playbooks/build.yml
   deploy: ansible-playbook playbooks/deploy.yml
-  restore: ansible-playbook playbooks/restore.yml
+  restore: ansible-playbook playbooks/restore.yml -e "restore_bucket={{.BUCKET}} restore_key={{.KEY}}"
   run:
     - task: build
     - task: deploy

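The BUCKET and KEY templates above are ordinary Task variables, so the new restore task can take them straight from the command line. A minimal sketch of an invocation, assuming go-task's standard `task NAME VAR=value` syntax (the bucket name and key below are hypothetical):

    # BUCKET and KEY are substituted into the -e string by Task's templating.
    task restore BUCKET=my-backup-bucket KEY=backups/2024-01-01.tar.gz
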
@@ -17,7 +18,10 @@ tasks:
   enter:
     cmd: aws ssm start-session --target $INSTANCE_ID
     env:
-      INSTANCE_ID: { sh: jq -r .instance_id.value < secrets.tf.json }
+      INSTANCE_ID: { sh: jq -r .instance_id.value < config/infrastructure.secret.tf.json }
+      AWS_REGION: { sh: jq -r .aws_region < config/ansible.secret.json }
+      AWS_ACCESS_KEY_ID: { sh: jq -r .aws_access_key < config/ansible.secret.json }
+      AWS_SECRET_ACCESS_KEY: { sh: jq -r .aws_secret_key < config/ansible.secret.json }

   push:
     dir: gitea

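Each env entry shells out once and captures stdout, so every variable is a single jq lookup against a secrets file. A sketch of what those lookups return, assuming hypothetical file contents:

    # Assuming config/ansible.secret.json contains something like:
    #   {"aws_region": "eu-west-1", "aws_access_key": "AKIA...", "aws_secret_key": "..."}
    jq -r .aws_region < config/ansible.secret.json                     # -> eu-west-1
    # infrastructure.secret.tf.json is raw `terraform output -json`, hence .value:
    jq -r .instance_id.value < config/infrastructure.secret.tf.json   # -> i-0abc...
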
@@ -1,6 +1,7 @@
 - name: Make build artifact.
   hosts: localhost
-  vars_files: ../config/ansible.json
+  vars_files: ../config/ansible.secret.json
   gather_facts: false
   tasks:
     - name: Build image.
+      community.docker.docker_image_build:

@@ -1,9 +1,10 @@
 - name: Deploy artifact to instance.
   hosts: localhost
   become: true
   gather_facts: false
   vars_files:
-    - ../config/ansible.json
-    - ../config/infrastructure.json
+    - ../config/ansible.secret.json
+    - ../config/infrastructure.secret.tf.json
   vars:
     ansible_connection: aws_ssm
+    ansible_python_interpreter: /usr/bin/python3

@@ -26,15 +27,17 @@
         access_key: "{{ aws_access_key }}"
         secret_key: "{{ aws_secret_key }}"

     - name: Create data directory.
       ansible.builtin.file:
         path: /home/ssm-user/data
         state: directory
         mode: '0777'

     - name: Load image.
       community.docker.docker_image_load:
         path: /root/image.tar.gz
       register: image

     - name: Create a volume.
       community.docker.docker_volume:
         name: data

     - name: Run image.
       community.docker.docker_container:
         name: server

@@ -1,9 +1,10 @@
 - name: Deploy artifact to instance.
   hosts: localhost
   become: true
   gather_facts: false
   vars_files:
-    - ../config/ansible.json
-    - ../config/infrastructure.json
+    - ../config/ansible.secret.json
+    - ../config/infrastructure.secret.tf.json
   vars:
     ansible_connection: aws_ssm
+    ansible_python_interpreter: /usr/bin/python3

@@ -27,23 +28,38 @@
         access_key: "{{ boot_id.value }}"
         secret_key: "{{ boot_secret.value }}"
       amazon.aws.s3_object:
-        bucket: "{{ boot_bucket }}"
-        object: "{{ boot_key }}"
-        dest: /home/ssm-user/backup.tar.xz
+        bucket: "{{ restore_bucket | mandatory(msg='You must specify the bucket of the data.') }}"
+        object: "{{ restore_key | mandatory(msg='You must specify the key of the data.') }}"
+        dest: /home/ssm-user/backup.tar.gz
         mode: get

     - name: Ensure backup directory exists.
       ansible.builtin.file:
-        path: /home/ssm-user/data
+        path: /home/ssm-user/backup
         state: directory
         mode: '0777'

     - name: Extract backup.
       ansible.builtin.unarchive:
-        src: /home/ssm-user/backup.tar.xz
-        dest: /home/ssm-user/data
+        src: /home/ssm-user/backup.tar.gz
+        dest: /home/ssm-user/backup
         remote_src: true

+    - name: Move backup files to data folder.
+      ansible.builtin.copy:
+        remote_src: true
+        src: /home/ssm-user/backup/backup/my-app-backup/
+        dest: /home/ssm-user/data/
+        mode: '0777'
+
+    - name: Update permissions.
+      ansible.builtin.file:
+        path: /home/ssm-user/data
+        recurse: true
+        mode: '0777'
+        owner: 1000
+        group: 1000
+
     - name: Restart containers.
       community.docker.docker_container:
         name: "{{ item }}"

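The mandatory() guards make the playbook fail fast when the extra vars are missing, instead of fetching nothing. A sketch of both outcomes (bucket and key values are hypothetical):

    # Missing vars abort the play with the message from mandatory():
    ansible-playbook playbooks/restore.yml
    # -> "You must specify the bucket of the data."

    # Supplying both downloads, extracts, and moves the backup into /home/ssm-user/data:
    ansible-playbook playbooks/restore.yml \
      -e "restore_bucket=my-backup-bucket restore_key=backups/2024-01-01.tar.gz"
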
@@ -1,13 +1,18 @@
 version: 3
 silent: true

+vars:
+  BACKEND: ../config/backend.secret.tf.json
+  VARIABLES: ../config/variables.secret.tf.json
+  OUTPUT: ../config/infrastructure.secret.tf.json
+
 tasks:
-  init: terraform init -backend-config=../config/backend.tf.json
-  plan: terraform plan -var-file=../config/variables.tf.json
+  init: terraform init -backend-config={{.BACKEND}}
+  plan: terraform plan -var-file={{.VARIABLES}}
   destroy: terraform destroy
   format: terraform fmt -recursive
-  out: terraform output -json > ../config/infrastructure.tf.json
+  out: terraform output -json > {{.OUTPUT}}
   apply:
-    - terraform apply -var-file=../config/variables.tf.json
+    - terraform apply -var-file={{.VARIABLES}}
     - task: out
-  import: terraform import -var-file=../config/variables.tf.json {{.CLI_ARGS}}
+  import: terraform import -var-file={{.VARIABLES}} {{.CLI_ARGS}}

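Task forwards anything after `--` as {{.CLI_ARGS}}, so the import task still accepts the usual terraform import arguments. A hypothetical example (the resource address and instance ID are made up):

    # Everything after -- lands in {{.CLI_ARGS}}:
    task import -- aws_instance.this i-0abc123def456789
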
@@ -4,8 +4,8 @@ data "aws_s3_bucket" "storage_bucket" {

 data "aws_iam_policy_document" "boot" {
   statement {
-    effect = "Allow"
-    actions = ["s3:*", "s3-object-lambda:*"]
+    effect    = "Allow"
+    actions   = ["s3:*", "s3-object-lambda:*"]
     resources = [
       "${data.aws_s3_bucket.storage_bucket.arn}/${var.boot_key}",
       "${data.aws_s3_bucket.storage_bucket.arn}/${var.boot_key}/*",

@@ -6,7 +6,6 @@ amazon-linux-extras install docker ansible2 python3.8 -y
 # Make Docker work.
 systemctl enable docker
 systemctl start docker
-sudo usermod -aG docker ssm-user

 # Set up the correct version of Python (for Ansible).
 ln -sf /usr/bin/python3.8 /usr/bin/python3
@@ -26,3 +25,6 @@ service sshd stop
 # Install Docker Compose.
 curl -L "https://github.com/docker/compose/releases/latest/download/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
 chmod +x /usr/local/bin/docker-compose
+
+# ERROR: SSM User not created yet.
+sudo usermod -aG docker ssm-user

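The trailing comment records that ssm-user still does not exist when user_data runs, since the SSM agent only creates that account on the first session login. One way to check whether the group change actually took, assuming a hypothetical instance ID:

    aws ssm start-session --target i-0abc123def456789
    # inside the session:
    id ssm-user    # docker should appear in the groups list if the usermod succeeded
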
@@ -1,6 +1,6 @@
 # An elastic IP, so if the reverse proxy is modified, the route tables won't be.
 resource "aws_eip" "public" {
-  instance = aws_instance.gitea.id
+  instance = aws_instance.this.id
   domain   = "vpc"
 }

@@ -9,17 +9,21 @@ data "aws_iam_instance_profile" "ssm" {
 }

 # The Gitea instance.
-resource "aws_instance" "gitea" {
+resource "aws_instance" "this" {
   # ami = data.aws_ami.amazon-linux-2.id
   ami           = "ami-0adec96dc0cdc7bca"
   instance_type = "t4g.nano"
   subnet_id     = module.vpc.public_subnets[0]

   user_data                   = file("install.sh")
-  user_data_replace_on_change = false
+  user_data_replace_on_change = true

   iam_instance_profile   = data.aws_iam_instance_profile.ssm.name
   vpc_security_group_ids = [aws_security_group.public_access.id]

+  metadata_options {
+    http_tokens = "required"
+  }
+
   root_block_device {
     volume_type = "gp3"
@@ -30,3 +34,8 @@ resource "aws_instance" "gitea" {
     Name = "Codebase: Gitea"
   }
 }
+
+resource "aws_ec2_instance_state" "this" {
+  instance_id = aws_instance.this.id
+  state       = "running"
+}

@@ -20,7 +20,7 @@ module "vpc" {
   public_subnets = [cidrsubnet(local.vpc_cidr, 8, 4)]

   private_subnet_tags = { SubnetOf = "Main", SubnetType = "Private" }
-  public_subnet_tags = { SubnetOf = "Main", SubnetType = "Public" }
+  public_subnet_tags  = { SubnetOf = "Main", SubnetType = "Public" }

   map_public_ip_on_launch = true
   enable_dns_hostnames    = true

@@ -1,27 +1,27 @@
 output "instance_id" {
-  value = aws_instance.gitea.id
+  value       = aws_instance.this.id
   description = "The instance ID of the Gitea instance."
 }

 output "ip_address" {
-  value = aws_instance.gitea.private_ip
+  value       = aws_instance.this.private_ip
   description = "The Gitea IP address."
 }

 output "boot_region" {
-  value = var.aws_region
+  value       = var.aws_region
   description = "The region to manipulate the codebase repository boot."
   sensitive   = true
 }

 output "boot_id" {
-  value = module.boot_user.iam_access_key_id
+  value       = module.boot_user.iam_access_key_id
   description = "The access id to manipulate the codebase repository boot."
   sensitive   = true
 }

 output "boot_secret" {
-  value = module.boot_user.iam_access_key_secret
+  value       = module.boot_user.iam_access_key_secret
   description = "The access secret to manipulate the codebase repository boot."
   sensitive   = true
 }

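These outputs are what the Taskfiles read back with jq: the terraform directory's `task out` serializes them, and the root Taskfile's enter task extracts instance_id. A sketch of the round trip, using paths from the hunks above:

    task out    # terraform output -json > ../config/infrastructure.secret.tf.json
    jq -r .instance_id.value < config/infrastructure.secret.tf.json    # -> i-0abc...
    # boot_id / boot_secret are marked sensitive, but -json output still includes them.
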
@@ -14,16 +14,16 @@ variable "aws_secret" {
 }

 variable "boot_bucket" {
-  type = string
+  type        = string
   description = "The name of the bucket to store the boot in."
 }

 variable "boot_key" {
-  type = string
+  type        = string
   description = "The path that will hold the boot data."
 }

 variable "boot_role" {
-  type = string
+  type        = string
   description = "The name of the role for boot access."
 }

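The boot_* variables arrive through the -var-file that the terraform Taskfile now points at; a .json var-file is parsed by Terraform as a plain map of variable values. A hypothetical config/variables.secret.tf.json shape (values made up; only the variables shown in this diff are included):

    cat > config/variables.secret.tf.json <<'EOF'
    {
      "boot_bucket": "my-backup-bucket",
      "boot_key": "boot/backups",
      "boot_role": "boot-access-role"
    }
    EOF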