---
# GLAM Infrastructure & Data Deployment
# Automatically deploys infrastructure changes and syncs data to Hetzner server

name: Deploy GLAM Infrastructure

on:
  push:
    branches:
      - main
    paths:
      - 'infrastructure/**'
      - 'schemas/20251121/rdf/**'
      - 'schemas/20251121/linkml/**'
      - 'schemas/20251121/uml/**'
      - 'data/ontology/**'
      - 'frontend/**'
      - '.github/workflows/deploy.yml'

  # Allow manual trigger
  workflow_dispatch:
    inputs:
      deploy_infra:
        description: 'Deploy infrastructure changes'
        type: boolean
        default: false
      deploy_data:
        description: 'Deploy ontology/schema data'
        type: boolean
        default: true
      deploy_frontend:
        description: 'Deploy frontend build'
        type: boolean
        default: true
      load_oxigraph:
        description: 'Reload data into Oxigraph'
        type: boolean
        default: false

env:
  # Quoted so version/user values stay strings (avoids float/implicit typing).
  TF_VERSION: '1.6.0'
  NODE_VERSION: '20'
  SERVER_USER: 'root'
jobs:
  # Determine what changed so downstream jobs can be skipped selectively.
  changes:
    runs-on: ubuntu-latest
    outputs:
      infra: ${{ steps.filter.outputs.infra }}
      data: ${{ steps.filter.outputs.data }}
      frontend: ${{ steps.filter.outputs.frontend }}
    steps:
      - uses: actions/checkout@v4

      - uses: dorny/paths-filter@v2
        id: filter
        with:
          # Each named filter becomes a 'true'/'false' step output.
          filters: |
            infra:
              - 'infrastructure/terraform/**'
            data:
              - 'schemas/20251121/rdf/**'
              - 'schemas/20251121/linkml/**'
              - 'schemas/20251121/uml/**'
              - 'data/ontology/**'
            frontend:
              - 'frontend/**'
# Deploy infrastructure with Terraform
|
|
deploy-infrastructure:
|
|
runs-on: ubuntu-latest
|
|
needs: changes
|
|
if: needs.changes.outputs.infra == 'true' || github.event.inputs.deploy_infra == 'true'
|
|
|
|
defaults:
|
|
run:
|
|
working-directory: infrastructure/terraform
|
|
|
|
steps:
|
|
- uses: actions/checkout@v4
|
|
|
|
- name: Setup Terraform
|
|
uses: hashicorp/setup-terraform@v3
|
|
with:
|
|
terraform_version: ${{ env.TF_VERSION }}
|
|
|
|
- name: Terraform Init
|
|
run: terraform init
|
|
env:
|
|
TF_TOKEN_app_terraform_io: ${{ secrets.TF_API_TOKEN }}
|
|
|
|
- name: Terraform Validate
|
|
run: terraform validate
|
|
|
|
- name: Terraform Plan
|
|
run: terraform plan -no-color -out=tfplan
|
|
env:
|
|
TF_VAR_hcloud_token: ${{ secrets.HETZNER_HC_API_TOKEN }}
|
|
TF_VAR_domain: ${{ vars.GLAM_DOMAIN }}
|
|
TF_VAR_admin_email: ${{ vars.ADMIN_EMAIL }}
|
|
TF_VAR_ssh_public_key_path: /tmp/deploy_key.pub
|
|
|
|
- name: Setup SSH Key for Terraform
|
|
run: |
|
|
echo "${{ secrets.DEPLOY_SSH_PRIVATE_KEY }}" > /tmp/deploy_key
|
|
chmod 600 /tmp/deploy_key
|
|
ssh-keygen -y -f /tmp/deploy_key > /tmp/deploy_key.pub
|
|
|
|
- name: Terraform Apply
|
|
if: github.ref == 'refs/heads/main'
|
|
run: terraform apply -auto-approve tfplan
|
|
env:
|
|
TF_VAR_hcloud_token: ${{ secrets.HETZNER_HC_API_TOKEN }}
|
|
TF_VAR_domain: ${{ vars.GLAM_DOMAIN }}
|
|
TF_VAR_admin_email: ${{ vars.ADMIN_EMAIL }}
|
|
TF_VAR_ssh_public_key_path: /tmp/deploy_key.pub
|
|
|
|
- name: Get Server IP
|
|
id: server
|
|
run: |
|
|
echo "ip=$(terraform output -raw server_ip)" >> $GITHUB_OUTPUT
|
|
|
|
outputs:
|
|
server_ip: ${{ steps.server.outputs.ip }}
|
|
|
|
# Get server IP (for data/frontend deployments when infra didn't change)
|
|
get-server-ip:
|
|
runs-on: ubuntu-latest
|
|
needs: changes
|
|
if: needs.changes.outputs.infra != 'true' && github.event.inputs.deploy_infra != 'true'
|
|
|
|
outputs:
|
|
server_ip: ${{ steps.server.outputs.ip }}
|
|
|
|
steps:
|
|
- uses: actions/checkout@v4
|
|
|
|
- name: Setup Terraform
|
|
uses: hashicorp/setup-terraform@v3
|
|
with:
|
|
terraform_version: ${{ env.TF_VERSION }}
|
|
|
|
- name: Terraform Init
|
|
working-directory: infrastructure/terraform
|
|
run: terraform init
|
|
env:
|
|
TF_TOKEN_app_terraform_io: ${{ secrets.TF_API_TOKEN }}
|
|
|
|
- name: Get Server IP from State
|
|
id: server
|
|
working-directory: infrastructure/terraform
|
|
run: |
|
|
# Try to get IP from terraform state, fallback to Hetzner API
|
|
IP=$(terraform output -raw server_ip 2>/dev/null || echo "")
|
|
if [ -z "$IP" ]; then
|
|
# Fallback: query Hetzner API directly
|
|
IP=$(curl -s -H "Authorization: Bearer ${{ secrets.HETZNER_HC_API_TOKEN }}" \
|
|
"https://api.hetzner.cloud/v1/servers?name=glam-sparql" | \
|
|
jq -r '.servers[0].public_net.ipv4.ip // empty')
|
|
fi
|
|
echo "ip=$IP" >> $GITHUB_OUTPUT
|
|
|
|
# Build frontend
|
|
build-frontend:
|
|
runs-on: ubuntu-latest
|
|
needs: changes
|
|
if: needs.changes.outputs.frontend == 'true' || github.event.inputs.deploy_frontend == 'true'
|
|
|
|
steps:
|
|
- uses: actions/checkout@v4
|
|
|
|
- name: Setup Node.js
|
|
uses: actions/setup-node@v4
|
|
with:
|
|
node-version: ${{ env.NODE_VERSION }}
|
|
cache: 'npm'
|
|
cache-dependency-path: frontend/package-lock.json
|
|
|
|
- name: Install dependencies
|
|
working-directory: frontend
|
|
run: npm ci
|
|
|
|
- name: Build frontend
|
|
working-directory: frontend
|
|
run: npm run build
|
|
env:
|
|
VITE_OXIGRAPH_URL: https://${{ vars.GLAM_DOMAIN }}
|
|
|
|
- name: Upload frontend artifact
|
|
uses: actions/upload-artifact@v4
|
|
with:
|
|
name: frontend-dist
|
|
path: frontend/dist
|
|
retention-days: 1
|
|
|
|
# Deploy data and frontend to server
|
|
deploy-to-server:
|
|
runs-on: ubuntu-latest
|
|
needs:
|
|
- changes
|
|
- deploy-infrastructure
|
|
- get-server-ip
|
|
- build-frontend
|
|
# Run if any deployment is needed
|
|
if: |
|
|
always() &&
|
|
(needs.deploy-infrastructure.result == 'success' || needs.deploy-infrastructure.result == 'skipped') &&
|
|
(needs.get-server-ip.result == 'success' || needs.get-server-ip.result == 'skipped') &&
|
|
(needs.build-frontend.result == 'success' || needs.build-frontend.result == 'skipped') &&
|
|
(needs.changes.outputs.data == 'true' ||
|
|
needs.changes.outputs.frontend == 'true' ||
|
|
github.event.inputs.deploy_data == 'true' ||
|
|
github.event.inputs.deploy_frontend == 'true')
|
|
|
|
env:
|
|
SERVER_IP: ${{ needs.deploy-infrastructure.outputs.server_ip || needs.get-server-ip.outputs.server_ip }}
|
|
|
|
steps:
|
|
- uses: actions/checkout@v4
|
|
|
|
- name: Download frontend artifact
|
|
if: needs.build-frontend.result == 'success'
|
|
uses: actions/download-artifact@v4
|
|
with:
|
|
name: frontend-dist
|
|
path: frontend/dist
|
|
|
|
- name: Setup SSH
|
|
run: |
|
|
mkdir -p ~/.ssh
|
|
echo "${{ secrets.DEPLOY_SSH_PRIVATE_KEY }}" > ~/.ssh/deploy_key
|
|
chmod 600 ~/.ssh/deploy_key
|
|
ssh-keyscan -H ${{ env.SERVER_IP }} >> ~/.ssh/known_hosts 2>/dev/null || true
|
|
|
|
- name: Wait for server to be ready
|
|
run: |
|
|
echo "Waiting for server ${{ env.SERVER_IP }} to be ready..."
|
|
for i in {1..30}; do
|
|
if ssh -i ~/.ssh/deploy_key -o ConnectTimeout=5 -o StrictHostKeyChecking=no \
|
|
${{ env.SERVER_USER }}@${{ env.SERVER_IP }} "echo 'ready'" 2>/dev/null; then
|
|
echo "Server is ready!"
|
|
exit 0
|
|
fi
|
|
echo "Attempt $i/30 - waiting..."
|
|
sleep 10
|
|
done
|
|
echo "Server not ready after 5 minutes"
|
|
exit 1
|
|
|
|
- name: Deploy ontology data
|
|
if: needs.changes.outputs.data == 'true' || github.event.inputs.deploy_data == 'true'
|
|
run: |
|
|
echo "Deploying ontology and schema data..."
|
|
|
|
# Sync ontology files
|
|
rsync -avz --progress \
|
|
-e "ssh -i ~/.ssh/deploy_key -o StrictHostKeyChecking=no" \
|
|
--include="*.ttl" --include="*.rdf" --include="*.owl" --include="*.jsonld" \
|
|
--exclude="*" \
|
|
data/ontology/ \
|
|
${{ env.SERVER_USER }}@${{ env.SERVER_IP }}:/mnt/data/ontologies/
|
|
|
|
# Sync RDF schema files (latest only)
|
|
rsync -avz --progress \
|
|
-e "ssh -i ~/.ssh/deploy_key -o StrictHostKeyChecking=no" \
|
|
--exclude="archive_*" \
|
|
schemas/20251121/rdf/ \
|
|
${{ env.SERVER_USER }}@${{ env.SERVER_IP }}:/mnt/data/rdf/
|
|
|
|
# Sync LinkML schemas
|
|
rsync -avz --progress \
|
|
-e "ssh -i ~/.ssh/deploy_key -o StrictHostKeyChecking=no" \
|
|
--include="*.yaml" --exclude="*" \
|
|
schemas/20251121/linkml/ \
|
|
${{ env.SERVER_USER }}@${{ env.SERVER_IP }}:/mnt/data/linkml/
|
|
|
|
# Sync UML diagrams
|
|
rsync -avz --progress \
|
|
-e "ssh -i ~/.ssh/deploy_key -o StrictHostKeyChecking=no" \
|
|
--include="*.mmd" --exclude="*" \
|
|
schemas/20251121/uml/mermaid/ \
|
|
${{ env.SERVER_USER }}@${{ env.SERVER_IP }}:/mnt/data/uml/
|
|
|
|
- name: Deploy frontend
|
|
if: needs.build-frontend.result == 'success'
|
|
run: |
|
|
echo "Deploying frontend build..."
|
|
rsync -avz --progress --delete \
|
|
-e "ssh -i ~/.ssh/deploy_key -o StrictHostKeyChecking=no" \
|
|
frontend/dist/ \
|
|
${{ env.SERVER_USER }}@${{ env.SERVER_IP }}:/var/www/glam-frontend/
|
|
|
|
- name: Reload Oxigraph data
|
|
if: github.event.inputs.load_oxigraph == 'true'
|
|
run: |
|
|
echo "Reloading data into Oxigraph..."
|
|
ssh -i ~/.ssh/deploy_key -o StrictHostKeyChecking=no \
|
|
${{ env.SERVER_USER }}@${{ env.SERVER_IP }} \
|
|
"/var/lib/glam/scripts/load-ontologies.sh"
|
|
|
|
- name: Verify deployment
|
|
run: |
|
|
echo "Verifying deployment..."
|
|
|
|
# Check Oxigraph status
|
|
ssh -i ~/.ssh/deploy_key -o StrictHostKeyChecking=no \
|
|
${{ env.SERVER_USER }}@${{ env.SERVER_IP }} \
|
|
"systemctl is-active oxigraph && echo 'Oxigraph: OK' || echo 'Oxigraph: FAILED'"
|
|
|
|
# Check Caddy status
|
|
ssh -i ~/.ssh/deploy_key -o StrictHostKeyChecking=no \
|
|
${{ env.SERVER_USER }}@${{ env.SERVER_IP }} \
|
|
"systemctl is-active caddy && echo 'Caddy: OK' || echo 'Caddy: FAILED'"
|
|
|
|
# Check SPARQL endpoint
|
|
SPARQL_STATUS=$(ssh -i ~/.ssh/deploy_key -o StrictHostKeyChecking=no \
|
|
${{ env.SERVER_USER }}@${{ env.SERVER_IP }} \
|
|
"curl -s -o /dev/null -w '%{http_code}' 'http://localhost:7878/query' -X POST -H 'Content-Type: application/sparql-query' -d 'SELECT (1 AS ?test) WHERE {}'")
|
|
|
|
if [ "$SPARQL_STATUS" = "200" ]; then
|
|
echo "SPARQL endpoint: OK"
|
|
else
|
|
echo "SPARQL endpoint: FAILED (HTTP $SPARQL_STATUS)"
|
|
exit 1
|
|
fi
|
|
|
|
- name: Deployment Summary
|
|
run: |
|
|
echo "============================================"
|
|
echo " Deployment Complete!"
|
|
echo "============================================"
|
|
echo ""
|
|
echo "Server IP: ${{ env.SERVER_IP }}"
|
|
echo "SPARQL Endpoint: https://${{ vars.GLAM_DOMAIN }}/query"
|
|
echo "Frontend: https://${{ vars.GLAM_DOMAIN }}/"
|
|
echo ""
|