# E2E Integration Tests — GitHub Actions workflow (run #103).
# NOTE: the "Skip to content" / "Workflow file for this run" lines that the
# GitHub web view prepends are page chrome, not workflow content.
name: E2E Integration Tests

on:
  # Manual-only trigger. Pre-GA the repo isn't customer-facing yet,
  # so authors run E2E on demand at the end of a sprint or when a PR
  # touches the Lambda/CFN. Layer 1 (sync-check, handler coverage,
  # cfn-lint) stays as the per-PR gate.
  workflow_dispatch:

# Serialize all E2E runs — the linked-account pool (linked2–5) is shared
# across every run, so two concurrent runs race to tag each other's
# resources. Queue instead of cancel: an interrupted run would skip
# teardown and leave StackSets + Lambdas behind.
concurrency:
  group: e2e-linked-accounts-pool
  cancel-in-progress: false

# Default settings inherited by all jobs
defaults:
  run:
    working-directory: .github/scripts

env:
  # Falls back to run_id for workflow_dispatch runs (the only trigger —
  # there is no PR context). Keeps resource names unique across runs so
  # nothing collides.
  PR_NUMBER: ${{ github.event.pull_request.number || github.run_id }}
  # MPE_ID drives the expected tag value. Keeps the `migTEST` prefix so the
  # scope-vpc-positive verify step (which filters tags by `migTEST*`) keeps
  # working — scope tests run in the same account as the main stack and both
  # Lambdas race to tag the same resource; last-writer wins, and the prefix
  # match is what absorbs the race. Appends the run ID so resource names
  # (IAM role, Lambda, SQS, SSM) are unique per workflow run and we don't hit
  # "already exists" collisions when two runs overlap. Fixed in PR #4, lost
  # when PR #5 merged on top of an older main; restoring here.
  # Length check: `map-auto-tagger-role-migTEST<11-digit-run-id>-ap-northeast-2`
  # = 61 chars, safely under the 64-char IAM role name limit.
  MPE_ID: migTEST${{ github.run_id }}
  # Scope test MPEs — also per-run (not static) to prevent cross-PR
  # collisions in linked1 (which now hosts all scope stacks). Fixed
  # suffixes (A/B/C/D) distinguish the four scope stacks inside a
  # single run while keeping the migTEST prefix so verify-scope-*
  # prefix-match still works.
  SCOPE_ACCT_MPE: migTEST${{ github.run_id }}A
  SCOPE_VPC_MPE: migTEST${{ github.run_id }}B
  SCOPE_DATE_MPE: migTEST${{ github.run_id }}C
  SCOPE_ACCT_OUT_MPE: migTEST${{ github.run_id }}D
  STACK_NAME: map-auto-tagger-e2e-pr${{ github.event.pull_request.number || github.run_id }}

# ---------------------------------------------------------------------------
# NOTE: GitHub Actions has no reusable step fragments / YAML anchors, so the
# checkout + AWS-credentials + Python-setup preamble is repeated verbatim at
# the top of every job below. Multi-account jobs override role-to-assume.
# ---------------------------------------------------------------------------
jobs:
  # ══════════════════════════════════════════════════════════════════════════
  # Phase 1 — Deploy the auto-tagger itself (runs in parallel)
  # ══════════════════════════════════════════════════════════════════════════
  deploy-single:
    name: Deploy single-account stack
    runs-on: ubuntu-latest
    permissions:
      id-token: write
      contents: read
    steps:
      - uses: actions/checkout@v4
      - uses: aws-actions/configure-aws-credentials@v4
        with:
          role-to-assume: arn:aws:iam::${{ secrets.AWS_SINGLE_ACCOUNT_ID }}:role/GitHubActionsE2ERole
          aws-region: ap-northeast-2
      - uses: actions/setup-python@v5
        with:
          python-version: '3.12'
      - run: pip install boto3
      - name: Wait for any previous stack deletions to complete
        working-directory: .
        run: |
          for REGION in ap-northeast-2 us-east-1 us-west-2; do
            STATUS=$(aws cloudformation describe-stacks \
              --stack-name "$STACK_NAME" --region "$REGION" \
              --query 'Stacks[0].StackStatus' --output text 2>/dev/null || echo "DOES_NOT_EXIST")
            if [ "$STATUS" = "DELETE_IN_PROGRESS" ]; then
              echo "Waiting for $STACK_NAME deletion in $REGION..."
              aws cloudformation wait stack-delete-complete \
                --stack-name "$STACK_NAME" --region "$REGION" || true
            fi
          done
      - name: Deploy auto-tagger CloudFormation stack
        working-directory: .
        run: |
          # Template is >51KB — stage in S3 per region (buckets must be co-located)
          ACCOUNT=$(aws sts get-caller-identity --query Account --output text)
          for REGION in ap-northeast-2 us-east-1 us-west-2; do
            S3_BUCKET="cfn-e2e-${ACCOUNT}-${PR_NUMBER}-${REGION}"
            # us-east-1 does not accept LocationConstraint
            if [ "$REGION" = "us-east-1" ]; then
              aws s3api create-bucket --bucket "$S3_BUCKET" --region us-east-1 2>/dev/null || true
            else
              aws s3api create-bucket \
                --bucket "$S3_BUCKET" \
                --region "$REGION" \
                --create-bucket-configuration LocationConstraint="$REGION" \
                2>/dev/null || true
            fi
            aws cloudformation deploy \
              --stack-name "${STACK_NAME}" \
              --template-file map2-auto-tagger-optimized.yaml \
              --parameter-overrides \
                MpeId="$MPE_ID" \
                AgreementStartDate="2024-01-01" \
              --capabilities CAPABILITY_NAMED_IAM \
              --s3-bucket "$S3_BUCKET" \
              --s3-prefix "e2e" \
              --region "$REGION" \
              --no-fail-on-empty-changeset
          done
      - name: Verify stack is deployed
        working-directory: .
        run: |
          # Check every region the stack was deployed to, and accept only the
          # two truly-successful terminal states. The previous glob test
          # (`*COMPLETE*`) also matched ROLLBACK_COMPLETE and
          # UPDATE_ROLLBACK_COMPLETE, so a rolled-back stack passed
          # verification; it also only checked the default region.
          for REGION in ap-northeast-2 us-east-1 us-west-2; do
            STATUS=$(aws cloudformation describe-stacks \
              --stack-name "$STACK_NAME" --region "$REGION" \
              --query 'Stacks[0].StackStatus' --output text)
            echo "[$REGION] Stack status: $STATUS"
            case "$STATUS" in
              CREATE_COMPLETE|UPDATE_COMPLETE) ;;
              *)
                echo "Stack not in a successful state in $REGION: $STATUS"
                exit 1
                ;;
            esac
          done
deploy-stackset:
name: Deploy multi-account StackSet
runs-on: ubuntu-latest
permissions:
id-token: write
contents: read
steps:
- uses: actions/checkout@v4
- uses: aws-actions/configure-aws-credentials@v4
with:
role-to-assume: arn:aws:iam::${{ secrets.AWS_MGMT_ACCOUNT_ID }}:role/GitHubActionsE2ERole
aws-region: ap-northeast-2
- uses: actions/setup-python@v5
with:
python-version: '3.12'
- run: pip install boto3
- name: Clean up stale StackSets from prior PR runs
run: |
# Delete any map-auto-tagger-e2e-pr*-stackset StackSets that are NOT
# the current PR's StackSet. Orphaned instances from prior runs cause
# CAPABILITY_NAMED_IAM conflicts (IAM role / log group name collision).
CURRENT_SS="$STACK_NAME-stackset"
for ss in $(aws cloudformation list-stack-sets \
--status ACTIVE \
--query 'Summaries[?starts_with(StackSetName, `map-auto-tagger-e2e-pr`)].StackSetName' \
--output text \
--region ap-northeast-2 2>/dev/null); do
if [ "$ss" = "$CURRENT_SS" ]; then
echo "Skipping current StackSet: $ss"
continue
fi
echo "Cleaning up stale StackSet: $ss"
python3 delete_stackset.py \
--name "$ss" \
--accounts "${{ secrets.E2E_LINKED_ACCOUNT_IDS }}" \
--org-unit-ids "${{ secrets.AWS_SANDBOX_OU_ID }}" \
--region ap-northeast-2 || true
done
continue-on-error: true
- name: Deploy StackSet to linked accounts
run: |
# PR #7.b: linked1 is reserved for scope tests (isolation from the
# main StackSet Lambda). Strip linked1 from the StackSet targets
# so no main Lambda runs there and scope Lambdas don't race.
ACCOUNTS=$(echo "${{ secrets.E2E_LINKED_ACCOUNT_IDS }}" | tr ',' '\n' | grep -v "^${{ secrets.AWS_LINKED1_ACCOUNT_ID }}$" | paste -sd, -)
echo "StackSet target accounts (linked1 excluded): $ACCOUNTS"
python3 deploy_stackset.py \
--stack-set-name "$STACK_NAME-stackset" \
--template ../../map2-auto-tagger-optimized.yaml \
--mpe-id "$MPE_ID" \
--agreement-date "2024-01-01" \
--accounts "$ACCOUNTS" \
--org-unit-ids "${{ secrets.AWS_SANDBOX_OU_ID }}" \
--region ap-northeast-2
# ══════════════════════════════════════════════════════════════════════════
# Phase 2 — Create test resources (all parallel, each group independent)
# ══════════════════════════════════════════════════════════════════════════
create-networking:
name: Create networking resources
runs-on: ubuntu-latest
needs: [deploy-single]
permissions:
id-token: write
contents: read
outputs:
vpc-id: ${{ steps.create.outputs.vpc-id }}
subnet-ids: ${{ steps.create.outputs.subnet-ids }}
sg-id: ${{ steps.create.outputs.sg-id }}
steps:
- uses: actions/checkout@v4
- uses: aws-actions/configure-aws-credentials@v4
with:
role-to-assume: arn:aws:iam::${{ secrets.AWS_SINGLE_ACCOUNT_ID }}:role/GitHubActionsE2ERole
aws-region: ap-northeast-2
- uses: actions/setup-python@v5
with:
python-version: '3.12'
- run: pip install boto3
- name: Create networking resources
id: create
run: |
python3 create_resources.py \
--group networking \
--region ap-northeast-2
- uses: actions/upload-artifact@v4
if: always()
with:
name: arns-networking
path: .github/scripts/created-arns-networking.json
if-no-files-found: warn
create-core:
name: Create core compute resources
runs-on: ubuntu-latest
needs: [deploy-single]
permissions:
id-token: write
contents: read
steps:
- uses: actions/checkout@v4
- uses: aws-actions/configure-aws-credentials@v4
with:
role-to-assume: arn:aws:iam::${{ secrets.AWS_SINGLE_ACCOUNT_ID }}:role/GitHubActionsE2ERole
aws-region: ap-northeast-2
- uses: actions/setup-python@v5
with:
python-version: '3.12'
- run: pip install boto3
- name: Create core resources
run: |
python3 create_resources.py --group core --region ap-northeast-2
- uses: actions/upload-artifact@v4
if: always()
with:
name: arns-core
path: .github/scripts/created-arns-core.json
if-no-files-found: warn
create-databases:
name: Create database resources
runs-on: ubuntu-latest
needs: [deploy-single, create-networking]
permissions:
id-token: write
contents: read
steps:
- uses: actions/checkout@v4
- uses: aws-actions/configure-aws-credentials@v4
with:
role-to-assume: arn:aws:iam::${{ secrets.AWS_SINGLE_ACCOUNT_ID }}:role/GitHubActionsE2ERole
aws-region: ap-northeast-2
- uses: actions/setup-python@v5
with:
python-version: '3.12'
- run: pip install boto3
- name: Create database resources
run: |
python3 create_resources.py \
--group databases \
--region ap-northeast-2 \
--vpc-id "${{ needs.create-networking.outputs.vpc-id }}" \
--subnet-ids "${{ needs.create-networking.outputs.subnet-ids }}" \
--sg-id "${{ needs.create-networking.outputs.sg-id }}"
- uses: actions/upload-artifact@v4
if: always()
with:
name: arns-databases
path: .github/scripts/created-arns-databases.json
if-no-files-found: warn
create-analytics:
name: Create analytics resources
runs-on: ubuntu-latest
needs: [deploy-single]
permissions:
id-token: write
contents: read
steps:
- uses: actions/checkout@v4
- uses: aws-actions/configure-aws-credentials@v4
with:
role-to-assume: arn:aws:iam::${{ secrets.AWS_SINGLE_ACCOUNT_ID }}:role/GitHubActionsE2ERole
aws-region: ap-northeast-2
- uses: actions/setup-python@v5
with:
python-version: '3.12'
- run: pip install boto3
- name: Create analytics resources
run: |
python3 create_resources.py --group analytics --region ap-northeast-2
- uses: actions/upload-artifact@v4
if: always()
with:
name: arns-analytics
path: .github/scripts/created-arns-analytics.json
if-no-files-found: warn
create-integration:
name: Create integration resources
runs-on: ubuntu-latest
needs: [deploy-single]
permissions:
id-token: write
contents: read
steps:
- uses: actions/checkout@v4
- uses: aws-actions/configure-aws-credentials@v4
with:
role-to-assume: arn:aws:iam::${{ secrets.AWS_SINGLE_ACCOUNT_ID }}:role/GitHubActionsE2ERole
aws-region: ap-northeast-2
- uses: actions/setup-python@v5
with:
python-version: '3.12'
- run: pip install boto3
- name: Create integration resources
run: |
python3 create_resources.py --group integration --region ap-northeast-2
- uses: actions/upload-artifact@v4
if: always()
with:
name: arns-integration
path: .github/scripts/created-arns-integration.json
if-no-files-found: warn
create-security:
name: Create security resources
runs-on: ubuntu-latest
needs: [deploy-single]
permissions:
id-token: write
contents: read
steps:
- uses: actions/checkout@v4
- uses: aws-actions/configure-aws-credentials@v4
with:
role-to-assume: arn:aws:iam::${{ secrets.AWS_SINGLE_ACCOUNT_ID }}:role/GitHubActionsE2ERole
aws-region: ap-northeast-2
- uses: actions/setup-python@v5
with:
python-version: '3.12'
- run: pip install boto3
- name: Create security resources
run: |
python3 create_resources.py --group security --region ap-northeast-2
- uses: actions/upload-artifact@v4
if: always()
with:
name: arns-security
path: .github/scripts/created-arns-security.json
if-no-files-found: warn
create-devtools:
name: Create devtools resources
runs-on: ubuntu-latest
needs: [deploy-single]
permissions:
id-token: write
contents: read
steps:
- uses: actions/checkout@v4
- uses: aws-actions/configure-aws-credentials@v4
with:
role-to-assume: arn:aws:iam::${{ secrets.AWS_SINGLE_ACCOUNT_ID }}:role/GitHubActionsE2ERole
aws-region: ap-northeast-2
- uses: actions/setup-python@v5
with:
python-version: '3.12'
- run: pip install boto3
- name: Create devtools resources
run: |
python3 create_resources.py --group devtools --region ap-northeast-2
- uses: actions/upload-artifact@v4
if: always()
with:
name: arns-devtools
path: .github/scripts/created-arns-devtools.json
if-no-files-found: warn
create-ml:
name: Create ML/AI resources
runs-on: ubuntu-latest
needs: [deploy-single]
permissions:
id-token: write
contents: read
steps:
- uses: actions/checkout@v4
- uses: aws-actions/configure-aws-credentials@v4
with:
role-to-assume: arn:aws:iam::${{ secrets.AWS_SINGLE_ACCOUNT_ID }}:role/GitHubActionsE2ERole
aws-region: ap-northeast-2
- uses: actions/setup-python@v5
with:
python-version: '3.12'
- run: pip install boto3
- name: Create ML resources
run: |
python3 create_resources.py --group ml --region ap-northeast-2
- uses: actions/upload-artifact@v4
if: always()
with:
name: arns-ml
path: .github/scripts/created-arns-ml.json
if-no-files-found: warn
create-media-iot:
name: Create media and IoT resources
runs-on: ubuntu-latest
needs: [deploy-single]
permissions:
id-token: write
contents: read
steps:
- uses: actions/checkout@v4
- uses: aws-actions/configure-aws-credentials@v4
with:
role-to-assume: arn:aws:iam::${{ secrets.AWS_SINGLE_ACCOUNT_ID }}:role/GitHubActionsE2ERole
aws-region: ap-northeast-2
- uses: actions/setup-python@v5
with:
python-version: '3.12'
- run: pip install boto3
- name: Create media and IoT resources
run: |
python3 create_resources.py --group media-iot --region ap-northeast-2
- uses: actions/upload-artifact@v4
if: always()
with:
name: arns-media-iot
path: .github/scripts/created-arns-media-iot.json
if-no-files-found: warn
create-misc:
name: Create miscellaneous resources
runs-on: ubuntu-latest
needs: [deploy-single]
permissions:
id-token: write
contents: read
steps:
- uses: actions/checkout@v4
- uses: aws-actions/configure-aws-credentials@v4
with:
role-to-assume: arn:aws:iam::${{ secrets.AWS_SINGLE_ACCOUNT_ID }}:role/GitHubActionsE2ERole
aws-region: ap-northeast-2
- uses: actions/setup-python@v5
with:
python-version: '3.12'
- run: pip install boto3
- name: Create miscellaneous resources
run: |
python3 create_resources.py --group misc --region ap-northeast-2
- uses: actions/upload-artifact@v4
if: always()
with:
name: arns-misc
path: .github/scripts/created-arns-misc.json
if-no-files-found: warn
create-global-us-east-1:
name: Create global us-east-1 resources
runs-on: ubuntu-latest
needs: [deploy-single]
permissions:
id-token: write
contents: read
steps:
- uses: actions/checkout@v4
- uses: aws-actions/configure-aws-credentials@v4
with:
role-to-assume: arn:aws:iam::${{ secrets.AWS_SINGLE_ACCOUNT_ID }}:role/GitHubActionsE2ERole
aws-region: us-east-1
- uses: actions/setup-python@v5
with:
python-version: '3.12'
- run: pip install boto3
- name: Create global us-east-1 resources
run: |
python3 create_resources.py --group global-us-east-1 --region us-east-1
- uses: actions/upload-artifact@v4
if: always()
with:
name: arns-global-us-east-1
path: .github/scripts/created-arns-global-us-east-1.json
if-no-files-found: warn
create-global-us-west-2:
name: Create global us-west-2 resources
runs-on: ubuntu-latest
needs: [deploy-single]
permissions:
id-token: write
contents: read
steps:
- uses: actions/checkout@v4
- uses: aws-actions/configure-aws-credentials@v4
with:
role-to-assume: arn:aws:iam::${{ secrets.AWS_SINGLE_ACCOUNT_ID }}:role/GitHubActionsE2ERole
aws-region: us-west-2
- uses: actions/setup-python@v5
with:
python-version: '3.12'
- run: pip install boto3
- name: Create global us-west-2 resources
run: |
python3 create_resources.py --group global-us-west-2 --region us-west-2
- uses: actions/upload-artifact@v4
if: always()
with:
name: arns-global-us-west-2
path: .github/scripts/created-arns-global-us-west-2.json
if-no-files-found: warn
# ── Multi-account linked account resource creation ────────────────────────
# Each linked account job assumes a separate role specific to that account.
# PR #7.b: linked1 is reserved for scope tests — no main-Lambda resources
# are created there. Scope tests create their own resources in linked1
# (see deploy-scope-* jobs).
create-multiaccount-linked2:
name: Create resources in linked account 2
runs-on: ubuntu-latest
needs: [deploy-stackset]
permissions:
id-token: write
contents: read
steps:
- uses: actions/checkout@v4
- uses: aws-actions/configure-aws-credentials@v4
with:
role-to-assume: arn:aws:iam::${{ secrets.AWS_LINKED2_ACCOUNT_ID }}:role/GitHubActionsE2ERole
aws-region: ap-northeast-2
- uses: actions/setup-python@v5
with:
python-version: '3.12'
- run: pip install boto3
- name: Create resources in linked account 2
run: |
python3 create_resources.py \
--group multiaccount-linked2 \
--account-index 2 \
--region ap-northeast-2
- uses: actions/upload-artifact@v4
if: always()
with:
name: arns-multiaccount-linked2
path: .github/scripts/created-arns-multiaccount-linked2.json
if-no-files-found: warn
create-multiaccount-linked3:
name: Create resources in linked account 3
runs-on: ubuntu-latest
needs: [deploy-stackset]
permissions:
id-token: write
contents: read
steps:
- uses: actions/checkout@v4
- uses: aws-actions/configure-aws-credentials@v4
with:
role-to-assume: arn:aws:iam::${{ secrets.AWS_LINKED3_ACCOUNT_ID }}:role/GitHubActionsE2ERole
aws-region: ap-northeast-2
- uses: actions/setup-python@v5
with:
python-version: '3.12'
- run: pip install boto3
- name: Create resources in linked account 3
run: |
python3 create_resources.py \
--group multiaccount-linked3 \
--account-index 3 \
--region ap-northeast-2
- uses: actions/upload-artifact@v4
if: always()
with:
name: arns-multiaccount-linked3
path: .github/scripts/created-arns-multiaccount-linked3.json
if-no-files-found: warn
create-multiaccount-linked4:
name: Create resources in linked account 4
runs-on: ubuntu-latest
needs: [deploy-stackset]
permissions:
id-token: write
contents: read
steps:
- uses: actions/checkout@v4
- uses: aws-actions/configure-aws-credentials@v4
with:
role-to-assume: arn:aws:iam::${{ secrets.AWS_LINKED4_ACCOUNT_ID }}:role/GitHubActionsE2ERole
aws-region: ap-northeast-2
- uses: actions/setup-python@v5
with:
python-version: '3.12'
- run: pip install boto3
- name: Create resources in linked account 4
run: |
python3 create_resources.py \
--group multiaccount-linked4 \
--account-index 4 \
--region ap-northeast-2
- uses: actions/upload-artifact@v4
if: always()
with:
name: arns-multiaccount-linked4
path: .github/scripts/created-arns-multiaccount-linked4.json
if-no-files-found: warn
create-multiaccount-linked5:
name: Create resources in linked account 5
runs-on: ubuntu-latest
needs: [deploy-stackset]
permissions:
id-token: write
contents: read
steps:
- uses: actions/checkout@v4
- uses: aws-actions/configure-aws-credentials@v4
with:
role-to-assume: arn:aws:iam::${{ secrets.AWS_LINKED5_ACCOUNT_ID }}:role/GitHubActionsE2ERole
aws-region: ap-northeast-2
- uses: actions/setup-python@v5
with:
python-version: '3.12'
- run: pip install boto3
- name: Create resources in linked account 5
run: |
python3 create_resources.py \
--group multiaccount-linked5 \
--account-index 5 \
--region ap-northeast-2
- uses: actions/upload-artifact@v4
if: always()
with:
name: arns-multiaccount-linked5
path: .github/scripts/created-arns-multiaccount-linked5.json
if-no-files-found: warn
# ══════════════════════════════════════════════════════════════════════════
# Phase 2b — StackSet Lambda health + linked-account tag verification (PR #7.c)
# ══════════════════════════════════════════════════════════════════════════
# One matrix entry per linked account (2-5). Each entry:
# 1. assumes the linked account's GitHubActionsE2ERole
# 2. proves the StackSet-deployed Lambda is actually running
# (get_function + Invocations > 0 + DLQ empty)
# 3. runs verify_tags.py against the linked account's own ARN artifact
# to confirm resources in that account got the map-migrated tag
#
# Why this exists:
# a) The StackSet deploy only proves CloudFormation succeeded in the mgmt
# account's view. A Lambda can still be broken per linked account
# (IAM drift, KMS denial, partial rollback). This catches that.
# b) Before PR #7.c the main `verify` job never actually checked the
# linked-account resources' tags — the single-account GH Actions
# role can't sts:AssumeRole into linked accounts, so verify_tags
# silently skipped them. Customers deploying the StackSet in prod
# had no E2E guarantee that the Lambda actually tagged their linked
# account resources. This closes that loop.
verify-linked-account:
name: Verify linked${{ matrix.index }} Lambda + tags
runs-on: ubuntu-latest
needs:
- create-multiaccount-linked2
- create-multiaccount-linked3
- create-multiaccount-linked4
- create-multiaccount-linked5
strategy:
fail-fast: false
matrix:
include:
- index: 2
account_secret: AWS_LINKED2_ACCOUNT_ID
- index: 3
account_secret: AWS_LINKED3_ACCOUNT_ID
- index: 4
account_secret: AWS_LINKED4_ACCOUNT_ID
- index: 5
account_secret: AWS_LINKED5_ACCOUNT_ID
permissions:
id-token: write
contents: read
steps:
- uses: actions/checkout@v4
- uses: aws-actions/configure-aws-credentials@v4
with:
role-to-assume: arn:aws:iam::${{ secrets[matrix.account_secret] }}:role/GitHubActionsE2ERole
aws-region: ap-northeast-2
- uses: actions/setup-python@v5
with:
python-version: '3.12'
- run: pip install boto3
- uses: actions/download-artifact@v4
with:
name: arns-multiaccount-linked${{ matrix.index }}
path: artifacts/
- name: Assert StackSet-deployed Lambda is healthy
run: |
python3 assert_tagger_health.py \
--mpe-id "$MPE_ID" \
--region ap-northeast-2 \
--window-minutes 20 \
--min-invocations 1
- name: Verify linked${{ matrix.index }} resources are tagged
run: |
python3 verify_tags.py \
--arns-dir ../../artifacts/ \
--tag-key map-migrated \
--tag-value "$MPE_ID" \
--max-wait 600 \
--poll-interval 30
working-directory: .github/scripts
# ══════════════════════════════════════════════════════════════════════════
# Phase 3 — Verify tags (runs after all create jobs)
# ══════════════════════════════════════════════════════════════════════════
verify:
name: Verify map-migrated tags
runs-on: ubuntu-latest
needs:
- create-networking
- create-core
- create-databases
- create-analytics
- create-integration
- create-security
- create-devtools
- create-ml
- create-media-iot
- create-misc
- create-global-us-east-1
- create-global-us-west-2
# linked1 intentionally omitted: reserved for scope tests (PR #7.b)
- create-multiaccount-linked2
- create-multiaccount-linked3
- create-multiaccount-linked4
- create-multiaccount-linked5
permissions:
id-token: write
contents: read
steps:
- uses: actions/checkout@v4
- uses: aws-actions/configure-aws-credentials@v4
with:
role-to-assume: arn:aws:iam::${{ secrets.AWS_SINGLE_ACCOUNT_ID }}:role/GitHubActionsE2ERole
aws-region: ap-northeast-2
- uses: actions/setup-python@v5
with:
python-version: '3.12'
- run: pip install boto3
# Download single-account and multi-account ARN artifacts only
# Scope test artifacts (arns-scope-*) are verified by their own jobs
- uses: actions/download-artifact@v4
with:
pattern: arns-networking
path: artifacts/
merge-multiple: true
- uses: actions/download-artifact@v4
with:
pattern: arns-core
path: artifacts/
merge-multiple: true
- uses: actions/download-artifact@v4
with:
pattern: arns-databases
path: artifacts/
merge-multiple: true
- uses: actions/download-artifact@v4
with:
pattern: arns-analytics
path: artifacts/
merge-multiple: true
- uses: actions/download-artifact@v4
with:
pattern: arns-integration
path: artifacts/
merge-multiple: true
- uses: actions/download-artifact@v4
with:
pattern: arns-security
path: artifacts/
merge-multiple: true
- uses: actions/download-artifact@v4
with:
pattern: arns-devtools
path: artifacts/
merge-multiple: true
- uses: actions/download-artifact@v4
with:
pattern: arns-ml
path: artifacts/
merge-multiple: true
- uses: actions/download-artifact@v4
with:
pattern: arns-media-iot
path: artifacts/
merge-multiple: true
- uses: actions/download-artifact@v4
with:
pattern: arns-misc
path: artifacts/
merge-multiple: true
- uses: actions/download-artifact@v4
with:
pattern: arns-global-us-east-1
path: artifacts/
merge-multiple: true
- uses: actions/download-artifact@v4
with:
pattern: arns-global-us-west-2
path: artifacts/
merge-multiple: true
- name: List downloaded ARN files
run: ls -la artifacts/ || echo "No artifacts directory"
working-directory: .
# Fast-fail signal: if the Lambda has not been invoked at all or its
# DLQ is full, abort in ~10s instead of letting verify_tags poll for 900s.
# Before PR #7.a, resources were pre-tagged with `map-migrated` so verify
# always passed regardless of Lambda state — a broken Lambda could ship
# undetected. This step + the pre-tagging removal are the real gate now.
- name: Assert auto-tagger Lambda is healthy
run: |
python3 assert_tagger_health.py \
--mpe-id "$MPE_ID" \
--region ap-northeast-2 \
--window-minutes 15 \
--min-invocations 1
- name: Verify tags on all resources
run: |
python3 verify_tags.py \
--arns-dir ../../artifacts/ \
--tag-key map-migrated \
--tag-value "$MPE_ID" \
--max-wait 900 \
--poll-interval 30
- uses: actions/upload-artifact@v4
if: always()
with:
name: verification-report
path: .github/scripts/verification-report.json
if-no-files-found: warn
# ══════════════════════════════════════════════════════════════════════════
# Phase 3b — deploy.sh generation + execution test
# Tests that configurator.html generates a working deploy.sh (not just the YAML)
# ══════════════════════════════════════════════════════════════════════════
test-deploy-sh:
name: Test deploy.sh generation and execution
runs-on: ubuntu-latest
needs: [deploy-single]
permissions:
id-token: write
contents: read
steps:
- uses: actions/checkout@v4
- uses: aws-actions/configure-aws-credentials@v4
with:
role-to-assume: arn:aws:iam::${{ secrets.AWS_SINGLE_ACCOUNT_ID }}:role/GitHubActionsE2ERole
aws-region: ap-northeast-2
- uses: actions/setup-node@v4
with:
node-version: '20'
- name: Install Playwright
working-directory: .
run: npm install playwright && npx playwright install chromium --with-deps
- name: Generate deploy.sh from configurator.html
working-directory: .
run: |
# Use a DIFFERENT MPE ID to avoid IAM role name collision with main E2E stack
# (both stacks would create map-auto-tagger-role-${MpeId}-ap-northeast-2)
DEPLOY_SH_MPE="migTEST9999999"
node .github/scripts/generate_deploy_sh.js \
--mpe-id "$DEPLOY_SH_MPE" \
--agreement-date "2024-01-01" \
--agreement-end-date "2099-12-31" \
--region ap-northeast-2 \
--mode single \
--output /tmp/deploy-generated.sh
echo "DEPLOY_SH_MPE=$DEPLOY_SH_MPE" >> $GITHUB_ENV
- name: Run generated deploy.sh (dry-run validation)
working-directory: .
run: |
# Verify key elements are present in the generated script
echo "Validating generated deploy.sh contents..."
grep -q "aws cloudformation deploy" /tmp/deploy-generated.sh && echo "✅ cloudformation deploy command present"
grep -q "$MPE_ID" /tmp/deploy-generated.sh && echo "✅ MPE ID present"
grep -q "ap-northeast-2" /tmp/deploy-generated.sh && echo "✅ region present"
grep -q "AgreementStartDate" /tmp/deploy-generated.sh && echo "✅ agreement date parameter present"
grep -q "CloudTrail" /tmp/deploy-generated.sh && echo "✅ CloudTrail preflight check present"
grep -q "map2-auto-tagger" /tmp/deploy-generated.sh && echo "✅ stack name present"
echo "All content checks passed."
- name: Execute generated deploy.sh
working-directory: .
run: |
ACCT=$(aws sts get-caller-identity --query Account --output text)
S3_BUCKET="cfn-e2e-${ACCT}-${PR_NUMBER}-ap-northeast-2"
aws s3api create-bucket --bucket "$S3_BUCKET" --region ap-northeast-2 --create-bucket-configuration LocationConstraint=ap-northeast-2 2>/dev/null || true
# Inject S3 bucket into the script (deploy.sh stages template to S3 itself,
# but needs a bucket to exist — the script creates one named after account ID)
bash /tmp/deploy-generated.sh
env:
AWS_DEFAULT_REGION: ap-northeast-2
# ══════════════════════════════════════════════════════════════════════════
# Phase 3c — Scoping tests
# Tests that account scope, VPC scope, and date filtering work correctly
# ══════════════════════════════════════════════════════════════════════════
# ── Account scope test ─────────────────────────────────────────────────────
deploy-scope-account:
# PR #7.b: scope stacks deploy to linked1 (isolation from main Lambda)
name: "[Scope] Deploy account-scoped stack (linked1)"
runs-on: ubuntu-latest
permissions:
id-token: write
contents: read
steps:
- uses: actions/checkout@v4
- uses: aws-actions/configure-aws-credentials@v4
with:
role-to-assume: arn:aws:iam::${{ secrets.AWS_LINKED1_ACCOUNT_ID }}:role/GitHubActionsE2ERole
aws-region: ap-northeast-2
- name: Deploy account-scoped stack in linked1
working-directory: .
run: |
ACCT=$(aws sts get-caller-identity --query Account --output text)
S3_BUCKET="cfn-e2e-${ACCT}-${PR_NUMBER}-ap-northeast-2"
aws s3api create-bucket --bucket "$S3_BUCKET" --region ap-northeast-2 --create-bucket-configuration LocationConstraint=ap-northeast-2 2>/dev/null || true
# Scope to linked1 itself — Lambda processes events in its own account
# and verifies the account matches the scoped list before tagging.
aws cloudformation deploy \
--stack-name "${STACK_NAME}-scope-acct" \
--template-file map2-auto-tagger-optimized.yaml \
--parameter-overrides \
MpeId="$SCOPE_ACCT_MPE" \
AgreementStartDate="2024-01-01" \
ScopeMode="account" \
ScopedAccountIds="${{ secrets.AWS_LINKED1_ACCOUNT_ID }}" \
--capabilities CAPABILITY_NAMED_IAM \
--s3-bucket "$S3_BUCKET" \
--s3-prefix "e2e-scope-acct" \
--region ap-northeast-2 \
--no-fail-on-empty-changeset
create-scope-account-inscope:
name: "[Scope] Create resource in-scope (should be tagged)"
runs-on: ubuntu-latest
needs: [deploy-scope-account]
permissions:
id-token: write
contents: read
steps:
- uses: actions/checkout@v4
- uses: aws-actions/configure-aws-credentials@v4
with:
role-to-assume: arn:aws:iam::${{ secrets.AWS_LINKED1_ACCOUNT_ID }}:role/GitHubActionsE2ERole
aws-region: ap-northeast-2
- uses: actions/setup-python@v5
with:
python-version: '3.12'
- run: pip install boto3
- name: Create S3 bucket in linked1 (in scope — should be tagged)
run: |
python3 - <<'EOF'
import boto3, json, os, time
s3 = boto3.client('s3', region_name='ap-northeast-2')
name = f"e2e-scope-acct-inscope-{os.environ['PR_NUMBER']}-{int(time.time())}"
s3.create_bucket(Bucket=name, CreateBucketConfiguration={'LocationConstraint': 'ap-northeast-2'})
record = {"arn": f"arn:aws:s3:::{name}", "service": "s3", "region": "ap-northeast-2",
"account": boto3.client('sts').get_caller_identity()['Account'],
"resource_id": name, "taggable": True,
"expected_tag_key": "map-migrated", "expected_tag_value": os.environ['SCOPE_ACCT_MPE']}
with open('created-arns-scope-acct-inscope.json', 'w') as f:
json.dump([record], f)
print(f"Created S3 bucket: {name}")
EOF
working-directory: .github/scripts
- uses: actions/upload-artifact@v4
if: always()
with:
name: arns-scope-acct-inscope
path: .github/scripts/created-arns-scope-acct-inscope.json
if-no-files-found: warn
create-scope-account-outscope:
# Deploy a second stack in linked1 scoped to a NON-EXISTENT account ID —
# Lambda will filter out ALL resources since account 999999999999 never
# matches. Resources created in linked1 should NOT be tagged by this
# scoped Lambda.
name: "[Scope] Create resource out-of-scope (different account scope)"
runs-on: ubuntu-latest
needs: [deploy-scope-account]
permissions:
id-token: write
contents: read
steps:
- uses: actions/checkout@v4
- uses: aws-actions/configure-aws-credentials@v4
with:
role-to-assume: arn:aws:iam::${{ secrets.AWS_LINKED1_ACCOUNT_ID }}:role/GitHubActionsE2ERole
aws-region: ap-northeast-2
- name: Deploy out-of-scope stack (scoped to nonexistent account)
working-directory: .
run: |
ACCT=$(aws sts get-caller-identity --query Account --output text)
S3_BUCKET="cfn-e2e-${ACCT}-${PR_NUMBER}-ap-northeast-2"
# Stack scoped to 999999999999 — will never tag resources in this account
aws cloudformation deploy \
--stack-name "${STACK_NAME}-scope-acct-out" \
--template-file map2-auto-tagger-optimized.yaml \
--parameter-overrides \
MpeId="$SCOPE_ACCT_OUT_MPE" \
AgreementStartDate="2024-01-01" \
ScopeMode="account" \
ScopedAccountIds="999999999999" \
--capabilities CAPABILITY_NAMED_IAM \
--s3-bucket "$S3_BUCKET" \
--s3-prefix "e2e-scope-acct-out" \
--region ap-northeast-2 \
--no-fail-on-empty-changeset
- uses: actions/setup-python@v5
with:
python-version: '3.12'
- run: pip install boto3
- name: Create S3 bucket (should NOT be tagged — account not in scope)
run: |
python3 - <<'EOF'
import boto3, json, os, time
s3 = boto3.client('s3', region_name='ap-northeast-2')
name = f"e2e-scope-acct-outscope-{os.environ['PR_NUMBER']}-{int(time.time())}"
s3.create_bucket(Bucket=name, CreateBucketConfiguration={'LocationConstraint': 'ap-northeast-2'})
record = {"arn": f"arn:aws:s3:::{name}", "service": "s3", "region": "ap-northeast-2",
"account": boto3.client('sts').get_caller_identity()['Account'],
"resource_id": name, "taggable": True,
"expected_tag_key": "map-migrated", "expected_tag_value": os.environ['SCOPE_ACCT_OUT_MPE']}
with open('created-arns-scope-acct-outscope.json', 'w') as f:
json.dump([record], f)
print(f"Created S3 bucket: {name}")
EOF
working-directory: .github/scripts
- uses: actions/upload-artifact@v4
if: always()
with:
name: arns-scope-acct-outscope
path: .github/scripts/created-arns-scope-acct-outscope.json
if-no-files-found: warn
  verify-scope-account-positive:
    # Downloads the ARN record produced by create-scope-account-inscope and
    # polls (via verify_tags.py) until the in-scope resource carries the
    # map-migrated tag, up to --max-wait seconds.
    name: "[Scope] Verify in-scope resource IS tagged"
    runs-on: ubuntu-latest
    needs: [create-scope-account-inscope]
    permissions:
      id-token: write
      contents: read
    steps:
      - uses: actions/checkout@v4
      - uses: aws-actions/configure-aws-credentials@v4
        with:
          role-to-assume: arn:aws:iam::${{ secrets.AWS_LINKED1_ACCOUNT_ID }}:role/GitHubActionsE2ERole
          aws-region: ap-northeast-2
      - uses: actions/setup-python@v5
        with:
          python-version: '3.12'
      - run: pip install boto3
      # `uses` steps ignore the file-level working-directory default, so the
      # artifact lands in <workspace>/artifacts/ — hence ../../artifacts/ below.
      - uses: actions/download-artifact@v4
        with:
          name: arns-scope-acct-inscope
          path: artifacts/
      - run: |
          # Scope-positive: confirm the resource got tagged. linked1 hosts 3
          # scope Lambdas (acct, acct-out, vpc) and they race for in-account
          # resources — the scope filter means not all 3 tag, but any one of
          # them tagging proves the positive path works. Accept any `migTEST*`
          # MPE as a success signal. Negative tests still use strict match.
          python3 verify_tags.py \
            --arns-dir ../../artifacts/ \
            --tag-key map-migrated \
            --tag-value "$SCOPE_ACCT_MPE" \
            --tag-value-prefix "migTEST" \
            --max-wait 600 \
            --poll-interval 30
        working-directory: .github/scripts
  verify-scope-account-negative:
    # Negative check: the bucket was created in an account outside the
    # stack's ScopedAccountIds (999999999999), so the map-migrated tag must
    # still be absent after --not-tagged-wait seconds (strict value match).
    name: "[Scope] Verify out-of-scope resource is NOT tagged"
    runs-on: ubuntu-latest
    # PR #7.b: removed continue-on-error. linked1 has no main Lambda
    # racing to tag, so the scope filter is now testable deterministically.
    needs: [create-scope-account-outscope]
    permissions:
      id-token: write
      contents: read
    steps:
      - uses: actions/checkout@v4
      - uses: aws-actions/configure-aws-credentials@v4
        with:
          role-to-assume: arn:aws:iam::${{ secrets.AWS_LINKED1_ACCOUNT_ID }}:role/GitHubActionsE2ERole
          aws-region: ap-northeast-2
      - uses: actions/setup-python@v5
        with:
          python-version: '3.12'
      - run: pip install boto3
      - uses: actions/download-artifact@v4
        with:
          name: arns-scope-acct-outscope
          path: artifacts/
      - run: |
          python3 verify_tags.py \
            --arns-dir ../../artifacts/ \
            --tag-key map-migrated \
            --tag-value "$SCOPE_ACCT_OUT_MPE" \
            --expect-not-tagged \
            --not-tagged-wait 120
        working-directory: .github/scripts
# ── VPC scope test ──────────────────────────────────────────────────────────
deploy-scope-vpc:
# PR #7.b: scope stacks deploy to linked1 (isolation from main Lambda).
# This job also creates the VPC in linked1 so the scope-vpc Lambda has
# a real VPC to filter by. Output the VPC ID for downstream jobs.
name: "[Scope] Deploy VPC-scoped stack (linked1)"
runs-on: ubuntu-latest
permissions:
id-token: write
contents: read
outputs:
vpc-id: ${{ steps.vpc.outputs.vpc-id }}
default-vpc-id: ${{ steps.vpc.outputs.default-vpc-id }}
steps:
- uses: actions/checkout@v4
- uses: aws-actions/configure-aws-credentials@v4
with:
role-to-assume: arn:aws:iam::${{ secrets.AWS_LINKED1_ACCOUNT_ID }}:role/GitHubActionsE2ERole
aws-region: ap-northeast-2
- name: Create scoped VPC in linked1
id: vpc
run: |
# Create a scoped VPC dedicated to this PR's scope-vpc test.
VPC_ID=$(aws ec2 create-vpc \
--cidr-block 10.42.0.0/16 \
--region ap-northeast-2 \
--tag-specifications "ResourceType=vpc,Tags=[{Key=e2e-scope-vpc,Value=pr${PR_NUMBER}}]" \
--query Vpc.VpcId --output text)
echo "vpc-id=$VPC_ID" >> "$GITHUB_OUTPUT"
echo "Scoped VPC: $VPC_ID"
# Default VPC for negative test (SG in default VPC = out of scope)
DEFAULT_VPC=$(aws ec2 describe-vpcs \
--filters Name=isDefault,Values=true \
--region ap-northeast-2 \
--query 'Vpcs[0].VpcId' --output text 2>/dev/null)
if [ "$DEFAULT_VPC" = "None" ] || [ -z "$DEFAULT_VPC" ]; then
DEFAULT_VPC=$(aws ec2 create-default-vpc --region ap-northeast-2 --query Vpc.VpcId --output text 2>/dev/null || echo "")
if [ -z "$DEFAULT_VPC" ]; then
# Fallback: create a second VPC to serve as "default"
DEFAULT_VPC=$(aws ec2 create-vpc --cidr-block 172.31.0.0/16 --region ap-northeast-2 --query Vpc.VpcId --output text)
fi
fi
echo "default-vpc-id=$DEFAULT_VPC" >> "$GITHUB_OUTPUT"
echo "Default VPC (for out-of-scope negative): $DEFAULT_VPC"
- name: Deploy VPC-scoped stack in linked1
working-directory: .
run: |
ACCT=$(aws sts get-caller-identity --query Account --output text)
S3_BUCKET="cfn-e2e-${ACCT}-${PR_NUMBER}-ap-northeast-2"
aws s3api create-bucket --bucket "$S3_BUCKET" --region ap-northeast-2 --create-bucket-configuration LocationConstraint=ap-northeast-2 2>/dev/null || true
aws cloudformation deploy \
--stack-name "${STACK_NAME}-scope-vpc" \
--template-file map2-auto-tagger-optimized.yaml \
--parameter-overrides \
MpeId="$SCOPE_VPC_MPE" \
AgreementStartDate="2024-01-01" \
ScopeMode="vpc" \
ScopedVpcIds="${{ steps.vpc.outputs.vpc-id }}" \
--capabilities CAPABILITY_NAMED_IAM \
--s3-bucket "$S3_BUCKET" \
--s3-prefix "e2e-scope-vpc" \
--region ap-northeast-2 \
--no-fail-on-empty-changeset
  create-scope-vpc-inscope:
    # Creates a security group inside the PR-dedicated scoped VPC (from
    # deploy-scope-vpc's vpc-id output) and uploads its ARN record for the
    # positive verify job.
    name: "[Scope] Create SG in scoped VPC (should be tagged)"
    runs-on: ubuntu-latest
    needs: [deploy-scope-vpc]
    permissions:
      id-token: write
      contents: read
    steps:
      - uses: actions/checkout@v4
      - uses: aws-actions/configure-aws-credentials@v4
        with:
          role-to-assume: arn:aws:iam::${{ secrets.AWS_LINKED1_ACCOUNT_ID }}:role/GitHubActionsE2ERole
          aws-region: ap-northeast-2
      - uses: actions/setup-python@v5
        with:
          python-version: '3.12'
      - run: pip install boto3
      - name: Create security group in scoped VPC
        run: |
          python3 - <<'EOF'
          import boto3, json, os, time
          ec2 = boto3.client('ec2', region_name='ap-northeast-2')
          sts = boto3.client('sts')
          account = sts.get_caller_identity()['Account']
          vpc_id = os.environ['VPC_ID']
          sg = ec2.create_security_group(
              GroupName=f"e2e-scope-vpc-inscope-{os.environ['PR_NUMBER']}-{int(time.time())}",
              Description="E2E scope test - in VPC scope",
              VpcId=vpc_id
          )
          sg_id = sg['GroupId']
          arn = f"arn:aws:ec2:ap-northeast-2:{account}:security-group/{sg_id}"
          record = {"arn": arn, "service": "ec2", "region": "ap-northeast-2", "account": account,
                    "resource_id": sg_id, "taggable": True,
                    "expected_tag_key": "map-migrated", "expected_tag_value": os.environ['SCOPE_VPC_MPE']}
          with open('created-arns-scope-vpc-inscope.json', 'w') as f:
              json.dump([record], f)
          print(f"Created SG {sg_id} in VPC {vpc_id}")
          EOF
        working-directory: .github/scripts
        # VPC_ID is the scoped (in-scope) VPC created by deploy-scope-vpc.
        env:
          VPC_ID: ${{ needs.deploy-scope-vpc.outputs.vpc-id }}
      # Upload even on failure so teardown can still find the SG record.
      - uses: actions/upload-artifact@v4
        if: always()
        with:
          name: arns-scope-vpc-inscope
          path: .github/scripts/created-arns-scope-vpc-inscope.json
          if-no-files-found: warn
  create-scope-vpc-outscope:
    # Creates a security group in the default VPC — outside the stack's
    # ScopedVpcIds — so the negative verify job can assert it stays untagged.
    name: "[Scope] Create SG in default VPC (should NOT be tagged)"
    runs-on: ubuntu-latest
    needs: [deploy-scope-vpc]
    permissions:
      id-token: write
      contents: read
    steps:
      - uses: actions/checkout@v4
      - uses: aws-actions/configure-aws-credentials@v4
        with:
          role-to-assume: arn:aws:iam::${{ secrets.AWS_LINKED1_ACCOUNT_ID }}:role/GitHubActionsE2ERole
          aws-region: ap-northeast-2
      - uses: actions/setup-python@v5
        with:
          python-version: '3.12'
      - run: pip install boto3
      - name: Create security group in default VPC (out of scope)
        run: |
          python3 - <<'EOF'
          import boto3, json, os, time
          ec2 = boto3.client('ec2', region_name='ap-northeast-2')
          sts = boto3.client('sts')
          account = sts.get_caller_identity()['Account']
          default_vpc = os.environ['DEFAULT_VPC_ID']
          sg = ec2.create_security_group(
              GroupName=f"e2e-scope-vpc-outscope-{os.environ['PR_NUMBER']}-{int(time.time())}",
              Description="E2E scope test - out of VPC scope",
              VpcId=default_vpc
          )
          sg_id = sg['GroupId']
          arn = f"arn:aws:ec2:ap-northeast-2:{account}:security-group/{sg_id}"
          record = {"arn": arn, "service": "ec2", "region": "ap-northeast-2", "account": account,
                    "resource_id": sg_id, "taggable": True,
                    "expected_tag_key": "map-migrated", "expected_tag_value": os.environ['SCOPE_VPC_MPE']}
          with open('created-arns-scope-vpc-outscope.json', 'w') as f:
              json.dump([record], f)
          print(f"Created SG {sg_id} in default VPC {default_vpc}")
          EOF
        working-directory: .github/scripts
        # DEFAULT_VPC_ID comes from deploy-scope-vpc's default-vpc-id output.
        env:
          DEFAULT_VPC_ID: ${{ needs.deploy-scope-vpc.outputs.default-vpc-id }}
      # Upload even on failure so teardown can still find the SG record.
      - uses: actions/upload-artifact@v4
        if: always()
        with:
          name: arns-scope-vpc-outscope
          path: .github/scripts/created-arns-scope-vpc-outscope.json
          if-no-files-found: warn
  verify-scope-vpc-positive:
    # Polls until the SG created inside the scoped VPC carries the
    # map-migrated tag (prefix match — see inline comment below).
    name: "[Scope] Verify VPC-scoped SG IS tagged"
    runs-on: ubuntu-latest
    needs: [create-scope-vpc-inscope]
    permissions:
      id-token: write
      contents: read
    steps:
      - uses: actions/checkout@v4
      - uses: aws-actions/configure-aws-credentials@v4
        with:
          role-to-assume: arn:aws:iam::${{ secrets.AWS_LINKED1_ACCOUNT_ID }}:role/GitHubActionsE2ERole
          aws-region: ap-northeast-2
      - uses: actions/setup-python@v5
        with:
          python-version: '3.12'
      - run: pip install boto3
      - uses: actions/download-artifact@v4
        with:
          name: arns-scope-vpc-inscope
          path: artifacts/
      - run: |
          # Scope-positive: accept any `migTEST*` MPE. scope-acct ($SCOPE_ACCT_MPE)
          # also tags in-account resources, so last-writer-wins between it and
          # scope-vpc ($SCOPE_VPC_MPE) is a test-infra race, not a product bug.
          python3 verify_tags.py \
            --arns-dir ../../artifacts/ \
            --tag-key map-migrated \
            --tag-value "$SCOPE_VPC_MPE" \
            --tag-value-prefix "migTEST" \
            --max-wait 600 \
            --poll-interval 30
        working-directory: .github/scripts
  verify-scope-vpc-negative:
    # Negative check: the SG lives in the default VPC, outside ScopedVpcIds,
    # so the map-migrated tag must still be absent after --not-tagged-wait.
    name: "[Scope] Verify out-of-VPC SG is NOT tagged"
    runs-on: ubuntu-latest
    # PR #7.b: removed continue-on-error. linked1 isolation makes this deterministic.
    needs: [create-scope-vpc-outscope]
    permissions:
      id-token: write
      contents: read
    steps:
      - uses: actions/checkout@v4
      - uses: aws-actions/configure-aws-credentials@v4
        with:
          role-to-assume: arn:aws:iam::${{ secrets.AWS_LINKED1_ACCOUNT_ID }}:role/GitHubActionsE2ERole
          aws-region: ap-northeast-2
      - uses: actions/setup-python@v5
        with:
          python-version: '3.12'
      - run: pip install boto3
      - uses: actions/download-artifact@v4
        with:
          name: arns-scope-vpc-outscope
          path: artifacts/
      - run: |
          python3 verify_tags.py \
            --arns-dir ../../artifacts/ \
            --tag-key map-migrated \
            --tag-value "$SCOPE_VPC_MPE" \
            --expect-not-tagged \
            --not-tagged-wait 120
        working-directory: .github/scripts
# ── Date filter test ────────────────────────────────────────────────────────
# PR #7.b: scope stacks deploy to linked1 (isolation from main Lambda).
  deploy-scope-date:
    # Deploys a stack whose AgreementStartDate is tomorrow, so any resource
    # created during this run falls BEFORE the agreement window and should
    # not be tagged.
    name: "[Scope] Deploy future-dated stack (linked1)"
    runs-on: ubuntu-latest
    permissions:
      id-token: write
      contents: read
    steps:
      - uses: actions/checkout@v4
      - uses: aws-actions/configure-aws-credentials@v4
        with:
          role-to-assume: arn:aws:iam::${{ secrets.AWS_LINKED1_ACCOUNT_ID }}:role/GitHubActionsE2ERole
          aws-region: ap-northeast-2
      - name: Deploy stack with future agreement start date in linked1
        working-directory: .
        run: |
          # GNU date syntax — fine on ubuntu-latest runners.
          TOMORROW=$(date -d '+1 day' '+%Y-%m-%d')
          ACCT=$(aws sts get-caller-identity --query Account --output text)
          S3_BUCKET="cfn-e2e-${ACCT}-${PR_NUMBER}-ap-northeast-2"
          aws s3api create-bucket --bucket "$S3_BUCKET" --region ap-northeast-2 --create-bucket-configuration LocationConstraint=ap-northeast-2 2>/dev/null || true
          aws cloudformation deploy \
            --stack-name "${STACK_NAME}-scope-date" \
            --template-file map2-auto-tagger-optimized.yaml \
            --parameter-overrides \
              MpeId="$SCOPE_DATE_MPE" \
              AgreementStartDate="${TOMORROW}" \
            --capabilities CAPABILITY_NAMED_IAM \
            --s3-bucket "$S3_BUCKET" \
            --s3-prefix "e2e-scope-date" \
            --region ap-northeast-2 \
            --no-fail-on-empty-changeset
  create-scope-date:
    # Creates an S3 bucket today — before the future-dated stack's
    # AgreementStartDate — and records its ARN for the negative verify job.
    name: "[Scope] Create resource before agreement start (should NOT be tagged)"
    runs-on: ubuntu-latest
    needs: [deploy-scope-date]
    permissions:
      id-token: write
      contents: read
    steps:
      - uses: actions/checkout@v4
      - uses: aws-actions/configure-aws-credentials@v4
        with:
          role-to-assume: arn:aws:iam::${{ secrets.AWS_LINKED1_ACCOUNT_ID }}:role/GitHubActionsE2ERole
          aws-region: ap-northeast-2
      - uses: actions/setup-python@v5
        with:
          python-version: '3.12'
      - run: pip install boto3
      - name: Create S3 bucket (before agreement start date)
        run: |
          python3 - <<'EOF'
          import boto3, json, os, time
          s3 = boto3.client('s3', region_name='ap-northeast-2')
          name = f"e2e-scope-date-{os.environ['PR_NUMBER']}-{int(time.time())}"
          s3.create_bucket(Bucket=name, CreateBucketConfiguration={'LocationConstraint': 'ap-northeast-2'})
          record = {"arn": f"arn:aws:s3:::{name}", "service": "s3", "region": "ap-northeast-2",
                    "account": boto3.client('sts').get_caller_identity()['Account'],
                    "resource_id": name, "taggable": True,
                    "expected_tag_key": "map-migrated", "expected_tag_value": os.environ['SCOPE_DATE_MPE']}
          with open('created-arns-scope-date.json', 'w') as f:
              json.dump([record], f)
          print(f"Created S3 bucket: {name} (should NOT be tagged — before agreement start)")
          EOF
        working-directory: .github/scripts
      # Upload even on failure so teardown can still find the bucket record.
      - uses: actions/upload-artifact@v4
        if: always()
        with:
          name: arns-scope-date
          path: .github/scripts/created-arns-scope-date.json
          if-no-files-found: warn
  verify-scope-date:
    # Negative check: resource predates the agreement start date, so the
    # map-migrated tag must still be absent after --not-tagged-wait seconds.
    name: "[Scope] Verify pre-agreement resource is NOT tagged"
    runs-on: ubuntu-latest
    needs: [create-scope-date]
    permissions:
      id-token: write
      contents: read
    steps:
      - uses: actions/checkout@v4
      - uses: aws-actions/configure-aws-credentials@v4
        with:
          role-to-assume: arn:aws:iam::${{ secrets.AWS_LINKED1_ACCOUNT_ID }}:role/GitHubActionsE2ERole
          aws-region: ap-northeast-2
      - uses: actions/setup-python@v5
        with:
          python-version: '3.12'
      - run: pip install boto3
      - uses: actions/download-artifact@v4
        with:
          name: arns-scope-date
          path: artifacts/
      - run: |
          python3 verify_tags.py \
            --arns-dir ../../artifacts/ \
            --tag-key map-migrated \
            --tag-value "$SCOPE_DATE_MPE" \
            --expect-not-tagged \
            --not-tagged-wait 120
        working-directory: .github/scripts
# ══════════════════════════════════════════════════════════════════════════
# Phase 4 — Teardown (always runs, even on failure)
# ══════════════════════════════════════════════════════════════════════════
  teardown:
    name: Teardown all E2E resources
    runs-on: ubuntu-latest
    # always() — teardown must run even when upstream create/verify jobs
    # failed, otherwise test resources leak in the shared accounts.
    if: always()
    needs:
      - verify
      - verify-linked-account
      - test-deploy-sh
      - verify-scope-account-positive
      - verify-scope-account-negative
      - verify-scope-vpc-positive
      - verify-scope-vpc-negative
      - verify-scope-date
      - create-networking
      - create-core
      - create-databases
      - create-analytics
      - create-integration
      - create-security
      - create-devtools
      - create-ml
      - create-media-iot
      - create-misc
      - create-global-us-east-1
      - create-global-us-west-2
      # linked1 intentionally omitted: reserved for scope tests (PR #7.b)
      - create-multiaccount-linked2
      - create-multiaccount-linked3
      - create-multiaccount-linked4
      - create-multiaccount-linked5
    permissions:
      id-token: write
      contents: read
    steps:
      - uses: actions/checkout@v4
      # Start under the single-account role; later steps re-assume linked1
      # and then single-account again — order of the steps below matters.
      - uses: aws-actions/configure-aws-credentials@v4
        with:
          role-to-assume: arn:aws:iam::${{ secrets.AWS_SINGLE_ACCOUNT_ID }}:role/GitHubActionsE2ERole
          aws-region: ap-northeast-2
      - uses: actions/setup-python@v5
        with:
          python-version: '3.12'
      - run: pip install boto3
      # Download all ARN records so teardown knows what to delete
      - uses: actions/download-artifact@v4
        with:
          pattern: arns-*
          path: artifacts/
          merge-multiple: true
        continue-on-error: true
      # Every step below is continue-on-error so one failed delete never
      # blocks the remaining cleanup.
      - name: Delete test resources
        run: |
          python3 teardown.py \
            --arns-dir ../../artifacts/ \
            --pr "$PR_NUMBER" \
            --tag-value "$MPE_ID"
        continue-on-error: true
      - name: Delete single-account CloudFormation stacks and S3 staging bucket
        working-directory: .
        run: |
          for REGION in ap-northeast-2 us-east-1 us-west-2; do
            aws cloudformation delete-stack --stack-name "$STACK_NAME" --region "$REGION" || true
          done
          # Delete the stack deployed by deploy.sh test
          aws cloudformation delete-stack --stack-name "map-auto-tagger-migTEST9999999" --region ap-northeast-2 || true
          ACCOUNT=$(aws sts get-caller-identity --query Account --output text)
          # Delete per-region staging buckets
          for REGION in ap-northeast-2 us-east-1 us-west-2; do
            aws s3 rb "s3://cfn-e2e-${ACCOUNT}-${PR_NUMBER}-${REGION}" --force 2>/dev/null || true
          done
          aws s3 rb "s3://cfn-e2e-${ACCOUNT}-${PR_NUMBER}" --force 2>/dev/null || true
          echo "Single-account stack deletions initiated"
        continue-on-error: true
      # PR #7.b: scope-test stacks (acct, acct-out, vpc, date) live in linked1.
      # Teardown them from linked1 with their own role assumption.
      - name: Assume role in linked1 for scope-stack teardown
        uses: aws-actions/configure-aws-credentials@v4
        with:
          role-to-assume: arn:aws:iam::${{ secrets.AWS_LINKED1_ACCOUNT_ID }}:role/GitHubActionsE2ERole
          aws-region: ap-northeast-2
        continue-on-error: true
      - name: Delete scope-test CloudFormation stacks in linked1
        run: |
          # Start all CFN deletes in parallel
          for SUFFIX in scope-acct scope-acct-out scope-vpc scope-date; do
            aws cloudformation delete-stack --stack-name "${STACK_NAME}-${SUFFIX}" --region ap-northeast-2 || true
          done
          # Wait for all CFN deletes to complete so stack-managed resources
          # (SGs, policies) are gone before we try to delete the parent VPC.
          for SUFFIX in scope-acct scope-acct-out scope-vpc scope-date; do
            aws cloudformation wait stack-delete-complete \
              --stack-name "${STACK_NAME}-${SUFFIX}" --region ap-northeast-2 2>/dev/null || true
          done
          # Delete the raw VPC that deploy-scope-vpc created outside CFN.
          # Before delete-vpc succeeds, we need any remaining SGs/subnets/RTs
          # in it cleared — create_security_group from our scope tests was
          # done outside CFN too, so find + delete them here.
          for VPC in $(aws ec2 describe-vpcs --region ap-northeast-2 \
            --filters "Name=tag:e2e-scope-vpc,Values=pr${PR_NUMBER}" \
            --query 'Vpcs[].VpcId' --output text 2>/dev/null); do
            echo "Cleaning scope VPC: $VPC"
            # Delete non-default SGs first
            for SG in $(aws ec2 describe-security-groups --region ap-northeast-2 \
              --filters "Name=vpc-id,Values=$VPC" \
              --query 'SecurityGroups[?GroupName!=`default`].GroupId' --output text 2>/dev/null); do
              aws ec2 delete-security-group --region ap-northeast-2 --group-id "$SG" 2>/dev/null || true
            done
            # Delete subnets (usually none for a freshly-created scope VPC)
            for SN in $(aws ec2 describe-subnets --region ap-northeast-2 \
              --filters "Name=vpc-id,Values=$VPC" \
              --query 'Subnets[].SubnetId' --output text 2>/dev/null); do
              aws ec2 delete-subnet --region ap-northeast-2 --subnet-id "$SN" 2>/dev/null || true
            done
            # Detach + delete any IGWs attached
            for IGW in $(aws ec2 describe-internet-gateways --region ap-northeast-2 \
              --filters "Name=attachment.vpc-id,Values=$VPC" \
              --query 'InternetGateways[].InternetGatewayId' --output text 2>/dev/null); do
              aws ec2 detach-internet-gateway --region ap-northeast-2 --internet-gateway-id "$IGW" --vpc-id "$VPC" 2>/dev/null || true
              aws ec2 delete-internet-gateway --region ap-northeast-2 --internet-gateway-id "$IGW" 2>/dev/null || true
            done
            aws ec2 delete-vpc --region ap-northeast-2 --vpc-id "$VPC" 2>/dev/null && echo " deleted VPC $VPC" || echo " VPC $VPC still has attached resources"
          done
          # Scope-test S3 buckets created by inline python (not covered by
          # the ARN-artifact teardown because that ran under single-account role).
          ACCOUNT=$(aws sts get-caller-identity --query Account --output text)
          for B in $(aws s3api list-buckets --query "Buckets[?starts_with(Name, 'e2e-scope-')].Name" --output text 2>/dev/null); do
            aws s3 rb "s3://$B" --force 2>/dev/null && echo " deleted bucket s3://$B" || true
          done
          aws s3 rb "s3://cfn-e2e-${ACCOUNT}-${PR_NUMBER}-ap-northeast-2" --force 2>/dev/null || true
          echo "linked1 scope-test teardown complete"
        continue-on-error: true
      # Switch back to the single-account role for the StackSet delete step.
      - name: Re-assume single-account role for StackSet delete
        uses: aws-actions/configure-aws-credentials@v4
        with:
          role-to-assume: arn:aws:iam::${{ secrets.AWS_SINGLE_ACCOUNT_ID }}:role/GitHubActionsE2ERole
          aws-region: ap-northeast-2
        continue-on-error: true
      - name: Delete multi-account StackSet
        run: |
          python3 delete_stackset.py \
            --name "$STACK_NAME-stackset" \
            --accounts "${{ secrets.E2E_LINKED_ACCOUNT_IDS }}" \
            --org-unit-ids "${{ secrets.AWS_SANDBOX_OU_ID }}" \
            --region ap-northeast-2
        continue-on-error: true