// Deploy a containerized Apache Airflow setup on AWS: an RDS PostgreSQL metadata
// database, an ElastiCache Redis broker, and ECS-based services for the webserver,
// scheduler, Flower UI, and Celery workers.
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
import * as awscloud from "@pulumi/cloud-aws";

let config = new pulumi.Config("airflow");
const dbPassword = config.require("dbPassword");
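// The password comes from stack configuration; it can be stored as an encrypted
// secret before deploying, for example:
//   pulumi config set airflow:dbPassword <value> --secret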

// Reuse the network and security group of the shared awscloud cluster so the data
// stores are reachable from the service containers.
let securityGroupIds = [ awscloud.getCluster()!.securityGroupId! ];

let dbSubnets = new aws.rds.SubnetGroup("dbsubnets", {
    subnetIds: awscloud.getNetwork().subnetIds,
});

// A small PostgreSQL instance for the Airflow metadata database.
let db = new aws.rds.Instance("postgresdb", {
    engine: "postgres",

    instanceClass: "db.t2.micro",
    allocatedStorage: 20,

    dbSubnetGroupName: dbSubnets.id,
    vpcSecurityGroupIds: securityGroupIds,

    name: "airflow",
    username: "airflow",
    password: dbPassword,

    // Skip the final snapshot so the stack can be destroyed cleanly; not advisable in production.
    skipFinalSnapshot: true,
});
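// Note: `db.endpoint` resolves to a "host:port" string; the host portion is split
// out below when building the container environment.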

let cacheSubnets = new aws.elasticache.SubnetGroup("cachesubnets", {
    subnetIds: awscloud.getNetwork().subnetIds,
});

// A single-node Redis cluster to act as the Celery message broker.
let cacheCluster = new aws.elasticache.Cluster("cachecluster", {
    clusterId: "cache-" + pulumi.getStack(),
    engine: "redis",

    nodeType: "cache.t2.micro",
    numCacheNodes: 1,

    subnetGroupName: cacheSubnets.id,
    securityGroupIds: securityGroupIds,
});

// Environment variables shared by every Airflow container.
let environment = {
    "POSTGRES_HOST": db.endpoint.apply(e => e.split(":")[0]),
    "POSTGRES_PASSWORD": dbPassword,

    "REDIS_HOST": cacheCluster.cacheNodes.apply(n => n[0].address),

    "EXECUTOR": "Celery",
};
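// These variable names are what the entrypoint in ./airflow-container is assumed
// to understand (they follow the conventions of the widely used
// puckel/docker-airflow image: POSTGRES_*, REDIS_HOST, and EXECUTOR).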

// The controller runs the web UI and the scheduler side by side.
let airflowController = new awscloud.Service("airflowcontroller", {
    containers: {
        "webserver": {
            build: "./airflow-container",
            ports: [{ port: 8080, external: true, protocol: "http" }],
            environment: environment,
            command: [ "webserver" ],
        },

        "scheduler": {
            build: "./airflow-container",
            environment: environment,
            command: [ "scheduler" ],
        },
    },
    // Keep a single replica: this service bundles the scheduler, which is not
    // designed to run as multiple concurrent instances.
    replicas: 1,
});

// Flower, the Celery monitoring UI.
let airflower = new awscloud.Service("airflower", {
    containers: {
        // If the container were named "flower", the service would create environment
        // variables starting with `FLOWER_`, which Flower tries (and fails) to parse
        // as configuration, so we pick a different name.
        "notflower": {
            build: "./airflow-container",
            ports: [{ port: 5555, external: true, protocol: "http" }],
            environment: environment,
            command: [ "flower" ],
        },
    },
});

// Celery workers that execute the scheduled tasks; each gets 1 GB of memory.
let airflowWorkers = new awscloud.Service("airflowworkers", {
    containers: {
        "worker": {
            build: "./airflow-container",
            environment: environment,
            command: [ "worker" ],
            memory: 1024,
        },
    },
    replicas: 3,
});

export let airflowEndpoint = airflowController.defaultEndpoint.apply(e => e.hostname);
export let flowerEndpoint = airflower.defaultEndpoint.apply(e => e.hostname);
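// After `pulumi up`, the endpoints can be read back from the stack outputs, e.g.:
//   pulumi stack output airflowEndpoint
//   pulumi stack output flowerEndpoint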