diff --git a/README.md b/README.md index 16d943c3..e21a9205 100644 --- a/README.md +++ b/README.md @@ -1,146 +1,51 @@ -Prerequisites: - -AWS CLI installed and configured with admin access to the C&C account. -Terraform installed -Git installed - - -Create new S3 bucket for new Jenkins instance to use setting the following options DURING CREATION some can't be set after - - bucket should be named using the following template : avillach-biodatacatalyst-deployments- - - Object Locking must be enabled - - Encryption should be AES-256 - - Enable Object-level logging as secrets are stored in this bucket - - Enable versioning - - Server access logging enabled (hms-dbmi-cnc-cloudtrail, no target prefix) - -Set Bucket Policy in the Permissions section for the bucket to the following after replacing __BUCKET_NAME__ with the bucket name: - ------------------------------------------------------ -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { - "AWS": "arn:aws:iam::191687121306:role/hms-dbmi-cnc-role" - }, - "Action": [ - "s3:GetObject", - "s3:PutObject", - "s3:PutObjectAcl", - "s3:GetObjectAcl", - "s3:GetObjectTagging", - "s3:DeleteObject" - ], - "Resource": "arn:aws:s3:::__BUCKET_NAME__/*" - }, - { - "Effect": "Allow", - "Principal": { - "AWS": "arn:aws:iam::752463128620:role/system/jenkins-s3-role" - }, - "Action": [ - "s3:GetObject", - "s3:PutObject", - "s3:PutObjectAcl", - "s3:GetObjectAcl", - "s3:GetObjectTagging", - "s3:DeleteObject" - ], - "Resource": "arn:aws:s3:::__BUCKET_NAME__/*" - } - ] -} ------------------------------------------------------ - - - - -Clone https://github.com/hms-dbmi/avillachlab-jenkins - -Run the following commands after replacing all __VARIABLE_NAME__ entries with their correct values for the environment: - ------------------------------------------------------ - -cd dev-jenkins-terraform -env > env.txt -terraform init -terraform apply -auto-approve \ --var "git-commit=__GIT_COMMIT_FOR_JENKINS_REPO__" \ --var "stack-s3-bucket=__S3_BUCKET_NAME_YOU_CREATED__" \ --var "stack-id=__S3_BUCKET_NAME_SUFFIX__" \ --var "subnet-id=__JENKINS_SUBNET_ID__" \ --var "vpc-id=__JENKINS_VPC_ID__" \ --var "instance-profile-name=__JENKINS_INSTANCE_PROFILE_NAME__" \ --var "access-cidr=__JENKINS_ACCESS_CIDR__" \ --var "provisioning-cidr=__JENKINS_PROVISIONING_CIDR__" - -aws s3 --sse=AES256 cp terraform.tfstate s3://${stack_s3_bucket}/jenkins_state_${GIT_COMMIT}/terraform.tfstate -aws s3 --sse=AES256 cp env.txt s3://${stack_s3_bucket}/jenkins_state_${GIT_COMMIT}/last_env.txt - -INSTANCE_ID=`terraform state show aws_instance.dev-jenkins | grep "\"i-[a-f0-9]" | cut -f 2 -d "=" | sed 's/"//g'` - -while [ -z $(/usr/local/bin/aws --region=us-east-1 ec2 describe-tags --filters "Name=resource-id,Values=${INSTANCE_ID}" | grep InitComplete) ];do echo "still initializing";sleep 10;done - -echo "http://`terraform state show aws_instance.dev-jenkins | grep private_ip | cut -f 2 -d "=" | sed 's/\"//g' | sed 's/ //g'`" - ------------------------------------------------------ - - -Set stack_s3_bucket Value to new S3 bucket name in new Jenkins - - Manage Jenkins > Configure System - - under "Global properties" set stack_s3_bucket to the new bucket created in the first step - -Add the following arn as a trusted entity in the hms-dbmi-cnc-role in the prod account: - - https://console.aws.amazon.com/iam/home?region=us-east-1#/roles/hms-dbmi-cnc-role?section=trust - - example template : - - { - "Effect": "Allow", - "Principal": { - "AWS": 
"arn:aws:sts::752463128620:assumed-role/jenkins-s3-role/< instance id of the jenkins ec2 you just created >" - }, - "Action": "sts:AssumeRole", - "Condition": {} - } - - - example : - - { - "Effect": "Allow", - "Principal": { - "AWS": "arn:aws:sts::752463128620:assumed-role/jenkins-s3-role/i-0615f53dd368cbdfc" - }, - "Action": "sts:AssumeRole", - "Condition": {} - } - -Switch to Jenkins Configuration View ( DO NOT QUEUE UP THE JOBS! Wait for each to complete successfully before going on to the next. ) - -Run Jenkins Build "Create stack_variables.tf files" -Run Jenkins Build "Update VPC Settings" after confirming the following: - - confirm that the R53_Zone_ID is correct for the prod account Route 53 Zone - - confirm that the vpc and subnet group names are correct for the prod account -Run Jenkins Build "Update PIC-SURE Token Introspection Token" -Run Jenkins Build "Update Fence Client Credentials" - - provide the correct Fence Client ID and Client Secret as provided by the Fence team -Run Jenkins Build "Update HTTPD Certs and Key" - - provide the correct Cert, Chain and Key file for the production HTTPD server +# Overview +Welcome to the base folder of your Jenkins project. This folder contains two subfolders, each dedicated to a specific aspect of your Jenkins infrastructure: `jenkins-docker` and `jenkins-terraform`. Below, you'll find an overview of each component along with links to their respective README files for detailed information. -Switch to the Deployment View +## Jenkins Docker (`jenkins-docker`) -Run Jenkins Build Check For Updates - - The first time this runs it will take about 1.5 hours because it has to rekey the data. +This section focuses on the Dockerization of Jenkins, incorporating additional tools and configurations to enhance its functionality. The Jenkins Docker image is extended from the official LTS image, making it a versatile and powerful solution. For more information, refer to the [Jenkins Docker README](jenkins-docker/README.md). -Run Jenkins Build Swap Stacks - - This will point the internal production CNAME at the current stage environment - - The current stage environment becomes prod and the current prod environment becomes stage +## Jenkins Terraform (`jenkins-terraform`) -Run Jenkins Build Check For Updates - - This time it should only take about a half hour because the data has already been rekeyed. +Here, Terraform is leveraged to deploy and manage Jenkins infrastructure on AWS. The README provides insights into the variables, backend configuration, and outputs defined in the Terraform files. Detailed information about variables, Terraform backend configuration, and outputs can be found in the [Jenkins Terraform README](jenkins-terraform/README.md). +Feel free to explore each component to understand their configurations, usage, and any additional details you may need for managing your Jenkins environment efficiently. If you have any questions or need assistance, don't hesitate to reach out. +## Table of Contents +- [Jenkins Docker](#jenkins-docker) +- [Jenkins Terraform](#jenkins-terraform) +Feel free to dive into the respective sections for detailed information and instructions. +# Jenkins Docker (`jenkins-docker`) + +This section focuses on the Dockerization of Jenkins, incorporating additional tools and configurations to enhance its functionality. The Jenkins Docker image is extended from the official LTS image, making it a versatile and powerful solution. 
+ +## [Jenkins Docker README](jenkins-docker/README.md) + +- [Features](jenkins-docker/README.md#features) +- [Prerequisites](jenkins-docker/README.md#prerequisites) +- [Building the Image](jenkins-docker/README.md#building-the-image) +- [Configuration](jenkins-docker/README.md#configuration) +- [Usage](jenkins-docker/README.md#usage) +- [Cleanup](jenkins-docker/README.md#cleanup) +- [Contributing](jenkins-docker/README.md#contributing) +- [License](jenkins-docker/README.md#license) + +Feel free to explore the Jenkins Docker README for comprehensive information on configuring, building, and using the Docker image. + +# Jenkins Terraform (`jenkins-terraform`) + +Here, Terraform is leveraged to deploy and manage Jenkins infrastructure on AWS. The README provides insights into the variables, backend configuration, and outputs defined in the Terraform files. + +## [Jenkins Terraform README](jenkins-terraform/README.md) + +- [Variables](jenkins-terraform/README.md#variables) +- [Terraform Backend](jenkins-terraform/README.md#terraform-backend) +- [Outputs](jenkins-terraform/README.md#outputs) + +Feel free to explore the Jenkins Terraform README for detailed information on variables, Terraform backend configuration, and outputs. + +Feel free to explore each component to understand their configurations, usage, and any additional details you may need for managing your Jenkins environment efficiently. If you have any questions or need assistance, don't hesitate to reach out. diff --git a/dev-jenkins-terraform/install-docker.sh b/dev-jenkins-terraform/install-docker.sh deleted file mode 100644 index d858d7dd..00000000 --- a/dev-jenkins-terraform/install-docker.sh +++ /dev/null @@ -1,375 +0,0 @@ -#!/bin/bash -sudo yum install wget -y -sudo yum install -y https://s3.amazonaws.com/ec2-downloads-windows/SSMAgent/latest/linux_amd64/amazon-ssm-agent.rpm -sudo systemctl enable amazon-ssm-agent -sudo systemctl start amazon-ssm-agent -wget https://s3.amazonaws.com/amazoncloudwatch-agent/centos/amd64/latest/amazon-cloudwatch-agent.rpm -sudo rpm -U amazon-cloudwatch-agent.rpm -sudo touch /opt/aws/amazon-cloudwatch-agent/etc/custom_config.json -echo " - -{ - \"metrics\": { - - \"metrics_collected\": { - \"cpu\": { - \"measurement\": [ - \"cpu_usage_idle\", - \"cpu_usage_user\", - \"cpu_usage_system\" - ], - \"metrics_collection_interval\": 300, - \"totalcpu\": false - }, - \"disk\": { - \"measurement\": [ - \"used_percent\" - ], - \"metrics_collection_interval\": 600, - \"resources\": [ - \"*\" - ] - }, - \"mem\": { - \"measurement\": [ - \"mem_used_percent\", - \"mem_available\", - \"mem_available_percent\", - \"mem_total\", - \"mem_used\" - - ], - \"metrics_collection_interval\": 600 - } - } - }, - \"logs\":{ - \"logs_collected\":{ - \"files\":{ - \"collect_list\":[ - { - \"file_path\":\"/var/log/secure\", - \"log_group_name\":\"secure\", - \"log_stream_name\":\"{instance_id} secure\", - \"timestamp_format\":\"UTC\" - }, - { - \"file_path\":\"/var/log/messages\", - \"log_group_name\":\"messages\", - \"log_stream_name\":\"{instance_id} messages\", - \"timestamp_format\":\"UTC\" - }, - { - \"file_path\":\"/var/log/audit/audit.log\", - \"log_group_name\":\"audit.log\", - \"log_stream_name\":\"{instance_id} audit.log\", - \"timestamp_format\":\"UTC\" - }, - { - \"file_path\":\"/var/log/yum.log\", - \"log_group_name\":\"yum.log\", - \"log_stream_name\":\"{instance_id} yum.log\", - \"timestamp_format\":\"UTC\" - }, - { - \"file_path\":\"/var/log/jenkins-docker-logs/*\", - \"log_group_name\":\"jenkins-logs\", - 
\"log_stream_name\":\"{instance_id} ${stack_id} jenkins-app-logs\", - \"timestamp_format\":\"UTC\" - }, - { - \"file_path\":\"/var/jenkins_home/jobs/Backup Jenkins Home/**/log\", - \"log_group_name\":\"jenkins-logs\", - \"log_stream_name\":\"{instance_id} ${stack_id} jenkins-build-logs Backup_Jenkins_Home\", - \"timestamp_format\":\"UTC\" - }, - { - \"file_path\":\"/var/jenkins_home/jobs/Create new Jenkins Server/**/log\", - \"log_group_name\":\"jenkins-logs\", - \"log_stream_name\":\"{instance_id} ${stack_id} jenkins-build-logs Create_new_Jenkins_Server\", - \"timestamp_format\":\"UTC\" - }, - { - \"file_path\":\"/var/jenkins_home/jobs/Create stack_variables.tf Files/**/log\", - \"log_group_name\":\"jenkins-logs\", - \"log_stream_name\":\"{instance_id} ${stack_id} jenkins-build-logs Create_stack_variables.tf_Files\", - \"timestamp_format\":\"UTC\" - }, - { - \"file_path\":\"/var/jenkins_home/jobs/Destroy Old Jenkins Server/**/log\", - \"log_group_name\":\"jenkins-logs\", - \"log_stream_name\":\"{instance_id} ${stack_id} jenkins-build-logs Destroy_Old_Jenkins_Server\", - \"timestamp_format\":\"UTC\" - }, - { - \"file_path\":\"/var/jenkins_home/jobs/Docker-AWSCLI/**/log\", - \"log_group_name\":\"jenkins-logs\", - \"log_stream_name\":\"{instance_id} ${stack_id} jenkins-build-logs Docker_AWSCLI\", - \"timestamp_format\":\"UTC\" - }, - { - \"file_path\":\"/var/jenkins_home/jobs/List Instance Profiles/**/log\", - \"log_group_name\":\"jenkins-logs\", - \"log_stream_name\":\"{instance_id} ${stack_id} jenkins-build-logs List_Instance_Profiles\", - \"timestamp_format\":\"UTC\" - }, - { - \"file_path\":\"/var/jenkins_home/jobs/Update Bucket Policy/**/log\", - \"log_group_name\":\"jenkins-logs\", - \"log_stream_name\":\"{instance_id} ${stack_id} jenkins-build-logs Update_Bucket_Policy\", - \"timestamp_format\":\"UTC\" - }, - { - \"file_path\":\"/var/jenkins_home/jobs/Update Fence Client Credentials/**/log\", - \"log_group_name\":\"jenkins-logs\", - \"log_stream_name\":\"{instance_id} ${stack_id} jenkins-build-logs Update_Fence_Client_Credentials\", - \"timestamp_format\":\"UTC\" - }, - { - \"file_path\":\"/var/jenkins_home/jobs/Update HTTPD Certs and Key/**/log\", - \"log_group_name\":\"jenkins-logs\", - \"log_stream_name\":\"{instance_id} ${stack_id} jenkins-build-logs Update_HTTPD_Certs_and_Key\", - \"timestamp_format\":\"UTC\" - }, - { - \"file_path\":\"/var/jenkins_home/jobs/Update PIC-SURE Token Introspection Token/**/log\", - \"log_group_name\":\"jenkins-logs\", - \"log_stream_name\":\"{instance_id} ${stack_id} jenkins-build-logs Update_PIC_SURE_Token_Introspection_Token\", - \"timestamp_format\":\"UTC\" - }, - { - \"file_path\":\"/var/jenkins_home/jobs/Update VPC Settings/**/log\", - \"log_group_name\":\"jenkins-logs\", - \"log_stream_name\":\"{instance_id} ${stack_id} jenkins-build-logs Update_VPC_Settings\", - \"timestamp_format\":\"UTC\" - }, - { - \"file_path\":\"/var/jenkins_home/jobs/Deployment Pipeline/**/log\", - \"log_group_name\":\"jenkins-logs\", - \"log_stream_name\":\"{instance_id} ${stack_id} jenkins-build-logs Deployment_Pipeline\", - \"timestamp_format\":\"UTC\" - }, - { - \"file_path\":\"/var/jenkins_home/jobs/Move Prod DNS Pointer/**/log\", - \"log_group_name\":\"jenkins-logs\", - \"log_stream_name\":\"{instance_id} ${stack_id} jenkins-build-logs Move_Prod_DNS_Pointer\", - \"timestamp_format\":\"UTC\" - }, - { - \"file_path\":\"/var/jenkins_home/jobs/Retrieve Build Spec/**/log\", - \"log_group_name\":\"jenkins-logs\", - \"log_stream_name\":\"{instance_id} ${stack_id} 
jenkins-build-logs Retrieve_Build_Spec\", - \"timestamp_format\":\"UTC\" - }, - { - \"file_path\":\"/var/jenkins_home/jobs/Retrieve Deployment State/**/log\", - \"log_group_name\":\"jenkins-logs\", - \"log_stream_name\":\"{instance_id} ${stack_id} jenkins-build-logs Retrieve_Deployment_State\", - \"timestamp_format\":\"UTC\" - }, - { - \"file_path\":\"/var/jenkins_home/jobs/Swap Stacks/**/log\", - \"log_group_name\":\"jenkins-logs\", - \"log_stream_name\":\"{instance_id} ${stack_id} jenkins-build-logs Swap_Stacks\", - \"timestamp_format\":\"UTC\" - }, - { - \"file_path\":\"/var/jenkins_home/jobs/Teardown and Rebuild Stage Environment/**/log\", - \"log_group_name\":\"jenkins-logs\", - \"log_stream_name\":\"{instance_id} ${stack_id} jenkins-build-logs Teardown_and_Rebuild_Stage_Environment\", - \"timestamp_format\":\"UTC\" - }, - { - \"file_path\":\"/var/jenkins_home/jobs/Write Stack State/**/log\", - \"log_group_name\":\"jenkins-logs\", - \"log_stream_name\":\"{instance_id} ${stack_id} jenkins-build-logs Write_Stack_State\", - \"timestamp_format\":\"UTC\" - }, - { - \"file_path\":\"/var/jenkins_home/jobs/Import_and_Rekey_HPDS_Data/**/log\", - \"log_group_name\":\"jenkins-logs\", - \"log_stream_name\":\"{instance_id} ${stack_id} jenkins-build-logs Import_and_Rekey_HPDS_Data\", - \"timestamp_format\":\"UTC\" - }, - { - \"file_path\":\"/var/jenkins_home/jobs/PIC-SURE Auth Micro-App Build/**/log\", - \"log_group_name\":\"jenkins-logs\", - \"log_stream_name\":\"{instance_id} ${stack_id} jenkins-build-logs PIC_SURE_Auth_Micro_App_Build\", - \"timestamp_format\":\"UTC\" - }, - { - \"file_path\":\"/var/jenkins_home/jobs/PIC-SURE Pipeline/**/log\", - \"log_group_name\":\"jenkins-logs\", - \"log_stream_name\":\"{instance_id} ${stack_id} jenkins-build-logs PIC_SURE_Pipeline\", - \"timestamp_format\":\"UTC\" - }, - { - \"file_path\":\"/var/jenkins_home/jobs/PIC-SURE Wildfly Image Build/**/log\", - \"log_group_name\":\"jenkins-logs\", - \"log_stream_name\":\"{instance_id} ${stack_id} jenkins-build-logs PIC_SURE_Wildfly_Image_Build\", - \"timestamp_format\":\"UTC\" - }, - { - \"file_path\":\"/var/jenkins_home/jobs/PIC-SURE-API Build/**/log\", - \"log_group_name\":\"jenkins-logs\", - \"log_stream_name\":\"{instance_id} ${stack_id} jenkins-build-logs PIC_SURE_API_Build\", - \"timestamp_format\":\"UTC\" - }, - { - \"file_path\":\"/var/jenkins_home/jobs/PIC-SURE-HPDS Build/**/log\", - \"log_group_name\":\"jenkins-logs\", - \"log_stream_name\":\"{instance_id} ${stack_id} jenkins-build-logs PIC_SURE_HPDS_Build\", - \"timestamp_format\":\"UTC\" - }, - { - \"file_path\":\"/var/jenkins_home/jobs/PIC-SURE-HPDS-UI Docker Build/**/log\", - \"log_group_name\":\"jenkins-logs\", - \"log_stream_name\":\"{instance_id} ${stack_id} jenkins-build-logs PIC_SURE_HPDS_UI_Docker_Build\", - \"timestamp_format\":\"UTC\" - }, - { - \"file_path\":\"/var/jenkins_home/jobs/biodatacatalyst-ui/**/log\", - \"log_group_name\":\"jenkins-logs\", - \"log_stream_name\":\"{instance_id} ${stack_id} jenkins-build-logs biodatacatalyst_ui\", - \"timestamp_format\":\"UTC\" - }, - { - \"file_path\":\"/var/jenkins_home/jobs/Await Initialization/**/log\", - \"log_group_name\":\"jenkins-logs\", - \"log_stream_name\":\"{instance_id} ${stack_id} jenkins-build-logs Await_Initialization\", - \"timestamp_format\":\"UTC\" - }, - { - \"file_path\":\"/var/jenkins_home/jobs/Check For Updates/**/log\", - \"log_group_name\":\"jenkins-logs\", - \"log_stream_name\":\"{instance_id} ${stack_id} jenkins-build-logs Check_For_Updates\", - \"timestamp_format\":\"UTC\" - 
} - ] - } - } - } - - -} - -" > /opt/aws/amazon-cloudwatch-agent/etc/custom_config.json -sudo /opt/aws/amazon-cloudwatch-agent/bin/amazon-cloudwatch-agent-ctl -a fetch-config -m ec2 -c file:/opt/aws/amazon-cloudwatch-agent/etc/custom_config.json -s - -# Trend Mirco -echo '172.25.255.76 dsm.datastage.hms.harvard.edu' | sudo tee -a /etc/hosts -ACTIVATIONURL='dsm://dsm.datastage.hms.harvard.edu:4120/' -MANAGERURL='https://dsm.datastage.hms.harvard.edu:443' -CURLOPTIONS='--silent --tlsv1.2' -linuxPlatform=''; -isRPM=''; -if [[ $(/usr/bin/id -u) -ne 0 ]]; then - echo You are not running as the root user. Please try again with root privileges.; - logger -t You are not running as the root user. Please try again with root privileges.; - exit 1; -fi; -if ! type curl >/dev/null 2>&1; then - echo "Please install CURL before running this script." - logger -t Please install CURL before running this script - exit 1 -fi -curl $MANAGERURL/software/deploymentscript/platform/linuxdetectscriptv1/ -o /tmp/PlatformDetection $CURLOPTIONS --insecure -if [ -s /tmp/PlatformDetection ]; then - . /tmp/PlatformDetection -else - echo "Failed to download the agent installation support script." - logger -t Failed to download the Deep Security Agent installation support script - exit 1 -fi -platform_detect -if [[ -z "$${linuxPlatform}" ]] || [[ -z "$${isRPM}" ]]; then - echo Unsupported platform is detected - logger -t Unsupported platform is detected - exit 1 -fi -echo Downloading agent package... -if [[ $isRPM == 1 ]]; then package='agent.rpm' - else package='agent.deb' -fi -curl -H "Agent-Version-Control: on" $MANAGERURL/software/agent/$${runningPlatform}$${majorVersion}/$${archType}/$package?tenantID= -o /tmp/$package $CURLOPTIONS --insecure -echo Installing agent package... -rc=1 -if [[ $isRPM == 1 && -s /tmp/agent.rpm ]]; then - rpm -ihv /tmp/agent.rpm - rc=$? -elif [[ -s /tmp/agent.deb ]]; then - dpkg -i /tmp/agent.deb - rc=$? -else - echo Failed to download the agent package. Please make sure the package is imported in the Deep Security Manager - logger -t Failed to download the agent package. Please make sure the package is imported in the Deep Security Manager - exit 1 -fi -if [[ $${rc} != 0 ]]; then - echo Failed to install the agent package - logger -t Failed to install the agent package - exit 1 -fi -echo Install the agent package successfully -sleep 15 -/opt/ds_agent/dsa_control -r -/opt/ds_agent/dsa_control -a $ACTIVATIONURL "policyid:14" - - -echo "user-data progress starting update" -sudo yum -y update -echo "user-data progress finished update installing epel-release" -sudo yum -y install epel-release -echo "user-data progress finished epel-release adding docker-ce repo" -sudo yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo -echo "user-data progress added docker-ce repo starting docker install" -sudo yum -y install docker-ce docker-ce-cli containerd.io -echo "user-data progress finished docker install enabling docker service" -sudo systemctl enable docker -echo "user-data progress finished enabling docker service starting docker" -sudo service docker start -cd /home/centos/jenkins -sudo mkdir -p /var/jenkins_home/jobs/ -sudo mkdir -p /var/log/jenkins-docker-logs -cp -r jobs/* /var/jenkins_home/jobs/ -sudo docker build --build-arg S3_BUCKET=${stack_s3_bucket} -t avillach-lab-dev-jenkins . 
- -## Download and Install Nessus -for i in {1..5}; do sudo /usr/local/bin/aws --region us-east-1 s3 cp s3://${stack_s3_bucket}/nessus_config/setup.sh /opt/nessus_setup.sh && break || sleep 45; done -sh /opt/nessus_setup.sh "${stack_s3_bucket}" "CNC_Prod" - -# Download Jenkins config file from s3 -for i in {1..5}; do sudo /usr/local/bin/aws --region us-east-1 s3 cp s3://${stack_s3_bucket}/jenkins_config/config.xml /var/jenkins_home/config.xml && break || sleep 45; done - -# copy ssl cert & key from s3 -for i in {1..5}; do sudo /usr/local/bin/aws --region us-east-1 s3 cp s3://${stack_s3_bucket}/certs/jenkins/jenkins.cer /root/jenkins.cer && break || sleep 45; done -for i in {1..5}; do sudo /usr/local/bin/aws --region us-east-1 s3 cp s3://${stack_s3_bucket}/certs/jenkins/jenkins.key /root/jenkins.key && break || sleep 45; done - -# generate keystore file for docker/jenkins use -keystore_pass=`echo $RANDOM | md5sum | head -c 20` -sudo openssl pkcs12 -export -in /root/jenkins.cer -inkey /root/jenkins.key -out /root/jenkins.p12 -password pass:$keystore_pass - -#run jenkins docker container -sudo docker run -d -v /var/jenkins_home/jobs:/var/jenkins_home/jobs \ - -v /var/jenkins_home/config.xml:/usr/share/jenkins/ref/config.xml.override \ - -v /var/jenkins_home/workspace:/var/jenkins_home/workspace \ - -v /var/run/docker.sock:/var/run/docker.sock \ - -v /root/jenkins.p12:/root/jenkins.p12 \ - -p 443:8443 \ - --restart always \ - --name jenkins \ - avillach-lab-dev-jenkins \ - --httpsPort=8443 \ - --httpsKeyStore=/root/jenkins.p12 \ - --httpsKeyStorePassword="$keystore_pass" - -for i in 1 2 3 4 5; do sudo /usr/local/bin/aws --region us-east-1 s3 cp s3://${stack_s3_bucket}/domain-join.sh /root/domain-join.sh && break || sleep 45; done -cd /root -sudo bash domain-join.sh - -echo "setup script finished" - -sudo docker logs -f jenkins > /var/log/jenkins-docker-logs/jenkins.log & - -INSTANCE_ID=$(curl -H "X-aws-ec2-metadata-token: $(curl -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600")" --silent http://169.254.169.254/latest/meta-data/instance-id) -sudo docker exec jenkins /usr/local/bin/aws --region=us-east-1 ec2 create-tags --resources $${INSTANCE_ID} --tags Key=InitComplete,Value=true diff --git a/dev-jenkins-terraform/jenkins-ec2.tf b/dev-jenkins-terraform/jenkins-ec2.tf deleted file mode 100644 index bd8f6b4a..00000000 --- a/dev-jenkins-terraform/jenkins-ec2.tf +++ /dev/null @@ -1,73 +0,0 @@ -resource "tls_private_key" "provisioning-key" { - algorithm = "RSA" - rsa_bits = 4096 -} -output "provisioning-private-key" { - value = tls_private_key.provisioning-key.private_key_pem -} -resource "aws_key_pair" "generated_key" { - key_name = "jenkins-provisioning-key-${var.stack-id}-${var.git-commit}" - public_key = tls_private_key.provisioning-key.public_key_openssh -} - - -data "template_file" "jenkins-user_data" { - template = file("install-docker.sh") - vars = { - stack_s3_bucket = var.stack-s3-bucket - stack_id = var.stack-id - } -} - -data "template_cloudinit_config" "config" { - gzip = true - base64_encode = true - - # user_data - part { - content_type = "text/x-shellscript" - content = data.template_file.jenkins-user_data.rendered - } -} - -resource "aws_instance" "dev-jenkins" { - ami = "ami-03d8711fa5461c9f3" - instance_type = "m5.2xlarge" - associate_public_ip_address = false - key_name = aws_key_pair.generated_key.key_name - - iam_instance_profile = var.instance-profile-name - - root_block_device { - delete_on_termination = true - encrypted = 
true - volume_size = 1000 - } - - provisioner "file" { - source = "../jenkins-docker" - destination = "/home/centos/jenkins" - connection { - type = "ssh" - user = "centos" - private_key = tls_private_key.provisioning-key.private_key_pem - host = self.private_ip - } - } - - vpc_security_group_ids = [ - aws_security_group.inbound-jenkins-from-lma.id, - aws_security_group.outbound-jenkins-to-internet.id - ] - - subnet_id = var.subnet-id - - tags = { - Owner = "Avillach_Lab" - Environment = "development" - Name = "FISMA Terraform Playground - Dev Jenkins - ${var.stack-id} - ${var.git-commit}" - } - - user_data = data.template_cloudinit_config.config.rendered - -} diff --git a/dev-jenkins-terraform/jenkins-security-groups.tf b/dev-jenkins-terraform/jenkins-security-groups.tf deleted file mode 100644 index 1608ff19..00000000 --- a/dev-jenkins-terraform/jenkins-security-groups.tf +++ /dev/null @@ -1,77 +0,0 @@ -resource "aws_security_group" "inbound-jenkins-from-lma" { - name = "allow_inbound_from_lma_subnet_to_jenkins_vpc_${var.stack-id}_${var.git-commit}" - description = "Allow inbound traffic from LMA on ports 22, 80 and 443" - vpc_id = var.vpc-id - - ingress { - from_port = 80 - to_port = 80 - protocol = "tcp" - cidr_blocks = [ - var.access-cidr - ] - } - - ingress { - from_port = 443 - to_port = 443 - protocol = "tcp" - cidr_blocks = [ - var.access-cidr - ] - } - - ingress { - from_port = 22 - to_port = 22 - protocol = "tcp" - cidr_blocks = [ - var.access-cidr - ] - } - - ingress { - from_port = 22 - to_port = 22 - protocol = "tcp" - cidr_blocks = [ - var.provisioning-cidr - ] - } - - ingress { - from_port = 22 - to_port = 22 - protocol = "tcp" - cidr_blocks = [ - "172.25.255.73/32" - ] - } - - tags = { - Owner = "Avillach_Lab" - Environment = "development" - Name = "FISMA Terraform Playground - inbound-jenkins-from-lma Security Group - ${var.stack-id}" - } -} - -resource "aws_security_group" "outbound-jenkins-to-internet" { - name = "allow_jenkins_outbound_to_internet_${var.stack-id}_${var.git-commit}" - description = "Allow outbound traffic from Jenkins" - vpc_id = var.vpc-id - - egress { - from_port = 0 - to_port = 0 - protocol = -1 - cidr_blocks = [ - "0.0.0.0/0" - ] - } - - tags = { - Owner = "Avillach_Lab" - Environment = "development" - Name = "FISMA Terraform Playground - outbound-jenkins-to-internet Security Group - ${var.stack-id}" - } -} diff --git a/dev-jenkins-terraform/provider.tf b/dev-jenkins-terraform/provider.tf deleted file mode 100644 index 7faf7528..00000000 --- a/dev-jenkins-terraform/provider.tf +++ /dev/null @@ -1,5 +0,0 @@ -provider "aws" { - region = "us-east-1" - profile = "avillachlab-secure-infrastructure" - version = "3.74" -} diff --git a/dev-jenkins-terraform/stack_variables.tf b/dev-jenkins-terraform/stack_variables.tf deleted file mode 100644 index 843d123b..00000000 --- a/dev-jenkins-terraform/stack_variables.tf +++ /dev/null @@ -1,32 +0,0 @@ -variable "stack-id" { - type = string - default = "test" -} - -variable "git-commit" { - type = string -} - -variable "subnet-id" { - type = string -} - -variable "vpc-id" { - type = string -} - -variable "instance-profile-name" { - type = string -} - -variable "access-cidr" { - type = string -} - -variable "provisioning-cidr" { - type = string -} - -variable "stack-s3-bucket" { - type = string -} diff --git a/jenkins-docker/.gitignore b/jenkins-docker/.gitignore new file mode 100644 index 00000000..014c0bc0 --- /dev/null +++ b/jenkins-docker/.gitignore @@ -0,0 +1,5 @@ +builds/ +lastStable +lastSuccessful 
+nextBuildNumber +.DS_Store \ No newline at end of file diff --git a/jenkins-docker/Dockerfile b/jenkins-docker/Dockerfile index aa36f433..8fb5b8a1 100644 --- a/jenkins-docker/Dockerfile +++ b/jenkins-docker/Dockerfile @@ -1,41 +1,67 @@ FROM jenkins/jenkins:lts - -ARG S3_BUCKET - -COPY plugins.txt /usr/share/jenkins/ref/plugins.txt - -COPY config.xml /var/jenkins_home/config.xml - -COPY scriptApproval.xml /var/jenkins_home/scriptApproval.xml - -COPY hudson.tasks.Maven.xml /var/jenkins_home/hudson.tasks.Maven.xml - +# user to swap easily between OS operations and jenkins configuration. +ENV JENKINS_USERNAME=$USER + +ARG JENKINS_DOCKER_TERRAFORM_DISTRO +ARG PLUGINS_FILE +ARG CONFIG_XML_FILE +ARG SCRIPT_APPROVAL_FILE +ARG HUDSON_TASKS_FILE +ARG JENKINS_JOBS_DIR +ARG PKCS12_FILE +ARG PKCS12_PASS +ARG JENKINS_HTTP_PORT=-1 +ARG JENKINS_HTTPS_PORT=8443 + +# Can use this to swap users back to jenkins user without hardcoding it. Safe to hardcode swap to root user. +###### OS configs and package installations ######## +# We could run these commands and build an image then configure jenkins only in this dockerfile instead of directly using the jenkins stock image USER root +# debian repos to install openjdk headless? +# Jenkins must have a better way to manage JRE .... RUN echo deb http://archive.debian.org/debian stretch-backports main >> /etc/apt/sources.list - RUN echo deb http://archive.debian.org/debian stretch main >> /etc/apt/sources.list RUN apt-get update -RUN apt-get -y install apt-transport-https - -RUN apt-get -y install python3-pip +RUN apt-get -y -t stretch-backports install openjdk-11-jdk-headless -RUN apt-get -y install wget +RUN apt-get -y install apt-transport-https \ + python3-pip \ + wget +# maybe not the best way to install awscli +# see https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html RUN pip3 install --no-input awscli --upgrade --break-system-packages -RUN curl -fsSL https://get.docker.com | sh - -RUN docker --version - -RUN apt-get -y -t stretch-backports install openjdk-11-jdk-headless +RUN set -e; \ + curl -fsSL https://get.docker.com | sh && \ + docker --version +# maven RUN apt-get -y install maven - +# jq RUN apt-get install jq -y +# terraform +RUN wget -c $JENKINS_DOCKER_TERRAFORM_DISTRO -O /opt/terraform.zip && \ + unzip /opt/terraform.zip -d /usr/local/bin/ && rm -f /opt/terraform.zip + +# Cleanup +RUN apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* /var/cache/apt/* + +##### Configuration for jenkins ########### +USER $JENKINS_USERNAME +# Check if PLUGINS_FILE is provided and copy it, then run jenkins-plugin-cli +COPY $PLUGINS_FILE /usr/share/jenkins/ref/plugins.txt RUN /bin/jenkins-plugin-cli -f /usr/share/jenkins/ref/plugins.txt || echo "Some errors occurred during plugin installation." -RUN wget https://releases.hashicorp.com/terraform/0.12.31/terraform_0.12.31_linux_amd64.zip && unzip terraform_0.12.31_linux_amd64.zip && mv terraform /usr/local/bin/ +COPY $CONFIG_XML_FILE /var/jenkins_home/config.xml +COPY $SCRIPT_APPROVAL_FILE /var/jenkins_home/scriptApproval.xml +COPY $HUDSON_TASKS_FILE /var/jenkins_home/hudson.tasks.Maven.xml +COPY $JENKINS_JOBS_DIR /var/jenkins_home/jobs +COPY $PKCS12_FILE /var/jenkins_home/$PKCS12_FILE + +# Should set env var at runtime using an entrypoint. 
+ENV JENKINS_OPTS="--httpPort=$JENKINS_HTTP_PORT --httpsPort=$JENKINS_HTTPS_PORT --httpsKeyStore=/var/jenkins_home/$PKCS12_FILE --httpsKeyStorePassword=$PKCS12_PASS" diff --git a/jenkins-docker/README.md b/jenkins-docker/README.md new file mode 100644 index 00000000..ea760997 --- /dev/null +++ b/jenkins-docker/README.md @@ -0,0 +1,64 @@ +# Jenkins Docker Image with Additional Tools and Configuration + +This Dockerfile extends the official Jenkins Docker image (LTS) and adds additional tools and configurations to enhance its functionality. + +## Table of Contents + +- [Features](#features) +- [Prerequisites](#prerequisites) +- [Building the Image](#building-the-image) +- [Configuration](#configuration) +- [Usage](#usage) +- [Cleanup](#cleanup) +- [Contributing](#contributing) +- [License](#license) + +## Features + +- Installs OpenJDK 11, Maven, jq, and AWS CLI +- Installs Docker and configures it to work within the Jenkins environment +- Installs Terraform and other essential tools +- Allows easy user switching between OS operations and Jenkins configuration +- Configures Jenkins with specified plugins, settings, and jobs +- Provides options to customize Jenkins HTTP and HTTPS ports +- Cleans up unnecessary files to reduce image size + +## Prerequisites + +- Docker installed on the host machine + +## Building the Image + +To build the Jenkins Docker image, run the following commands: + +```bash +docker build \ + --build-arg JENKINS_DOCKER_TERRAFORM_DISTRO= \ + --build-arg PLUGINS_FILE= \ + --build-arg CONFIG_XML_FILE= \ + --build-arg SCRIPT_APPROVAL_FILE= \ + --build-arg HUDSON_TASKS_FILE= \ + --build-arg JENKINS_JOBS_DIR= \ + --build-arg PKCS12_FILE= \ + --build-arg PKCS12_PASS= \ + --build-arg JENKINS_HTTP_PORT= \ + --build-arg JENKINS_HTTPS_PORT= \ + -t your/jenkins-docker-image . +``` +## Configuration + +### Environment Variables + +- **JENKINS_USERNAME**: User to swap easily between OS operations and Jenkins configuration. +- **JENKINS_HTTP_PORT**: Custom HTTP port for Jenkins (default is -1, which means the default Jenkins port). +- **JENKINS_HTTPS_PORT**: Custom HTTPS port for Jenkins (default is 8443). + +### Configuration Files + +- **PLUGINS_FILE**: Text file containing a list of Jenkins plugins to be installed. +- **CONFIG_XML_FILE**: Jenkins configuration XML file. +- **SCRIPT_APPROVAL_FILE**: Jenkins scriptApproval XML file. +- **HUDSON_TASKS_FILE**: Jenkins hudson.tasks.Maven XML file. +- **JENKINS_JOBS_DIR**: Directory containing Jenkins job configurations. +- **PKCS12_FILE**: Path to the PKCS12 file for Jenkins HTTPS configuration. +- **PKCS12_PASS**: Password for the PKCS12 file. diff --git a/jenkins-docker/config.xml b/jenkins-docker/config.xml index dbf5781b..af5aa622 100644 --- a/jenkins-docker/config.xml +++ b/jenkins-docker/config.xml @@ -131,6 +131,7 @@ PIC-SURE-API Build PIC-SURE-HPDS Build PIC-SURE-HPDS-UI Docker Build + PIC-SURE-VISUALIZATION Build Retrieve Build Spec Retrieve Deployment State Teardown and Rebuild Stage Environment @@ -165,7 +166,7 @@ - 4 + 6 stack_s3_bucket avillach-biodatacatalyst-deployments-b7rwpwv git_base_url diff --git a/jenkins-docker/jobs/Await Initialization/config.xml b/jenkins-docker/jobs/Await Initialization/config.xml index 10155d5a..6b2ab899 100644 --- a/jenkins-docker/jobs/Await Initialization/config.xml +++ b/jenkins-docker/jobs/Await Initialization/config.xml @@ -1,40 +1,36 @@ - + This would be cleaner and more portable if it was a python script that check for initialization. 
false target_stack - - false - infrastructure_git_hash - - + git_hash false - + 2 - https://${git_base_url}/pic-sure-bdc-infrastructure + ${infrastructure_git_repo} - ${infrastructure_git_hash} + ${git_hash} false - + true @@ -45,93 +41,130 @@ false - cd app-infrastructure -wget https://releases.hashicorp.com/terraform/0.12.31/terraform_0.12.31_linux_amd64.zip && unzip terraform_0.12.31_linux_amd64.zip && mv terraform /usr/local/bin/ -terraform init - -aws sts assume-role --duration-seconds 3600 --role-arn "arn:aws:iam::${cnc_acct_id}:role/system/jenkins-s3-role" --role-session-name "teardown-rebuild" > assume-role-output.txt - -export AWS_ACCESS_KEY_ID=`grep AccessKeyId assume-role-output.txt | cut -d ':' -f 2 | sed "s/[ ,\"]//g"` -export AWS_SECRET_ACCESS_KEY=`grep SecretAccessKey assume-role-output.txt | cut -d ':' -f 2 | sed "s/[ ,\"]//g"` -export AWS_SESSION_TOKEN=`grep SessionToken assume-role-output.txt | cut -d ':' -f 2 | sed "s/[ ,\"]//g"` + #!/bin/bash +set -e +# Source folder containing the scripts +source_scripts_folder="${JENKINS_HOME}/workspace/Bash_Functions/" +ls -la "$source_scripts_folder" + +# Iterate through the files in the folder and source them +for script_file in "$source_scripts_folder"*.sh; do + chmod +x "$script_file" + if [ -f "$script_file" ] && [ -x "$script_file" ]; then + echo "sourcing $script_file" + source "$script_file" + fi +done + +cd app-infrastructure aws s3 cp s3://$stack_s3_bucket/deployment_state_metadata/${target_stack}/terraform.tfstate . -aws s3 cp s3://$stack_s3_bucket/deployment_state_metadata/${target_stack}/stack_variables.tf . -cp stack_variables.tf ../s3-deployment-roles/ -aws s3 cp s3://$stack_s3_bucket/deployment_state_metadata/${target_stack}/terraform.tfstate_roles ../s3-deployment-roles/terraform.tfstate || echo "role state doesnt exist, it will be created" - -unset AWS_ACCESS_KEY_ID -unset AWS_SECRET_ACCESS_KEY -unset AWS_SESSION_TOKEN - -aws sts assume-role --duration-seconds 3600 --role-arn "arn:aws:iam::${app_acct_id}:role/hms-dbmi-cnc-role" --role-session-name "teardown-rebuild" > assume-role-output.txt - -export AWS_ACCESS_KEY_ID=`grep AccessKeyId assume-role-output.txt | cut -d ':' -f 2 | sed "s/[ ,\"]//g"` -export AWS_SECRET_ACCESS_KEY=`grep SecretAccessKey assume-role-output.txt | cut -d ':' -f 2 | sed "s/[ ,\"]//g"` -export AWS_SESSION_TOKEN=`grep SessionToken assume-role-output.txt | cut -d ':' -f 2 | sed "s/[ ,\"]//g"` - -INSTANCE_ID=`terraform state show aws_instance.wildfly-ec2 | grep "\"i-[a-f0-9]" | cut -f 2 -d "=" | sed 's/"//g'` -while [ -z $(/usr/local/bin/aws --region=us-east-1 ec2 describe-tags --filters "Name=resource-id,Values=${INSTANCE_ID}" | grep InitComplete) ];do echo "still initializing";sleep 10;done -echo "Wildfly EC2 Initialization Complete" - -aws sts assume-role --duration-seconds 3600 --role-arn "arn:aws:iam::${app_acct_id}:role/hms-dbmi-cnc-role" --role-session-name "teardown-rebuild" > assume-role-output.txt - -export AWS_ACCESS_KEY_ID=`grep AccessKeyId assume-role-output.txt | cut -d ':' -f 2 | sed "s/[ ,\"]//g"` -export AWS_SECRET_ACCESS_KEY=`grep SecretAccessKey assume-role-output.txt | cut -d ':' -f 2 | sed "s/[ ,\"]//g"` -export AWS_SESSION_TOKEN=`grep SessionToken assume-role-output.txt | cut -d ':' -f 2 | sed "s/[ ,\"]//g"` - -INSTANCE_ID=`terraform state show aws_instance.httpd-ec2 | grep "\"i-[a-f0-9]" | cut -f 2 -d "=" | sed 's/"//g'` -while [ -z $(/usr/local/bin/aws --region=us-east-1 ec2 describe-tags --filters "Name=resource-id,Values=${INSTANCE_ID}" | grep InitComplete) ];do echo 
"still initializing";sleep 10;done -echo "HTTPD EC2 Initialization Complete" - -aws sts assume-role --duration-seconds 3600 --role-arn "arn:aws:iam::${app_acct_id}:role/hms-dbmi-cnc-role" --role-session-name "teardown-rebuild" > assume-role-output.txt - -export AWS_ACCESS_KEY_ID=`grep AccessKeyId assume-role-output.txt | cut -d ':' -f 2 | sed "s/[ ,\"]//g"` -export AWS_SECRET_ACCESS_KEY=`grep SecretAccessKey assume-role-output.txt | cut -d ':' -f 2 | sed "s/[ ,\"]//g"` -export AWS_SESSION_TOKEN=`grep SessionToken assume-role-output.txt | cut -d ':' -f 2 | sed "s/[ ,\"]//g"` - -INSTANCE_ID=`terraform state show aws_instance.open-hpds-ec2 | grep "\"i-[a-f0-9]" | cut -f 2 -d "=" | sed 's/"//g'` -while [ -z $(/usr/local/bin/aws --region=us-east-1 ec2 describe-tags --filters "Name=resource-id,Values=${INSTANCE_ID}" | grep InitComplete) ];do echo "still initializing";sleep 10;done -echo "Open HPDS EC2 Initialization Complete" -aws sts assume-role --duration-seconds 3600 --role-arn "arn:aws:iam::${app_acct_id}:role/hms-dbmi-cnc-role" --role-session-name "teardown-rebuild" > assume-role-output.txt - -export AWS_ACCESS_KEY_ID=`grep AccessKeyId assume-role-output.txt | cut -d ':' -f 2 | sed "s/[ ,\"]//g"` -export AWS_SECRET_ACCESS_KEY=`grep SecretAccessKey assume-role-output.txt | cut -d ':' -f 2 | sed "s/[ ,\"]//g"` -export AWS_SESSION_TOKEN=`grep SessionToken assume-role-output.txt | cut -d ':' -f 2 | sed "s/[ ,\"]//g"` - -INSTANCE_ID=`terraform state show aws_instance.dictionary-ec2 | grep "\"i-[a-f0-9]" | cut -f 2 -d "=" | sed 's/"//g'` -while [ -z $(/usr/local/bin/aws --region=us-east-1 ec2 describe-tags --filters "Name=resource-id,Values=${INSTANCE_ID}" | grep InitComplete) ];do echo "still initializing";sleep 10;done -echo "Dictionary EC2 Initialization Complete" - -aws sts assume-role --duration-seconds 3600 --role-arn "arn:aws:iam::${app_acct_id}:role/hms-dbmi-cnc-role" --role-session-name "teardown-rebuild" > assume-role-output.txt - -export AWS_ACCESS_KEY_ID=`grep AccessKeyId assume-role-output.txt | cut -d ':' -f 2 | sed "s/[ ,\"]//g"` -export AWS_SECRET_ACCESS_KEY=`grep SecretAccessKey assume-role-output.txt | cut -d ':' -f 2 | sed "s/[ ,\"]//g"` -export AWS_SESSION_TOKEN=`grep SessionToken assume-role-output.txt | cut -d ':' -f 2 | sed "s/[ ,\"]//g"` - -aws sts assume-role --duration-seconds 3600 --role-arn "arn:aws:iam::${app_acct_id}:role/hms-dbmi-cnc-role" --role-session-name "teardown-rebuild" > assume-role-output.txt - -export AWS_ACCESS_KEY_ID=`grep AccessKeyId assume-role-output.txt | cut -d ':' -f 2 | sed "s/[ ,\"]//g"` -export AWS_SECRET_ACCESS_KEY=`grep SecretAccessKey assume-role-output.txt | cut -d ':' -f 2 | sed "s/[ ,\"]//g"` -export AWS_SESSION_TOKEN=`grep SessionToken assume-role-output.txt | cut -d ':' -f 2 | sed "s/[ ,\"]//g"` - -INSTANCE_ID=`terraform state show aws_instance.auth-hpds-ec2 | grep "\"i-[a-f0-9]" | cut -f 2 -d "=" | sed 's/"//g'` -while [ -z $(/usr/local/bin/aws --region=us-east-1 ec2 describe-tags --filters "Name=resource-id,Values=${INSTANCE_ID}" | grep InitComplete) ];do echo "still initializing";sleep 10;done -echo "Auth HPDS EC2 Initialization Complete" +terraform init -unset AWS_ACCESS_KEY_ID -unset AWS_SECRET_ACCESS_KEY -unset AWS_SESSION_TOKEN +output_names=$(terraform output -json | jq -r 'keys[]') +echo "Output names: $output_names" + +for output_name in $output_names; do + reset_role + assume_role + + echo "Current instance: ${output_name}." 
+ INSTANCE_ID=`terraform output "${output_name}"` + + # Skip if the instance ID is empty or is not a valid instance-id + if [ -z "$INSTANCE_ID" ] || ! [[ "$INSTANCE_ID" =~ ^i-[a-fA-F0-9]{17}$ ]]; then + echo "No instance ID for output ${output_name}. Skipping..." + continue + fi + + echo "Instance-id is $INSTANCE_ID" + + aws --region=us-east-1 ec2 describe-tags --filters "Name=resource-id,Values=${INSTANCE_ID}" + while [ $(aws --region=us-east-1 ec2 describe-tags --filters "Name=resource-id,Values=${INSTANCE_ID}" | grep InitComplete | wc -l ) -eq 0 ]; do + # avoid role timeout by assuming role continously + reset_role + assume_role + echo "${output_name} EC2 still initializing" + sleep 60 + done + + echo "${output_name} EC2 Initialization Complete" +done + + + + + # json tag schema. Used to uniquely identify a projects staging instances. +cat <<EOF > staging_httpd_tags_file.json +[ + { + "Name": "tag:Stack", + "Values": ["$target_stack"] + }, + { + "Name": "tag:Node", + "Values": ["HTTPD"] + }, + { + "Name": "tag:Project", + "Values": ["$env_project"] + } +] +EOF + + + + #!/bin/bash +# Source folder containing the scripts +source_scripts_folder="${JENKINS_HOME}/workspace/Bash_Functions/" +ls -la "$source_scripts_folder" + +# Iterate through the files in the folder and source them +for script_file in "$source_scripts_folder"*.sh; do + chmod +x "$script_file" + if [ -f "$script_file" ] && [ -x "$script_file" ]; then + echo "sourcing $script_file" + source "$script_file" + fi +done + +cd "$WORKSPACE" + +assume_role +# register new deployment to staging ( register green ) +echo "registering new deployment to staging ( register green )" +target_group_vpc=$(get_target_group_vpc_by_tg_name $staging_tg_name) +staging_target_group_arn=$(get_target_group_arn_by_name "$staging_tg_name") +staging_httpd_instance_prv_ips=($(get_private_ip_by_tags "staging_httpd_tags_file.json")) + +if [ -z "${staging_httpd_instance_prv_ips}" ]; then + echo "No private IPs found when registering target groups." + echo "Ensure nodes have been created with proper tags for Node,Project and Stack keys." + exit 1 +else + register_targets "$target_group_vpc" "$staging_target_group_arn" "${staging_httpd_instance_prv_ips[@]}" + + # Wait for draining + wait_for_target_group_health "$staging_target_group_arn" "HEALTHY" "${staging_httpd_instance_prv_ips[@]}" +fi +reset_role + - + false false + + + 240 + + + - + \ No newline at end of file diff --git a/jenkins-docker/jobs/Bash_Functions/config.xml b/jenkins-docker/jobs/Bash_Functions/config.xml new file mode 100644 index 00000000..1c8beea7 --- /dev/null +++ b/jenkins-docker/jobs/Bash_Functions/config.xml @@ -0,0 +1,397 @@ + + + + Job will be triggered by Check for updates. + +Gives a central location to store bash functions used in the pipeline. 
+ false + + + true + false + false + false + + false + + + echo ' +assume_role(){ + local role_arn=${1:-"arn:aws:iam::'${app_acct_id}':role/'${jenkins_provisioning_assume_role_name}'"} + OUTPUT=$(aws sts assume-role \ + --role-arn $role_arn \ + --role-session-name "teardown-rebuild" \ + --query "Credentials.[AccessKeyId,SecretAccessKey,SessionToken]" \ + --output text) + + export AWS_ACCESS_KEY_ID=$(echo $OUTPUT | awk "{print \$1}") + export AWS_SECRET_ACCESS_KEY=$(echo $OUTPUT | awk "{print \$2}") + export AWS_SESSION_TOKEN=$(echo $OUTPUT | awk "{print \$3}") + export AWS_DEFAULT_REGION=us-east-1 +} + +reset_role(){ + unset AWS_ACCESS_KEY_ID + unset AWS_SECRET_ACCESS_KEY + unset AWS_SESSION_TOKEN +} + +export -f assume_role +export -f reset_role + ' > role_functions.sh + + + + # Provides automation for swapping instances between staging and live target groups, using green/blue deployment strategy. + +# Script Usage: +# To use this script, specify the names of the staging and live target groups, as well as JSON files +# containing tag filters for identifying instances to swap between the target groups. + +# Example Usage: +# swap_stacks "staging-tg-name" "live-tg-name" "staging-httpd-tags.txt" "live-httpd-tags.txt" + +# Function to get Target Group ARN by name + +echo ' +get_target_group_arn_by_name() { + # Use AWS CLI to describe target groups and extract the ARN by name + aws elbv2 describe-target-groups \ + --query "TargetGroups[?TargetGroupName==\`$1\`].TargetGroupArn" \ + --output text +} + +# Function to get Target Group VPC by name +get_target_group_vpc_by_tg_name() { + local target_group_name=$1 + # Use AWS CLI to describe the target group and extract the VPC ID + aws elbv2 describe-target-groups \ + --names "$target_group_name" \ + --query "TargetGroups[0].VpcId" \ + --output text +} + +# Function to get private IP by tags +get_private_ip_by_tags() { + local tag_filters_file="$1" + # Use AWS CLI to describe instances with specified tag filters from a file and extract private IPs + aws ec2 describe-instances \ + --filters "file://$tag_filters_file" \ + --query "Reservations[].Instances[].PrivateIpAddress" \ + --output text +} + +# Function to check if an EC2 instance is in the same VPC as a target group +is_ec2_in_same_vpc_as_target_group() { + local ec2_private_ip="$1" + local target_group_vpc_id="$2" + # Use AWS CLI to describe the EC2 instance and extract its VPC ID + local ec2_vpc_id=$(aws ec2 describe-instances \ + --filters "Name=private-ip-address,Values=$ec2_private_ip" \ + --query "Reservations[0].Instances[0].VpcId" \ + --output text) + + # Compare the EC2 instances VPC ID with the known target groups VPC ID + if [ "$ec2_vpc_id" == "$target_group_vpc_id" ]; then + echo "The EC2 instance with private IP $ec2_private_ip is in the same VPC as the target group." + return 0 # Return success (0) + else + echo "The EC2 instance with private IP $ec2_private_ip is NOT in the same VPC as the target group." 
+ return 1 # Return failure (non-zero) + fi +} + +# Function to register targets in a target group +register_targets() { + local local_target_group_vpc="$1" + local local_target_group_arn="$2" + local httpd_priv_ips=("${@:3}") + + for local_httpd_priv_ips in "${httpd_priv_ips[@]}"; do + # Check if the EC2 instance is in the same VPC as the target group and register it accordingly + if is_ec2_in_same_vpc_as_target_group "$local_httpd_priv_ips" "$local_target_group_vpc"; then + aws elbv2 register-targets --target-group-arn $local_target_group_arn --targets "Id=$local_httpd_priv_ips" + else + aws elbv2 register-targets --target-group-arn $local_target_group_arn --targets "Id=$local_httpd_priv_ips,AvailabilityZone=all" + fi + done +} + +# Function to deregister targets from a target group +deregister_targets() { + local local_target_group_vpc="$1" + local local_target_group_arn="$2" + local httpd_priv_ips=("${@:3}") + + for httpd_priv_ip in "${httpd_priv_ips[@]}"; do + # Check if the EC2 instance is in the same VPC as the target group and deregister it accordingly + if is_ec2_in_same_vpc_as_target_group "$httpd_priv_ip" "$local_target_group_vpc"; then + aws elbv2 deregister-targets --target-group-arn $local_target_group_arn --targets "Id=$httpd_priv_ip" + else + aws elbv2 deregister-targets --target-group-arn $local_target_group_arn --targets "Id=$httpd_priv_ip,AvailabilityZone=all" + fi + done +} + +# Function to wait for a target group to become healthy for all instances +wait_for_target_group_health() { + local target_group_arn="$1" + local wait_for_health="$2" + local target_instance_ips=("${@:3}") + + local timeout_secs=600 + local start_time=$(date +%s) + + for target_instance_ip in "${target_instance_ips[@]}"; do + local health_status=$(aws elbv2 describe-target-health \ + --target-group-arn "$target_group_arn" \ + --targets "Id=$target_instance_ip" \ + --query "TargetHealthDescriptions[0].TargetHealth.State" \ + --output text + ) + + while [[ ${health_status^^} != ${wait_for_health^^} ]]; do + local elapsed_time=$(( $(date +%s) - start_time )) + if [ $elapsed_time -ge $timeout_secs ]; then + echo "Target group health check exceeds timeout" + echo "Target=$target_instance_ip Current_Health=${health_status^^} wait_for_health=$wait_for_health" + exit 1 + fi + + # Use AWS CLI to describe the target health status and update the health_status variable + health_status=$(aws elbv2 describe-target-health \ + --target-group-arn "$target_group_arn" \ + --targets "Id=$target_instance_ip" \ + --query "TargetHealthDescriptions[0].TargetHealth.State" \ + --output text + ) + echo "$target_instance_ip - ${health_status^^}. Waiting for health of ${wait_for_health^^}" + sleep 10 # Wait for 10 seconds before checking again + done + done +} + +# Function to swap instances between staging and live target groups +swap_stacks() { + local staging_tg_name=$1 + local live_tg_name=$2 + local staging_httpd_tags_file="$3" + local live_httpd_tags_file="$4" + local fail_empty_stacks="${5,,:-"false",,}" # Set to true for stable prod and release testing + echo "staging_tg_name=$1" + echo "live_tg_name=$2" + echo "staging_httpd_tags_file=$3" + echo "live_httpd_tags_file=$4" + + # Get Target Group ARNs using the function + local local_staging_target_group_arn=$(get_target_group_arn_by_name "$staging_tg_name") + local local_live_target_group_arn=$(get_target_group_arn_by_name "$live_tg_name") + + # Get target group VPC. 
Both target groups are known to be in the same VPC + local local_target_group_vpc=$(get_target_group_vpc_by_tg_name "$live_tg_name") + + # Get private IPs using the function with tags from files + local local_staging_httpd_instance_prv_ips=($(get_private_ip_by_tags "$staging_httpd_tags_file")) + local local_live_httpd_instance_prv_ips=($(get_private_ip_by_tags "$live_httpd_tags_file")) + + # Promote Staging to Live (green to blue) + echo "Promote Staging to Live (green to blue)" + if [ -z "${local_staging_httpd_instance_prv_ips}" ]; then + echo "No staging IP Addresses found when promoting to live server." + echo "Ensure nodes have been created with proper tags for Node, Project, and Stack keys." + if [ "${fail_empty_stacks}" = "true" ]; then + echo "failing build" + exit 1 + fi + else + register_targets "$local_target_group_vpc" "$local_live_target_group_arn" "${local_staging_httpd_instance_prv_ips[@]}" + wait_for_target_group_health "$local_live_target_group_arn" "HEALTHY" "${local_staging_httpd_instance_prv_ips[@]}" + fi + + # Demote Live to Staging (blue to green) + echo "Demote Live to Staging (blue to green)" + if [ -z "${local_live_httpd_instance_prv_ips}" ]; then + echo "No live IP Addresses found when demoting to staging server." + echo "Ensure nodes have been created with proper tags for Node, Project, and Stack keys." + if [ "${fail_empty_stacks}" = "true" ]; then + echo "failing build" + exit 1 + fi + else + register_targets "$local_target_group_vpc" "$local_staging_target_group_arn" "${local_live_httpd_instance_prv_ips[@]}" + wait_for_target_group_health "$local_staging_target_group_arn" "HEALTHY" "${local_live_httpd_instance_prv_ips[@]}" + fi + + # Deregister previous Live from Live TG (remove blue from blue) + echo "Deregister previous Live from Live TG (remove blue from blue)" + if [ -z "${local_live_httpd_instance_prv_ips}" ]; then + echo "No live IP Addresses found when deregistering from live TG." + echo "Ensure nodes have been created with proper tags for Node, Project, and Stack keys and that it has a TGA." + if [ "${fail_empty_stacks}" = "true" ]; then + echo "failing build" + exit 1 + fi + else + deregister_targets "$local_target_group_vpc" "$local_live_target_group_arn" "${local_live_httpd_instance_prv_ips[@]}" + # Wait for draining + wait_for_target_group_health "$local_live_target_group_arn" "UNUSED" "${local_live_httpd_instance_prv_ips[@]}" + fi + + # Deregister previous Staging from Staging TG (remove green from green) + echo "Deregister previous Staging from Staging TG (remove green from green)" + if [ -z "${local_staging_httpd_instance_prv_ips}" ]; then + echo "No staging IP Addresses found when deregistering from live TG." + echo "Ensure nodes have been created with proper tags for Node, Project, and Stack keys and that it has a TGA." + if [ "${fail_empty_stacks}" = "true" ]; then + echo "failing build" + exit 1 + fi + else + deregister_targets "$local_target_group_vpc" "$local_staging_target_group_arn" "${local_staging_httpd_instance_prv_ips[@]}" + # Wait for draining + wait_for_target_group_health "$local_staging_target_group_arn" "UNUSED" "${local_staging_httpd_instance_prv_ips[@]}" + fi + echo "Swap Stacks complete" +}' > target_group_attachment_functions.sh + + + + cat <<'EOF' > pic_sure_rds_snapshot.sh +# Use feature toggle to the strategy to use for picsure RDS +# Available strategies +# * STANDALONE = Unmanaged RDS instance. No prebuild blue snapshot +# * STANDALONE_W_SS = Unmanaged RDS instance. 
With prebuild blue snapshot +# * STANDALONE_W_TAGGED_SS = Unmanaged RDS instance. Will use a specified tagged snapshot to build from. # Tags are being tricky. But should work. +# * MANAGED_EMPTY = Managed RDS instance. No prebuild blue snapshot +# * MANAGED_USE_LATEST_SS = Managed RDS instance. Will take and use the latest snapshot of blue. + +do_picsure_rds_strategy() { + # variables + local picsure_rds_strategy="${1}" + local live_stack="${2}" + local env_project="${3}" + + # picsure_rds_standalone_instance= + if [ -z ${picsure_rds_strategy} ]; then + picsure_rds_snapshot_id= + fi + if [ "${picsure_rds_strategy}" == "STANDALONE" ]; then + # placeholder. implement with persistent work + echo "placeholder" + fi + if [ "${picsure_rds_strategy}" == "STANDALONE_W_SS" ]; then + # placeholder. implement with persistent work + echo "placeholder" + fi + if [ "${picsure_rds_strategy}" == "STANDALONE_W_TAGGED_SS" ]; then + # placeholder. implement with persistent work + echo "placeholder" + fi + # RDS Strategy: Managed RDS. Empty Database. + if [ "${picsure_rds_strategy}" == "MANAGED_EMPTY" ]; then + echo "initializing empty database." + exit 0 + fi + # RDS Strategy: Managed RDS using latest SS of blue. + if [ "${picsure_rds_strategy}" == "MANAGED_USE_LATEST_SS" ]; then + echo "Creating snapshot for picsure rds" + create_rds_snapshot_by_tag "${live_stack}" "${env_project}" + picsure_rds_snapshot_id=$(get_latest_rds_snapshot_id "${picsure_rds_strategy}" "${live_stack}" "${env_project}") + echo "$picsure_rds_snapshot_id" + # validate snapshot + if [ -z "$picsure_rds_snapshot_id" ]; then + echo "Error: Error with RDS Snapshot. Snapshot empty. Ensure a snapshot exists for the blue rds instance." + exit 1 + fi + # validate snapshot id format + if [[ ! "$picsure_rds_snapshot_id" =~ ^[a-zA-Z0-9-]+$ ]]; then + echo "Error: Error with RDS Snapshot. $picsure_rds_snapshot_id is not a valid snapshot id." 
+ exit 1 + fi + fi +} + +# Funtion to return latest snapshot for rds instance +get_latest_rds_snapshot_id() { + local picsure_rds_strategy="$1" + local stack="$2" + local project="$3" + + if [ "${picsure_rds_strategy}" == "MANAGED_USE_LATEST_SS" ]; then + db_instance_identifier=$(get_db_instance_identifier_by_tag "$live_stack" "$env_project") + latest_snapshot=$(aws rds describe-db-snapshots \ + --db-instance-identifier "$db_instance_identifier" \ + --query "DBSnapshots | sort_by(@, &SnapshotCreateTime) | [-1].DBSnapshotIdentifier" \ + --output text) + fi + echo "$latest_snapshot" +} + +# find db instance by stack and project tags +get_db_instance_identifier_by_tag() { + local stack="$1" + local project="$2" + local identifier + identifier=$(aws rds describe-db-instances | jq --arg stack "$stack" --arg project "$project" -r '.DBInstances[] | select(.TagList[] | .Key == "Stack" and .Value == $stack) | select(.TagList[] | .Key == "Project" and .Value == $project) | .DBInstanceIdentifier') + echo "$identifier" +} + +create_rds_snapshot_by_tag() { + local live_stack="$1" + local env_project="$2" + + # Find the RDS instance based on the specified tag + local db_instance_identifier + db_instance_identifier=$(get_db_instance_identifier_by_tag "$live_stack" "$env_project") + + if [ -z "$db_instance_identifier" ]; then + echo "No RDS instance found with tag: $live_stack and $env_project" + return 1 + fi + local timestamp=$(date +"%Y-%m-%d-%H-%M-%S") + local snapshot_identifier="${db_instance_identifier}-snapshot-${timestamp}" + + # Create a snapshot for the RDS instance + aws rds create-db-snapshot \ + --db-instance-identifier "$db_instance_identifier" \ + --db-snapshot-identifier "$snapshot_identifier" + + while true; do + local status=$(aws rds describe-db-snapshots \ + --db-instance-identifier "$db_instance_identifier" \ + --db-snapshot-identifier "$snapshot_identifier" \ + --query "DBSnapshots[0].Status" --output text) + + if [ "${status,,}" = "available" ]; then + echo "$snapshot_identifier" + return 0 + else + sleep 20 + fi + done +} +EOF + + + + + + + target_group_attachment_functions.sh, role_functions.sh, pic_sure_rds_snapshot.sh + false + false + false + true + true + false + + + + + false + + + false + + + \ No newline at end of file diff --git a/jenkins-docker/jobs/Check For Updates/config.xml b/jenkins-docker/jobs/Check For Updates/config.xml index 6e39908d..4c3cb6a3 100644 --- a/jenkins-docker/jobs/Check For Updates/config.xml +++ b/jenkins-docker/jobs/Check For Updates/config.xml @@ -4,17 +4,17 @@ false - + 2 - https://${git_base_url}/pic-sure-bdc-release-control + ${release_control_git_repo} c1cbb1df-9da8-4f3f-85b9-8de0b9685008 - */master + ${release_control_git_hash} false @@ -28,9 +28,9 @@ false - + - - true - - - false - diff --git a/jenkins-docker/jobs/Update HTTPD Certs and Key/config.xml b/jenkins-docker/jobs/Update HTTPD Certs and Key/config.xml index fdb20887..b3d49be2 100644 --- a/jenkins-docker/jobs/Update HTTPD Certs and Key/config.xml +++ b/jenkins-docker/jobs/Update HTTPD Certs and Key/config.xml @@ -30,11 +30,7 @@ false - aws sts assume-role --duration-seconds 900 --role-arn "arn:aws:iam::${cnc_acct_id}:role/system/jenkins-s3-role" --role-session-name "teardown-rebuild" > assume-role-output.txt - -export AWS_ACCESS_KEY_ID=`grep AccessKeyId assume-role-output.txt | cut -d ':' -f 2 | sed "s/[ ,\"]//g"` -export AWS_SECRET_ACCESS_KEY=`grep SecretAccessKey assume-role-output.txt | cut -d ':' -f 2 | sed "s/[ ,\"]//g"` -export AWS_SESSION_TOKEN=`grep SessionToken 
assume-role-output.txt | cut -d ':' -f 2 | sed "s/[ ,\"]//g"` + aws s3 cp server.key s3://$stack_s3_bucket/certs/httpd/server.key aws s3 cp server.crt s3://$stack_s3_bucket/certs/httpd/server.crt @@ -43,13 +39,9 @@ aws s3 cp server.chain s3://$stack_s3_bucket/certs/httpd/server.chain rm server.key rm server.crt rm server.chain - -unset AWS_ACCESS_KEY_ID -unset AWS_SECRET_ACCESS_KEY -unset AWS_SESSION_TOKEN - \ No newline at end of file + diff --git a/jenkins-docker/jobs/Update Jenkins global config.xml on s3/config.xml b/jenkins-docker/jobs/Update Jenkins global config.xml on s3/config.xml new file mode 100644 index 00000000..9f790cde --- /dev/null +++ b/jenkins-docker/jobs/Update Jenkins global config.xml on s3/config.xml @@ -0,0 +1,26 @@ + + + + This job will sync the current jenkins config.xml to the s3 location described in the global parameter "jenkins_config_s3_location" + false + + + true + false + false + false + + false + + + #!/bin/bash +# Different environments should set the config.xml name in jenkins_config_s3_location configuration to a unique name such as config_auth_dev.xml + +echo "aws s3 cp ${JENKINS_HOME}/config.xml ${jenkins_config_s3_location}" +aws s3 cp ${JENKINS_HOME}/config.xml ${jenkins_config_s3_location} + + + + + + \ No newline at end of file diff --git a/jenkins-docker/jobs/Update PIC-SURE Token Introspection Token/config.xml b/jenkins-docker/jobs/Update PIC-SURE Token Introspection Token/config.xml index f18fdbcc..12cf019b 100644 --- a/jenkins-docker/jobs/Update PIC-SURE Token Introspection Token/config.xml +++ b/jenkins-docker/jobs/Update PIC-SURE Token Introspection Token/config.xml @@ -4,20 +4,20 @@ false - + 2 - https://${git_base_url}/jwt-creator.git + ${pic_sure_introspection_token_repo} - */master + ${pic_sure_introspection_token_hash} false - + true @@ -36,13 +36,24 @@ false - cd target + #!/bin/bash +set -e +# Source folder containing the scripts +source_scripts_folder="${JENKINS_HOME}/workspace/Bash_Functions/" +ls -la "$source_scripts_folder" -aws sts assume-role --duration-seconds 900 --role-arn "arn:aws:iam::${cnc_acct_id}:role/system/jenkins-s3-role" --role-session-name "teardown-rebuild" > assume-role-output.txt +# Iterate through the files in the folder and source them +for script_file in "$source_scripts_folder"*.sh; do + chmod +x "$script_file" + if [ -f "$script_file" ] && [ -x "$script_file" ]; then + echo "sourcing $script_file" + source "$script_file" + fi +done -export AWS_ACCESS_KEY_ID=`grep AccessKeyId assume-role-output.txt | cut -d ':' -f 2 | sed "s/[ ,\"]//g"` -export AWS_SECRET_ACCESS_KEY=`grep SecretAccessKey assume-role-output.txt | cut -d ':' -f 2 | sed "s/[ ,\"]//g"` -export AWS_SESSION_TOKEN=`grep SessionToken assume-role-output.txt | cut -d ':' -f 2 | sed "s/[ ,\"]//g"` +cd target + +reset_role aws s3 cp s3://$stack_s3_bucket/deployment_state_metadata/a/stack_variables.tf . @@ -61,22 +72,26 @@ aws s3 cp stack_variables.tf s3://$stack_s3_bucket/deployment_state_metadata/a/s mv stack_variables.tf stack_variables_a.tf -aws s3 cp s3://$stack_s3_bucket/deployment_state_metadata/b/stack_variables.tf . +echo "New token:" +echo "${new_token_introspection_token}" +# only one stack at the moment +#aws s3 cp s3://$stack_s3_bucket/deployment_state_metadata/b/stack_variables.tf . 
-export old_token_introspection_token=`cat stack_variables.tf | grep -A3 picsure_token_introspection_token | head -n 3 | tail -1 | cut -d ' ' -f 5 | sed 's/"//g'` +#export old_token_introspection_token=`cat stack_variables.tf | grep -A3 picsure_token_introspection_token | head -n 3 | tail -1 | cut -d ' ' -f 5 | sed 's/"//g'` -sed -i "s/$old_token_introspection_token/$new_token_introspection_token/g" stack_variables.tf +#sed -i "s/$old_token_introspection_token/$new_token_introspection_token/g" stack_variables.tf -aws s3 cp stack_variables.tf s3://$stack_s3_bucket/deployment_state_metadata/b/stack_variables.tf +#aws s3 cp stack_variables.tf s3://$stack_s3_bucket/deployment_state_metadata/b/stack_variables.tf -mv stack_variables.tf stack_variables_b.tf +#mv stack_variables.tf stack_variables_b.tf -unset AWS_ACCESS_KEY_ID -unset AWS_SECRET_ACCESS_KEY -unset AWS_SESSION_TOKEN +#unset AWS_ACCESS_KEY_ID +#unset AWS_SECRET_ACCESS_KEY +#unset AWS_SESSION_TOKEN + - + \ No newline at end of file diff --git a/jenkins-docker/jobs/Update Preprod HTTPD Certs and Key/config.xml b/jenkins-docker/jobs/Update Preprod HTTPD Certs and Key/config.xml index b6d1a820..dac0d9c4 100644 --- a/jenkins-docker/jobs/Update Preprod HTTPD Certs and Key/config.xml +++ b/jenkins-docker/jobs/Update Preprod HTTPD Certs and Key/config.xml @@ -30,11 +30,7 @@ false - aws sts assume-role --duration-seconds 900 --role-arn "arn:aws:iam::${cnc_acct_id}:role/system/jenkins-s3-role" --role-session-name "teardown-rebuild" > assume-role-output.txt - -export AWS_ACCESS_KEY_ID=`grep AccessKeyId assume-role-output.txt | cut -d ':' -f 2 | sed "s/[ ,\"]//g"` -export AWS_SECRET_ACCESS_KEY=`grep SecretAccessKey assume-role-output.txt | cut -d ':' -f 2 | sed "s/[ ,\"]//g"` -export AWS_SESSION_TOKEN=`grep SessionToken assume-role-output.txt | cut -d ':' -f 2 | sed "s/[ ,\"]//g"` + aws s3 cp preprod_server.key s3://$stack_s3_bucket/certs/httpd/preprod_server.key aws s3 cp preprod_server.crt s3://$stack_s3_bucket/certs/httpd/preprod_server.crt @@ -43,10 +39,6 @@ aws s3 cp preprod_server.chain s3://$stack_s3_bucket/certs/httpd/preprod_server. 
rm preprod_server.key rm preprod_server.crt rm preprod_server.chain - -unset AWS_ACCESS_KEY_ID -unset AWS_SECRET_ACCESS_KEY -unset AWS_SESSION_TOKEN diff --git a/jenkins-docker/jobs/Update VPC Settings/config.xml b/jenkins-docker/jobs/Update VPC Settings/config.xml deleted file mode 100644 index 1c453f5f..00000000 --- a/jenkins-docker/jobs/Update VPC Settings/config.xml +++ /dev/null @@ -1,178 +0,0 @@ - - - - - false - - - - - R53_Zone_ID - - Z07894451Y9DMEARH90L3 - false - - - vpc_a - - vpc-0d248ee6e4ef337ef - false - - - vpc_b - - vpc-037509eb9baffa584 - false - - - db-subnet-group-name-a - - main-a - false - - - db-subnet-group-name-b - - main-b - false - - - - - - true - false - false - false - - false - - - echo $JENKINS_HOME - -# Describe subnets in specified VPCs in the prod account - -aws sts assume-role --duration-seconds 900 --role-arn "arn:aws:iam::${app_acct_id}:role/hms-dbmi-cnc-role" --role-session-name "teardown-rebuild" > assume-role-output.txt - -export AWS_ACCESS_KEY_ID=`grep AccessKeyId assume-role-output.txt | cut -d ':' -f 2 | sed "s/[ ,\"]//g"` -export AWS_SECRET_ACCESS_KEY=`grep SecretAccessKey assume-role-output.txt | cut -d ':' -f 2 | sed "s/[ ,\"]//g"` -export AWS_SESSION_TOKEN=`grep SessionToken assume-role-output.txt | cut -d ':' -f 2 | sed "s/[ ,\"]//g"` - -aws ec2 --region us-east-1 describe-subnets - -aws ec2 --region us-east-1 describe-subnets --filters "Name=vpc-id,Values=$vpc_a" > subnets_a.json -cat subnets_a.json -aws ec2 --region us-east-1 describe-subnets --filters "Name=vpc-id,Values=$vpc_b" > subnets_b.json -cat subnets_b.json - -unset AWS_ACCESS_KEY_ID -unset AWS_SECRET_ACCESS_KEY -unset AWS_SESSION_TOKEN - - - - - - false - - - - - # Push new subnet variables files to bucket - -aws sts assume-role --duration-seconds 900 --role-arn "arn:aws:iam::${cnc_acct_id}:role/system/jenkins-s3-role" --role-session-name "configure-vpc-settings" > assume-role-output.txt - -export AWS_ACCESS_KEY_ID=`grep AccessKeyId assume-role-output.txt | cut -d ':' -f 2 | sed "s/[ ,\"]//g"` -export AWS_SECRET_ACCESS_KEY=`grep SecretAccessKey assume-role-output.txt | cut -d ':' -f 2 | sed "s/[ ,\"]//g"` -export AWS_SESSION_TOKEN=`grep SessionToken assume-role-output.txt | cut -d ':' -f 2 | sed "s/[ ,\"]//g"` - -aws s3 cp subnet_variables_a.tf s3://$stack_s3_bucket/deployment_state_metadata/a/subnet_variables.tf -aws s3 cp subnet_variables_b.tf s3://$stack_s3_bucket/deployment_state_metadata/b/subnet_variables.tf - -unset AWS_ACCESS_KEY_ID -unset AWS_SECRET_ACCESS_KEY -unset AWS_SESSION_TOKEN - - - - - - \ No newline at end of file diff --git a/jenkins-docker/jobs/biodatacatalyst-ui/config.xml b/jenkins-docker/jobs/biodatacatalyst-ui/config.xml index 5f130c7a..45d4d936 100644 --- a/jenkins-docker/jobs/biodatacatalyst-ui/config.xml +++ b/jenkins-docker/jobs/biodatacatalyst-ui/config.xml @@ -1,85 +1,93 @@ - - - false - - - - - pipeline_build_id - - MANUAL_RUN - false - - - git_hash - - */master - false - - - S3_BUCKET_NAME - - ${stack_s3_bucket} - false - - - - - - 2 - - - https://${git_base_url}/pic-sure-bdc-frontend.git - - - - - ${git_hash} - - - false - - - - true - false - false - false - - false - - - - aws s3 --sse=AES256 cp s3://$S3_BUCKET_NAME/releases/jenkins_pipeline_build_${pipeline_build_id}/pic-sure-hpds-ui.tar.gz . 
- - PIC_SURE_HPDS_UI_VERSION=`docker load -i pic-sure-hpds-ui.tar.gz|cut -d ':' -f 3` - - docker tag hms-dbmi/pic-sure-hpds-ui:${PIC_SURE_HPDS_UI_VERSION} hms-dbmi/pic-sure-hpds-ui:TARGET_BUILD_VERSION + + + false + + + + + pipeline_build_id + MANUAL_RUN + false + + + git_hash + */master + false + + + S3_BUCKET_NAME + ${stack_s3_bucket} + false + + + + + + 2 + + + ${pic_sure_biodatacatalyst_ui_repo} + + + + + ${git_hash} + + + false + + + + true + false + false + false + + false + + + aws s3 --sse=AES256 cp s3://$S3_BUCKET_NAME/releases/jenkins_pipeline_build_${pipeline_build_id}/pic-sure-hpds-ui.tar.gz . +PIC_SURE_HPDS_UI_VERSION=$(docker load -i pic-sure-hpds-ui.tar.gz | cut -d ':' -f 3) +docker tag hms-dbmi/pic-sure-hpds-ui:${PIC_SURE_HPDS_UI_VERSION} hms-dbmi/pic-sure-hpds-ui:TARGET_BUILD_VERSION - GIT_BRANCH_SHORT=`echo ${GIT_BRANCH} | cut -d "/" -f 2` - GIT_COMMIT_SHORT=`echo ${GIT_COMMIT} | cut -c1-7` - - cd biodatacatalyst-ui +GIT_BRANCH_SHORT=$(echo ${GIT_BRANCH} | cut -d "/" -f 2) +GIT_COMMIT_SHORT=$(echo ${GIT_COMMIT} | cut -c1-7) - - if [ ! -d open-pic-sure-bdc-frontend/ui/src/main/picsureui/ ]; then - mkdir -p open-pic-sure-bdc-frontend/ui/src/main/picsureui/ - fi +# Clean the dir out first +rm -rf biodatacatalyst-ui/open-pic-sure-bdc-frontend - docker build --build-arg FILE_SUFFIX=${pipeline_build_id} --build-arg IS_OPEN_ACCESS=${is_open_access} -t hms-dbmi/pic-sure-biodatacatalyst-ui:${GIT_BRANCH_SHORT}_${GIT_COMMIT_SHORT} . - - cd ../ - mkdir -p docker_image_output - cd docker_image_output - - docker save hms-dbmi/pic-sure-biodatacatalyst-ui:${GIT_BRANCH_SHORT}_${GIT_COMMIT_SHORT} | gzip > pic-sure-ui.tar.gz - - aws s3 --sse=AES256 cp pic-sure-ui.tar.gz s3://$S3_BUCKET_NAME/releases/jenkins_pipeline_build_${pipeline_build_id}/pic-sure-ui.tar.gz - - - - - - +if [ "${env_is_open_access}" = 'true' ]; then + # Move Open Access UI into BDC UI + echo "Is open access: Copying repository to biodatacatalyst-ui/open-pic-sure-bdc-frontend" + cp -r "/var/jenkins_home/workspace/PIC-SURE Open Build" "biodatacatalyst-ui/open-pic-sure-bdc-frontend/" + echo "ls -la biodatacatalyst-ui/open-pic-sure-bdc-frontend" +else + # Create an empty directory structure if not open access + echo "Is not open access: making empty dir structure" + mkdir -p "biodatacatalyst-ui/open-pic-sure-bdc-frontend/ui/src/main/psamaui/" + mkdir -p "biodatacatalyst-ui/open-pic-sure-bdc-frontend/ui/src/main/picsureui/" +fi + +# Check if 'psamaui' directory exists at 'biodatacatalyst-ui/open-pic-sure-bdc-frontend/ui/src/main/' +if [ ! -d "biodatacatalyst-ui/open-pic-sure-bdc-frontend/ui/src/main/psamaui/" ]; then + mkdir -p "biodatacatalyst-ui/open-pic-sure-bdc-frontend/ui/src/main/psamaui/" +fi + + +# Move into workspace dir +cd biodatacatalyst-ui +docker build --build-arg FILE_SUFFIX=${pipeline_build_id} --build-arg IS_OPEN_ACCESS=${env_is_open_access} -t hms-dbmi/pic-sure-biodatacatalyst-ui:${GIT_BRANCH_SHORT}_${GIT_COMMIT_SHORT} . 
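+# Save the freshly built UI image and copy the archive to the release path in S3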
+mkdir -p docker_image_output +cd docker_image_output + +docker save hms-dbmi/pic-sure-biodatacatalyst-ui:${GIT_BRANCH_SHORT}_${GIT_COMMIT_SHORT} | gzip > pic-sure-ui.tar.gz +aws s3 --sse=AES256 cp pic-sure-ui.tar.gz s3://$S3_BUCKET_NAME/releases/jenkins_pipeline_build_${pipeline_build_id}/pic-sure-ui.tar.gz + + + + + + + \ No newline at end of file diff --git a/jenkins-docker/plugins.txt b/jenkins-docker/plugins.txt index d30dbcb8..22b149a0 100644 --- a/jenkins-docker/plugins.txt +++ b/jenkins-docker/plugins.txt @@ -19,6 +19,6 @@ ws-cleanup subversion ldap cloudbees-folder -list-git-branches-parameter copyartifact saml +role-strategy \ No newline at end of file diff --git a/jenkins-docker/scriptApproval.xml b/jenkins-docker/scriptApproval.xml index b640d7f0..72336ded 100644 --- a/jenkins-docker/scriptApproval.xml +++ b/jenkins-docker/scriptApproval.xml @@ -1,35 +1,10 @@ - + - 017d0db438428731cd600b6ebda805065433520f - 06a421bb34e0e8aa45d8402eab04e6e54e46f913 - 06b6d44b82898dcd0960934aea3f4d5eae271fc5 - 111a0660bd7c615e97a4b6114a6e6a3048a6ab9a - 1e85783c7377f7012394a563d520e4fbebc0511f - 29e61b5ee7c9114091e46e3570618e0104d5cfc6 - 322c8d131432ddc7d8da24362c1e9fbd98209def - 41c82a6a8c829d8c2826c2ee9e675231938c91c9 - 4562d484eec17bbf6c1a4c2dfb9e75c76db75188 - 4b3bea48f47c9481c6775e29d6b4b6adab5e286e - 4ce588ae2c722178d1a0bcbb7962423f5fc78068 - 52bdc81591ab4a2186602c27d33b3f595ba3e163 - 5cd0f8e92c0fad5e2ae68b2d55065b69c776acc1 - 67e468ca78a4d1d61a4f44b52f1177572094ff1e - 6af9235e3b2be6f41cf962add1f08ca4a5664bfd - 73bc048aca7c126772f39853e7dc8417ddd4ec54 - 796400d4c193b8ba1000aae6ef1d46e80b348afe - 9061fb0b398bac834e90acac342c71b75fd5e9b3 - 90be77cc74f11f2305ec7704d3c7955b7c1655a2 - adb6d9d969156a0e0d503c00f76271c44e185346 - adce500d4a52fb0b5edb0f93c5c0e26ccca0b4d9 - bf65c48f68235f3efd5c76b56406453d8655fdd9 - c21d14044b16f41290e92dcf26e9fd31b278b442 - d2e279e72f439f7d7e6f65aeeddbf8d6b167b659 - e5ad22ea5934b94ba902974b1677e7d2250594d7 - fb10b1b30d59d5087570a5af06b603a62fde9408 - df74e9004131c8ff7cdaea580a40816aeeaeb142 - 001e4903e2f6757845eabf6158f55b75bb3f5d7e - c542b6d9c470ba02878b35aa54dead50ad8a7fb3 + SHA512:3f205299bef2fe3329258a42f2c60d332cc35ccb41a3ca0512d7869c6aff4561ff708c48123b4f7c3dec33ccca30d54fb8b9b51b5bc7070f96236d11a3f0bdcb + SHA512:90a8e21755ae6fa93c3f606a1ce62d6116d485ed6569a94cde9993aa005f6f29557fbd9a705bc09baded1de761b15d28f5e001db9c9c10d2e138653dffe52c26 + SHA512:d1a422ef19f95e8c6f81b8b0f4c90d988d3e24a0833acb2bb71f103a912f7e1d66203367571bda4283470da26d0a62fba8d60d3fb23ee9304a457892dbf5b919 + SHA512:d4b9f78c783e15291f0bc65fe5e5915b3be9586ffaf793a9202a0292560b9effac840e27257a517e666fd6ca7b2863c1fb0078bf30055aacfe6ce1f77d78edd6 method groovy.json.JsonOutput toJson java.util.Map diff --git a/jenkins-terraform/README.md b/jenkins-terraform/README.md new file mode 100644 index 00000000..b2a55027 --- /dev/null +++ b/jenkins-terraform/README.md @@ -0,0 +1,140 @@ +# Table of Contents + +- [Variables](#variables) +- [Terraform Backend](#terraform-backend) +- [Outputs](#outputs) + +## Variables +- [stack_id](#stack_id) +- [git_commit](#git_commit) +- [jenkins_vpc_id](#jenkins_vpc_id) +- [jenkins_instance_profile_name](#jenkins_instance_profile_name) +- [jenkins_tf_state_bucket](#jenkins_tf_state_bucket) +- [jenkins_subnet_id](#jenkins_subnet_id) +- [jenkins_sg_egress_allow_all_cidr_blocks](#jenkins_sg_egress_allow_all_cidr_blocks) +- [jenkins_sg_ingress_https_cidr_blocks](#jenkins_sg_ingress_https_cidr_blocks) +- [jenkins_config_s3_location](#jenkins_config_s3_location) +- 
[jenkins_ec2_instance_type](#jenkins_ec2_instance_type) +- [jenkins_tf_local_var_OS_dist](#jenkins_tf_local_var_OS_dist) +- [jenkins_ec2_ebs_volume_size](#jenkins_ec2_ebs_volume_size) +- [jenkins_docker_maven_distro](#jenkins_docker_maven_distro) +- [jenkins_docker_terraform_distro](#jenkins_docker_terraform_distro) +- [jenkins_git_repo](#jenkins_git_repo) +- [program](#program) +- [env_is_open_access](#env_is_open_access) +- [environment_name](#environment_name) +- [is_initialized](#is_initialized) +- [locals](#locals) + +## Terraform Backend +- [Terraform Backend Configuration](#terraform-backend-configuration) + +## Outputs +- [jenkins-ec2-id](#jenkins-ec2-id) +- [jenkins-ec2-ip](#jenkins-ec2-ip) + +# Variables + +The following variables are defined in the `jenkins-deploy-tf-variables.tf` and `jenkins-local-tf-variables.tf` file. Adjust these variables based on your specific requirements: + +- **stack_id**: + - Description: Identifier for the Jenkins instance stack. + - Type: `string` + +- **git_commit**: + - Description: Git commit hash for version tracking. + - Type: `string` + +- **jenkins_vpc_id**: + - Description: ID of the VPC where Jenkins will be deployed. + - Type: `string` + +- **jenkins_instance_profile_name**: + - Description: Name of the IAM instance profile attached to the Jenkins EC2 instance. + - Type: `string` + +- **jenkins_tf_state_bucket**: + - Description: Name of the S3 bucket for storing Terraform state. + - Type: `string` + +- **jenkins_subnet_id**: + - Description: ID of the subnet where Jenkins will be deployed. + - Type: `string` + +- **jenkins_sg_egress_allow_all_cidr_blocks**: + - Description: List of CIDR blocks for egress traffic from Jenkins security group. + - Type: `list(any)` + +- **jenkins_sg_ingress_https_cidr_blocks**: + - Description: List of CIDR blocks for inbound HTTPS traffic to Jenkins security group. + - Type: `list(any)` + +- **jenkins_config_s3_location**: + - Description: S3 location for Jenkins configuration files. + - Type: `string` + +- **jenkins_ec2_instance_type**: + - Description: AWS EC2 instance type for Jenkins. + - Type: `string` + +- **jenkins_tf_local_var_OS_dist**: + - Description: Operating system distribution for Jenkins (e.g., "CENTOS"). + - Type: `string` + +- **jenkins_ec2_ebs_volume_size**: + - Description: Size of the EBS volume attached to the Jenkins EC2 instance. + - Type: `number` + +- **jenkins_docker_maven_distro**: + - Description: Docker Maven distribution used by Jenkins. + - Type: `string` + +- **jenkins_docker_terraform_distro**: + - Description: Docker Terraform distribution used by Jenkins. + - Type: `string` + +- **jenkins_git_repo**: + - Description: Git repository URL for Jenkins. + - Type: `string` + +- **program**: + - Description: Program identifier. + - Type: `string` + +- **env_is_open_access**: + - Description: Boolean flag indicating if the environment is open access. + - Type: `bool` + +- **environment_name**: + - Description: Name of the environment (e.g., "dev"). + - Type: `string` + +- **is_initialized**: + - Description: Flag indicating whether Jenkins is initialized. + - Type: `string` + +Make sure to replace the placeholder values with actual configurations. + +# Terraform Backend + +```hcl +terraform { + backend "s3" { + encrypt = true + } +} +``` + +# Outputs + +This module defines the following outputs: + +- **jenkins-ec2-id**: + - Description: The ID of the Jenkins EC2 instance. + - Usage: Use this output to reference the unique identifier of the deployed Jenkins EC2 instance. 
+ +- **jenkins-ec2-ip**: + - Description: The private IP address of the Jenkins EC2 instance. + - Usage: Use this output to obtain the private IP address assigned to the Jenkins EC2 instance. + +These outputs can be utilized in other Terraform objects or scripts to access information about the deployed Jenkins infrastructure. diff --git a/jenkins-terraform/distro/linux/centos/user-scripts/install-docker.sh b/jenkins-terraform/distro/linux/centos/user-scripts/install-docker.sh new file mode 100644 index 00000000..a2d89954 --- /dev/null +++ b/jenkins-terraform/distro/linux/centos/user-scripts/install-docker.sh @@ -0,0 +1,26 @@ +#!/bin/bash + +sh /opt/srce/scripts/start-gsstools.sh +sudo yum -y update + +# grab image tar +aws s3 cp s3://${jenkins_tf_state_bucket}/containers/jenkins/jenkins.tar.gz jenkins.tar.gz + +# load image +load_result=$(docker load -i jenkins.tar.gz) +image_tag=$(echo "$load_result" | grep -o -E "jenkins:[[:alnum:]_]+") + +#run docker container +sudo docker run -d --log-driver syslog --log-opt tag=jenkins \ + -v /var/jenkins_home/workspace:/var/jenkins_home/workspace \ + -v /var/run/docker.sock:/var/run/docker.sock \ + -p 443:8443 \ + --restart always \ + --name jenkins \ + $image_tag + +#sudo docker logs -f jenkins > /var/log/jenkins-docker-logs/jenkins.log & + +INSTANCE_ID=$(curl -H "X-aws-ec2-metadata-token: $(curl -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600")" --silent http://169.254.169.254/latest/meta-data/instance-id) +sudo /usr/bin/aws --region=us-east-1 ec2 create-tags --resources $${INSTANCE_ID} --tags Key=InitComplete,Value=true + diff --git a/jenkins-terraform/export_initialization_variables.sh b/jenkins-terraform/export_initialization_variables.sh new file mode 100644 index 00000000..5e2022a6 --- /dev/null +++ b/jenkins-terraform/export_initialization_variables.sh @@ -0,0 +1,18 @@ +#!\usr\bin\env bash + +export GIT_COMMIT= \ +export jenkins_tf_state_region= \ +export jenkins_tf_state_bucket= \ +export jenkins_tf_state_bucket= \ +export stack_id= \ +export jenkins_subnet_id= \ +export jenkins_vpc_id= \ +export jenkins_instance_profile_name= \ +export jenkins_sg_ingress_https_cidr_blocks= \ +export jenkins_sg_egress_allow_all_cidr_blocks= \ +export jenkins_config_s3_location= \ +export jenkins_ec2_instance_type= \ +export jenkins_tf_local_var_OS_dist= \ +export jenkins_ec2_ebs_volume_size= \ +export jenkins_docker_maven_distr=NO LONGER USED \ +export jenkins_docker_terraform_distro= diff --git a/jenkins-terraform/initialize-jenkins.sh b/jenkins-terraform/initialize-jenkins.sh new file mode 100644 index 00000000..8c34c836 --- /dev/null +++ b/jenkins-terraform/initialize-jenkins.sh @@ -0,0 +1,42 @@ +#!/bin/bash + +### BEFORE RUNNING! +# Need to export the variables used in the terraform apply below as env variables or store them in a variable.tf file +# or just replace the variables with the values needed. +# Values should be stored in the global config.xml that is located at the ${jenkins_config_s3_location} variable. +# +# Also need to have jenkins-s3-role on the ec2 + +#### Script that will be used to initialize a jenkins CI environment. +## Once a Jenkins server is built it is able to recreate itself. +## This should be a replicate of the bash script used in the Create New Jenkins Server. 
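+
+## A minimal, hypothetical invocation sketch (variable names assumed from export_initialization_variables.sh in this folder):
+##   source ./export_initialization_variables.sh   # fill in each export value first
+##   bash ./initialize-jenkins.sh
+## Every variable referenced in the terraform init/apply below must be exported before running.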
+ +### Install terraform current distro used is https://releases.hashicorp.com/terraform/0.12.31/terraform_0.12.31_linux_amd64.zip + +RUN wget -c $JENKINS_DOCKER_TERRAFORM_DISTRO -O /opt/terraform.zip + +RUN unzip /opt/terraform.zip -d /usr/local/bin/ + + +# backend s3 config will always be encrypted +# need to find a better key location that isn't tied to the git commit for the job +terraform init \ +-backend-config="bucket=${jenkins_tf_state_bucket}" \ +-backend-config="key=jenkins_state/jenkins_${GIT_COMMIT}/terraform.tfstate" \ +-backend-config="region=${jenkins_tf_state_region}" + +terraform apply -auto-approve \ +-var "git_commit=`echo ${GIT_COMMIT} |cut -c1-7`" \ +-var "jenkins_tf_state_bucket=${jenkins_tf_state_bucket}" \ +-var "stack_id=${stack_id}" \ +-var "jenkins_subnet_id=${jenkins_subnet_id}" \ +-var "jenkins_vpc_id=${jenkins_vpc_id}" \ +-var "jenkins_instance_profile_name=${jenkins_instance_profile_name}" \ +-var "jenkins_sg_ingress_https_cidr_blocks=${jenkins_sg_ingress_https_cidr_blocks}" \ +-var "jenkins_sg_egress_allow_all_cidr_blocks=${jenkins_sg_egress_allow_all_cidr_blocks}" \ +-var "jenkins_config_s3_location=${jenkins_config_s3_location}" \ +-var "jenkins_ec2_instance_type=${jenkins_ec2_instance_type}" \ +-var "jenkins_tf_local_var_OS_dist=${jenkins_tf_local_var_OS_dist}" \ +-var "jenkins_ec2_ebs_volume_size=${jenkins_ec2_ebs_volume_size}" \ +-var "jenkins_docker_maven_distro=${jenkins_docker_maven_distro}" \ +-var "jenkins_docker_terraform_distro=${jenkins_docker_terraform_distro}" diff --git a/jenkins-terraform/jenkins-deploy-tf-variables.tf b/jenkins-terraform/jenkins-deploy-tf-variables.tf new file mode 100644 index 00000000..08abc2d0 --- /dev/null +++ b/jenkins-terraform/jenkins-deploy-tf-variables.tf @@ -0,0 +1,89 @@ +# local / dynamic variables can be found in jenkins-local-tf-variables.tf + +variable "stack_id" { + type = string + default = "test" +} + +variable "git_commit" { + type = string +} + +variable "jenkins_vpc_id" { + type = string +} + +variable "jenkins_instance_profile_name" { + type = string +} + +variable "jenkins_tf_state_bucket" { + type = string +} + +variable "jenkins_subnet_id" { + type = string +} + +variable "jenkins_sg_egress_allow_all_cidr_blocks" { + type = list(any) +} + +variable "jenkins_sg_ingress_https_cidr_blocks" { + type = list(any) +} + +variable "jenkins_config_s3_location" { + type = string +} + +variable "jenkins_ec2_instance_type" { + type = string +} + +variable "jenkins_tf_local_var_OS_dist" { + type = string + + # in terraform .13 variable validations are no longer experimental and is production ready. 
+ # use this validations when upgrading to terraform .13 + # will not implement .12 experimental features + #validation { + # condition = contains(local.valid_os,var.jenkins_tf_local_var_OS_dist) + # error_message = "Unsupported OS Distribution - Check the Terraform accepted valid_os list" + #} +} + +variable "jenkins_ec2_ebs_volume_size" { + type = number +} + +variable "jenkins_docker_maven_distro" { + type = string +} + +variable "jenkins_docker_terraform_distro" { + type = string +} + +variable "jenkins_git_repo" { + type = string +} + +variable "program" { + type = string + default = "BdC" +} + +variable "env_is_open_access" { + type = bool +} + +variable "environment_name" { + type = string + default = "dev" +} + +variable "is_initialized" { + type = string + default = "false" +} \ No newline at end of file diff --git a/jenkins-terraform/jenkins-ec2.tf b/jenkins-terraform/jenkins-ec2.tf new file mode 100644 index 00000000..28ff20e4 --- /dev/null +++ b/jenkins-terraform/jenkins-ec2.tf @@ -0,0 +1,75 @@ +# See the jenkins-local-tf-variables to find how the user-script is being set +data "template_file" "jenkins-user_data" { + template = file(local.user_script) + vars = { + jenkins_tf_state_bucket = var.jenkins_tf_state_bucket + stack_id = var.stack_id + jenkins_config_s3_location = var.jenkins_config_s3_location + jenkins_docker_maven_distro = var.jenkins_docker_maven_distro + jenkins_docker_terraform_distro = var.jenkins_docker_terraform_distro + jenkins_git_repo = var.jenkins_git_repo + git_commit = var.git_commit + } +} + +#Lookup latest AMI +data "aws_ami" "centos" { + most_recent = true + owners = ["752463128620"] + name_regex = "^srce-centos7-golden-*" +} + +data "template_cloudinit_config" "config" { + gzip = true + base64_encode = true + + # user_data + part { + content_type = "text/x-shellscript" + content = data.template_file.jenkins-user_data.rendered + } +} + +resource "aws_instance" "jenkins" { + ami = data.aws_ami.centos.id + instance_type = var.jenkins_ec2_instance_type + + iam_instance_profile = var.jenkins_instance_profile_name + + root_block_device { + delete_on_termination = true + encrypted = true + volume_size = 1000 + } + + vpc_security_group_ids = [ + aws_security_group.inbound-jenkins.id, + aws_security_group.outbound-jenkins-to-internet.id + ] + + subnet_id = var.jenkins_subnet_id + + tags = { + Owner = "Avillach_Lab" + Environment = var.environment_name + Project = local.project + Program = var.program + Name = "${var.program} Jenkins ${local.project} - ${var.stack_id} - ${var.git_commit}" + InitComplete = "${var.is_initialized}" + } + + user_data = data.template_cloudinit_config.config.rendered + + lifecycle { + create_before_destroy = true + } + +} + +output "jenkins-ec2-id" { + value = aws_instance.jenkins.id +} + +output "jenkins-ec2-ip" { + value = aws_instance.jenkins.private_ip +} diff --git a/jenkins-terraform/jenkins-local-tf-variables.tf b/jenkins-terraform/jenkins-local-tf-variables.tf new file mode 100644 index 00000000..ba002839 --- /dev/null +++ b/jenkins-terraform/jenkins-local-tf-variables.tf @@ -0,0 +1,21 @@ +/* + This tf vars should store dynamic vars + +*/ + +# variable to control OS Distribution. + +locals { + # list of supported Distributions. 
Used by jenkins_tf_local_var_dist for validation of Supported Distributions + valid_os = ["CENTOS"] + centos_user_script = "distro/linux/centos/user-scripts/install-docker.sh" + + # set user script location using coalesce based on the distribution + centos_script = var.jenkins_tf_local_var_OS_dist == "CENTOS" ? local.centos_user_script : "" + example_script = var.jenkins_tf_local_var_OS_dist == "EXAMPLE" ? "Unreachable Coalesece Example" : "" + + user_script = coalesce(local.centos_script, local.example_script) + + project = var.env_is_open_access ? "Open PIC-SURE" : "Auth PIC-SURE" +} + diff --git a/jenkins-terraform/jenkins-security-groups.tf b/jenkins-terraform/jenkins-security-groups.tf new file mode 100644 index 00000000..b6f546a7 --- /dev/null +++ b/jenkins-terraform/jenkins-security-groups.tf @@ -0,0 +1,50 @@ +# uniq name for security group. +resource "random_string" "random" { + length = 6 + special = false +} +locals { + uniq_name = random_string.random.result +} + +resource "aws_security_group" "inbound-jenkins" { + name = "allow_inbound_to_jenkins_${var.stack_id}_${local.uniq_name}" + description = "Allow inbound traffic on Port 443" + vpc_id = var.jenkins_vpc_id + + ingress { + from_port = 443 + to_port = 443 + protocol = "tcp" + cidr_blocks = var.jenkins_sg_ingress_https_cidr_blocks + } + + tags = { + Owner = "Avillach_Lab" + Environment = var.environment_name + Project = local.project + Program = var.program + Name = "${local.project} Jenkins Inbound Security Group - ${var.stack_id}" + } +} + +resource "aws_security_group" "outbound-jenkins-to-internet" { + name = "allow_jenkins_outbound_to_internet_${var.stack_id}_${local.uniq_name}" + description = "Allow outbound traffic from Jenkins" + vpc_id = var.jenkins_vpc_id + + egress { + from_port = 0 + to_port = 0 + protocol = -1 + cidr_blocks = var.jenkins_sg_egress_allow_all_cidr_blocks + } + + tags = { + Owner = "Avillach_Lab" + Environment = var.environment_name + Project = local.project + Program = var.program + Name = "${local.project} Jenkins Outbound Security Group - ${var.stack_id}" + } +} diff --git a/jenkins-terraform/provider.tf b/jenkins-terraform/provider.tf new file mode 100644 index 00000000..6cc28761 --- /dev/null +++ b/jenkins-terraform/provider.tf @@ -0,0 +1,12 @@ +provider "aws" { + region = "us-east-1" + profile = "avillachlab-secure-infrastructure" + version = "3.74" +} + +# currenlty using default AES encryption +terraform { + backend "s3" { + encrypt = true + } +} \ No newline at end of file diff --git a/non-fisma-infrastructure/provider.tf b/non-fisma-infrastructure/provider.tf index d87ee1cd..f7fb68d2 100644 --- a/non-fisma-infrastructure/provider.tf +++ b/non-fisma-infrastructure/provider.tf @@ -1,4 +1,6 @@ -provider "aws" { - region = "us-east-1" - profile = "avillachlab-secure-infrastructure" -} +# commenting out so non-fisma-infrastructure can never be applied +# should just remove this? +#provider "aws" { +# region = "us-east-1" +# profile = "avillachlab-secure-infrastructure" +#}