Skip to content

Instantly share code, notes, and snippets.

@lusoal
Last active February 23, 2023 16:50
Show Gist options

  • Select an option

  • Save lusoal/8b90105e3e8604a87ba238c19e8c0f72 to your computer and use it in GitHub Desktop.
---
AWSTemplateFormatVersion: '2010-09-09'
Description: >-
  AWS CloudFormation template for dynamic Cloud9 setups. Creates a Cloud9
  environment and bootstraps the instance. It also creates a small EC2
  instance to deploy the Terraform scripts that spin up an Amazon EKS cluster.
Parameters:
  # Instance type backing the Cloud9 IDE environment.
  ExampleC9InstanceType:
    Description: Example Cloud9 instance type
    Type: String
    Default: t3.small
    AllowedValues:
      - t2.micro
      - t3.micro
      - t3.small
      - t3.medium
    ConstraintDescription: Must be a valid Cloud9 instance type
  # Controls how the Cloud9 OwnerArn is resolved (see Conditions below).
  C9EnvType:
    Description: Environment type.
    Default: event-engine
    Type: String
    AllowedValues:
      - self
      - 3rdParty
      - event-engine
    ConstraintDescription: must specify self, 3rdParty or event-engine.
  # Only consumed when C9EnvType is 3rdParty.
  OwnerArn:
    Type: String
    Description: The Arn of the Cloud9 Owner to be set if 3rdParty deployment.
    Default: ""
  # NOTE(review): this parameter is not referenced by any resource in this
  # template - confirm whether it should be wired to the Cloud9 environment.
  ExampleC9InstanceVolumeSize:
    Type: Number
    Description: The Size in GB of the Cloud9 Instance Volume.
    Default: 15
  # Resolves the latest Amazon Linux 2 AMI via the public SSM parameter.
  LatestAmiId:
    Type: 'AWS::SSM::Parameter::Value<AWS::EC2::Image::Id>'
    Default: '/aws/service/ami-amazon-linux-latest/amzn2-ami-hvm-x86_64-gp2'
Conditions:
  Create3rdPartyResources: !Equals [!Ref C9EnvType, 3rdParty]
  CreateEventEngineResources: !Equals [!Ref C9EnvType, event-engine]
Resources:
  ################## PERMISSIONS AND ROLES #################
  # Instance role assumed by the Cloud9/terraform-deployer EC2 instances.
  # NOTE(review): AdministratorAccess is very broad - acceptable for a
  # disposable workshop account, not for production.
  ExampleC9Role:
    Type: AWS::IAM::Role
    Properties:
      Tags:
        - Key: Environment
          Value: AWS Example
      AssumeRolePolicyDocument:
        Version: '2012-10-17'
        Statement:
          - Effect: Allow
            Principal:
              Service:
                - ec2.amazonaws.com
                - ssm.amazonaws.com
            Action:
              - sts:AssumeRole
      ManagedPolicyArns:
        - arn:aws:iam::aws:policy/AdministratorAccess
      Path: "/"
  # Execution role for the bootstrap Lambda: CloudWatch Logs plus the EC2/IAM
  # calls needed to attach the instance profile to the Cloud9 instance.
  ExampleC9LambdaExecutionRole:
    Type: AWS::IAM::Role
    Properties:
      AssumeRolePolicyDocument:
        Version: '2012-10-17'
        Statement:
          - Effect: Allow
            Principal:
              Service:
                - lambda.amazonaws.com
            Action:
              - sts:AssumeRole
      Path: "/"
      Policies:
        - PolicyName:
            Fn::Join:
              - ''
              - - ExampleC9LambdaPolicy-
                - Ref: AWS::Region
          PolicyDocument:
            Version: '2012-10-17'
            Statement:
              - Effect: Allow
                Action:
                  - logs:CreateLogGroup
                  - logs:CreateLogStream
                  - logs:PutLogEvents
                Resource: arn:aws:logs:*:*:*
              - Effect: Allow
                Action:
                  - cloudformation:DescribeStacks
                  - cloudformation:DescribeStackEvents
                  - cloudformation:DescribeStackResource
                  - cloudformation:DescribeStackResources
                  - ec2:DescribeInstances
                  - ec2:AssociateIamInstanceProfile
                  - ec2:ModifyInstanceAttribute
                  - ec2:ReplaceIamInstanceProfileAssociation
                  - iam:ListInstanceProfiles
                  - iam:PassRole
                Resource: "*"
################## LAMBDA BOOTSTRAP FUNCTION ################
ExampleC9BootstrapInstanceLambda:
Description: Bootstrap Cloud9 instance
Type: Custom::ExampleC9BootstrapInstanceLambda
DependsOn:
- ExampleC9BootstrapInstanceLambdaFunction
- ExampleC9Instance
- ExampleC9InstanceProfile
- ExampleC9LambdaExecutionRole
Properties:
Tags:
- Key: Environment
Value: AWS Example
ServiceToken:
Fn::GetAtt:
- ExampleC9BootstrapInstanceLambdaFunction
- Arn
REGION:
Ref: AWS::Region
StackName:
Ref: AWS::StackName
EnvironmentId:
Ref: ExampleC9Instance
LabIdeInstanceProfileName:
Ref: ExampleC9InstanceProfile
LabIdeInstanceProfileArn:
Fn::GetAtt:
- ExampleC9InstanceProfile
- Arn
ExampleC9BootstrapInstanceLambdaFunction:
Type: AWS::Lambda::Function
Properties:
Tags:
- Key: Environment
Value: AWS Example
Handler: index.lambda_handler
Role:
Fn::GetAtt:
- ExampleC9LambdaExecutionRole
- Arn
Runtime: python3.9
MemorySize: 256
Timeout: '600'
Code:
ZipFile: |
from __future__ import print_function
import boto3
import json
import os
import time
import traceback
import cfnresponse
def lambda_handler(event, context):
# logger.info('event: {}'.format(event))
# logger.info('context: {}'.format(context))
responseData = {}
status = cfnresponse.SUCCESS
if event['RequestType'] == 'Delete':
responseData = {'Success': 'Custom Resource removed'}
cfnresponse.send(event, context, status, responseData, 'CustomResourcePhysicalID')
if event['RequestType'] == 'Create':
try:
# Open AWS clients
ec2 = boto3.client('ec2')
# Get the InstanceId of the Cloud9 IDE
instance = ec2.describe_instances(Filters=[{'Name': 'tag:Name','Values': ['aws-cloud9-'+event['ResourceProperties']['StackName']+'-'+event['ResourceProperties']['EnvironmentId']]}])['Reservations'][0]['Instances'][0]
# logger.info('instance: {}'.format(instance))
# Create the IamInstanceProfile request object
iam_instance_profile = {
'Arn': event['ResourceProperties']['LabIdeInstanceProfileArn'],
'Name': event['ResourceProperties']['LabIdeInstanceProfileName']
}
# logger.info('iam_instance_profile: {}'.format(iam_instance_profile))
# Wait for Instance to become ready before adding Role
instance_state = instance['State']['Name']
# logger.info('instance_state: {}'.format(instance_state))
while instance_state != 'running':
time.sleep(5)
instance_state = ec2.describe_instances(InstanceIds=[instance['InstanceId']])
# logger.info('instance_state: {}'.format(instance_state))
# attach instance profile
response = ec2.associate_iam_instance_profile(IamInstanceProfile=iam_instance_profile, InstanceId=instance['InstanceId'])
# logger.info('response - associate_iam_instance_profile: {}'.format(response))
r_ec2 = boto3.resource('ec2')
responseData = {'Success': 'Started bootstrapping for instance: '+instance['InstanceId']}
cfnresponse.send(event, context, status, responseData, 'CustomResourcePhysicalID')
except Exception as e:
status = cfnresponse.FAILED
print(traceback.format_exc())
responseData = {'Error': traceback.format_exc(e)}
finally:
cfnresponse.send(event, context, status, responseData, 'CustomResourcePhysicalID')
################## SSM BOOTSRAP FOR EC2 THAT APPLIES TERRAFORM ###############
ExampleC9OutputBucket:
Type: AWS::S3::Bucket
DeletionPolicy: Delete
Properties:
VersioningConfiguration:
Status: Enabled
BucketEncryption:
ServerSideEncryptionConfiguration:
- ServerSideEncryptionByDefault:
SSEAlgorithm: AES256
ExampleC9SSMDocument:
Type: AWS::SSM::Document
DependsOn: [ExampleC9OutputBucket]
Properties:
Tags:
- Key: Environment
Value: AWS Example
DocumentType: Command
DocumentFormat: YAML
Content:
schemaVersion: '2.2'
description: Bootstrap Cloud9 Instance
mainSteps:
- action: aws:runShellScript
name: ExampleC9bootstrap
inputs:
runCommand:
- "#!/bin/bash"
- date
- echo LANG=en_US.utf-8 >> /etc/environment
- echo LC_ALL=en_US.UTF-8 >> /etc/environment
- . /home/ec2-user/.bashrc
- echo '=== UPDATE system packages and INSTALL dependencies ==='
- yum update -y; yum install -y vim git jq bash-completion moreutils gettext yum-utils perl-Digest-SHA tree
- echo '=== ENABLE Amazon Extras EPEL Repository and INSTALL Git LFS ==='
- yum install -y amazon-linux-extras
- amazon-linux-extras install epel -y
- yum install -y git-lfs
- echo '=== INSTALL AWS CLI v2 ==='
- curl 'https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip' -o 'awscliv2.zip'
- unzip awscliv2.zip -d /tmp
- /tmp/aws/install --update
- rm -rf aws awscliv2.zip
- echo '=== INSTALL Kubernetes CLI ==='
- curl -o /tmp/kubectl https://s3.us-west-2.amazonaws.com/amazon-eks/1.22.6/2022-03-09/bin/linux/amd64/kubectl
- chmod +x /tmp/kubectl && mv /tmp/kubectl /usr/local/bin/
- /usr/local/bin/kubectl completion bash > /etc/bash_completion.d/kubectl
- echo '=== INSTALL Terraform CLI ==='
- yum-config-manager --add-repo https://rpm.releases.hashicorp.com/AmazonLinux/hashicorp.repo
- yum -y install terraform
- echo '=== CREATE Terraform Files ==='
- mkdir -p /home/ec2-user/environment/eks-blueprint
- |
cat > /home/ec2-user/environment/eks-blueprint/providers.tf <<EOF
terraform {
required_version = ">= 1.0.1"
required_providers {
aws = {
source = "hashicorp/aws"
version = ">= 3.72"
}
kubernetes = {
source = "hashicorp/kubernetes"
version = ">= 2.10"
}
helm = {
source = "hashicorp/helm"
version = ">= 2.4.1"
}
kubectl = {
source = "gavinbunney/kubectl"
version = ">= 1.14"
}
}
}
EOF
- |
cat > /home/ec2-user/environment/eks-blueprint/data.tf << EOF
# Find the user currently in use by AWS
data "aws_caller_identity" "current" {}
# Region in which to deploy the solution
data "aws_region" "current" {}
# Availability zones to use in our solution
data "aws_availability_zones" "available" {
state = "available"
}
data "aws_eks_cluster" "cluster" {
name = module.eks_blueprints.eks_cluster_id
}
data "aws_eks_cluster_auth" "this" {
name = module.eks_blueprints.eks_cluster_id
}
EOF
- |
cat > /home/ec2-user/environment/eks-blueprint/outputs.tf <<EOF
output "vpc_id" {
description = "The ID of the VPC"
value = module.vpc.vpc_id
}
output "configure_kubectl" {
description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig"
value = module.eks_blueprints.configure_kubectl
}
EOF
- |
cat > /home/ec2-user/environment/eks-blueprint/locals.tf <<EOF
locals {
name = basename(path.cwd)
region = data.aws_region.current.name
cluster_version = "1.23"
vpc_cidr = "10.0.0.0/16"
azs = slice(data.aws_availability_zones.available.names, 0, 3)
node_group_name = "managed-ondemand"
tags = {
Blueprint = local.name
GithubRepo = "github.com/aws-ia/terraform-aws-eks-blueprints"
}
}
EOF
- |
cat > /home/ec2-user/environment/eks-blueprint/main.tf <<EOF
provider "kubernetes" {
host = module.eks_blueprints.eks_cluster_endpoint
cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data)
token = data.aws_eks_cluster_auth.this.token
}
provider "helm" {
kubernetes {
host = module.eks_blueprints.eks_cluster_endpoint
cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data)
token = data.aws_eks_cluster_auth.this.token
}
}
provider "kubectl" {
apply_retry_count = 10
host = module.eks_blueprints.eks_cluster_endpoint
cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data)
load_config_file = false
token = data.aws_eks_cluster_auth.this.token
}
module "eks_blueprints" {
source = "github.com/aws-ia/terraform-aws-eks-blueprints?ref=v4.21.0"
cluster_name = local.name
# EKS Cluster VPC and Subnet mandatory config
vpc_id = module.vpc.vpc_id
private_subnet_ids = module.vpc.private_subnets
# EKS CONTROL PLANE VARIABLES
cluster_version = local.cluster_version
# List of Additional roles admin in the cluster
# Comment this section if you ARE NOT at an AWS Event, as the TeamRole won't exist on your site, or replace with any valid role you want
map_roles = [
{
rolearn = "arn:aws:iam::\${data.aws_caller_identity.current.account_id}:role/TeamRole"
username = "ops-role" # The user name within Kubernetes to map to the IAM role
groups = ["system:masters"] # A list of groups within Kubernetes to which the role is mapped; Checkout K8s Role and Rolebindings
}
]
# EKS MANAGED NODE GROUPS
managed_node_groups = {
mg_5 = {
node_group_name = local.node_group_name
instance_types = ["m5.xlarge"]
subnet_ids = module.vpc.private_subnets
}
}
tags = local.tags
}
module "vpc" {
source = "terraform-aws-modules/vpc/aws"
version = "3.16.0"
name = local.name
cidr = local.vpc_cidr
azs = local.azs
public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k)]
private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 10)]
enable_nat_gateway = true
create_igw = true
enable_dns_hostnames = true
single_nat_gateway = true
# Manage so we can name
manage_default_network_acl = true
default_network_acl_tags = { Name = "\${local.name}-default" }
manage_default_route_table = true
default_route_table_tags = { Name = "\${local.name}-default" }
manage_default_security_group = true
default_security_group_tags = { Name = "\${local.name}-default" }
public_subnet_tags = {
"kubernetes.io/cluster/\${local.name}" = "shared"
"kubernetes.io/role/elb" = "1"
}
private_subnet_tags = {
"kubernetes.io/cluster/\${local.name}" = "shared"
"kubernetes.io/role/internal-elb" = "1"
}
tags = local.tags
}
EOF
- sudo chown -R ec2-user:ec2-user /home/ec2-user/environment/eks-blueprint/
- cd /home/ec2-user/environment/eks-blueprint && terraform init && terraform apply --auto-approve
- |
bucket_name=$(aws s3 ls | grep -i mod | awk '{print $3}')
aws s3 cp /home/ec2-user/environment/eks-blueprint/terraform.tfstate s3://$bucket_name
- echo "Bootstrap completed with return code $?"
ExampleC9BootstrapAssociation:
Type: AWS::SSM::Association
DependsOn: ExampleC9OutputBucket
Properties:
Name: !Ref ExampleC9SSMDocument
OutputLocation:
S3Location:
OutputS3BucketName: !Ref ExampleC9OutputBucket
OutputS3KeyPrefix: bootstrapoutput
Targets:
- Key: tag:SSMBootstrap
Values:
- Active
### SSM Bootstrap for Cloud9 ###
ExampleC9SSMDocument2:
Type: AWS::SSM::Document
DependsOn: [ExampleC9OutputBucket, ExampleC9BootstrapAssociation]
Properties:
Tags:
- Key: Environment
Value: AWS Example
DocumentType: Command
DocumentFormat: YAML
Content:
schemaVersion: '2.2'
description: Bootstrap Cloud9 Instance
mainSteps:
- action: aws:runShellScript
name: ExampleC9bootstrap2
inputs:
runCommand:
- "#!/bin/bash"
- date
- echo LANG=en_US.utf-8 >> /etc/environment
- echo LC_ALL=en_US.UTF-8 >> /etc/environment
- . /home/ec2-user/.bashrc
- echo '=== UPDATE system packages and INSTALL dependencies ==='
- yum update -y; yum install -y vim git jq bash-completion moreutils gettext yum-utils perl-Digest-SHA tree
- echo '=== ENABLE Amazon Extras EPEL Repository and INSTALL Git LFS ==='
- yum install -y amazon-linux-extras
- amazon-linux-extras install epel -y
- yum install -y git-lfs
- echo '=== INSTALL AWS CLI v2 ==='
- curl 'https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip' -o 'awscliv2.zip'
- unzip awscliv2.zip -d /tmp
- /tmp/aws/install --update
- rm -rf aws awscliv2.zip
- echo '=== INSTALL Kubernetes CLI ==='
- curl -o /tmp/kubectl https://s3.us-west-2.amazonaws.com/amazon-eks/1.22.6/2022-03-09/bin/linux/amd64/kubectl
- chmod +x /tmp/kubectl && mv /tmp/kubectl /usr/local/bin/
- /usr/local/bin/kubectl completion bash > /etc/bash_completion.d/kubectl
- echo '=== INSTALL Terraform CLI ==='
- yum-config-manager --add-repo https://rpm.releases.hashicorp.com/AmazonLinux/hashicorp.repo
- yum -y install terraform
- echo '=== CREATE Terraform Files ==='
- mkdir -p /home/ec2-user/environment/eks-blueprint
- |
cat > /home/ec2-user/environment/eks-blueprint/providers.tf <<EOF
terraform {
required_version = ">= 1.0.1"
required_providers {
aws = {
source = "hashicorp/aws"
version = ">= 3.72"
}
kubernetes = {
source = "hashicorp/kubernetes"
version = ">= 2.10"
}
helm = {
source = "hashicorp/helm"
version = ">= 2.4.1"
}
kubectl = {
source = "gavinbunney/kubectl"
version = ">= 1.14"
}
}
}
EOF
- |
cat > /home/ec2-user/environment/eks-blueprint/data.tf << EOF
# Find the user currently in use by AWS
data "aws_caller_identity" "current" {}
# Region in which to deploy the solution
data "aws_region" "current" {}
# Availability zones to use in our solution
data "aws_availability_zones" "available" {
state = "available"
}
data "aws_eks_cluster" "cluster" {
name = module.eks_blueprints.eks_cluster_id
}
data "aws_eks_cluster_auth" "this" {
name = module.eks_blueprints.eks_cluster_id
}
EOF
- |
cat > /home/ec2-user/environment/eks-blueprint/outputs.tf <<EOF
output "vpc_id" {
description = "The ID of the VPC"
value = module.vpc.vpc_id
}
output "configure_kubectl" {
description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig"
value = module.eks_blueprints.configure_kubectl
}
EOF
- |
cat > /home/ec2-user/environment/eks-blueprint/locals.tf <<EOF
locals {
name = basename(path.cwd)
region = data.aws_region.current.name
cluster_version = "1.23"
vpc_cidr = "10.0.0.0/16"
azs = slice(data.aws_availability_zones.available.names, 0, 3)
node_group_name = "managed-ondemand"
tags = {
Blueprint = local.name
GithubRepo = "github.com/aws-ia/terraform-aws-eks-blueprints"
}
}
EOF
- |
cat > /home/ec2-user/environment/eks-blueprint/main.tf <<EOF
provider "kubernetes" {
host = module.eks_blueprints.eks_cluster_endpoint
cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data)
token = data.aws_eks_cluster_auth.this.token
}
provider "helm" {
kubernetes {
host = module.eks_blueprints.eks_cluster_endpoint
cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data)
token = data.aws_eks_cluster_auth.this.token
}
}
provider "kubectl" {
apply_retry_count = 10
host = module.eks_blueprints.eks_cluster_endpoint
cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data)
load_config_file = false
token = data.aws_eks_cluster_auth.this.token
}
module "eks_blueprints" {
source = "github.com/aws-ia/terraform-aws-eks-blueprints?ref=v4.21.0"
cluster_name = local.name
# EKS Cluster VPC and Subnet mandatory config
vpc_id = module.vpc.vpc_id
private_subnet_ids = module.vpc.private_subnets
# EKS CONTROL PLANE VARIABLES
cluster_version = local.cluster_version
# List of Additional roles admin in the cluster
# Comment this section if you ARE NOT at an AWS Event, as the TeamRole won't exist on your site, or replace with any valid role you want
map_roles = [
{
rolearn = "arn:aws:iam::\${data.aws_caller_identity.current.account_id}:role/TeamRole"
username = "ops-role" # The user name within Kubernetes to map to the IAM role
groups = ["system:masters"] # A list of groups within Kubernetes to which the role is mapped; Checkout K8s Role and Rolebindings
}
]
# EKS MANAGED NODE GROUPS
managed_node_groups = {
mg_5 = {
node_group_name = local.node_group_name
instance_types = ["m5.xlarge"]
subnet_ids = module.vpc.private_subnets
}
}
tags = local.tags
}
module "vpc" {
source = "terraform-aws-modules/vpc/aws"
version = "3.16.0"
name = local.name
cidr = local.vpc_cidr
azs = local.azs
public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k)]
private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 10)]
enable_nat_gateway = true
create_igw = true
enable_dns_hostnames = true
single_nat_gateway = true
# Manage so we can name
manage_default_network_acl = true
default_network_acl_tags = { Name = "\${local.name}-default" }
manage_default_route_table = true
default_route_table_tags = { Name = "\${local.name}-default" }
manage_default_security_group = true
default_security_group_tags = { Name = "\${local.name}-default" }
public_subnet_tags = {
"kubernetes.io/cluster/\${local.name}" = "shared"
"kubernetes.io/role/elb" = "1"
}
private_subnet_tags = {
"kubernetes.io/cluster/\${local.name}" = "shared"
"kubernetes.io/role/internal-elb" = "1"
}
tags = local.tags
}
EOF
- sleep 900
- |
bucket_name=$(aws s3 ls | grep -i mod | awk '{print $3}')
aws s3 cp s3://$bucket_name/terraform.tfstate /home/ec2-user/environment/eks-blueprint/
- cd /home/ec2-user/environment/eks-blueprint/ && terraform init
- sudo chown -R ec2-user:ec2-user /home/ec2-user/environment/eks-blueprint/
- echo 'aws cloud9 update-environment --environment-id $C9_PID --managed-credentials-action DISABLE' >> /home/ec2-user/.bashrc
- |
aws_region=$(curl --silent http://169.254.169.254/latest/dynamic/instance-identity/document | jq -r .region)
aws eks --region $aws_region update-kubeconfig --name eks-blueprint
- mkdir -p /home/ec2-user/.kube/ && cp /root/.kube/config /home/ec2-user/.kube/
- sudo chown -R ec2-user:ec2-user /home/ec2-user/.kube/
- echo "Bootstrap completed with return code $?"
ExampleC9BootstrapAssociation2:
Type: AWS::SSM::Association
DependsOn: [ExampleC9OutputBucket, ExampleC9BootstrapAssociation]
Properties:
Name: !Ref ExampleC9SSMDocument2
OutputLocation:
S3Location:
OutputS3BucketName: !Ref ExampleC9OutputBucket
OutputS3KeyPrefix: bootstrapoutput2
Targets:
- Key: tag:SSMBootstrap
Values:
- Active2
################## INSTANCE #####################
ExampleC9InstanceProfile:
Type: AWS::IAM::InstanceProfile
Properties:
Path: "/"
Roles:
- Ref: ExampleC9Role
ExampleC9Instance:
Description: "-"
DependsOn: ExampleC9BootstrapAssociation2
Type: AWS::Cloud9::EnvironmentEC2
Properties:
Description: AWS Cloud9 instance for Examples
AutomaticStopTimeMinutes: 3600
InstanceType:
Ref: ExampleC9InstanceType
Name:
Ref: AWS::StackName
OwnerArn: !If [Create3rdPartyResources, !Ref OwnerArn, !If [CreateEventEngineResources, !Join ['',['arn:aws:iam::',!Ref 'AWS::AccountId',':assumed-role/TeamRole/MasterKey']],!Ref "AWS::NoValue"]]
Tags:
-
Key: SSMBootstrap
Value: Active2 # Change back
-
Key: Environment
Value: AWS Example
##### Instance to create environment #####
EC2Instance:
Type: AWS::EC2::Instance
Properties:
InstanceType: t3.medium
SecurityGroups: [!Ref 'InstanceSecurityGroup']
ImageId: !Ref 'LatestAmiId'
IamInstanceProfile: !Ref ExampleC9InstanceProfile
Tags:
- Key: SSMBootstrap
Value: Active
- Key: Name
Value: terraform-deployer
InstanceSecurityGroup:
Type: AWS::EC2::SecurityGroup
Properties:
GroupDescription: Enable SSH access via port 22
SecurityGroupIngress:
- IpProtocol: tcp
FromPort: 22
ToPort: 22
CidrIp: 0.0.0.0/0
Outputs:
  # Direct console URL to the created Cloud9 IDE.
  Cloud9IDE:
    Value:
      Fn::Join:
        - ''
        - - https://
          - Ref: AWS::Region
          - ".console.aws.amazon.com/cloud9/ide/"
          - Ref: ExampleC9Instance
          - "?region="
          - Ref: AWS::Region
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment