Saturday, April 11, 2020

EKS security


1. Limit access to the cluster API server

Use the AWS console UI or the following command:

aws eks update-cluster-config \
    --region us-east-1 \
    --name co-ec-eks-cluster-vpc-05b52a0ba174eeeee \
    --resources-vpc-config endpointPublicAccess=true,publicAccessCidrs="19.19.19.19/32",endpointPrivateAccess=true

Use the UI (EKS Networking section) or the following command to verify the change:
aws eks describe-cluster --name co-ec-eks-cluster-vpc-05b52a0ba174eeeee --region us-east-1
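To inspect just the networking settings, the same call can be filtered with a JMESPath query (--query is a standard AWS CLI flag; resourcesVpcConfig is the relevant field in the describe-cluster output):

aws eks describe-cluster --name co-ec-eks-cluster-vpc-05b52a0ba174eeeee --region us-east-1 --query 'cluster.resourcesVpcConfig'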

We can also do this from Terraform. Sample code:

resource "aws_eks_cluster" "co-ec-eks-cluster" {
 
name     = local.eks_cluster_name
  role_arn
= aws_iam_role.co-ec-eks-cluster-iam-role.arn

 
vpc_config {
   
security_group_ids      = [aws_security_group.co-ec-eks-cluster-security-group.id]
   
subnet_ids              = local.subnet_ids
    endpoint_private_access
= true // allow access to EKS network
    // https://www.cloudflare.com/ips-v4 for list of IPs from Cloudflare
   
public_access_cidrs = toset(concat(data.cloudflare_ip_ranges.cloudflare.ipv4_cidr_blocks, local.workstation-external-cidr))
  }

 
depends_on = [
    aws_iam_role_policy_attachment.co-ec-eks-cluster-AmazonEKSClusterPolicy,
    aws_iam_role_policy_attachment.co-ec-eks-cluster-AmazonEKSServicePolicy,
  ]
}
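The sample above references a Cloudflare data source (from the cloudflare provider) and a couple of locals defined elsewhere in the module. A minimal sketch of those supporting definitions, with illustrative values that are assumptions rather than the original module's:

data "cloudflare_ip_ranges" "cloudflare" {}

locals {
  // assumed values for illustration only
  eks_cluster_name          = "co-ec-eks-cluster-${local.vpc_id}"
  workstation-external-cidr = ["19.19.19.19/32"] // CIDRs of admin workstations
}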


2. Control access for the EKS cluster nodes (so the services running on them can access AWS resources) through Terraform:



resource "aws_iam_role" "fs-ec-eks-node-iam-role" {
  name = "fs-ec-eks-node-iam-role-${local.vpc_id}"
  assume_role_policy = <<POLICY
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Principal": {
        "Service": "ec2.amazonaws.com"
      },
      "Action": "sts:AssumeRole"
    }
  ]
}
POLICY
}

resource "aws_iam_role_policy" "fs-ec-eks-node-auto-scale-policy" {
  name = "fs-ec-eks-node-auto-scale-policy"  role = aws_iam_role.fs-ec-eks-node-iam-role.id
  policy = <<-EOF
{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Action": [
                "autoscaling:DescribeAutoScalingGroups",
                "autoscaling:DescribeAutoScalingInstances",
                "autoscaling:DescribeLaunchConfigurations",
                "autoscaling:DescribeTags",
                "autoscaling:SetDesiredCapacity",
                "autoscaling:TerminateInstanceInAutoScalingGroup"
            ],
            "Resource": "*"
        }
    ]
}
EOF
}

resource "aws_iam_role_policy" "fs-ec-eks-node-metrics-access-policy" {
  name = "fs-ec-eks-node-metrics-access-policy"  role = aws_iam_role.fs-ec-eks-node-iam-role.id
  policy = <<-EOF
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": [
        "cloudwatch:GetMetricData",
        "cloudwatch:GetMetricStatistics",
        "cloudwatch:ListMetrics"
      ],
      "Resource": "*"
    }
  ]
}
EOF
}

resource "aws_iam_role_policy_attachment" "fs-ec-eks-node-AmazonEKSWorkerNodePolicy" {
  policy_arn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"  role       = aws_iam_role.fs-ec-eks-node-iam-role.name}

resource "aws_iam_role_policy_attachment" "fs-ec-eks-node-AmazonEKS_CNI_Policy" {
  policy_arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"  role       = aws_iam_role.fs-ec-eks-node-iam-role.name}

resource "aws_iam_role_policy_attachment" "fs-ec-eks-node-AmazonEC2ContainerRegistryReadOnly" {
  policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"  role       = aws_iam_role.fs-ec-eks-node-iam-role.name}

resource "aws_iam_role_policy_attachment" "fs-ec-eks-node-CloudWatchAgentServerPolicy" {
  policy_arn = "arn:aws:iam::aws:policy/CloudWatchAgentServerPolicy"  role       = aws_iam_role.fs-ec-eks-node-iam-role.name}

resource "aws_iam_role_policy_attachment" "fs-ec-eks-node-AmazonDynamoDBFullAccess" {
  policy_arn = "arn:aws:iam::aws:policy/AmazonDynamoDBFullAccess"  role       = aws_iam_role.fs-ec-eks-node-iam-role.name}


# Using the new feature from re:Invent 2019 to provision nodes automatically without the need
# for EC2 provisioning. EKS-optimized AMIs will be used automatically for each node.
# Nodes launched as part of a managed node group are automatically tagged for auto-discovery
# by the Kubernetes cluster autoscaler.
# https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html
# https://www.terraform.io/docs/providers/aws/r/eks_node_group.html
resource "aws_eks_node_group" "fs-ec-eks-node-group" {
  cluster_name    = aws_eks_cluster.fs-ec-eks-cluster.name
  node_group_name = "fs-ec-eks-node-group-${local.vpc_id}"
  node_role_arn   = aws_iam_role.fs-ec-eks-node-iam-role.arn
  subnet_ids      = local.subnet_ids
  instance_types  = [var.instance_type]

  scaling_config {
    desired_size = 3
    max_size     = 8 // TODO
    min_size     = 3
  }

  depends_on = [
    aws_iam_role_policy_attachment.fs-ec-eks-node-AmazonEKSWorkerNodePolicy,
    aws_iam_role_policy_attachment.fs-ec-eks-node-AmazonEKS_CNI_Policy,
    aws_iam_role_policy_attachment.fs-ec-eks-node-AmazonEC2ContainerRegistryReadOnly,
    aws_iam_role_policy_attachment.fs-ec-eks-node-CloudWatchAgentServerPolicy,
    aws_iam_role_policy_attachment.fs-ec-eks-node-AmazonDynamoDBFullAccess,
  ]
}
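The node group references var.instance_type, which is assumed to be declared elsewhere in the module; a minimal declaration might look like this (the default shown is an assumption, not from the original):

variable "instance_type" {
  description = "EC2 instance type for the EKS worker nodes"
  default     = "m5.large" // assumed default for illustration
}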


3. Control access to MSK (Kafka):

resource "aws_security_group" "fs-ec-msk-cluster-security-group" {
  name        = "fs-ec-msk-cluster-security-group-${local.vpc_id}"  description = "Cluster communication with worker nodes"  vpc_id      = local.vpc_id
  egress {
    from_port   = 0    to_port     = 0    protocol    = "-1"    cidr_blocks = ["0.0.0.0/0"]
  }

  tags = {
    Name = "fs-ec-msk-cluster-${local.vpc_id}"  }
}

# Allow access from every host in the same VPC. TODO
resource "aws_security_group_rule" "fs-ec-msk-cluster-ingress-workstation-http" {
  cidr_blocks       = [local.vpc_cidr]
  description       = "Allow access to Kafka from same VPC"
  from_port         = 9092
  protocol          = "tcp"
  security_group_id = aws_security_group.fs-ec-msk-cluster-security-group.id
  to_port           = 9092
  type              = "ingress"
}

resource "aws_security_group_rule" "fs-ec-msk-cluster-ingress-workstation-https" {
  cidr_blocks       = [local.vpc_cidr]
  description       = "Allow access to Kafka from same VPC"
  from_port         = 9094
  protocol          = "tcp"
  security_group_id = aws_security_group.fs-ec-msk-cluster-security-group.id
  to_port           = 9094
  type              = "ingress"
}

resource "aws_security_group_rule" "fs-ec-msk-cluster-ingress-workstation-zookeeper" {
  cidr_blocks       = [local.vpc_cidr]
  description       = "Allow access to Zookeeper from same VPC"
  from_port         = 2181
  protocol          = "tcp"
  security_group_id = aws_security_group.fs-ec-msk-cluster-security-group.id
  to_port           = 2181
  type              = "ingress"
}
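Port 9092 is the plaintext Kafka listener, 9094 the TLS listener, and 2181 ZooKeeper. Rather than opening these to the whole VPC CIDR, an ingress rule can reference the EKS worker-node security group directly via source_security_group_id; a sketch assuming a node security group named aws_security_group.fs-ec-eks-node-security-group (a hypothetical name, not defined above):

resource "aws_security_group_rule" "fs-ec-msk-cluster-ingress-eks-nodes-tls" {
  description              = "Allow TLS Kafka access from EKS worker nodes only"
  from_port                = 9094
  to_port                  = 9094
  protocol                 = "tcp"
  security_group_id        = aws_security_group.fs-ec-msk-cluster-security-group.id
  source_security_group_id = aws_security_group.fs-ec-eks-node-security-group.id // hypothetical
  type                     = "ingress"
}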

resource "aws_kms_key" "fs-ec-kms" {
  description = "KMS key"}

resource "aws_msk_cluster" "fs-ec-msk-cluster" {
  cluster_name           = "fs-ec-msk-cluster-${local.vpc_id}"  kafka_version          = var.kafka_version  number_of_broker_nodes = length(local.subnets_ids)

  configuration_info {
    arn      = aws_msk_configuration.fs-ec-msk-configuration.arn    revision = aws_msk_configuration.fs-ec-msk-configuration.latest_revision  }

  broker_node_group_info {
    instance_type   = var.broker_type    ebs_volume_size = var.broker_ebs_size    client_subnets  = local.subnets_ids
    security_groups = [aws_security_group.fs-ec-msk-cluster-security-group.id]
  }

  encryption_info {
    encryption_at_rest_kms_key_arn = aws_kms_key.fs-ec-kms.arn    encryption_in_transit {
      client_broker = "TLS" // PLAINTEXT"        in_cluster = true    }
  }

  tags = {
    Name = "fs-ec-msk-cluster-${local.vpc_id}"  }
}

// It is not possible to destroy cluster configs, so a random suffix is used.
resource "random_id" "msk" {
  byte_length = 4
}

resource "aws_msk_configuration" "fs-ec-msk-configuration" {
  kafka_versions = [var.kafka_version]
  name           = "${var.msk_config_name_prefix}fs-ec-msk-configuration-${local.vpc_id}-${random_id.msk.hex}"
  server_properties = <<PROPERTIES
auto.create.topics.enable = true
delete.topic.enable = false
num.partitions = 96
PROPERTIES
}
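To hand the endpoints to clients, the broker and ZooKeeper connection strings can be exported as outputs (bootstrap_brokers_tls and zookeeper_connect_string are exported attributes of aws_msk_cluster):

output "msk-bootstrap-brokers-tls" {
  value = aws_msk_cluster.fs-ec-msk-cluster.bootstrap_brokers_tls
}

output "msk-zookeeper-connect" {
  value = aws_msk_cluster.fs-ec-msk-cluster.zookeeper_connect_string
}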




