# Source: gitops-ack-kro-argocd/charts/kro/resource-groups/eks/rg-eks-basic.yaml
# yamllint disable rule:line-length
---
# kro ResourceGraphDefinition that provisions a "basic" EKS cluster through
# ACK controllers (IAM roles, EKS cluster, addons, access entries) and
# registers the resulting cluster with a hub ArgoCD via a cluster Secret.
apiVersion: kro.run/v1alpha1
kind: ResourceGraphDefinition
metadata:
  name: eksclusterbasic.kro.run
  annotations:
    # ArgoCD must not dry-run CRs whose CRDs are created in the same sync.
    argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
    argocd.argoproj.io/sync-wave: "-1"
spec:
  schema:
    apiVersion: v1alpha1
    kind: EksClusterBasic
    spec:
      name: string
      tenant: string
      environment: string
      region: string
      accountId: string
      managementAccountId: string
      k8sVersion: string
      adminRoleName: string
      fleetSecretManagerSecretNameSuffix: string
      domainName: string
      aws_partition: string | default="aws"
      aws_dns_suffix: string | default="amazonaws.com"
      network:
        vpcID: string
        subnets:
          controlplane:
            subnet1ID: string
            subnet2ID: string
          workers:
            subnet1ID: string
            subnet2ID: string
      workloads: string  # Define if we want to deploy workloads application
      gitops:
        addonsRepoBasePath: string
        addonsRepoPath: string
        addonsRepoRevision: string
        addonsRepoUrl: string
        fleetRepoBasePath: string
        fleetRepoPath: string
        fleetRepoRevision: string
        fleetRepoUrl: string
      addons:
        enable_external_secrets: string
        external_secrets_namespace: string
        external_secrets_service_account: string
    status:
      # Surfaced from the ACK Cluster resource once it is reconciled.
      clusterARN: ${ekscluster.status.ackResourceMetadata.arn}
      cdata: ${ekscluster.status.certificateAuthority.data}
      endpoint: ${ekscluster.status.endpoint}
      clusterState: ${ekscluster.status.status}
  resources:
    ###########################################################
    # EKS Cluster
    ###########################################################
    - id: clusterRole
      template:
        apiVersion: iam.services.k8s.aws/v1alpha1
        kind: Role
        metadata:
          namespace: "${schema.spec.name}"
          name: "${schema.spec.name}-cluster-role"
          annotations:
            services.k8s.aws/region: ${schema.spec.region}
        spec:
          name: "${schema.spec.name}-cluster-role"
          policies:
            - arn:aws:iam::aws:policy/AmazonEKSClusterPolicy
            - arn:aws:iam::aws:policy/AmazonEKSComputePolicy
            - arn:aws:iam::aws:policy/AmazonEKSBlockStoragePolicy
            - arn:aws:iam::aws:policy/AmazonEKSLoadBalancingPolicy
            - arn:aws:iam::aws:policy/AmazonEKSNetworkingPolicy
          assumeRolePolicyDocument: |
            {
              "Version": "2012-10-17",
              "Statement": [
                {
                  "Effect": "Allow",
                  "Principal": {
                    "Service": "eks.amazonaws.com"
                  },
                  "Action": [
                    "sts:AssumeRole",
                    "sts:TagSession"
                  ]
                }
              ]
            }
    - id: nodeRole
      template:
        apiVersion: iam.services.k8s.aws/v1alpha1
        kind: Role
        metadata:
          namespace: "${schema.spec.name}"
          name: "${schema.spec.name}-cluster-node-role"
          annotations:
            services.k8s.aws/region: ${schema.spec.region}
        spec:
          name: "${schema.spec.name}-cluster-node-role"
          policies:
            - arn:aws:iam::aws:policy/AmazonEKSWorkerNodeMinimalPolicy
            - arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryPullOnly
            - arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore
            - arn:aws:iam::aws:policy/CloudWatchAgentServerPolicy
          assumeRolePolicyDocument: |
            {
              "Version": "2012-10-17",
              "Statement": [
                {
                  "Effect": "Allow",
                  "Principal": {
                    "Service": "ec2.amazonaws.com"
                  },
                  "Action": [
                    "sts:AssumeRole",
                    "sts:TagSession"
                  ]
                }
              ]
            }
    # https://aws-controllers-k8s.github.io/community/reference/eks/v1alpha1/cluster/
    - id: ekscluster
      readyWhen:
        - ${ekscluster.status.status == "ACTIVE"}
      template:
        apiVersion: eks.services.k8s.aws/v1alpha1
        kind: Cluster
        metadata:
          namespace: "${schema.spec.name}"
          name: "${schema.spec.name}"
          # implicit dependencies with roles
          annotations:
            clusterRoleArn: "${clusterRole.status.ackResourceMetadata.arn}"
            nodeRoleArn: "${nodeRole.status.ackResourceMetadata.arn}"
            services.k8s.aws/region: ${schema.spec.region}
        spec:
          name: "${schema.spec.name}"
          roleARN: "${clusterRole.status.ackResourceMetadata.arn}"
          version: "${schema.spec.k8sVersion}"
          accessConfig:
            authenticationMode: "API_AND_CONFIG_MAP"
            bootstrapClusterCreatorAdminPermissions: true
          # EKS Auto Mode managed compute.
          computeConfig:
            enabled: true
            nodeRoleARN: ${nodeRole.status.ackResourceMetadata.arn}
            nodePools:
              - system
              - general-purpose
          kubernetesNetworkConfig:
            ipFamily: ipv4
            elasticLoadBalancing:
              enabled: true
          logging:
            clusterLogging:
              - enabled: true
                types:
                  - api
                  - audit
                  - authenticator
                  - controllerManager
                  - scheduler
          storageConfig:
            blockStorage:
              enabled: true
          resourcesVPCConfig:
            endpointPrivateAccess: true
            endpointPublicAccess: true
            subnetIDs:
              - ${schema.spec.network.subnets.controlplane.subnet1ID}
              - ${schema.spec.network.subnets.controlplane.subnet2ID}
          zonalShiftConfig:
            enabled: true
          tags:
            kro-management: ${schema.spec.name}
            tenant: ${schema.spec.tenant}
            environment: ${schema.spec.environment}
    - id: podIdentityAddon
      template:
        apiVersion: eks.services.k8s.aws/v1alpha1
        kind: Addon
        metadata:
          name: eks-pod-identity-agent
          namespace: "${schema.spec.name}"
          annotations:
            clusterArn: "${ekscluster.status.ackResourceMetadata.arn}"
            services.k8s.aws/region: ${schema.spec.region}
        spec:
          name: eks-pod-identity-agent
          addonVersion: v1.3.4-eksbuild.1
          clusterName: "${schema.spec.name}"
    ###########################################################
    # ArgoCD Integration
    ###########################################################
    - id: argocdSecret
      template:
        apiVersion: v1
        kind: Secret
        metadata:
          name: "${schema.spec.name}"
          namespace: argocd
          labels:
            argocd.argoproj.io/secret-type: cluster
            # Compatible fleet-management
            fleet_member: spoke
            tenant: "${schema.spec.tenant}"
            environment: "${schema.spec.environment}"
            aws_cluster_name: "${schema.spec.name}"
            workloads: "${schema.spec.workloads}"
            # using useSelector: true for centralized mode
            enable_external_secrets: "${schema.spec.addons.enable_external_secrets}"
          annotations:
            # GitOps Bridge
            accountId: "${schema.spec.accountId}"
            aws_account_id: "${schema.spec.accountId}"
            region: "${schema.spec.region}"
            aws_region: "${schema.spec.region}"
            aws_central_region: "${schema.spec.region}"  # used in fleet-management gitops
            oidcProvider: "${ekscluster.status.identity.oidc.issuer}"
            aws_cluster_name: "${schema.spec.name}"
            aws_vpc_id: "${schema.spec.network.vpcID}"
            # GitOps Configuration
            addons_repo_basepath: "${schema.spec.gitops.addonsRepoBasePath}"
            addons_repo_path: "${schema.spec.gitops.addonsRepoPath}"
            addons_repo_revision: "${schema.spec.gitops.addonsRepoRevision}"
            addons_repo_url: "${schema.spec.gitops.addonsRepoUrl}"
            fleet_repo_basepath: "${schema.spec.gitops.fleetRepoBasePath}"
            fleet_repo_path: "${schema.spec.gitops.fleetRepoPath}"
            fleet_repo_revision: "${schema.spec.gitops.fleetRepoRevision}"
            fleet_repo_url: "${schema.spec.gitops.fleetRepoUrl}"
            # Generic
            external_secrets_namespace: "${schema.spec.addons.external_secrets_namespace}"
            external_secrets_service_account: "${schema.spec.addons.external_secrets_service_account}"
            access_entry_arn: "${accessEntry.status.ackResourceMetadata.arn}"
        type: Opaque
        # TODO bug in KRO, it always see some drifts..
        stringData:
          name: "${schema.spec.name}"
          server: "${ekscluster.status.ackResourceMetadata.arn}"
          project: "default"
    - id: accessEntry
      readyWhen:
        - ${accessEntry.status.conditions.exists(x, x.type == 'ACK.ResourceSynced' && x.status == "True")}  # check on ACK condition
      template:
        apiVersion: eks.services.k8s.aws/v1alpha1
        kind: AccessEntry
        metadata:
          namespace: "${schema.spec.name}"
          name: "${schema.spec.name}-access-entry"
          annotations:
            services.k8s.aws/region: ${schema.spec.region}
        spec:
          clusterName: "${schema.spec.name}"
          accessPolicies:
            - accessScope:
                type: "cluster"
              policyARN: "arn:aws:eks::aws:cluster-access-policy/AmazonEKSClusterAdminPolicy"
          # Hub-cluster ArgoCD controller role gets cluster-admin on the spoke.
          principalARN: "arn:aws:iam::${schema.spec.managementAccountId}:role/hub-cluster-argocd-controller"
          type: STANDARD
    - id: accessEntryAdmin
      template:
        apiVersion: eks.services.k8s.aws/v1alpha1
        kind: AccessEntry
        metadata:
          namespace: "${schema.spec.name}"
          name: "${schema.spec.name}-access-entry-admin"
          annotations:
            services.k8s.aws/region: ${schema.spec.region}
        spec:
          clusterName: "${schema.spec.name}"
          accessPolicies:
            - accessScope:
                type: "cluster"
              policyARN: "arn:aws:eks::aws:cluster-access-policy/AmazonEKSClusterAdminPolicy"
          principalARN: "arn:aws:iam::${schema.spec.accountId}:role/${schema.spec.adminRoleName}"
          type: STANDARD
    ###########################################################
    # External Secrets AddOn Pod Identity
    ###########################################################
    - id: externalSecretsRole
      template:
        apiVersion: iam.services.k8s.aws/v1alpha1
        kind: Role
        metadata:
          namespace: "${schema.spec.name}"
          name: "${schema.spec.name}-external-secrets-role"
          annotations:
            services.k8s.aws/region: ${schema.spec.region}
        spec:
          name: "${schema.spec.name}-external-secrets-role"
          policies:
            - arn:aws:iam::aws:policy/SecretsManagerReadWrite
          assumeRolePolicyDocument: |
            {
              "Version": "2012-10-17",
              "Statement": [
                {
                  "Effect": "Allow",
                  "Principal": {
                    "Service": "pods.eks.amazonaws.com"
                  },
                  "Action": [
                    "sts:AssumeRole",
                    "sts:TagSession"
                  ]
                }
              ]
            }
    - id: externalSecretsPodIdentityAssociation
      readyWhen:
        - ${externalSecretsPodIdentityAssociation.status.conditions.exists(x, x.type == 'ACK.ResourceSynced' && x.status == "True")}  # check on ACK condition
      template:
        apiVersion: eks.services.k8s.aws/v1alpha1
        kind: PodIdentityAssociation
        metadata:
          name: "${schema.spec.name}-external-secrets"
          namespace: "${schema.spec.name}"
          annotations:
            services.k8s.aws/region: ${schema.spec.region}
        spec:
          clusterName: "${schema.spec.name}"
          namespace: argocd
          roleARN: "${externalSecretsRole.status.ackResourceMetadata.arn}"
          serviceAccount: external-secrets-sa
          tags:
            environment: "${schema.spec.environment}"
            managedBy: ACK
            application: external-secrets