Initial commit from kro/examples/aws/eks-cluster-mgmt

This commit is contained in:
2026-04-21 09:55:53 -03:00
parent 0585444299
commit 7d11fd5889
66 changed files with 3667 additions and 0 deletions
@@ -0,0 +1,23 @@
# Chart packaging ignore list (consumed by `helm package`).
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
@@ -0,0 +1,24 @@
# Chart metadata for the kro-pi-instance chart (renders kro PodIdentity instances).
apiVersion: v2
name: kro-pi-instance
description: A Helm chart for Kubernetes
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "1.16.0"
@@ -0,0 +1,62 @@
{{/*
Expand the name of the chart.
Precedence: .Values.nameOverride, then .Chart.Name; truncated to 63 chars.
*/}}
{{- define "kro-pi-instance.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
Precedence: .Values.fullnameOverride, then "<release>-<name>" (or just the release name).
*/}}
{{- define "kro-pi-instance.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
"+" is replaced with "_" because "+" is not a valid label-value character.
*/}}
{{- define "kro-pi-instance.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "kro-pi-instance.labels" -}}
helm.sh/chart: {{ include "kro-pi-instance.chart" . }}
{{ include "kro-pi-instance.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "kro-pi-instance.selectorLabels" -}}
app.kubernetes.io/name: {{ include "kro-pi-instance.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "kro-pi-instance.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "kro-pi-instance.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}
@@ -0,0 +1,63 @@
{{- $cluster := .Values.clusterName -}}
{{- $namespace := .Values.piNamespace -}}
{{- $name := .Values.name -}}
{{- $root := . -}}
{{- $serviceAccounts := .Values.serviceAccounts -}}
{{- $policyDocument := .Values.policyDocument -}}
{{/*
Renders one kro PodIdentity instance per entry in .Values.serviceAccounts,
inlining an IAM policy document assembled as JSON from .Values.policyDocument.
Fix: when a policy has more than one entry under "conditions", the rendered
Condition object previously lacked commas between its members, producing
invalid JSON; a $j-based separator now matches the pattern used for actions,
values and statements.
*/}}
{{- range $serviceAccounts }}
apiVersion: kro.run/v1alpha1
kind: PodIdentity
metadata:
  name: "{{ include "kro-pi-instance.name" $root }}-{{ . }}"
  annotations:
    argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
    argocd.argoproj.io/sync-wave: "-5"
spec:
  # NOTE(review): spec.name is the same for every service account in the range,
  # so downstream resources derived from it may collide when more than one
  # service account is listed — confirm intended behavior.
  name: {{ $name }}
  values:
    aws:
      clusterName: {{ $cluster }}
    policy:
      policyDocument: |
        {
          "Version": "2012-10-17",
          "Statement": [
          {{- range $index, $policy := $policyDocument }}
          {
            "Effect": "Allow",
            "Action": [
            {{- range $i, $action := $policy.actions }}
              "{{ $action }}"{{ if not (eq (add $i 1) (len $policy.actions)) }},{{ end }}
            {{- end }}
            ],
            "Resource": [
            {{- if $policy.customArn }}
              "{{ $policy.customArn }}"
            {{- else if eq $policy.resourceName "*" }}
              "*"
            {{- else }}
              "arn:aws:{{ $policy.resourceType }}:{{ $.Values.region }}:{{ $.Values.accountId }}:{{ $policy.resourceName }}"
            {{- end }}
            ]
            {{- if $policy.conditions }}
            ,"Condition": {
            {{- range $j, $condition := $policy.conditions }}
              "{{ $condition.test }}": {
                "{{ $condition.variable }}": [
                {{- range $k, $value := $condition.values }}
                  "{{ $value }}"{{ if not (eq (add $k 1) (len $condition.values)) }},{{ end }}
                {{- end }}
                ]
              }{{ if not (eq (add $j 1) (len $policy.conditions)) }},{{ end }}
            {{- end }}
            }
            {{- end }}
          }{{ if not (eq (add $index 1) (len $.Values.policyDocument)) }},{{ end }}
          {{- end }}
          ]
        }
    piAssociation:
      serviceAccount: {{ . }}
      piNamespace: {{ $namespace }}
---
{{- end}}
@@ -0,0 +1,12 @@
# region: eu-west-2
# name: myname
# serviceAccounts:
# - "test"
# - "test2"
# piNamespace: "default"
# clusterName: "spoke-workload2"
# policyDocument:
# - resourceType: ssm
# resourceName: "*"
# actions:
# - "ssm:DescribeParameters"
@@ -0,0 +1 @@
# TODO: rg that creates EFS file system (using ACK EFS controller) and corresponding StorageClass
@@ -0,0 +1,342 @@
# yamllint disable rule:line-length
---
# ResourceGraphDefinition for a single "basic" EKS cluster: IAM roles, the
# cluster itself, the pod-identity addon, access entries, the Argo CD spoke
# registration secret, and the external-secrets pod identity wiring.
apiVersion: kro.run/v1alpha1
kind: ResourceGraphDefinition
metadata:
  name: eksclusterbasic.kro.run
  annotations:
    argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
    argocd.argoproj.io/sync-wave: "-1"
spec:
  schema:
    apiVersion: v1alpha1
    kind: EksClusterBasic
    spec:
      name: string
      tenant: string
      environment: string
      region: string
      accountId: string
      managementAccountId: string
      k8sVersion: string
      adminRoleName: string
      fleetSecretManagerSecretNameSuffix: string
      domainName: string
      # Partition/DNS-suffix knobs for non-commercial partitions
      # (e.g. aws-cn, aws-us-gov).
      aws_partition: string | default="aws"
      aws_dns_suffix: string | default="amazonaws.com"
      network:
        vpcID: string
        subnets:
          controlplane:
            subnet1ID: string
            subnet2ID: string
          workers:
            subnet1ID: string
            subnet2ID: string
      workloads: string # Define if we want to deploy workloads application
      gitops:
        addonsRepoBasePath: string
        addonsRepoPath: string
        addonsRepoRevision: string
        addonsRepoUrl: string
        fleetRepoBasePath: string
        fleetRepoPath: string
        fleetRepoRevision: string
        fleetRepoUrl: string
      addons:
        enable_external_secrets: string
        external_secrets_namespace: string
        external_secrets_service_account: string
    status:
      # Surfaced from the ekscluster resource below.
      clusterARN: ${ekscluster.status.ackResourceMetadata.arn}
      cdata: ${ekscluster.status.certificateAuthority.data}
      endpoint: ${ekscluster.status.endpoint}
      clusterState: ${ekscluster.status.status}
  resources:
    ###########################################################
    # EKS Cluster
    ###########################################################
- id: clusterRole
template:
apiVersion: iam.services.k8s.aws/v1alpha1
kind: Role
metadata:
namespace: "${schema.spec.name}"
name: "${schema.spec.name}-cluster-role"
annotations:
services.k8s.aws/region: ${schema.spec.region}
spec:
name: "${schema.spec.name}-cluster-role"
policies:
- arn:aws:iam::aws:policy/AmazonEKSClusterPolicy
- arn:aws:iam::aws:policy/AmazonEKSComputePolicy
- arn:aws:iam::aws:policy/AmazonEKSBlockStoragePolicy
- arn:aws:iam::aws:policy/AmazonEKSLoadBalancingPolicy
- arn:aws:iam::aws:policy/AmazonEKSNetworkingPolicy
assumeRolePolicyDocument: |
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Service": "eks.amazonaws.com"
},
"Action": [
"sts:AssumeRole",
"sts:TagSession"
]
}
]
}
- id: nodeRole
template:
apiVersion: iam.services.k8s.aws/v1alpha1
kind: Role
metadata:
namespace: "${schema.spec.name}"
name: "${schema.spec.name}-cluster-node-role"
annotations:
services.k8s.aws/region: ${schema.spec.region}
spec:
name: "${schema.spec.name}-cluster-node-role"
policies:
- arn:aws:iam::aws:policy/AmazonEKSWorkerNodeMinimalPolicy
- arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryPullOnly
- arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore
- arn:aws:iam::aws:policy/CloudWatchAgentServerPolicy
assumeRolePolicyDocument: |
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Service": "ec2.amazonaws.com"
},
"Action": [
"sts:AssumeRole",
"sts:TagSession"
]
}
]
}
    # https://aws-controllers-k8s.github.io/community/reference/eks/v1alpha1/cluster/
    # The EKS cluster itself; considered ready only once EKS reports ACTIVE.
    - id: ekscluster
      readyWhen:
        - ${ekscluster.status.status == "ACTIVE"}
      template:
        apiVersion: eks.services.k8s.aws/v1alpha1
        kind: Cluster
        metadata:
          namespace: "${schema.spec.name}"
          name: "${schema.spec.name}"
          # implicit dependencies with roles
          annotations:
            clusterRoleArn: "${clusterRole.status.ackResourceMetadata.arn}"
            nodeRoleArn: "${nodeRole.status.ackResourceMetadata.arn}"
            services.k8s.aws/region: ${schema.spec.region}
        spec:
          name: "${schema.spec.name}"
          roleARN: "${clusterRole.status.ackResourceMetadata.arn}"
          version: "${schema.spec.k8sVersion}"
          accessConfig:
            authenticationMode: "API_AND_CONFIG_MAP"
            bootstrapClusterCreatorAdminPermissions: true
          # Managed compute with the two built-in node pools.
          computeConfig:
            enabled: true
            nodeRoleARN: ${nodeRole.status.ackResourceMetadata.arn}
            nodePools:
              - system
              - general-purpose
          kubernetesNetworkConfig:
            ipFamily: ipv4
            elasticLoadBalancing:
              enabled: true
          # All five control-plane log types shipped to CloudWatch.
          logging:
            clusterLogging:
              - enabled: true
                types:
                  - api
                  - audit
                  - authenticator
                  - controllerManager
                  - scheduler
          storageConfig:
            blockStorage:
              enabled: true
          resourcesVPCConfig:
            endpointPrivateAccess: true
            endpointPublicAccess: true
            subnetIDs:
              - ${schema.spec.network.subnets.controlplane.subnet1ID}
              - ${schema.spec.network.subnets.controlplane.subnet2ID}
          zonalShiftConfig:
            enabled: true
          tags:
            kro-management: ${schema.spec.name}
            tenant: ${schema.spec.tenant}
            environment: ${schema.spec.environment}
    # Pod Identity agent addon, created once the cluster ARN resolves.
    - id: podIdentityAddon
      template:
        apiVersion: eks.services.k8s.aws/v1alpha1
        kind: Addon
        metadata:
          name: eks-pod-identity-agent
          namespace: "${schema.spec.name}"
          # implicit dependency: waits on the cluster via its ARN
          annotations:
            clusterArn: "${ekscluster.status.ackResourceMetadata.arn}"
            services.k8s.aws/region: ${schema.spec.region}
        spec:
          name: eks-pod-identity-agent
          # NOTE(review): pinned addon version — confirm compatibility when
          # bumping k8sVersion.
          addonVersion: v1.3.4-eksbuild.1
          clusterName: "${schema.spec.name}"
    ###########################################################
    # ArgoCD Integration
    ###########################################################
    # Argo CD cluster secret registering this cluster as a fleet "spoke";
    # labels drive ApplicationSet selectors, annotations feed the GitOps Bridge.
    - id: argocdSecret
      template:
        apiVersion: v1
        kind: Secret
        metadata:
          name: "${schema.spec.name}"
          namespace: argocd
          labels:
            argocd.argoproj.io/secret-type: cluster
            # Compatible fleet-management
            fleet_member: spoke
            tenant: "${schema.spec.tenant}"
            environment: "${schema.spec.environment}"
            aws_cluster_name: "${schema.spec.name}"
            workloads: "${schema.spec.workloads}"
            #using : useSelector: true for centralized mode
            enable_external_secrets: "${schema.spec.addons.enable_external_secrets}"
          annotations:
            # GitOps Bridge
            accountId: "${schema.spec.accountId}"
            aws_account_id: "${schema.spec.accountId}"
            region: "${schema.spec.region}"
            aws_region: "${schema.spec.region}"
            aws_central_region: "${schema.spec.region}" # used in fleet-management gitops
            oidcProvider: "${ekscluster.status.identity.oidc.issuer}"
            aws_cluster_name: "${schema.spec.name}"
            aws_vpc_id: "${schema.spec.network.vpcID}"
            # GitOps Configuration
            addons_repo_basepath: "${schema.spec.gitops.addonsRepoBasePath}"
            addons_repo_path: "${schema.spec.gitops.addonsRepoPath}"
            addons_repo_revision: "${schema.spec.gitops.addonsRepoRevision}"
            addons_repo_url: "${schema.spec.gitops.addonsRepoUrl}"
            fleet_repo_basepath: "${schema.spec.gitops.fleetRepoBasePath}"
            fleet_repo_path: "${schema.spec.gitops.fleetRepoPath}"
            fleet_repo_revision: "${schema.spec.gitops.fleetRepoRevision}"
            fleet_repo_url: "${schema.spec.gitops.fleetRepoUrl}"
            # Generic
            external_secrets_namespace: "${schema.spec.addons.external_secrets_namespace}"
            external_secrets_service_account: "${schema.spec.addons.external_secrets_service_account}"
            access_entry_arn: "${accessEntry.status.ackResourceMetadata.arn}"
        type: Opaque
        # TODO bug in KRO, it always see some drifts..
        stringData:
          name: "${schema.spec.name}"
          # NOTE(review): "server" carries the cluster ARN rather than an API URL —
          # presumably consumed by the fleet hub tooling; confirm against caller.
          server: "${ekscluster.status.ackResourceMetadata.arn}"
          project: "default"
- id: accessEntry
readyWhen:
- ${accessEntry.status.conditions.exists(x, x.type == 'ACK.ResourceSynced' && x.status == "True")} #check on ACK condition
template:
apiVersion: eks.services.k8s.aws/v1alpha1
kind: AccessEntry
metadata:
namespace: "${schema.spec.name}"
name: "${schema.spec.name}-access-entry"
annotations:
services.k8s.aws/region: ${schema.spec.region}
spec:
clusterName: "${schema.spec.name}"
accessPolicies:
- accessScope:
type: "cluster"
policyARN: "arn:aws:eks::aws:cluster-access-policy/AmazonEKSClusterAdminPolicy"
principalARN: "arn:aws:iam::${schema.spec.managementAccountId}:role/hub-cluster-argocd-controller"
type: STANDARD
- id: accessEntryAdmin
template:
apiVersion: eks.services.k8s.aws/v1alpha1
kind: AccessEntry
metadata:
namespace: "${schema.spec.name}"
name: "${schema.spec.name}-access-entry-admin"
annotations:
services.k8s.aws/region: ${schema.spec.region}
spec:
clusterName: "${schema.spec.name}"
accessPolicies:
- accessScope:
type: "cluster"
policyARN: "arn:aws:eks::aws:cluster-access-policy/AmazonEKSClusterAdminPolicy"
principalARN: "arn:aws:iam::${schema.spec.accountId}:role/${schema.spec.adminRoleName}"
type: STANDARD
###########################################################
# External Secrets AddOn Pod Identity
###########################################################
- id: externalSecretsRole
template:
apiVersion: iam.services.k8s.aws/v1alpha1
kind: Role
metadata:
namespace: "${schema.spec.name}"
name: "${schema.spec.name}-external-secrets-role"
annotations:
services.k8s.aws/region: ${schema.spec.region}
spec:
name: "${schema.spec.name}-external-secrets-role"
policies:
- arn:aws:iam::aws:policy/SecretsManagerReadWrite
assumeRolePolicyDocument: |
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Service": "pods.eks.amazonaws.com"
},
"Action": [
"sts:AssumeRole",
"sts:TagSession"
]
}
]
}
- id: externalSecretsPodIdentityAssociation
readyWhen:
- ${externalSecretsPodIdentityAssociation.status.conditions.exists(x, x.type == 'ACK.ResourceSynced' && x.status == "True")} #check on ACK condition
template:
apiVersion: eks.services.k8s.aws/v1alpha1
kind: PodIdentityAssociation
metadata:
name: "${schema.spec.name}-external-secrets"
namespace: "${schema.spec.name}"
annotations:
services.k8s.aws/region: ${schema.spec.region}
spec:
clusterName: "${schema.spec.name}"
namespace: argocd
roleARN: "${externalSecretsRole.status.ackResourceMetadata.arn}"
serviceAccount: external-secrets-sa
tags:
environment: "${schema.spec.environment}"
managedBy: ACK
application: external-secrets
+175
View File
@@ -0,0 +1,175 @@
# Top-level ResourceGraphDefinition: optionally creates a VPC and then an
# EksClusterBasic instance wired to either the new or a pre-existing VPC.
apiVersion: kro.run/v1alpha1
kind: ResourceGraphDefinition
metadata:
  name: ekscluster.kro.run
  annotations:
    argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
    argocd.argoproj.io/sync-wave: "0"
spec:
  schema:
    apiVersion: v1alpha1
    kind: EksCluster
    spec:
      name: string
      tenant: string | default="auto1"
      environment: string | default="staging"
      region: string | default="us-west-2"
      k8sVersion: string | default="1.34"
      accountId: string
      managementAccountId: string
      adminRoleName: string | default="Admin"
      fleetSecretManagerSecretNameSuffix: string | default="argocd-secret"
      domainName: string | default="cluster.example.com"
      vpc:
        # create=true provisions a VPC from the CIDRs below; create=false
        # expects the *Id fields to reference an existing VPC.
        create: boolean | default=true
        vpcCidr: string | default="10.0.0.0/16"
        publicSubnet1Cidr: string | default="10.0.1.0/24"
        publicSubnet2Cidr: string | default="10.0.2.0/24"
        privateSubnet1Cidr: string | default="10.0.11.0/24"
        privateSubnet2Cidr: string | default="10.0.12.0/24"
        vpcId: string | default=""
        publicSubnet1Id: string | default=""
        publicSubnet2Id: string | default=""
        privateSubnet1Id: string | default=""
        privateSubnet2Id: string | default=""
      workloads: string | default="false" # Define if we want to deploy workloads application
      gitops:
        addonsRepoBasePath: string | default="addons/"
        addonsRepoPath: string | default="bootstrap"
        addonsRepoRevision: string | default="main"
        addonsRepoUrl: string | default="https://github.com/allamand/eks-cluster-mgmt"
        fleetRepoBasePath: string | default="fleet/"
        fleetRepoPath: string | default="bootstrap"
        fleetRepoRevision: string | default="main"
        fleetRepoUrl: string | default="https://github.com/allamand/eks-cluster-mgmt"
      addons:
        enable_external_secrets: string | default="true"
        external_secrets_namespace: string | default="external-secrets"
        external_secrets_service_account: string | default="external-secrets-sa"
  resources:
    # VPC instance (only when vpc.create is true).
    - id: vpc
      includeWhen:
        - ${schema.spec.vpc.create}
      readyWhen:
        - ${vpc.status.conditions.exists(x, x.type == 'Ready' && x.status == "True")} # Check on kro conditions
      template:
        apiVersion: kro.run/v1alpha1
        kind: Vpc
        metadata:
          name: ${schema.spec.name}
          namespace: ${schema.spec.name}
          labels:
            app.kubernetes.io/instance: ${schema.spec.name}
          annotations:
            argocd.argoproj.io/tracking-id: clusters:kro.run/Vpc:${schema.spec.name}/${schema.spec.name}
        spec:
          name: ${schema.spec.name}
          region: ${schema.spec.region}
          cidr:
            vpcCidr: ${schema.spec.vpc.vpcCidr}
            publicSubnet1Cidr: ${schema.spec.vpc.publicSubnet1Cidr}
            publicSubnet2Cidr: ${schema.spec.vpc.publicSubnet2Cidr}
            privateSubnet1Cidr: ${schema.spec.vpc.privateSubnet1Cidr}
            privateSubnet2Cidr: ${schema.spec.vpc.privateSubnet2Cidr}
    # Cluster wired to the VPC created above (vpc.create == true).
    - id: eksWithVpc
      includeWhen:
        - ${schema.spec.vpc.create}
      readyWhen:
        - ${eksWithVpc.status.conditions.exists(x, x.type == 'Ready' && x.status == "True")} # Check on kro conditions
      template:
        apiVersion: kro.run/v1alpha1
        kind: EksClusterBasic
        metadata:
          name: ${schema.spec.name}
          namespace: ${schema.spec.name}
          labels:
            app.kubernetes.io/instance: ${schema.spec.name}
          annotations:
            argocd.argoproj.io/tracking-id: clusters:kro.run/EksCluster:${schema.spec.name}/${schema.spec.name}
        spec:
          name: ${schema.spec.name}
          tenant: ${schema.spec.tenant}
          environment: ${schema.spec.environment}
          region: ${schema.spec.region}
          accountId: ${schema.spec.accountId}
          managementAccountId: ${schema.spec.managementAccountId}
          k8sVersion: ${schema.spec.k8sVersion}
          adminRoleName: ${schema.spec.adminRoleName}
          fleetSecretManagerSecretNameSuffix: ${schema.spec.fleetSecretManagerSecretNameSuffix}
          domainName: ${schema.spec.domainName}
          network:
            vpcID: "${vpc.status.vpcID}"
            # Both control plane and workers use the private subnets.
            subnets:
              controlplane:
                subnet1ID: "${vpc.status.privateSubnet1ID}"
                subnet2ID: "${vpc.status.privateSubnet2ID}"
              workers:
                subnet1ID: "${vpc.status.privateSubnet1ID}"
                subnet2ID: "${vpc.status.privateSubnet2ID}"
          workloads: ${schema.spec.workloads}
          gitops:
            addonsRepoBasePath: ${schema.spec.gitops.addonsRepoBasePath}
            addonsRepoPath: ${schema.spec.gitops.addonsRepoPath}
            addonsRepoRevision: ${schema.spec.gitops.addonsRepoRevision}
            addonsRepoUrl: ${schema.spec.gitops.addonsRepoUrl}
            fleetRepoBasePath: ${schema.spec.gitops.fleetRepoBasePath}
            fleetRepoPath: ${schema.spec.gitops.fleetRepoPath}
            fleetRepoRevision: ${schema.spec.gitops.fleetRepoRevision}
            fleetRepoUrl: ${schema.spec.gitops.fleetRepoUrl}
          addons:
            enable_external_secrets: ${schema.spec.addons.enable_external_secrets}
            external_secrets_namespace: ${schema.spec.addons.external_secrets_namespace}
            external_secrets_service_account: ${schema.spec.addons.external_secrets_service_account}
    # Mirror of eksWithVpc for vpc.create == false: wires in the
    # caller-supplied VPC and subnet IDs instead of the created VPC's status.
    - id: eksExistingVpc
      includeWhen:
        - ${!schema.spec.vpc.create}
      readyWhen:
        - ${eksExistingVpc.status.conditions.exists(x, x.type == 'Ready' && x.status == "True")} # Check on kro conditions
      template:
        apiVersion: kro.run/v1alpha1
        kind: EksClusterBasic
        metadata:
          name: ${schema.spec.name}
          namespace: ${schema.spec.name}
          labels:
            app.kubernetes.io/instance: ${schema.spec.name}
          annotations:
            argocd.argoproj.io/tracking-id: clusters:kro.run/EksCluster:${schema.spec.name}/${schema.spec.name}
        spec:
          name: ${schema.spec.name}
          tenant: ${schema.spec.tenant}
          environment: ${schema.spec.environment}
          region: ${schema.spec.region}
          accountId: ${schema.spec.accountId}
          managementAccountId: ${schema.spec.managementAccountId}
          k8sVersion: ${schema.spec.k8sVersion}
          adminRoleName: ${schema.spec.adminRoleName}
          fleetSecretManagerSecretNameSuffix: ${schema.spec.fleetSecretManagerSecretNameSuffix}
          domainName: ${schema.spec.domainName}
          network:
            vpcID: "${schema.spec.vpc.vpcId}"
            subnets:
              controlplane:
                subnet1ID: "${schema.spec.vpc.privateSubnet1Id}"
                subnet2ID: "${schema.spec.vpc.privateSubnet2Id}"
              workers:
                subnet1ID: "${schema.spec.vpc.privateSubnet1Id}"
                subnet2ID: "${schema.spec.vpc.privateSubnet2Id}"
          workloads: ${schema.spec.workloads}
          gitops:
            addonsRepoBasePath: ${schema.spec.gitops.addonsRepoBasePath}
            addonsRepoPath: ${schema.spec.gitops.addonsRepoPath}
            addonsRepoRevision: ${schema.spec.gitops.addonsRepoRevision}
            addonsRepoUrl: ${schema.spec.gitops.addonsRepoUrl}
            fleetRepoBasePath: ${schema.spec.gitops.fleetRepoBasePath}
            fleetRepoPath: ${schema.spec.gitops.fleetRepoPath}
            fleetRepoRevision: ${schema.spec.gitops.fleetRepoRevision}
            fleetRepoUrl: ${schema.spec.gitops.fleetRepoUrl}
          addons:
            enable_external_secrets: ${schema.spec.addons.enable_external_secrets}
            external_secrets_namespace: ${schema.spec.addons.external_secrets_namespace}
            external_secrets_service_account: ${schema.spec.addons.external_secrets_service_account}
+247
View File
@@ -0,0 +1,247 @@
# ResourceGraphDefinition for a two-AZ VPC: IGW, two NAT gateways with EIPs,
# public/private route tables, and four subnets; IDs are exported via status.
apiVersion: kro.run/v1alpha1
kind: ResourceGraphDefinition
metadata:
  name: vpc.kro.run
  annotations:
    argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
    argocd.argoproj.io/sync-wave: "-1"
spec:
  schema:
    apiVersion: v1alpha1
    kind: Vpc
    spec:
      name: string
      region: string
      cidr:
        vpcCidr: string | default="10.0.0.0/16"
        publicSubnet1Cidr: string | default="10.0.1.0/24"
        publicSubnet2Cidr: string | default="10.0.2.0/24"
        privateSubnet1Cidr: string | default="10.0.11.0/24"
        privateSubnet2Cidr: string | default="10.0.12.0/24"
    status:
      # Exported so callers (e.g. ekscluster.kro.run) can wire subnets in.
      vpcID: ${vpc.status.vpcID}
      publicSubnet1ID: ${publicSubnet1.status.subnetID}
      publicSubnet2ID: ${publicSubnet2.status.subnetID}
      privateSubnet1ID: ${privateSubnet1.status.subnetID}
      privateSubnet2ID: ${privateSubnet2.status.subnetID}
  resources: # how to publish a field in the RG claim e.g. vpcID
    # The VPC itself.
    - id: vpc
      template:
        apiVersion: ec2.services.k8s.aws/v1alpha1
        kind: VPC
        metadata:
          namespace: ${schema.spec.name}
          name: ${schema.spec.name}-vpc
          annotations:
            services.k8s.aws/region: ${schema.spec.region}
        spec:
          cidrBlocks:
            - ${schema.spec.cidr.vpcCidr}
          enableDNSSupport: true
          enableDNSHostnames: true
          tags:
            - key: "Name"
              value: ${schema.spec.name}-vpc
    # Internet gateway attached to the VPC.
    # NOTE(review): uses the ACK "vpc" attachment field here while the route
    # tables below use "vpcID" — presumably both are valid ACK spellings; confirm.
    - id: internetGateway
      template:
        apiVersion: ec2.services.k8s.aws/v1alpha1
        kind: InternetGateway
        metadata:
          namespace: ${schema.spec.name}
          name: ${schema.spec.name}-igw
          annotations:
            services.k8s.aws/region: ${schema.spec.region}
        spec:
          vpc: ${vpc.status.vpcID}
          tags:
            - key: "Name"
              value: ${schema.spec.name}-igw
    # One NAT gateway per public subnet, each with its own EIP.
    - id: natGateway1
      template:
        apiVersion: ec2.services.k8s.aws/v1alpha1
        kind: NATGateway
        metadata:
          namespace: ${schema.spec.name}
          name: ${schema.spec.name}-nat-gateway1
          annotations:
            services.k8s.aws/region: ${schema.spec.region}
        spec:
          subnetID: ${publicSubnet1.status.subnetID}
          allocationID: ${eip1.status.allocationID}
          tags:
            - key: "Name"
              value: ${schema.spec.name}-nat-gateway1
    - id: natGateway2
      template:
        apiVersion: ec2.services.k8s.aws/v1alpha1
        kind: NATGateway
        metadata:
          namespace: ${schema.spec.name}
          name: ${schema.spec.name}-nat-gateway2
          annotations:
            services.k8s.aws/region: ${schema.spec.region}
        spec:
          subnetID: ${publicSubnet2.status.subnetID}
          allocationID: ${eip2.status.allocationID}
          tags:
            - key: "Name"
              value: ${schema.spec.name}-nat-gateway2
    # Elastic IPs consumed by the NAT gateways above.
    - id: eip1
      template:
        apiVersion: ec2.services.k8s.aws/v1alpha1
        kind: ElasticIPAddress
        metadata:
          namespace: ${schema.spec.name}
          name: ${schema.spec.name}-eip1
          annotations:
            services.k8s.aws/region: ${schema.spec.region}
        spec:
          tags:
            - key: "Name"
              value: ${schema.spec.name}-eip1
    - id: eip2
      template:
        apiVersion: ec2.services.k8s.aws/v1alpha1
        kind: ElasticIPAddress
        metadata:
          namespace: ${schema.spec.name}
          name: ${schema.spec.name}-eip2
          annotations:
            services.k8s.aws/region: ${schema.spec.region}
        spec:
          tags:
            - key: "Name"
              value: ${schema.spec.name}-eip2
    # Public route table: default route via the internet gateway.
    - id: publicRoutetable
      template:
        apiVersion: ec2.services.k8s.aws/v1alpha1
        kind: RouteTable
        metadata:
          namespace: ${schema.spec.name}
          name: ${schema.spec.name}-public-routetable
          annotations:
            services.k8s.aws/region: ${schema.spec.region}
        spec:
          vpcID: ${vpc.status.vpcID}
          routes:
            - destinationCIDRBlock: 0.0.0.0/0
              gatewayID: ${internetGateway.status.internetGatewayID}
          tags:
            - key: "Name"
              value: ${schema.spec.name}-public-routetable
    # Private route tables: one per AZ, default route via that AZ's NAT gateway.
    - id: privateRoutetable1
      template:
        apiVersion: ec2.services.k8s.aws/v1alpha1
        kind: RouteTable
        metadata:
          namespace: ${schema.spec.name}
          name: ${schema.spec.name}-private-routetable1
          annotations:
            services.k8s.aws/region: ${schema.spec.region}
        spec:
          vpcID: ${vpc.status.vpcID}
          routes:
            - destinationCIDRBlock: 0.0.0.0/0
              natGatewayID: ${natGateway1.status.natGatewayID}
          tags:
            - key: "Name"
              value: ${schema.spec.name}-private-routetable1
    - id: privateRoutetable2
      template:
        apiVersion: ec2.services.k8s.aws/v1alpha1
        kind: RouteTable
        metadata:
          namespace: ${schema.spec.name}
          name: ${schema.spec.name}-private-routetable2
          annotations:
            services.k8s.aws/region: ${schema.spec.region}
        spec:
          vpcID: ${vpc.status.vpcID}
          routes:
            - destinationCIDRBlock: 0.0.0.0/0
              natGatewayID: ${natGateway2.status.natGatewayID}
          tags:
            - key: "Name"
              value: ${schema.spec.name}-private-routetable2
    # Public subnets in AZs a/b; the kubernetes.io/role/elb tag marks them
    # for public load balancers.
    - id: publicSubnet1
      template:
        apiVersion: ec2.services.k8s.aws/v1alpha1
        kind: Subnet
        metadata:
          namespace: ${schema.spec.name}
          name: ${schema.spec.name}-public-subnet1
          annotations:
            services.k8s.aws/region: ${schema.spec.region}
        spec:
          availabilityZone: ${schema.spec.region}a
          cidrBlock: ${schema.spec.cidr.publicSubnet1Cidr}
          mapPublicIPOnLaunch: true
          vpcID: ${vpc.status.vpcID}
          routeTables:
            - ${publicRoutetable.status.routeTableID}
          tags:
            - key: "Name"
              value: ${schema.spec.name}-public-subnet1
            - key: kubernetes.io/role/elb
              value: '1'
    - id: publicSubnet2
      template:
        apiVersion: ec2.services.k8s.aws/v1alpha1
        kind: Subnet
        metadata:
          namespace: ${schema.spec.name}
          name: ${schema.spec.name}-public-subnet2
          annotations:
            services.k8s.aws/region: ${schema.spec.region}
        spec:
          availabilityZone: ${schema.spec.region}b
          cidrBlock: ${schema.spec.cidr.publicSubnet2Cidr}
          mapPublicIPOnLaunch: true
          vpcID: ${vpc.status.vpcID}
          routeTables:
            - ${publicRoutetable.status.routeTableID}
          tags:
            - key: "Name"
              value: ${schema.spec.name}-public-subnet2
            - key: kubernetes.io/role/elb
              value: '1'
    # Private subnets in AZs a/b; the kubernetes.io/role/internal-elb tag marks
    # them for internal load balancers.
    - id: privateSubnet1
      template:
        apiVersion: ec2.services.k8s.aws/v1alpha1
        kind: Subnet
        metadata:
          namespace: ${schema.spec.name}
          name: ${schema.spec.name}-private-subnet1
          annotations:
            services.k8s.aws/region: ${schema.spec.region}
        spec:
          availabilityZone: ${schema.spec.region}a
          cidrBlock: ${schema.spec.cidr.privateSubnet1Cidr}
          vpcID: ${vpc.status.vpcID}
          routeTables:
            - ${privateRoutetable1.status.routeTableID}
          tags:
            - key: "Name"
              value: ${schema.spec.name}-private-subnet1
            - key: kubernetes.io/role/internal-elb
              value: '1'
    - id: privateSubnet2
      template:
        apiVersion: ec2.services.k8s.aws/v1alpha1
        kind: Subnet
        metadata:
          namespace: ${schema.spec.name}
          name: ${schema.spec.name}-private-subnet2
          annotations:
            services.k8s.aws/region: ${schema.spec.region}
        spec:
          availabilityZone: ${schema.spec.region}b
          cidrBlock: ${schema.spec.cidr.privateSubnet2Cidr}
          vpcID: ${vpc.status.vpcID}
          routeTables:
            - ${privateRoutetable2.status.routeTableID}
          tags:
            - key: "Name"
              value: ${schema.spec.name}-private-subnet2
            - key: kubernetes.io/role/internal-elb
              value: '1'
@@ -0,0 +1 @@
# TODO: rgi for creating IAM role/policy, ServiceAccount, and EKS pod identity association
@@ -0,0 +1,80 @@
# Pod-identity graph: IAM policy + role and an EKS PodIdentityAssociation for
# one service account.
# Fixes for consistency with the sibling graphs in this repo:
# - kind was the deprecated "ResourceGroup"; renamed to ResourceGraphDefinition.
# - readyWhen checked conditions[0], which depends on condition ordering; use
#   the same exists(ACK.ResourceSynced) pattern as the other graphs.
# - added the SkipDryRunOnMissingResource sync-option the other graphs carry.
apiVersion: kro.run/v1alpha1
kind: ResourceGraphDefinition
metadata:
  name: podidentity.kro.run
  annotations:
    argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
    argocd.argoproj.io/sync-wave: "-5"
spec:
  schema:
    apiVersion: v1alpha1
    kind: PodIdentity
    spec:
      name: string | default="pod-identity"
      values:
        aws:
          clusterName: string
        policy:
          description: string | default="Test Description"
          path: string | default="/"
          # JSON policy document supplied by the caller (e.g. the Helm chart).
          policyDocument: string | default=""
        piAssociation:
          serviceAccount: string
          piNamespace: string
    status:
      policyStatus: ${podpolicy.status.conditions}
      roleStatus: ${podrole.status.conditions}
  resources:
    # Customer-managed IAM policy from the supplied document.
    - id: podpolicy
      readyWhen:
        - ${podpolicy.status.conditions.exists(x, x.type == 'ACK.ResourceSynced' && x.status == "True")}
      template:
        apiVersion: iam.services.k8s.aws/v1alpha1
        kind: Policy
        metadata:
          name: ${schema.spec.name}-pod-policy
        spec:
          name: ${schema.spec.name}-pod-policy
          description: ${schema.spec.values.policy.description}
          path: ${schema.spec.values.policy.path}
          policyDocument: ${schema.spec.values.policy.policyDocument}
    # Role trusted by EKS Pod Identity, attached to the policy above.
    - id: podrole
      readyWhen:
        - ${podrole.status.conditions.exists(x, x.type == 'ACK.ResourceSynced' && x.status == "True")}
      template:
        apiVersion: iam.services.k8s.aws/v1alpha1
        kind: Role
        metadata:
          name: ${schema.spec.name}-role
        spec:
          name: ${schema.spec.name}-role
          policies:
            - ${podpolicy.status.ackResourceMetadata.arn}
          assumeRolePolicyDocument: |
            {
              "Version": "2012-10-17",
              "Statement": [
                {
                  "Effect": "Allow",
                  "Principal": {
                    "Service": "pods.eks.amazonaws.com"
                  },
                  "Action": [
                    "sts:TagSession",
                    "sts:AssumeRole"
                  ]
                }
              ]
            }
    # Binds the role to the target service account/namespace on the cluster.
    - id: piAssociation
      readyWhen:
        - ${piAssociation.status.conditions.exists(x, x.type == 'ACK.ResourceSynced' && x.status == "True")}
      template:
        apiVersion: eks.services.k8s.aws/v1alpha1
        kind: PodIdentityAssociation
        metadata:
          name: ${schema.spec.name}-pod-association-${schema.spec.values.piAssociation.serviceAccount}
        spec:
          clusterName: ${schema.spec.values.aws.clusterName}
          roleARN: ${podrole.status.ackResourceMetadata.arn}
          serviceAccount: ${schema.spec.values.piAssociation.serviceAccount}
          namespace: ${schema.spec.values.piAssociation.piNamespace}