Commit 3a8fa3ee authored by malf

add argocd installation

parent c19719f0
---
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: crossplane-kind
  namespace: argocd
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  project: workloads-project
  source:
    repoURL: git@git.cccfr.de:noc/iac.git
    targetRevision: HEAD
    path: kind/crossplane-complete
  destination:
    server: https://kubernetes.default.svc
    namespace: crossplane-system
  syncPolicy:
    automated:
      prune: true
    syncOptions:
      - CreateNamespace=true
    retry:
      limit: 1
      backoff:
        duration: 5s
        factor: 2
        maxDuration: 1m
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: argocd-cm
  namespace: argocd
  labels:
    app.kubernetes.io/name: argocd-cm
    app.kubernetes.io/part-of: argocd
data:
  #
  # Check out this FAQ as to why this is needed.
  # https://argoproj.github.io/argo-cd/faq/#why-are-resources-of-type-sealedsecret-stuck-in-the-progressing-state
  #
  resource.customizations.health.bitnami.com_SealedSecret: |
    hs = {}
    hs.status = "Healthy"
    hs.message = "Controller doesn't report resource status"
    return hs
#
# Helm Repositories
# Non-standard Helm chart repositories have to be registered
# Each repository must have 'url', 'type' and 'name' fields
#
---
apiVersion: v1
kind: Secret
metadata:
  name: prometheus-helm-repo
  namespace: argocd
  labels:
    argocd.argoproj.io/secret-type: repository
stringData:
  name: prometheus-argocd
  url: https://prometheus-community.github.io/helm-charts
  type: helm
---
apiVersion: v1
kind: Secret
metadata:
  name: grafana-helm-repo
  namespace: argocd
  labels:
    argocd.argoproj.io/secret-type: repository
stringData:
  name: grafana-argocd
  url: https://grafana.github.io/helm-charts
  type: helm
---
apiVersion: v1
kind: Secret
metadata:
  name: crossplane-helm-repo
  namespace: argocd
  labels:
    argocd.argoproj.io/secret-type: repository
stringData:
  name: crossplane-argocd
  url: https://charts.crossplane.io/stable
  type: helm
#
# If you are connecting repositories via SSH, ArgoCD will need to know the SSH known hosts public key of the repository servers.
# This ConfigMap contains a single key/value pair, with ssh_known_hosts as the key and the actual public keys of the SSH servers as data.
# The public key(s) of each repository server ArgoCD will connect to via SSH must be configured, otherwise connections to the repository will fail. There is no fallback.
# The public key can be obtained from an existing ~/.ssh/known_hosts or using a CLI utility as follows: 'ssh-keyscan github.com'
#
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: argocd-ssh-known-hosts-cm
  namespace: argocd
  labels:
    app.kubernetes.io/name: argocd-cm
    app.kubernetes.io/part-of: argocd
data:
  ssh_known_hosts: |
    github.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==
#!/bin/bash
#
# Install ArgoCD
#
kubectl create namespace argocd
kubectl apply -n argocd -f argocd.yaml
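#
# Note: argocd.yaml is assumed here to be a local copy of the upstream install manifest;
# it can be refreshed from the Argo CD project, e.g.:
# curl -sSL -o argocd.yaml https://raw.githubusercontent.com/argoproj/argo-cd/stable/manifests/install.yaml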
#
# By default, the Argo CD API server is not exposed with an external IP; it is deployed behind a ClusterIP Service.
# The easiest way to expose it outside the cluster is to patch this Service to the 'LoadBalancer' type.
# Alternatively, an Ingress can be registered with an Ingress Controller so that an existing load balancer can be reused.
# Here the Service is patched to a NodePort type instead, matching the host port mappings of the kind cluster.
# Optionally, create a CNAME record pointing to the load balancer.
#
kubectl -n argocd patch svc argocd-server -p '{"spec": {"type": "NodePort", "ports": [{"name":"http","port":80,"protocol":"TCP","targetPort":8080, "nodePort": 30000},{"name":"https","port":443,"protocol":"TCP","targetPort":8080, "nodePort":30001}]}}'
# TODO: -> ingress
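#
# One possible shape for the ingress TODO above (a sketch only, assuming an ingress-nginx
# controller and a hypothetical hostname argocd.example.com; SSL passthrough lets
# argocd-server terminate TLS itself):
#
#kubectl apply -n argocd -f - <<'EOF'
#apiVersion: networking.k8s.io/v1
#kind: Ingress
#metadata:
#  name: argocd-server
#  annotations:
#    nginx.ingress.kubernetes.io/ssl-passthrough: "true"
#    nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
#spec:
#  ingressClassName: nginx
#  rules:
#    - host: argocd.example.com
#      http:
#        paths:
#          - path: /
#            pathType: Prefix
#            backend:
#              service:
#                name: argocd-server
#                port:
#                  number: 443
#EOF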
# get argocd binary
curl -sSL -o argocd https://github.com/argoproj/argo-cd/releases/latest/download/argocd-linux-amd64
sudo mv argocd /usr/local/bin/argocd
chmod +x /usr/local/bin/argocd
#
# Upon installation, a Secret named 'argocd-initial-admin-secret' is created which contains the base64-encoded password for the 'admin' account.
# The next step is to change the default password for the admin account.
# After that, it is best to delete the 'argocd-initial-admin-secret' Secret.
#
kubectl -n argocd get secret argocd-initial-admin-secret --template={{.data.password}} | base64 -d; echo
argocd login localhost:30000
argocd account update-password
kubectl -n argocd delete secret argocd-initial-admin-secret
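#
# Non-interactive variant of the login/password change above (a sketch, assuming the NodePort
# exposure above; --insecure is needed while argocd-server still presents its self-signed
# certificate, and NEW_PASSWORD is a placeholder):
#
#INITIAL_PW=$(kubectl -n argocd get secret argocd-initial-admin-secret --template={{.data.password}} | base64 -d)
#argocd login localhost:30000 --username admin --password "$INITIAL_PW" --insecure
#argocd account update-password --current-password "$INITIAL_PW" --new-password "$NEW_PASSWORD"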
#
# Apply general Argo CD configuration
#
kubectl apply -f argocd-setup-configmap.yaml
#
# ArgoCD will need to know the SSH known hosts public key of the repository servers.
# This information is provided using a ConfigMap named 'argocd-ssh-known-hosts-cm'
#
kubectl apply -f argocd-setup-ssh-known-hosts-configmap.yaml
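#
# The ConfigMap above currently only lists the github.com host key; since the Git repos in this
# setup live on git.cccfr.de, its host key must be added to the ssh_known_hosts entry as well,
# e.g. collected with:
#
#ssh-keyscan git.cccfr.de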
#
# Setup Git and Helm repositories
#
#kubectl apply -f argocd-setup-git-repositories.yaml
kubectl apply -f argocd-setup-helm-repositories.yaml
#
# Setup Argo CD projects
#
kubectl apply -f project-applications.yaml
kubectl apply -f project-workloads.yaml
#
# Deploy the Application that installs Crossplane and triggers the provisioning of an EKS cluster
#
#argocd app create --file application-crossplane.yaml
#
# Deploy the App of Apps to the management cluster which will trigger the deployment of applications to the workload cluster
# Prior to doing this, you will have to register the remote workload cluster per the instructions here:
# https://argo-cd.readthedocs.io/en/stable/getting_started/#5-register-a-cluster-to-deploy-apps-to-optional
#
#argocd app create --file application-apps.yaml
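#
# Registering the remote workload cluster is done with the CLI, roughly as follows
# (the context name is an assumption taken from the local kubeconfig):
#
#argocd cluster add <workload-cluster-context> --name workload-cluster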
apiVersion: pkg.crossplane.io/v1
kind: Provider
metadata:
  name: crossplane-provider-aws
spec:
  package: "public.ecr.aws/awsvijisarathy/crossplane-provider-aws:v0.17.0"
#!/bin/bash
#
# The Crossplane CLI extends kubectl with functionality to build, push, and install Crossplane packages
#
curl -sL https://raw.githubusercontent.com/crossplane/crossplane/release-1.0/install.sh | sh
sudo mv kubectl-crossplane /usr/local/bin
#
# Install Crossplane on your "management" cluster
#
kubectl create namespace crossplane-system
helm repo add crossplane-stable https://charts.crossplane.io/stable
helm repo update
#
# Install Crossplane core components using Helm chart
#
helm install crossplane --namespace crossplane-system crossplane-stable/crossplane --version 1.4.1
#
# Providers extend Crossplane with custom resources that can be used to declaratively configure a system.
# In order to provision a resource, a CRD needs to be registered in your Kubernetes cluster and its controller should be watching the Custom Resources those CRDs define.
# Crossplane provider packages contain many CRDs and their controllers.
# The 'provider-aws' package is the Crossplane infrastructure provider for AWS. This package contains the following:
# 1. Custom Resource Definitions (CRDs) that model AWS infrastructure and services (e.g. RDS, S3, EKS clusters, etc.). These are called 'managed resources'.
# 2. Controllers to provision these resources in AWS based on the user's desired state captured in the CRDs they create
# 3. Implementations of Crossplane's portable resource abstractions, enabling AWS resources to fulfill a user's general need for cloud services
#
# The core Crossplane controller can install provider controllers and CRDs for you through its own provider packaging mechanism, which is triggered by the application of a 'Provider' resource.
# In order to request installation of the provider-aws package, apply the 'aws-provider.yaml' resource to the cluster where Crossplane is running.
# Providers can be installed using the 'kubectl crossplane install provider' command as well.
# Check out documentation on installing providers: https://crossplane.io/docs/v1.3/concepts/providers.html
#
kubectl apply -f aws-provider.yaml
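#
# Equivalent CLI-based installation of the same package (alternative to applying aws-provider.yaml):
#
#kubectl crossplane install provider public.ecr.aws/awsvijisarathy/crossplane-provider-aws:v0.17.0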
#
# In order to authenticate with the external provider API (here AWS), the provider controllers need access to credentials,
# typically those of an IAM user.
# An AWS user with administrative privileges is needed to enable Crossplane to create the required resources.
# We will first have to create a configuration file, secrets.conf, with the credentials of an AWS account in the following format:
#
# [default]
# aws_access_key_id =ABCDEFGHIJ0123456789
# aws_secret_access_key = Ow3HUaP8BbqkV4dUrZr0H7yT5nGP5OPFcZJ+
#
# Then using that file, a Kubernetes Secret is created as follows
#
kubectl -n crossplane-system create secret generic aws-credentials --from-file=credentials=./secrets.conf
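#
# If the credentials are already configured for the AWS CLI, secrets.conf can be generated
# from the local profile instead (a sketch, assuming the 'default' profile):
#
#printf '[default]\naws_access_key_id = %s\naws_secret_access_key = %s\n' \
#  "$(aws configure get aws_access_key_id)" \
#  "$(aws configure get aws_secret_access_key)" > secrets.conf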
#
# Create a ProviderConfig resource, referencing the above Secret
#
kubectl apply -f aws-providerconfig.yaml
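#
# aws-providerconfig.yaml is not shown in this commit; for provider-aws it is expected to look
# roughly like this (the ProviderConfig name 'default' is an assumption):
#
#apiVersion: aws.crossplane.io/v1beta1
#kind: ProviderConfig
#metadata:
#  name: default
#spec:
#  credentials:
#    source: Secret
#    secretRef:
#      namespace: crossplane-system
#      name: aws-credentials
#      key: credentials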
#
# Crossplane goes beyond simply modelling infrastructure primitives as ‘managed resources’.
# Composition is a concept that allows platform builders to define new custom resources that are composed of managed resources, like an RDS instance
# Crossplane calls these “composite resources” (XRs).
# Composition can be used to build a catalogue of custom resources and classes of configuration that fit the needs and opinions of your organisation.
# Crossplane uses two special resources to define and configure these new composite resources:
# A CompositeResourceDefinition (XRD) defines a new kind of composite resource, including its schema. An XRD may optionally offer a claim (XRC).
# A Composition specifies which managed resources a composite resource will be composed of, and how they should be configured.
# You can create multiple Composition options for each composite resource.
# Check out the composition documentation: https://crossplane.io/docs/v1.3/concepts/composition.html
#
# CompositeResourceDefinitions (XRDs) and Compositions may be packaged and installed as a configuration.
# A configuration is a package of composition configuration that can easily be installed to Crossplane by creating a declarative 'Configuration' resource, or by using 'kubectl crossplane install configuration'.
# Check out the documentation on creating configuration: https://crossplane.io/docs/v1.3/getting-started/create-configuration.html
#
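#
# For orientation, a minimal XRD skeleton looks roughly like this (group and kind names here
# are purely illustrative, not the ones defined by the eks-configuration package below):
#
#apiVersion: apiextensions.crossplane.io/v1
#kind: CompositeResourceDefinition
#metadata:
#  name: xexamples.example.org
#spec:
#  group: example.org
#  names:
#    kind: XExample
#    plural: xexamples
#  claimNames:
#    kind: Example
#    plural: examples
#  versions:
#    - name: v1alpha1
#      served: true
#      referenceable: true
#      schema:
#        openAPIV3Schema:
#          type: object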
#
# Create a package for EKS cluster creation
# This package will help create a new VPC with 2 private/public subnets, IGW, NATGW and the EKS cluster with managed node group
# Push this package to a repository in an image registry
#
cd eks-configuration
kubectl crossplane build configuration
kubectl crossplane push configuration IMAGE_REPO:IMAGE_TAG
#
# Install the package to a cluster
# Use one of the following two options: either install the one from the public repo in ECR, or install the one you built above.
#
kubectl apply -f crossplane-eks-composition.yaml
kubectl crossplane install configuration IMAGE_REPO:IMAGE_TAG
#
# Check if the package and the XRDs defined in it were installed properly
#
kubectl get Configuration crossplane-eks-composition
kubectl get CompositeResourceDefinition eksclusters.eks.sarathy.io
#
# Create an EKS cluster and a nodegroup using an XR
#
kubectl apply -f eks-cluster-xr.yaml
#
# Here are a set of CLI commands to look at various resources
#
kubectl get crossplane # get all resources related to Crossplane.
kubectl get managed # get all resources that represent a unit of external infrastructure such as RDSInstance.
kubectl get composite # get all resources that represent an XR
#
# Cleanup
#
kubectl delete -f crossplane-eks-composition.yaml
kubectl delete -f aws-providerconfig.yaml
kubectl delete -f aws-provider.yaml
kubectl -n crossplane-system delete secret aws-credentials
helm uninstall crossplane --namespace crossplane-system
@@ -15,3 +15,11 @@ nodes:
  - containerPort: 443
    hostPort: 443
    protocol: TCP
  - containerPort: 30000
    hostPort: 30000
    listenAddress: "0.0.0.0" # Optional, defaults to "0.0.0.0"
    protocol: tcp # Optional, defaults to tcp
  - containerPort: 30001
    hostPort: 30001
    listenAddress: "0.0.0.0" # Optional, defaults to "0.0.0.0"
    protocol: tcp # Optional, defaults to tcp
#
# When using the app of apps deployment pattern for bootstrapping a cluster with a set of applications,
# a top-level parent Application resource is created, typically as a Helm chart.
# This chart comprises a set of templates, each of which is a child Application.
# The ArgoCD Application Controller responds only to Application resources deployed to the 'argocd' namespace.
# The 'applications-project' AppProject allows a non-admin user to deploy only Application resources in the 'argocd' namespace.
# Its counterpart 'workloads-project' is used for the actual workloads and allows the user to deploy other resources such as Deployments, DaemonSets, etc.
#
---
apiVersion: argoproj.io/v1alpha1
kind: AppProject
metadata:
  name: applications-project
  namespace: argocd
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  description: Project for parent applications used when employing the app of apps pattern
  sourceRepos:
    - git@git.cccfr.de:noc/iac.git
  #
  # Allow this project to deploy only to the 'argocd' namespace
  #
  destinations:
    - namespace: argocd
      server: https://kubernetes.default.svc
  #
  # Deny all namespace-scoped resources from being created, except for Application
  #
  namespaceResourceWhitelist:
    - group: 'argoproj.io'
      kind: Application
---
apiVersion: argoproj.io/v1alpha1
kind: AppProject
metadata:
  name: workloads-project
  namespace: argocd
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  description: Project for deploying Crossplane with ArgoCD
  sourceRepos:
    - git@git.cccfr.de:noc/iac.git
  #
  # List of namespaces/clusters into which Applications in this project can be deployed
  #
  destinations:
    - namespace: crossplane-system
      server: https://kubernetes.default.svc
    - namespace: kube-system
      server: https://kubernetes.default.svc
    - namespace: sealed-secrets
      server: https://kubernetes.default.svc
    - namespace: eks
      server: https://kubernetes.default.svc
  #
  # Deny all cluster-scoped resources from being created, except for the ones listed
  #
  clusterResourceWhitelist:
    - group: ''
      kind: Namespace
    - group: 'rbac.authorization.k8s.io'
      kind: ClusterRole
    - group: 'rbac.authorization.k8s.io'
      kind: ClusterRoleBinding
    - group: 'policy'
      kind: PodSecurityPolicy
    - group: 'apiextensions.k8s.io'
      kind: CustomResourceDefinition
    - group: 'pkg.crossplane.io'
      kind: Configuration
    - group: 'pkg.crossplane.io'
      kind: Lock
    - group: 'pkg.crossplane.io'
      kind: Provider
    - group: 'aws.crossplane.io'
      kind: ProviderConfig
    - group: 'eks.sarathy.io'
      kind: EKSCluster
    - group: 'eks.aws.crossplane.io'
      kind: NodeGroup
    - group: 'eks.aws.crossplane.io'
      kind: Cluster
  #
  # Allow all namespace-scoped resources to be created, except for ResourceQuota, LimitRange, NetworkPolicy
  #
  namespaceResourceBlacklist:
    - group: ''
      kind: ResourceQuota
    - group: ''
      kind: LimitRange
    - group: ''
      kind: NetworkPolicy
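#
# For reference, the parent "app of apps" Application (application-apps.yaml) referenced in the
# install script would roughly take this shape; its name and chart path are assumptions:
#
# apiVersion: argoproj.io/v1alpha1
# kind: Application
# metadata:
#   name: apps
#   namespace: argocd
# spec:
#   project: applications-project
#   source:
#     repoURL: git@git.cccfr.de:noc/iac.git
#     targetRevision: HEAD
#     path: <path-to-app-of-apps-chart>
#   destination:
#     server: https://kubernetes.default.svc
#     namespace: argocd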
[default]
aws_access_key_id =ABCDEFGHIJ0123456789
aws_secret_access_key = Ow3HUaP8BbqkV4dUrZr0H7yT5nGP5OPFcZJ+