diff --git a/.envrc b/.envrc new file mode 100644 index 0000000..3bc14b2 --- /dev/null +++ b/.envrc @@ -0,0 +1 @@ +use flake ./build/# diff --git a/bitbucket-pipelines.yml b/bitbucket-pipelines.yml new file mode 100644 index 0000000..05bdaf9 --- /dev/null +++ b/bitbucket-pipelines.yml @@ -0,0 +1,137 @@ +definitions: + steps: + - step: &test + name: "Test" + runs-on: + - "linux" + - "self.hosted" + script: + - echo please create tests + + - step: &build + name: "Build" + runs-on: + - "nixrunner" + - "linux.shell" + - "self.hosted" + script: + - mkdir -p images + - (umask 077 ; echo -n $SSHKEY | base64 -d > ./id_rsa) + - nix build --print-build-logs ./build/#dockerImage + - cp $(readlink ./result) images/ + artifacts: + - images/* + + - step: &package-dev + name: "Package Chart for Dev" + runs-on: + - "nixrunner" + - "linux.shell" + - "self.hosted" + script: + - mkdir -p charts + - export VERSION="$BITBUCKET_BUILD_NUMBER" + - export REPOSITORY="$K8S_ARES_DEV_DOCKERREGISTRY_URL/$BITBUCKET_REPO_SLUG" + - nix-shell -p yq-go --run 'yq -i ".controllerManager.manager.image.tag = env(VERSION)" ./ops/chart/values.yaml' + - nix-shell -p yq-go --run 'yq -i ".controllerManager.manager.image.repository = env(REPOSITORY)" ./ops/chart/values.yaml' + - nix-shell -p kubernetes-helm --run 'helm repo add base "$K8S_ARES_DEV_CHARTMUSEUM_ENDPOINT" --username "$K8S_ARES_DEV_CHARTMUSEUM_USERNAME" --password "$K8S_ARES_DEV_CHARTMUSEUM_PASSWORD" --insecure-skip-tls-verify --force-update && helm dependency build ./ops/chart && helm package ./ops/chart -d result --app-version "$VERSION" --version "$VERSION"' + - cp -a ./result/. 
charts/ + artifacts: + - charts/* + + - step: &package-qa + name: "Package Chart for QA" + runs-on: + - "nixrunner" + - "linux.shell" + - "self.hosted" + script: + - mkdir -p charts + - export VERSION="$BITBUCKET_BUILD_NUMBER" + - export REPOSITORY="$K8S_ARES_QA_DOCKERREGISTRY_URL/$BITBUCKET_REPO_SLUG" + - nix-shell -p yq-go --run 'yq -i ".controllerManager.manager.image.tag = env(VERSION)" ./ops/chart/values.yaml' + - nix-shell -p yq-go --run 'yq -i ".controllerManager.manager.image.repository = env(REPOSITORY)" ./ops/chart/values.yaml' + - nix-shell -p kubernetes-helm --run 'helm repo add base "$K8S_ARES_QA_CHARTMUSEUM_ENDPOINT" --username "$K8S_ARES_QA_CHARTMUSEUM_USERNAME" --password "$K8S_ARES_QA_CHARTMUSEUM_PASSWORD" --insecure-skip-tls-verify --force-update && helm dependency build ./ops/chart && helm package ./ops/chart -d result --app-version "$VERSION" --version "$VERSION"' + - cp -a ./result/. charts/ + artifacts: + - charts/* + + - step: &publish-dev + name: "Publish Chart to Dev" + runs-on: + - "nixrunner" + - "linux.shell" + - "self.hosted" + deployment: dev + script: + - | + nix-shell -p cacert curl --run 'curl -k --fail --data-binary "@charts/$(ls charts | tee /dev/stderr | head -n 1)" -u "$K8S_ARES_DEV_CHARTMUSEUM_USERNAME:$K8S_ARES_DEV_CHARTMUSEUM_PASSWORD" "$K8S_ARES_DEV_CHARTMUSEUM_ENDPOINT/api/charts"' + + - step: &push-dev + name: "Push image to Dev" + image: topmanage/deployment-pipeline-image:28 + runs-on: + - "linux" + - "self.hosted" + # deployment: dev + script: + - | + set -euo pipefail + DOCKERREGISTRY_URL=$K8S_ARES_DEV_DOCKERREGISTRY_URL \ + DOCKERREGISTRY_CACERT=$K8S_ARES_DEV_DOCKERREGISTRY_CACERT \ + DOCKERREGISTRY_CLIENTCERT=$K8S_ARES_DEV_DOCKERREGISTRY_CLIENTCERT \ + DOCKERREGISTRY_CLIENTKEY=$K8S_ARES_DEV_DOCKERREGISTRY_CLIENTKEY \ + DOCKERREGISTRY_PASSWORD=$K8S_ARES_DEV_DOCKERREGISTRY_PASSWORD \ + ./build/push-image.sh + + - step: &publish-qa + name: "Publish Chart to QA" + runs-on: + - "nixrunner" + - "linux.shell" + - 
"self.hosted" + deployment: qa + script: + - | + nix-shell -p cacert curl --run 'curl -k --fail --data-binary "@charts/$(ls charts | tee /dev/stderr | head -n 1)" -u "$K8S_ARES_QA_CHARTMUSEUM_USERNAME:$K8S_ARES_QA_CHARTMUSEUM_PASSWORD" "$K8S_ARES_QA_CHARTMUSEUM_ENDPOINT/api/charts"' + + - step: &push-qa + name: "Push image to QA" + image: topmanage/deployment-pipeline-image:28 + runs-on: + - "linux" + - "self.hosted" + # deployment: qa + script: + - | + set -euo pipefail + DOCKERREGISTRY_URL=$K8S_ARES_QA_DOCKERREGISTRY_URL \ + DOCKERREGISTRY_CACERT=$K8S_ARES_QA_DOCKERREGISTRY_CACERT \ + DOCKERREGISTRY_CLIENTCERT=$K8S_ARES_QA_DOCKERREGISTRY_CLIENTCERT \ + DOCKERREGISTRY_CLIENTKEY=$K8S_ARES_QA_DOCKERREGISTRY_CLIENTKEY \ + DOCKERREGISTRY_PASSWORD=$K8S_ARES_QA_DOCKERREGISTRY_PASSWORD \ + ./build/push-image.sh + +pipelines: + default: + - parallel: + - step: *build + - step: *test + - step: *package-dev + - step: *push-dev + - step: *publish-dev + branches: + master: + - parallel: + - step: *build + - step: *test + - step: *package-qa + - step: *push-qa + - step: *publish-qa + tags: + release-*: + - parallel: + - step: *build + - step: *test + - step: *package-dev + - step: *push-dev diff --git a/build/flake.lock b/build/flake.lock new file mode 100644 index 0000000..22a3a81 --- /dev/null +++ b/build/flake.lock @@ -0,0 +1,75 @@ +{ + "nodes": { + "flake-utils": { + "inputs": { + "systems": "systems" + }, + "locked": { + "lastModified": 1701680307, + "narHash": "sha256-kAuep2h5ajznlPMD9rnQyffWG8EM/C73lejGofXvdM8=", + "owner": "numtide", + "repo": "flake-utils", + "rev": "4022d587cbbfd70fe950c1e2083a02621806a725", + "type": "github" + }, + "original": { + "owner": "numtide", + "repo": "flake-utils", + "type": "github" + } + }, + "nixpkgs": { + "locked": { + "lastModified": 1704842529, + "narHash": "sha256-OTeQA+F8d/Evad33JMfuXC89VMetQbsU4qcaePchGr4=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "eabe8d3eface69f5bb16c18f8662a702f50c20d5", + "type": "github" + }, + 
"original": { + "id": "nixpkgs", + "type": "indirect" + } + }, + "nixpkgs-unstable": { + "locked": { + "lastModified": 1704722960, + "narHash": "sha256-mKGJ3sPsT6//s+Knglai5YflJUF2DGj7Ai6Ynopz0kI=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "317484b1ead87b9c1b8ac5261a8d2dd748a0492d", + "type": "github" + }, + "original": { + "id": "nixpkgs", + "ref": "nixos-unstable", + "type": "indirect" + } + }, + "root": { + "inputs": { + "flake-utils": "flake-utils", + "nixpkgs": "nixpkgs", + "nixpkgs-unstable": "nixpkgs-unstable" + } + }, + "systems": { + "locked": { + "lastModified": 1681028828, + "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", + "owner": "nix-systems", + "repo": "default", + "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", + "type": "github" + }, + "original": { + "owner": "nix-systems", + "repo": "default", + "type": "github" + } + } + }, + "root": "root", + "version": 7 +} diff --git a/build/flake.nix b/build/flake.nix new file mode 100644 index 0000000..1a017af --- /dev/null +++ b/build/flake.nix @@ -0,0 +1,52 @@ +{ + description = "Zitadel K8s Operator"; + + inputs = { + flake-utils.url = "github:numtide/flake-utils"; + nixpkgs-unstable.url = "nixpkgs/nixos-unstable"; + }; + + outputs = { self, nixpkgs, nixpkgs-unstable, flake-utils }: + flake-utils.lib.eachDefaultSystem (system: + let + unstable = nixpkgs-unstable.legacyPackages.${system}; + pkgs = nixpkgs.legacyPackages.${system}; + package = unstable.buildGoModule { + pname = "zitadel-k8s-operator"; + version = "0.0.0"; + src = ../src; + doCheck = false; + vendorHash = "sha256-tELr2Idyk3g6LLwMlehO4dnsHkmBO9ltLmheabQz1QY="; + installPhase = '' + runHook preInstall + + mkdir -p $out/bin + dir="$GOPATH/bin" + [ -e "$dir" ] && cp -r $dir/cmd $out/manager + + runHook postInstall + ''; + }; + dockerPackage = pkgs.dockerTools.buildImage { + name = "zitadel-k8s-operator"; + fromImageName = "gcr.io/distroless/static"; + fromImageTag = "nonroot"; + copyToRoot = pkgs.buildEnv { + name 
= "operator"; + paths = [ package ]; + pathsToLink = [ "/" ]; + }; + config = { + Cmd = [ "/manager" ]; + WorkingDir = "/"; + User = "65532:65532"; + }; + }; + in with pkgs; { + packages.default = package; + packages.dockerImage = dockerPackage; + devShells.default = mkShell { + buildInputs = [ nixfmt unstable.gopls operator-sdk unstable.go ]; + }; + }); +} diff --git a/build/push-image.sh b/build/push-image.sh new file mode 100755 index 0000000..cbf89fb --- /dev/null +++ b/build/push-image.sh @@ -0,0 +1,63 @@ +#!/bin/bash +set -xeuo pipefail + +# Setup client certificate for docker registry login +mkdir -p /.docker +mkdir -p /etc/docker/certs.d/$DOCKERREGISTRY_URL +echo $DOCKERREGISTRY_CACERT +(umask 077 ; echo $DOCKERREGISTRY_CACERT | base64 -d > /.docker/ca.pem) +(umask 077 ; echo $DOCKERREGISTRY_CACERT | base64 -d > /etc/docker/certs.d/$DOCKERREGISTRY_URL/ca.crt) #Don't ask why this is needed twice. +(umask 077 ; echo $DOCKERREGISTRY_CLIENTCERT | base64 -d > /etc/docker/certs.d/$DOCKERREGISTRY_URL/client.cert) +(umask 077 ; echo $DOCKERREGISTRY_CLIENTKEY | base64 -d > /etc/docker/certs.d/$DOCKERREGISTRY_URL/client.key) + +docker --tls login -u $DOCKERREGISTRY_USER -p $DOCKERREGISTRY_PASSWORD $DOCKERREGISTRY_URL + +export DOCKER_HOST=$DOCKERDAEMON_ADDRESS #Setup docker to use a specific daemon + +BUILD_IMAGE_NAME=$(ls images | tee /dev/stderr | head -n 1) + +IMAGE_ID=$( + docker load --input "images/$BUILD_IMAGE_NAME" | + sed -nr 's/^Loaded image: (.*)$/\1/p' | + xargs -I{} docker image ls "{}" --format="{{.ID}}" | + tee /dev/stderr +) + +DOCKER_IMAGE_NAME=$DOCKERREGISTRY_URL/$BITBUCKET_REPO_SLUG +VERSION=$BITBUCKET_BUILD_NUMBER + +if [[ "${BITBUCKET_BRANCH:-""}" == "master" ]]; then + LATEST="latest" +else + unset LATEST +fi + +escapeTag(){ echo "${1//[^a-zA-Z0-9._\-]/-}"; } + +tagPush(){ + if [ -n "$1" ]; then + local tag=$(escapeTag "$1") + docker tag "$IMAGE_ID" "$DOCKER_IMAGE_NAME:$tag" && docker push "$DOCKER_IMAGE_NAME:$tag" + fi +} + +tagRemove(){ + if [ 
-n "$1" ]; then + local tag=$(escapeTag "$1") + docker rmi "$DOCKER_IMAGE_NAME:$tag" + fi +} + +set +u + +tagPush "$VERSION" +tagPush "$BITBUCKET_BRANCH" +tagPush "$BITBUCKET_TAG" +tagPush "$BITBUCKET_COMMIT" +tagPush "$LATEST" + +tagRemove "$VERSION" +tagRemove "$BITBUCKET_BRANCH" +tagRemove "$BITBUCKET_TAG" +tagRemove "$BITBUCKET_COMMIT" +tagRemove "$LATEST" diff --git a/ops/chart/.helmignore b/ops/chart/.helmignore new file mode 100644 index 0000000..0e8a0eb --- /dev/null +++ b/ops/chart/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/ops/chart/Chart.yaml b/ops/chart/Chart.yaml new file mode 100644 index 0000000..e35392a --- /dev/null +++ b/ops/chart/Chart.yaml @@ -0,0 +1,21 @@ +apiVersion: v2 +name: zitadel-k8s-operator +description: A Helm chart for Kubernetes +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 +# This is the version number of the application being deployed. 
This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: "0.1.0" diff --git a/ops/chart/crds/apiapp-crd.yaml b/ops/chart/crds/apiapp-crd.yaml new file mode 100644 index 0000000..0a4deb6 --- /dev/null +++ b/ops/chart/crds/apiapp-crd.yaml @@ -0,0 +1,176 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.11.1 + creationTimestamp: null + name: apiapps.zitadel.topmanage.com +spec: + group: zitadel.topmanage.com + names: + kind: APIApp + listKind: APIAppList + plural: apiapps + singular: apiapp + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: APIApp is the Schema for the apiapps API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: APIAppSpec defines the desired state of APIApp + properties: + authMethodType: + enum: + - API_AUTH_METHOD_TYPE_BASIC + - API_AUTH_METHOD_TYPE_PRIVATE_KEY_JWT + type: string + projectRef: + description: 'INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + Important: Run "make" to regenerate code after modifying this file' + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of + an entire object, this string should contain a valid JSON/Go + field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen + only to have some well-defined way of referencing a part of + an object. TODO: this design is not final and this field is + subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + x-kubernetes-map-type: atomic + required: + - authMethodType + - projectRef + type: object + status: + description: APIAppStatus defines the observed state of APIApp + properties: + appId: + default: "" + type: string + clientId: + default: "" + type: string + conditions: + description: 'INSERT ADDITIONAL STATUS FIELD - define observed state + of cluster Important: Run "make" to regenerate code after modifying + this file' + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. 
+ maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + keyId: + default: "" + type: string + required: + - appId + - clientId + - keyId + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/ops/chart/crds/machineuser-crd.yaml b/ops/chart/crds/machineuser-crd.yaml new file mode 100644 index 0000000..9212805 --- /dev/null +++ b/ops/chart/crds/machineuser-crd.yaml @@ -0,0 +1,224 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.11.1 + creationTimestamp: null + name: machineusers.zitadel.topmanage.com +spec: + group: zitadel.topmanage.com + names: + kind: MachineUser + listKind: MachineUserList + plural: machineusers + singular: machineuser + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: MachineUser is the Schema for the machineusers API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: MachineUserSpec defines the desired state of MachineUser + properties: + accessTokenType: + enum: + - ACCESS_TOKEN_TYPE_BEARER + - ACCESS_TOKEN_TYPE_JWT + type: string + organizationRef: + description: 'INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + Important: Run "make" to regenerate code after modifying this file' + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of + an entire object, this string should contain a valid JSON/Go + field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen + only to have some well-defined way of referencing a part of + an object. TODO: this design is not final and this field is + subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + x-kubernetes-map-type: atomic + userGrants: + items: + properties: + projectRef: + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead + of an entire object, this string should contain a valid + JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container + within a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that + triggered the event) or if no container name is specified + "spec.containers[2]" (container with index 2 in this pod). + This syntax is chosen only to have some well-defined way + of referencing a part of an object. TODO: this design + is not final and this field is subject to change in the + future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + x-kubernetes-map-type: atomic + roleKeys: + items: + type: string + type: array + required: + - projectRef + type: object + type: array + required: + - accessTokenType + - organizationRef + type: object + status: + description: MachineUserStatus defines the observed state of MachineUser + properties: + conditions: + description: 'INSERT ADDITIONAL STATUS FIELD - define observed state + of cluster Important: Run "make" to regenerate code after modifying + this file' + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. 
For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + keyId: + default: "" + type: string + patId: + default: "" + type: string + userId: + default: "" + type: string + required: + - keyId + - patId + - userId + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/ops/chart/crds/oidcapp-crd.yaml b/ops/chart/crds/oidcapp-crd.yaml new file mode 100644 index 0000000..fdfbe9d --- /dev/null +++ b/ops/chart/crds/oidcapp-crd.yaml @@ -0,0 +1,240 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.11.1 + creationTimestamp: null + name: oidcapps.zitadel.topmanage.com +spec: + group: zitadel.topmanage.com + names: + kind: OIDCApp + listKind: OIDCAppList + plural: oidcapps + singular: oidcapp + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: OIDCApp is the Schema for the oidcapps API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: OIDCAppSpec defines the desired state of OIDCApp + properties: + accessTokenRoleAssertion: + type: boolean + accessTokenType: + enum: + - OIDC_TOKEN_TYPE_BEARER + - OIDC_TOKEN_TYPE_JWT + type: string + additionalOrigins: + items: + type: string + type: array + appType: + enum: + - OIDC_APP_TYPE_WEB + - OIDC_APP_TYPE_USER_AGENT + - OIDC_APP_TYPE_NATIVE + type: string + authMethodType: + enum: + - OIDC_AUTH_METHOD_TYPE_BASIC + - OIDC_AUTH_METHOD_TYPE_POST + - OIDC_AUTH_METHOD_TYPE_NONE + - OIDC_AUTH_METHOD_TYPE_PRIVATE_KEY_JWT + type: string + clockSkew: + format: duration + type: string + devMode: + type: boolean + grantTypes: + items: + enum: + - OIDC_GRANT_TYPE_AUTHORIZATION_CODE + - OIDC_GRANT_TYPE_IMPLICIT + - OIDC_GRANT_TYPE_REFRESH_TOKEN + - OIDC_GRANT_TYPE_DEVICE_CODE + - OIDC_GRANT_TYPE_TOKEN_EXCHANGE + type: string + type: array + idTokenRoleAssertion: + type: boolean + idTokenUserinfoAssertion: + type: boolean + postLogoutRedirectUris: + items: + type: string + type: array + projectRef: + description: 'INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + Important: Run "make" to regenerate code after modifying this file' + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of + an entire object, this string should contain a valid JSON/Go + field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). 
This syntax is chosen + only to have some well-defined way of referencing a part of + an object. TODO: this design is not final and this field is + subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + x-kubernetes-map-type: atomic + redirectUris: + items: + type: string + type: array + responseTypes: + items: + enum: + - OIDC_RESPONSE_TYPE_CODE + - OIDC_RESPONSE_TYPE_ID_TOKEN + - OIDC_RESPONSE_TYPE_ID_TOKEN_TOKEN + type: string + type: array + skipNativeAppSuccessPage: + type: boolean + required: + - accessTokenRoleAssertion + - accessTokenType + - appType + - authMethodType + - clockSkew + - devMode + - grantTypes + - idTokenRoleAssertion + - idTokenUserinfoAssertion + - postLogoutRedirectUris + - projectRef + - redirectUris + - responseTypes + - skipNativeAppSuccessPage + type: object + status: + description: OIDCAppStatus defines the observed state of OIDCApp + properties: + appId: + default: "" + type: string + clientId: + default: "" + type: string + conditions: + description: 'INSERT ADDITIONAL STATUS FIELD - define observed state + of cluster Important: Run "make" to regenerate 
code after modifying + this file' + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. 
+ maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + required: + - appId + - clientId + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/ops/chart/crds/organization-crd.yaml b/ops/chart/crds/organization-crd.yaml new file mode 100644 index 0000000..32e6032 --- /dev/null +++ b/ops/chart/crds/organization-crd.yaml @@ -0,0 +1,183 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.11.1 + creationTimestamp: null + name: organizations.zitadel.topmanage.com +spec: + group: zitadel.topmanage.com + names: + kind: Organization + listKind: OrganizationList + plural: organizations + singular: organization + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: Organization is the Schema for the organizations API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: OrganizationSpec defines the desired state of Organization + properties: + organizationAdmin: + properties: + email: + type: string + firstName: + type: string + lastName: + type: string + userName: + type: string + required: + - email + - firstName + - lastName + - userName + type: object + zitadelClusterRef: + description: 'INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + Important: Run "make" to regenerate code after modifying this file' + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of + an entire object, this string should contain a valid JSON/Go + field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen + only to have some well-defined way of referencing a part of + an object. TODO: this design is not final and this field is + subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + x-kubernetes-map-type: atomic + required: + - organizationAdmin + - zitadelClusterRef + type: object + status: + description: OrganizationStatus defines the observed state of Organization + properties: + adminId: + default: "" + type: string + conditions: + description: 'INSERT ADDITIONAL STATUS FIELD - define observed state + of cluster Important: Run "make" to regenerate code after modifying + this file Conditions for the Database object.' + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. 
If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + orgId: + default: "" + type: string + required: + - adminId + - orgId + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/ops/chart/crds/project-crd.yaml b/ops/chart/crds/project-crd.yaml new file mode 100644 index 0000000..56ca4eb --- /dev/null +++ b/ops/chart/crds/project-crd.yaml @@ -0,0 +1,233 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.11.1 + creationTimestamp: null + name: projects.zitadel.topmanage.com +spec: + group: zitadel.topmanage.com + names: + kind: Project + listKind: ProjectList + plural: projects + singular: project + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: Project is the Schema for the projects API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ProjectSpec defines the desired state of Project + properties: + grants: + items: + properties: + organizationRef: + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead + of an entire object, this string should contain a valid + JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container + within a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that + triggered the event) or if no container name is specified + "spec.containers[2]" (container with index 2 in this pod). + This syntax is chosen only to have some well-defined way + of referencing a part of an object. TODO: this design + is not final and this field is subject to change in the + future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + x-kubernetes-map-type: atomic + roleKeys: + items: + type: string + type: array + required: + - organizationRef + - roleKeys + type: object + type: array + hasProjectCheck: + type: boolean + organizationRef: + description: 'INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + Important: Run "make" to regenerate code after modifying this file + https://zitadel.com/docs/apis/resources/mgmt/management-service-add-project' + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of + an entire object, this string should contain a valid JSON/Go + field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen + only to have some well-defined way of referencing a part of + an object. TODO: this design is not final and this field is + subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + x-kubernetes-map-type: atomic + projectRoleAssertion: + type: boolean + projectRoleCheck: + type: boolean + roles: + items: + properties: + displayName: + type: string + group: + type: string + key: + type: string + required: + - displayName + - group + - key + type: object + type: array + required: + - organizationRef + type: object + status: + description: ProjectStatus defines the observed state of Project + properties: + conditions: + description: 'INSERT ADDITIONAL STATUS FIELD - define observed state + of cluster Important: Run "make" to regenerate code after modifying + this file Conditions for the Database object.' + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. 
+ format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + projectId: + default: "" + type: string + required: + - projectId + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/ops/chart/crds/zitadelcluster-crd.yaml b/ops/chart/crds/zitadelcluster-crd.yaml new file mode 100644 index 0000000..3f89183 --- /dev/null +++ b/ops/chart/crds/zitadelcluster-crd.yaml @@ -0,0 +1,347 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.11.1 + creationTimestamp: null + name: zitadelclusters.zitadel.topmanage.com +spec: + group: zitadel.topmanage.com + names: + kind: ZitadelCluster + listKind: ZitadelClusterList + plural: zitadelclusters + singular: zitadelcluster + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: ZitadelCluster is the Schema for the zitadelclusters API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ZitadelClusterSpec defines the desired state of ZitadelCluster + properties: + crdbClusterRef: + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of + an entire object, this string should contain a valid JSON/Go + field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen + only to have some well-defined way of referencing a part of + an object. TODO: this design is not final and this field is + subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + x-kubernetes-map-type: atomic + domainSettings: + properties: + smtpSenderAddressMatchesInstanceDomain: + default: true + type: boolean + userLoginMustBeDomain: + default: true + type: boolean + validateOrgDomains: + default: true + type: boolean + required: + - smtpSenderAddressMatchesInstanceDomain + - userLoginMustBeDomain + - validateOrgDomains + type: object + externalPort: + default: 443 + format: int64 + type: integer + externalSecure: + default: true + type: boolean + firstOrgName: + default: DEFAULT + description: 'INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + Important: Run "make" to regenerate code after modifying this file' + type: string + host: + type: string + image: + properties: + name: + type: string + tag: + type: string + required: + - name + - tag + type: object + podAnnotations: + additionalProperties: + type: string + description: PodAnnotations to add to the Pods metadata. + type: object + purpose: + enum: + - demo + - trial + - staging + - productive + - testing + type: string + replicas: + default: 3 + format: int32 + type: integer + resources: + description: ResourceRequirements describes the compute resource requirements. + properties: + claims: + description: "Claims lists the names of resources, defined in + spec.resourceClaims, that are used by this container. \n This + is an alpha field and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable. It can only be set + for containers." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims + of the Pod where this field is used. It makes that resource + available inside a container. 
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources + allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + rootTLSSecret: + description: SecretReference represents a Secret Reference. It has + enough information to retrieve secret in any namespace + properties: + name: + description: name is unique within a namespace to reference a + secret resource. + type: string + namespace: + description: namespace defines the space within which the secret + name must be unique. + type: string + type: object + x-kubernetes-map-type: atomic + serviceAnnotations: + additionalProperties: + type: string + description: ServiceAnnotations to add to the service metadata. + type: object + smtpConfig: + properties: + host: + type: string + password: + properties: + secretRef: + description: SecretKeySelector selects a key of a Secret. + properties: + key: + description: The key of the secret to select from. 
Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + required: + - secretRef + type: object + replyToAddress: + type: string + senderAddress: + type: string + senderName: + type: string + tls: + default: true + type: boolean + user: + type: string + required: + - host + - senderAddress + - senderName + - tls + type: object + required: + - crdbClusterRef + - domainSettings + - externalPort + - externalSecure + - firstOrgName + - host + - image + - purpose + - resources + - rootTLSSecret + - smtpConfig + type: object + status: + description: ZitadelClusterStatus defines the observed state of ZitadelCluster + properties: + conditions: + description: 'INSERT ADDITIONAL STATUS FIELD - define observed state + of cluster Important: Run "make" to regenerate code after modifying + this file' + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. 
This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + defaultInstanceId: + default: "" + type: string + replicas: + default: 3 + format: int32 + type: integer + smtpProviderId: + default: "" + type: string + required: + - defaultInstanceId + - smtpProviderId + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/ops/chart/templates/_helpers.tpl b/ops/chart/templates/_helpers.tpl new file mode 100644 index 0000000..1880b9a --- /dev/null +++ b/ops/chart/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "zitadel-k8s-operator.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "zitadel-k8s-operator.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. 
+*/}} +{{- define "zitadel-k8s-operator.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "zitadel-k8s-operator.labels" -}} +helm.sh/chart: {{ include "zitadel-k8s-operator.chart" . }} +{{ include "zitadel-k8s-operator.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "zitadel-k8s-operator.selectorLabels" -}} +app.kubernetes.io/name: {{ include "zitadel-k8s-operator.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "zitadel-k8s-operator.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "zitadel-k8s-operator.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/ops/chart/templates/deployment.yaml b/ops/chart/templates/deployment.yaml new file mode 100644 index 0000000..b005ccd --- /dev/null +++ b/ops/chart/templates/deployment.yaml @@ -0,0 +1,85 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "zitadel-k8s-operator.fullname" . }}-controller-manager + labels: + app.kubernetes.io/component: manager + app.kubernetes.io/created-by: src + app.kubernetes.io/part-of: src + control-plane: controller-manager + {{- include "zitadel-k8s-operator.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.controllerManager.replicas }} + selector: + matchLabels: + control-plane: controller-manager + {{- include "zitadel-k8s-operator.selectorLabels" . | nindent 6 }} + template: + metadata: + labels: + control-plane: controller-manager + {{- include "zitadel-k8s-operator.selectorLabels" . 
| nindent 8 }} + annotations: + kubectl.kubernetes.io/default-container: manager + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/arch + operator: In + values: + - amd64 + - arm64 + - ppc64le + - s390x + - key: kubernetes.io/os + operator: In + values: + - linux + containers: + - args: {{- toYaml .Values.controllerManager.kubeRbacProxy.args | nindent 8 }} + env: + - name: KUBERNETES_CLUSTER_DOMAIN + value: {{ quote .Values.kubernetesClusterDomain }} + image: {{ .Values.controllerManager.kubeRbacProxy.image.repository }}:{{ .Values.controllerManager.kubeRbacProxy.image.tag + | default .Chart.AppVersion }} + name: kube-rbac-proxy + ports: + - containerPort: 8443 + name: https + protocol: TCP + resources: {{- toYaml .Values.controllerManager.kubeRbacProxy.resources | nindent + 10 }} + securityContext: {{- toYaml .Values.controllerManager.kubeRbacProxy.containerSecurityContext + | nindent 10 }} + - args: {{- toYaml .Values.controllerManager.manager.args | nindent 8 }} + command: + - /manager + env: + - name: KUBERNETES_CLUSTER_DOMAIN + value: {{ quote .Values.kubernetesClusterDomain }} + image: {{ .Values.controllerManager.manager.image.repository }}:{{ .Values.controllerManager.manager.image.tag + | default .Chart.AppVersion }} + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + name: manager + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + resources: {{- toYaml .Values.controllerManager.manager.resources | nindent 10 + }} + securityContext: {{- toYaml .Values.controllerManager.manager.containerSecurityContext + | nindent 10 }} + securityContext: + runAsNonRoot: true + serviceAccountName: {{ include "zitadel-k8s-operator.fullname" . 
}}-controller-manager + terminationGracePeriodSeconds: 10 \ No newline at end of file diff --git a/ops/chart/templates/leader-election-rbac.yaml b/ops/chart/templates/leader-election-rbac.yaml new file mode 100644 index 0000000..341e9e5 --- /dev/null +++ b/ops/chart/templates/leader-election-rbac.yaml @@ -0,0 +1,59 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "zitadel-k8s-operator.fullname" . }}-leader-election-role + labels: + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: src + app.kubernetes.io/part-of: src + {{- include "zitadel-k8s-operator.labels" . | nindent 4 }} +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "zitadel-k8s-operator.fullname" . }}-leader-election-rolebinding + labels: + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: src + app.kubernetes.io/part-of: src + {{- include "zitadel-k8s-operator.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: '{{ include "zitadel-k8s-operator.fullname" . }}-leader-election-role' +subjects: +- kind: ServiceAccount + name: '{{ include "zitadel-k8s-operator.fullname" . }}-controller-manager' + namespace: '{{ .Release.Namespace }}' \ No newline at end of file diff --git a/ops/chart/templates/manager-rbac.yaml b/ops/chart/templates/manager-rbac.yaml new file mode 100644 index 0000000..6e7066a --- /dev/null +++ b/ops/chart/templates/manager-rbac.yaml @@ -0,0 +1,346 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "zitadel-k8s-operator.fullname" . 
}}-manager-role + labels: + {{- include "zitadel-k8s-operator.labels" . | nindent 4 }} +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - get + - list + - patch + - watch +- apiGroups: + - "" + resources: + - endpoints + verbs: + - create + - get + - list + - patch + - watch +- apiGroups: + - "" + resources: + - endpoints/restricted + verbs: + - create + - get + - list + - patch + - watch +- apiGroups: + - "" + resources: + - events + verbs: + - create + - list + - patch + - watch +- apiGroups: + - "" + resources: + - pods + verbs: + - delete + - get +- apiGroups: + - "" + resources: + - secrets + verbs: + - create + - list + - patch + - watch +- apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - list + - patch + - watch +- apiGroups: + - "" + resources: + - services + verbs: + - create + - list + - patch + - watch +- apiGroups: + - apps + resources: + - deployments + verbs: + - create + - list + - patch + - watch +- apiGroups: + - batch + resources: + - jobs + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - certificates.k8s.io + resources: + - certificatesigningrequests + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - certificates.k8s.io + resources: + - certificatesigningrequests/approval + verbs: + - update +- apiGroups: + - certificates.k8s.io + resources: + - certificatesigningrequests/status + verbs: + - get + - patch + - update +- apiGroups: + - crdb.cockroachlabs.com + resources: + - crdbclusters + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - crdb.cockroachlabs.com + resources: + - crdbclusters/finalizers + verbs: + - update +- apiGroups: + - crdb.cockroachlabs.com + resources: + - crdbclusters/status + verbs: + - get + - patch + - update +- apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - create + - list + - patch + - watch +- apiGroups: + - 
rbac.authorization.k8s.io + resources: + - clusterrolebindings + - rolebindings + - roles + verbs: + - create + - list + - patch + - watch +- apiGroups: + - zitadel.topmanage.com + resources: + - apiapps + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - zitadel.topmanage.com + resources: + - apiapps/finalizers + verbs: + - update +- apiGroups: + - zitadel.topmanage.com + resources: + - apiapps/status + verbs: + - get + - patch + - update +- apiGroups: + - zitadel.topmanage.com + resources: + - machineusers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - zitadel.topmanage.com + resources: + - machineusers/finalizers + verbs: + - update +- apiGroups: + - zitadel.topmanage.com + resources: + - machineusers/status + verbs: + - get + - patch + - update +- apiGroups: + - zitadel.topmanage.com + resources: + - oidcapps + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - zitadel.topmanage.com + resources: + - oidcapps/finalizers + verbs: + - update +- apiGroups: + - zitadel.topmanage.com + resources: + - oidcapps/status + verbs: + - get + - patch + - update +- apiGroups: + - zitadel.topmanage.com + resources: + - organizations + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - zitadel.topmanage.com + resources: + - organizations/finalizers + verbs: + - update +- apiGroups: + - zitadel.topmanage.com + resources: + - organizations/status + verbs: + - get + - patch + - update +- apiGroups: + - zitadel.topmanage.com + resources: + - projects + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - zitadel.topmanage.com + resources: + - projects/finalizers + verbs: + - update +- apiGroups: + - zitadel.topmanage.com + resources: + - projects/status + verbs: + - get + - patch + - update +- apiGroups: + - zitadel.topmanage.com + resources: + - 
zitadelclusters + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - zitadel.topmanage.com + resources: + - zitadelclusters/finalizers + verbs: + - update +- apiGroups: + - zitadel.topmanage.com + resources: + - zitadelclusters/status + verbs: + - get + - patch + - update +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ include "zitadel-k8s-operator.fullname" . }}-manager-rolebinding + labels: + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: src + app.kubernetes.io/part-of: src + {{- include "zitadel-k8s-operator.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: '{{ include "zitadel-k8s-operator.fullname" . }}-manager-role' +subjects: +- kind: ServiceAccount + name: '{{ include "zitadel-k8s-operator.fullname" . }}-controller-manager' + namespace: '{{ .Release.Namespace }}' \ No newline at end of file diff --git a/ops/chart/templates/metrics-reader-rbac.yaml b/ops/chart/templates/metrics-reader-rbac.yaml new file mode 100644 index 0000000..e778633 --- /dev/null +++ b/ops/chart/templates/metrics-reader-rbac.yaml @@ -0,0 +1,14 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "zitadel-k8s-operator.fullname" . }}-metrics-reader + labels: + app.kubernetes.io/component: kube-rbac-proxy + app.kubernetes.io/created-by: src + app.kubernetes.io/part-of: src + {{- include "zitadel-k8s-operator.labels" . | nindent 4 }} +rules: +- nonResourceURLs: + - /metrics + verbs: + - get \ No newline at end of file diff --git a/ops/chart/templates/metrics-service.yaml b/ops/chart/templates/metrics-service.yaml new file mode 100644 index 0000000..537aa91 --- /dev/null +++ b/ops/chart/templates/metrics-service.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "zitadel-k8s-operator.fullname" . 
}}-controller-manager-metrics-service + labels: + app.kubernetes.io/component: kube-rbac-proxy + app.kubernetes.io/created-by: src + app.kubernetes.io/part-of: src + control-plane: controller-manager + {{- include "zitadel-k8s-operator.labels" . | nindent 4 }} +spec: + type: {{ .Values.metricsService.type }} + selector: + control-plane: controller-manager + {{- include "zitadel-k8s-operator.selectorLabels" . | nindent 4 }} + ports: + {{- .Values.metricsService.ports | toYaml | nindent 2 }} \ No newline at end of file diff --git a/ops/chart/templates/proxy-rbac.yaml b/ops/chart/templates/proxy-rbac.yaml new file mode 100644 index 0000000..c66bcc4 --- /dev/null +++ b/ops/chart/templates/proxy-rbac.yaml @@ -0,0 +1,40 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "zitadel-k8s-operator.fullname" . }}-proxy-role + labels: + app.kubernetes.io/component: kube-rbac-proxy + app.kubernetes.io/created-by: src + app.kubernetes.io/part-of: src + {{- include "zitadel-k8s-operator.labels" . | nindent 4 }} +rules: +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create +- apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ include "zitadel-k8s-operator.fullname" . }}-proxy-rolebinding + labels: + app.kubernetes.io/component: kube-rbac-proxy + app.kubernetes.io/created-by: src + app.kubernetes.io/part-of: src + {{- include "zitadel-k8s-operator.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: '{{ include "zitadel-k8s-operator.fullname" . }}-proxy-role' +subjects: +- kind: ServiceAccount + name: '{{ include "zitadel-k8s-operator.fullname" . 
}}-controller-manager' + namespace: '{{ .Release.Namespace }}' \ No newline at end of file diff --git a/ops/chart/templates/serviceaccount.yaml b/ops/chart/templates/serviceaccount.yaml new file mode 100644 index 0000000..9180f1e --- /dev/null +++ b/ops/chart/templates/serviceaccount.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "zitadel-k8s-operator.fullname" . }}-controller-manager + labels: + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: src + app.kubernetes.io/part-of: src + {{- include "zitadel-k8s-operator.labels" . | nindent 4 }} + annotations: + {{- toYaml .Values.controllerManager.serviceAccount.annotations | nindent 4 }} \ No newline at end of file diff --git a/ops/chart/values.yaml b/ops/chart/values.yaml new file mode 100644 index 0000000..5fb6bf1 --- /dev/null +++ b/ops/chart/values.yaml @@ -0,0 +1,53 @@ +controllerManager: + kubeRbacProxy: + args: + - --secure-listen-address=0.0.0.0:8443 + - --upstream=http://127.0.0.1:8080/ + - --logtostderr=true + - --v=0 + containerSecurityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + image: + repository: gcr.io/kubebuilder/kube-rbac-proxy + tag: v0.13.1 + resources: + limits: + cpu: 500m + memory: 128Mi + requests: + cpu: 5m + memory: 64Mi + manager: + args: + - --health-probe-bind-address=:8081 + - --metrics-bind-address=127.0.0.1:8080 + - --leader-elect + containerSecurityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + image: + repository: controller + tag: latest + resources: + limits: + cpu: 500m + memory: 128Mi + requests: + cpu: 10m + memory: 64Mi + replicas: 1 + serviceAccount: + annotations: {} +kubernetesClusterDomain: cluster.local +metricsService: + ports: + - name: https + port: 8443 + protocol: TCP + targetPort: https + type: ClusterIP diff --git a/src/.dockerignore b/src/.dockerignore new file mode 100644 index 0000000..0f04682 --- /dev/null +++ b/src/.dockerignore @@ -0,0 +1,4 
@@ +# More info: https://docs.docker.com/engine/reference/builder/#dockerignore-file +# Ignore build and test binaries. +bin/ +testbin/ diff --git a/src/.gitignore b/src/.gitignore new file mode 100644 index 0000000..e917e5c --- /dev/null +++ b/src/.gitignore @@ -0,0 +1,26 @@ + +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib +bin +testbin/* +Dockerfile.cross + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Kubernetes Generated files - skip generated files, except for vendored files + +!vendor/**/zz_generated.* + +# editor and IDE paraphernalia +.idea +*.swp +*.swo +*~ diff --git a/src/Dockerfile b/src/Dockerfile new file mode 100644 index 0000000..ef4cfaf --- /dev/null +++ b/src/Dockerfile @@ -0,0 +1,33 @@ +# Build the manager binary +FROM golang:1.19 as builder +ARG TARGETOS +ARG TARGETARCH + +WORKDIR /workspace +# Copy the Go Modules manifests +COPY go.mod go.mod +COPY go.sum go.sum +# cache deps before building and copying source so that we don't need to re-download as much +# and so that source changes don't invalidate our downloaded layer +RUN go mod download + +# Copy the go source +COPY cmd/main.go cmd/main.go +COPY api/ api/ +COPY internal/controller/ internal/controller/ + +# Build +# the GOARCH has not a default value to allow the binary be built according to the host where the command +# was called. For example, if we call make docker-build in a local env which has the Apple Silicon M1 SO +# the docker BUILDPLATFORM arg will be linux/arm64 when for Apple x86 it will be linux/amd64. Therefore, +# by leaving it empty we can ensure that the container and binary shipped on it will have the same platform. 
+RUN CGO_ENABLED=0 GOOS=${TARGETOS:-linux} GOARCH=${TARGETARCH} go build -a -o manager cmd/main.go + +# Use distroless as minimal base image to package the manager binary +# Refer to https://github.com/GoogleContainerTools/distroless for more details +FROM gcr.io/distroless/static:nonroot +WORKDIR / +COPY --from=builder /workspace/manager . +USER 65532:65532 + +ENTRYPOINT ["/manager"] diff --git a/src/Makefile b/src/Makefile new file mode 100644 index 0000000..ebb3ebd --- /dev/null +++ b/src/Makefile @@ -0,0 +1,291 @@ +# VERSION defines the project version for the bundle. +# Update this value when you upgrade the version of your project. +# To re-generate a bundle for another specific version without changing the standard setup, you can: +# - use the VERSION as arg of the bundle target (e.g make bundle VERSION=0.0.2) +# - use environment variables to overwrite this value (e.g export VERSION=0.0.2) +VERSION ?= 0.0.1 + +# CHANNELS define the bundle channels used in the bundle. +# Add a new line here if you would like to change its default config. (E.g CHANNELS = "candidate,fast,stable") +# To re-generate a bundle for other specific channels without changing the standard setup, you can: +# - use the CHANNELS as arg of the bundle target (e.g make bundle CHANNELS=candidate,fast,stable) +# - use environment variables to overwrite this value (e.g export CHANNELS="candidate,fast,stable") +ifneq ($(origin CHANNELS), undefined) +BUNDLE_CHANNELS := --channels=$(CHANNELS) +endif + +# DEFAULT_CHANNEL defines the default channel used in the bundle. +# Add a new line here if you would like to change its default config. 
(E.g DEFAULT_CHANNEL = "stable") +# To re-generate a bundle for any other default channel without changing the default setup, you can: +# - use the DEFAULT_CHANNEL as arg of the bundle target (e.g make bundle DEFAULT_CHANNEL=stable) +# - use environment variables to overwrite this value (e.g export DEFAULT_CHANNEL="stable") +ifneq ($(origin DEFAULT_CHANNEL), undefined) +BUNDLE_DEFAULT_CHANNEL := --default-channel=$(DEFAULT_CHANNEL) +endif +BUNDLE_METADATA_OPTS ?= $(BUNDLE_CHANNELS) $(BUNDLE_DEFAULT_CHANNEL) + +# IMAGE_TAG_BASE defines the docker.io namespace and part of the image name for remote images. +# This variable is used to construct full image tags for bundle and catalog images. +# +# For example, running 'make bundle-build bundle-push catalog-build catalog-push' will build and push both +# topmanage.com/src-bundle:$VERSION and topmanage.com/src-catalog:$VERSION. +IMAGE_TAG_BASE ?= topmanage.com/src + +# BUNDLE_IMG defines the image:tag used for the bundle. +# You can use it as an arg. (E.g make bundle-build BUNDLE_IMG=/:) +BUNDLE_IMG ?= $(IMAGE_TAG_BASE)-bundle:v$(VERSION) + +# BUNDLE_GEN_FLAGS are the flags passed to the operator-sdk generate bundle command +BUNDLE_GEN_FLAGS ?= -q --overwrite --version $(VERSION) $(BUNDLE_METADATA_OPTS) + +# USE_IMAGE_DIGESTS defines if images are resolved via tags or digests +# You can enable this value if you would like to use SHA Based Digests +# To enable set flag to true +USE_IMAGE_DIGESTS ?= false +ifeq ($(USE_IMAGE_DIGESTS), true) + BUNDLE_GEN_FLAGS += --use-image-digests +endif + +# Set the Operator SDK version to use. By default, what is installed on the system is used. +# This is useful for CI or a project to utilize a specific version of the operator-sdk toolkit. +OPERATOR_SDK_VERSION ?= unknown + +# Image URL to use all building/pushing image targets +IMG ?= controller:latest +# ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by envtest binary. 
+ENVTEST_K8S_VERSION = 1.26.0 + +# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set) +ifeq (,$(shell go env GOBIN)) +GOBIN=$(shell go env GOPATH)/bin +else +GOBIN=$(shell go env GOBIN) +endif + +# Setting SHELL to bash allows bash commands to be executed by recipes. +# Options are set to exit when a recipe line exits non-zero or a piped command fails. +SHELL = /usr/bin/env bash -o pipefail +.SHELLFLAGS = -ec + +.PHONY: all +all: build + +##@ General + +# The help target prints out all targets with their descriptions organized +# beneath their categories. The categories are represented by '##@' and the +# target descriptions by '##'. The awk commands is responsible for reading the +# entire set of makefiles included in this invocation, looking for lines of the +# file as xyz: ## something, and then pretty-format the target and help. Then, +# if there's a line with ##@ something, that gets pretty-printed as a category. +# More info on the usage of ANSI control characters for terminal formatting: +# https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters +# More info on the awk command: +# http://linuxcommand.org/lc3_adv_awk.php + +.PHONY: help +help: ## Display this help. + @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) + +##@ Development + +.PHONY: manifests +manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects. + $(CONTROLLER_GEN) rbac:roleName=manager-role crd webhook paths="./..." output:crd:artifacts:config=config/crd/bases + +.PHONY: generate +generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations. + $(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..." + +.PHONY: fmt +fmt: ## Run go fmt against code. + go fmt ./... 
+ +.PHONY: vet +vet: ## Run go vet against code. + go vet ./... + +.PHONY: test +test: manifests generate fmt vet envtest ## Run tests. + KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" go test ./... -coverprofile cover.out + +##@ Build + +.PHONY: build +build: manifests generate fmt vet ## Build manager binary. + go build -o bin/manager cmd/main.go + +.PHONY: run +run: manifests generate fmt vet ## Run a controller from your host. + go run ./cmd/main.go + +# If you wish built the manager image targeting other platforms you can use the --platform flag. +# (i.e. docker build --platform linux/arm64 ). However, you must enable docker buildKit for it. +# More info: https://docs.docker.com/develop/develop-images/build_enhancements/ +.PHONY: docker-build +docker-build: test ## Build docker image with the manager. + docker build -t ${IMG} . + +.PHONY: docker-push +docker-push: ## Push docker image with the manager. + docker push ${IMG} + +# PLATFORMS defines the target platforms for the manager image be build to provide support to multiple +# architectures. (i.e. make docker-buildx IMG=myregistry/mypoperator:0.0.1). To use this option you need to: +# - able to use docker buildx . More info: https://docs.docker.com/build/buildx/ +# - have enable BuildKit, More info: https://docs.docker.com/develop/develop-images/build_enhancements/ +# - be able to push the image for your registry (i.e. if you do not inform a valid value via IMG=> then the export will fail) +# To properly provided solutions that supports more than one platform you should use this option. 
+PLATFORMS ?= linux/arm64,linux/amd64,linux/s390x,linux/ppc64le +.PHONY: docker-buildx +docker-buildx: test ## Build and push docker image for the manager for cross-platform support + # copy existing Dockerfile and insert --platform=${BUILDPLATFORM} into Dockerfile.cross, and preserve the original Dockerfile + sed -e '1 s/\(^FROM\)/FROM --platform=\$$\{BUILDPLATFORM\}/; t' -e ' 1,// s//FROM --platform=\$$\{BUILDPLATFORM\}/' Dockerfile > Dockerfile.cross + - docker buildx create --name project-v3-builder + docker buildx use project-v3-builder + - docker buildx build --push --platform=$(PLATFORMS) --tag ${IMG} -f Dockerfile.cross . + - docker buildx rm project-v3-builder + rm Dockerfile.cross + +##@ Deployment + +ifndef ignore-not-found + ignore-not-found = false +endif + +.PHONY: install +install: manifests kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/config. + $(KUSTOMIZE) build config/crd | kubectl apply -f - + +.PHONY: uninstall +uninstall: manifests kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion. + $(KUSTOMIZE) build config/crd | kubectl delete --ignore-not-found=$(ignore-not-found) -f - + +.PHONY: deploy +deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config. + cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG} + $(KUSTOMIZE) build config/default | kubectl apply -f - + +.PHONY: undeploy +undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion. 
+ $(KUSTOMIZE) build config/default | kubectl delete --ignore-not-found=$(ignore-not-found) -f - + +##@ Build Dependencies + +## Location to install dependencies to +LOCALBIN ?= $(shell pwd)/bin +$(LOCALBIN): + mkdir -p $(LOCALBIN) + +## Tool Binaries +KUSTOMIZE ?= $(LOCALBIN)/kustomize +CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen +ENVTEST ?= $(LOCALBIN)/setup-envtest + +## Tool Versions +KUSTOMIZE_VERSION ?= v4.5.7 +CONTROLLER_TOOLS_VERSION ?= v0.11.1 + +KUSTOMIZE_INSTALL_SCRIPT ?= "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh" +.PHONY: kustomize +kustomize: $(KUSTOMIZE) ## Download kustomize locally if necessary. If wrong version is installed, it will be removed before downloading. +$(KUSTOMIZE): $(LOCALBIN) + @if test -x $(LOCALBIN)/kustomize && ! $(LOCALBIN)/kustomize version | grep -q $(KUSTOMIZE_VERSION); then \ + echo "$(LOCALBIN)/kustomize version is not expected $(KUSTOMIZE_VERSION). Removing it before installing."; \ + rm -rf $(LOCALBIN)/kustomize; \ + fi + test -s $(LOCALBIN)/kustomize || { curl -Ss $(KUSTOMIZE_INSTALL_SCRIPT) | bash -s -- $(subst v,,$(KUSTOMIZE_VERSION)) $(LOCALBIN); } + +.PHONY: controller-gen +controller-gen: $(CONTROLLER_GEN) ## Download controller-gen locally if necessary. If wrong version is installed, it will be overwritten. +$(CONTROLLER_GEN): $(LOCALBIN) + test -s $(LOCALBIN)/controller-gen && $(LOCALBIN)/controller-gen --version | grep -q $(CONTROLLER_TOOLS_VERSION) || \ + GOBIN=$(LOCALBIN) go install sigs.k8s.io/controller-tools/cmd/controller-gen@$(CONTROLLER_TOOLS_VERSION) + +.PHONY: envtest +envtest: $(ENVTEST) ## Download envtest-setup locally if necessary. +$(ENVTEST): $(LOCALBIN) + test -s $(LOCALBIN)/setup-envtest || GOBIN=$(LOCALBIN) go install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest + +.PHONY: operator-sdk +OPERATOR_SDK ?= $(LOCALBIN)/operator-sdk +operator-sdk: ## Download operator-sdk locally if necessary. 
+ifeq (,$(wildcard $(OPERATOR_SDK))) +ifeq (, $(shell which operator-sdk 2>/dev/null)) + @{ \ + set -e ;\ + mkdir -p $(dir $(OPERATOR_SDK)) ;\ + OS=$(shell go env GOOS) && ARCH=$(shell go env GOARCH) && \ + curl -sSLo $(OPERATOR_SDK) https://github.com/operator-framework/operator-sdk/releases/download/$(OPERATOR_SDK_VERSION)/operator-sdk_$${OS}_$${ARCH} ;\ + chmod +x $(OPERATOR_SDK) ;\ + } +else +OPERATOR_SDK = $(shell which operator-sdk) +endif +endif + +.PHONY: bundle +bundle: manifests kustomize operator-sdk ## Generate bundle manifests and metadata, then validate generated files. + $(OPERATOR_SDK) generate kustomize manifests -q + cd config/manager && $(KUSTOMIZE) edit set image controller=$(IMG) + $(KUSTOMIZE) build config/manifests | $(OPERATOR_SDK) generate bundle $(BUNDLE_GEN_FLAGS) + $(OPERATOR_SDK) bundle validate ./bundle + +.PHONY: bundle-build +bundle-build: ## Build the bundle image. + docker build -f bundle.Dockerfile -t $(BUNDLE_IMG) . + +.PHONY: bundle-push +bundle-push: ## Push the bundle image. + $(MAKE) docker-push IMG=$(BUNDLE_IMG) + +.PHONY: opm +OPM = ./bin/opm +opm: ## Download opm locally if necessary. +ifeq (,$(wildcard $(OPM))) +ifeq (,$(shell which opm 2>/dev/null)) + @{ \ + set -e ;\ + mkdir -p $(dir $(OPM)) ;\ + OS=$(shell go env GOOS) && ARCH=$(shell go env GOARCH) && \ + curl -sSLo $(OPM) https://github.com/operator-framework/operator-registry/releases/download/v1.23.0/$${OS}-$${ARCH}-opm ;\ + chmod +x $(OPM) ;\ + } +else +OPM = $(shell which opm) +endif +endif + +# A comma-separated list of bundle images (e.g. make catalog-build BUNDLE_IMGS=example.com/operator-bundle:v0.1.0,example.com/operator-bundle:v0.2.0). +# These images MUST exist in a registry and be pull-able. +BUNDLE_IMGS ?= $(BUNDLE_IMG) + +# The image tag given to the resulting catalog image (e.g. make catalog-build CATALOG_IMG=example.com/operator-catalog:v0.2.0). 
+CATALOG_IMG ?= $(IMAGE_TAG_BASE)-catalog:v$(VERSION) + +# Set CATALOG_BASE_IMG to an existing catalog image tag to add $BUNDLE_IMGS to that image. +ifneq ($(origin CATALOG_BASE_IMG), undefined) +FROM_INDEX_OPT := --from-index $(CATALOG_BASE_IMG) +endif + +# Build a catalog image by adding bundle images to an empty catalog using the operator package manager tool, 'opm'. +# This recipe invokes 'opm' in 'semver' bundle add mode. For more information on add modes, see: +# https://github.com/operator-framework/community-operators/blob/7f1438c/docs/packaging-operator.md#updating-your-existing-operator +.PHONY: catalog-build +catalog-build: opm ## Build a catalog image. + $(OPM) index add --container-tool docker --mode semver --tag $(CATALOG_IMG) --bundles $(BUNDLE_IMGS) $(FROM_INDEX_OPT) + +# Push the catalog image. +.PHONY: catalog-push +catalog-push: ## Push a catalog image. + $(MAKE) docker-push IMG=$(CATALOG_IMG) + +HELMIFY ?= $(LOCALBIN)/helmify + +.PHONY: helmify +helmify: $(HELMIFY) ## Download helmify locally if necessary. +$(HELMIFY): $(LOCALBIN) + test -s $(LOCALBIN)/helmify || GOBIN=$(LOCALBIN) go install github.com/arttor/helmify/cmd/helmify@latest + +helm: manifests kustomize helmify + $(KUSTOMIZE) build config/default | $(HELMIFY) -crd-dir zitadel-k8s-operator diff --git a/src/PROJECT b/src/PROJECT new file mode 100644 index 0000000..0edf841 --- /dev/null +++ b/src/PROJECT @@ -0,0 +1,68 @@ +# Code generated by tool. DO NOT EDIT. +# This file is used to track the info used to scaffold your project +# and allow the plugins properly work. 
+# More info: https://book.kubebuilder.io/reference/project-config.html +domain: topmanage.com +layout: +- go.kubebuilder.io/v4-alpha +plugins: + manifests.sdk.operatorframework.io/v2: {} + scorecard.sdk.operatorframework.io/v2: {} +projectName: src +repo: bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src +resources: +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: topmanage.com + group: zitadel + kind: ZitadelCluster + path: bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/api/v1alpha1 + version: v1alpha1 +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: topmanage.com + group: zitadel + kind: Organization + path: bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/api/v1alpha1 + version: v1alpha1 +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: topmanage.com + group: zitadel + kind: Project + path: bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/api/v1alpha1 + version: v1alpha1 +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: topmanage.com + group: zitadel + kind: OIDCApp + path: bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/api/v1alpha1 + version: v1alpha1 +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: topmanage.com + group: zitadel + kind: MachineUser + path: bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/api/v1alpha1 + version: v1alpha1 +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: topmanage.com + group: zitadel + kind: APIApp + path: bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/api/v1alpha1 + version: v1alpha1 +version: "3" diff --git a/src/README.md b/src/README.md new file mode 100644 index 0000000..59e68c3 --- /dev/null +++ b/src/README.md @@ -0,0 +1,94 @@ +# src +// TODO(user): Add simple overview of use/purpose + +## Description +// TODO(user): An in-depth 
paragraph about your project and overview of use + +## Getting Started +You’ll need a Kubernetes cluster to run against. You can use [KIND](https://sigs.k8s.io/kind) to get a local cluster for testing, or run against a remote cluster. +**Note:** Your controller will automatically use the current context in your kubeconfig file (i.e. whatever cluster `kubectl cluster-info` shows). + +### Running on the cluster +1. Install Instances of Custom Resources: + +```sh +kubectl apply -f config/samples/ +``` + +2. Build and push your image to the location specified by `IMG`: + +```sh +make docker-build docker-push IMG=/src:tag +``` + +3. Deploy the controller to the cluster with the image specified by `IMG`: + +```sh +make deploy IMG=/src:tag +``` + +### Uninstall CRDs +To delete the CRDs from the cluster: + +```sh +make uninstall +``` + +### Undeploy controller +UnDeploy the controller from the cluster: + +```sh +make undeploy +``` + +## Contributing +// TODO(user): Add detailed information on how you would like others to contribute to this project + +### How it works +This project aims to follow the Kubernetes [Operator pattern](https://kubernetes.io/docs/concepts/extend-kubernetes/operator/). + +It uses [Controllers](https://kubernetes.io/docs/concepts/architecture/controller/), +which provide a reconcile function responsible for synchronizing resources until the desired state is reached on the cluster. + +### Test It Out +1. Install the CRDs into the cluster: + +```sh +make install +``` + +2. 
Run your controller (this will run in the foreground, so switch to a new terminal if you want to leave it running): + +```sh +make run +``` + +**NOTE:** You can also run this in one step by running: `make install run` + +### Modifying the API definitions +If you are editing the API definitions, generate the manifests such as CRs or CRDs using: + +```sh +make manifests +``` + +**NOTE:** Run `make --help` for more information on all potential `make` targets + +More information can be found via the [Kubebuilder Documentation](https://book.kubebuilder.io/introduction.html) + +## License + +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + diff --git a/src/api/v1alpha1/apiapp_types.go b/src/api/v1alpha1/apiapp_types.go new file mode 100644 index 0000000..7b7641d --- /dev/null +++ b/src/api/v1alpha1/apiapp_types.go @@ -0,0 +1,134 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + "context" + "fmt" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// APIAppSpec defines the desired state of APIApp +type APIAppSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + ProjectRef ProjectRef `json:"projectRef"` + // +kubebuilder:validation:Enum=API_AUTH_METHOD_TYPE_BASIC;API_AUTH_METHOD_TYPE_PRIVATE_KEY_JWT + AuthMethodType string `json:"authMethodType"` +} + +// APIAppStatus defines the observed state of APIApp +type APIAppStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file + // +optional + // +operator-sdk:csv:customresourcedefinitions:type=status,xDescriptors={"urn:alm:descriptor:io.kubernetes.conditions"} + Conditions []metav1.Condition `json:"conditions,omitempty"` + // +kubebuilder:default="" + AppId string `json:"appId"` + // +kubebuilder:default="" + KeyId string `json:"keyId"` + // +kubebuilder:default="" + ClientId string `json:"clientId"` +} + +func (d *APIAppStatus) SetCondition(condition metav1.Condition) { + if d.Conditions == nil { + d.Conditions = make([]metav1.Condition, 0) + } + meta.SetStatusCondition(&d.Conditions, condition) +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status + +// APIApp is the Schema for the apiapps API +type APIApp struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec APIAppSpec `json:"spec,omitempty"` + Status APIAppStatus `json:"status,omitempty"` +} + +func (d *APIApp) IsBeingDeleted() bool { + return !d.DeletionTimestamp.IsZero() +} + +func (d *APIApp) IsReady() bool { + return 
meta.IsStatusConditionTrue(d.Status.Conditions, ConditionTypeReady) +} + +func (d *APIApp) ZitadelClusterRef(ctx context.Context, refresolver *RefResolver) (*ZitadelClusterRef, error) { + project, err := refresolver.ProjectRef(ctx, &d.Spec.ProjectRef, d.Namespace) + if err != nil { + return nil, err + } + if project.Status.ProjectId == "" { + return nil, fmt.Errorf("Project has not been created yet...") + } + org, err := refresolver.OrganizationRef(ctx, &project.Spec.OrganizationRef, d.Namespace) + if err != nil { + return nil, err + } + + if org.Status.OrgId == "" { + return nil, fmt.Errorf("Organization has not been created yet...") + } + ref, err := org.ZitadelClusterRef(ctx, refresolver) + if err != nil { + return nil, err + } + return ref, nil +} +func (d *APIApp) Organization(ctx context.Context, refresolver *RefResolver) (*Organization, error) { + project, err := refresolver.ProjectRef(ctx, &d.Spec.ProjectRef, d.Namespace) + if err != nil { + return nil, err + } + org, err := refresolver.OrganizationRef(ctx, &project.Spec.OrganizationRef, d.Namespace) + if err != nil { + return nil, err + } + return org, nil +} + +func (d *APIApp) Project(ctx context.Context, refresolver *RefResolver) (*Project, error) { + project, err := refresolver.ProjectRef(ctx, &d.Spec.ProjectRef, d.Namespace) + if err != nil { + return nil, err + } + return project, nil +} + +//+kubebuilder:object:root=true + +// APIAppList contains a list of APIApp +type APIAppList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []APIApp `json:"items"` +} + +func init() { + SchemeBuilder.Register(&APIApp{}, &APIAppList{}) +} diff --git a/src/api/v1alpha1/condition_types.go b/src/api/v1alpha1/condition_types.go new file mode 100644 index 0000000..a16473e --- /dev/null +++ b/src/api/v1alpha1/condition_types.go @@ -0,0 +1,32 @@ +package v1alpha1 + +const ( + ConditionTypeReady string = "Ready" + ConditionTypePATUpToDate string = "PATUpToDate" + + 
ConditionReasonRolesChanged string = "RolesChanged" +	ConditionReasonPATUpToDate  string = "UpToDate" + +	ConditionReasonDeploymentNotReady string = "DeploymentNotReady" +	ConditionReasonDeploymentReady    string = "DeploymentReady" +	ConditionReasonRestoreBackup      string = "RestoreBackup" + +	ConditionReasonRestoreNotComplete string = "RestoreNotComplete" +	ConditionReasonRestoreComplete    string = "RestoreComplete" + +	ConditionReasonJobComplete  string = "JobComplete" +	ConditionReasonJobSuspended string = "JobSuspended" +	ConditionReasonJobFailed    string = "JobFailed" +	ConditionReasonJobRunning   string = "JobRunning" + +	ConditionReasonCronJobScheduled string = "CronJobScheduled" +	ConditionReasonCronJobFailed    string = "CronJobFailed" +	ConditionReasonCronJobRunning   string = "CronJobRunning" +	ConditionReasonCronJobSuccess   string = "CronJobSuccess" + +	ConditionReasonConnectionFailed string = "ConnectionFailed" + +	ConditionReasonCreated string = "Created" +	ConditionReasonHealthy string = "Healthy" +	ConditionReasonFailed  string = "Failed" +) diff --git a/src/api/v1alpha1/groupversion_info.go b/src/api/v1alpha1/groupversion_info.go new file mode 100644 index 0000000..a865d67 --- /dev/null +++ b/src/api/v1alpha1/groupversion_info.go @@ -0,0 +1,36 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package v1alpha1 contains API Schema definitions for the zitadel v1alpha1 API group +// +kubebuilder:object:generate=true +// +groupName=zitadel.topmanage.com +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects + GroupVersion = schema.GroupVersion{Group: "zitadel.topmanage.com", Version: "v1alpha1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/src/api/v1alpha1/machineuser_types.go b/src/api/v1alpha1/machineuser_types.go new file mode 100644 index 0000000..a4bc10a --- /dev/null +++ b/src/api/v1alpha1/machineuser_types.go @@ -0,0 +1,131 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "context" + "fmt" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. 
+ +type UserGrant struct { + ProjectRef ProjectRef `json:"projectRef"` + RoleKeys []string `json:"roleKeys,omitempty"` +} + +// MachineUserSpec defines the desired state of MachineUser +type MachineUserSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + // +kubebuilder:validation:Required + // +operator-sdk:csv:customresourcedefinitions:type=spec + OrganizationRef OrganizationRef `json:"organizationRef" webhook:"inmutable"` + // +kubebuilder:validation:Enum=ACCESS_TOKEN_TYPE_BEARER;ACCESS_TOKEN_TYPE_JWT + AccessTokenType string `json:"accessTokenType"` + UserGrants []UserGrant `json:"userGrants,omitempty"` +} + +// MachineUserStatus defines the observed state of MachineUser +type MachineUserStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file + // +optional + // +operator-sdk:csv:customresourcedefinitions:type=status,xDescriptors={"urn:alm:descriptor:io.kubernetes.conditions"} + Conditions []metav1.Condition `json:"conditions,omitempty"` + // +kubebuilder:default="" + UserId string `json:"userId"` + // +kubebuilder:default="" + KeyId string `json:"keyId"` + // +kubebuilder:default="" + PATId string `json:"patId"` +} + +func (d *MachineUserStatus) SetCondition(condition metav1.Condition) { + if d.Conditions == nil { + d.Conditions = make([]metav1.Condition, 0) + } + meta.SetStatusCondition(&d.Conditions, condition) +} + +func (d *MachineUserStatus) GetConditionStatus(conditionType string) bool { + if d.Conditions == nil { + d.Conditions = make([]metav1.Condition, 0) + } + return meta.IsStatusConditionTrue(d.Conditions, conditionType) +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status + +// MachineUser is the Schema for the machineusers API +type MachineUser struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta 
`json:"metadata,omitempty"` + + Spec MachineUserSpec `json:"spec,omitempty"` + Status MachineUserStatus `json:"status,omitempty"` +} + +func (d *MachineUser) IsBeingDeleted() bool { + return !d.DeletionTimestamp.IsZero() +} + +func (d *MachineUser) IsReady() bool { + return meta.IsStatusConditionTrue(d.Status.Conditions, ConditionTypeReady) +} + +func (d *MachineUser) ZitadelClusterRef(ctx context.Context, refresolver *RefResolver) (*ZitadelClusterRef, error) { + org, err := refresolver.OrganizationRef(ctx, &d.Spec.OrganizationRef, d.Namespace) + if err != nil { + return nil, err + } + + if org.Status.OrgId == "" { + return nil, fmt.Errorf("Organization has not been created yet...") + } + ref, err := org.ZitadelClusterRef(ctx, refresolver) + if err != nil { + return nil, err + } + return ref, nil +} + +func (d *MachineUser) PatSecretName() string { + return d.Name + "-pat-secret" +} + +func (d *MachineUser) JWTSecretName() string { + return d.Name + "-machinekey-secret" +} + +//+kubebuilder:object:root=true + +// MachineUserList contains a list of MachineUser +type MachineUserList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []MachineUser `json:"items"` +} + +func init() { + SchemeBuilder.Register(&MachineUser{}, &MachineUserList{}) +} diff --git a/src/api/v1alpha1/oidcapp_types.go b/src/api/v1alpha1/oidcapp_types.go new file mode 100644 index 0000000..f5f57fa --- /dev/null +++ b/src/api/v1alpha1/oidcapp_types.go @@ -0,0 +1,161 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "context" + "fmt" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// +kubebuilder:validation:Enum=OIDC_RESPONSE_TYPE_CODE;OIDC_RESPONSE_TYPE_ID_TOKEN;OIDC_RESPONSE_TYPE_ID_TOKEN_TOKEN +type ResponseType string + +// +kubebuilder:validation:Enum=OIDC_GRANT_TYPE_AUTHORIZATION_CODE;OIDC_GRANT_TYPE_IMPLICIT;OIDC_GRANT_TYPE_REFRESH_TOKEN;OIDC_GRANT_TYPE_DEVICE_CODE;OIDC_GRANT_TYPE_TOKEN_EXCHANGE +type GrantType string + +// OIDCAppSpec defines the desired state of OIDCApp +type OIDCAppSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + ProjectRef ProjectRef `json:"projectRef"` + RedirectUris []string `json:"redirectUris"` + ResponseTypes []ResponseType `json:"responseTypes"` + GrantTypes []GrantType `json:"grantTypes"` + // +kubebuilder:validation:Enum=OIDC_APP_TYPE_WEB;OIDC_APP_TYPE_USER_AGENT;OIDC_APP_TYPE_NATIVE + AppType string `json:"appType"` + // +kubebuilder:validation:Enum=OIDC_AUTH_METHOD_TYPE_BASIC;OIDC_AUTH_METHOD_TYPE_POST;OIDC_AUTH_METHOD_TYPE_NONE;OIDC_AUTH_METHOD_TYPE_PRIVATE_KEY_JWT + AuthMethodType string `json:"authMethodType"` + PostLogoutRedirectUris []string `json:"postLogoutRedirectUris"` + DevMode bool `json:"devMode"` + // +kubebuilder:validation:Enum=OIDC_TOKEN_TYPE_BEARER;OIDC_TOKEN_TYPE_JWT + AccessTokenType string `json:"accessTokenType"` + AccessTokenRoleAssertion bool `json:"accessTokenRoleAssertion"` + IdTokenRoleAssertion bool `json:"idTokenRoleAssertion"` + IdTokenUserinfoAssertion bool `json:"idTokenUserinfoAssertion"` + // +kubebuilder:validation:Type=string + // 
+kubebuilder:validation:Format=duration + ClockSkew *metav1.Duration `json:"clockSkew"` + // +optional + AdditionalOrigins []string `json:"additionalOrigins"` + SkipNativeAppSuccessPage bool `json:"skipNativeAppSuccessPage"` +} + +// OIDCAppStatus defines the observed state of OIDCApp +type OIDCAppStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file + // +optional + // +operator-sdk:csv:customresourcedefinitions:type=status,xDescriptors={"urn:alm:descriptor:io.kubernetes.conditions"} + Conditions []metav1.Condition `json:"conditions,omitempty"` + // +kubebuilder:default="" + AppId string `json:"appId"` + // +kubebuilder:default="" + ClientId string `json:"clientId"` +} + +func (d *OIDCAppStatus) SetCondition(condition metav1.Condition) { + if d.Conditions == nil { + d.Conditions = make([]metav1.Condition, 0) + } + meta.SetStatusCondition(&d.Conditions, condition) +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status + +// OIDCApp is the Schema for the oidcapps API +type OIDCApp struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec OIDCAppSpec `json:"spec,omitempty"` + Status OIDCAppStatus `json:"status,omitempty"` +} + +func (d *OIDCApp) IsBeingDeleted() bool { + return !d.DeletionTimestamp.IsZero() +} + +func (d *OIDCApp) IsReady() bool { + return meta.IsStatusConditionTrue(d.Status.Conditions, ConditionTypeReady) +} + +func (d *OIDCApp) ZitadelClusterRef(ctx context.Context, refresolver *RefResolver) (*ZitadelClusterRef, error) { + project, err := refresolver.ProjectRef(ctx, &d.Spec.ProjectRef, d.Namespace) + if err != nil { + return nil, err + } + + if project.Status.ProjectId == "" { + return nil, fmt.Errorf("Project has not been created yet...") + } + org, err := refresolver.OrganizationRef(ctx, &project.Spec.OrganizationRef, d.Namespace) + if err != nil { + return nil, err + } + + if 
org.Status.OrgId == "" { + return nil, fmt.Errorf("Organization has not been created yet...") + } + ref, err := org.ZitadelClusterRef(ctx, refresolver) + if err != nil { + return nil, err + } + return ref, nil +} +func (d *OIDCApp) Organization(ctx context.Context, refresolver *RefResolver) (*Organization, error) { + project, err := refresolver.ProjectRef(ctx, &d.Spec.ProjectRef, d.Namespace) + if err != nil { + return nil, err + } + org, err := refresolver.OrganizationRef(ctx, &project.Spec.OrganizationRef, d.Namespace) + if err != nil { + return nil, err + } + return org, nil +} + +func (d *OIDCApp) Project(ctx context.Context, refresolver *RefResolver) (*Project, error) { + project, err := refresolver.ProjectRef(ctx, &d.Spec.ProjectRef, d.Namespace) + if err != nil { + return nil, err + } + return project, nil +} + +func (d *OIDCApp) ClientSecretName() string { + return d.Name + "-client-secret" +} + +//+kubebuilder:object:root=true + +// OIDCAppList contains a list of OIDCApp +type OIDCAppList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []OIDCApp `json:"items"` +} + +func init() { + SchemeBuilder.Register(&OIDCApp{}, &OIDCAppList{}) +} diff --git a/src/api/v1alpha1/organization_types.go b/src/api/v1alpha1/organization_types.go new file mode 100644 index 0000000..d3009d3 --- /dev/null +++ b/src/api/v1alpha1/organization_types.go @@ -0,0 +1,102 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + "context" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +type OrganizationAdmin struct { + FirstName string `json:"firstName"` + LastName string `json:"lastName"` + Email string `json:"email"` + UserName string `json:"userName"` +} + +// OrganizationSpec defines the desired state of Organization +type OrganizationSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + // +kubebuilder:validation:Required + // +operator-sdk:csv:customresourcedefinitions:type=spec + ZitadelClusterRef ZitadelClusterRef `json:"zitadelClusterRef" webhook:"inmutable"` + OrganizationAdmin OrganizationAdmin `json:"organizationAdmin"` +} + +// OrganizationStatus defines the observed state of Organization +type OrganizationStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file + // Conditions for the Database object. 
+ // +optional + // +operator-sdk:csv:customresourcedefinitions:type=status,xDescriptors={"urn:alm:descriptor:io.kubernetes.conditions"} + Conditions []metav1.Condition `json:"conditions,omitempty"` + // +kubebuilder:default="" + OrgId string `json:"orgId"` + // +kubebuilder:default="" + AdminId string `json:"adminId"` +} + +func (d *OrganizationStatus) SetCondition(condition metav1.Condition) { + if d.Conditions == nil { + d.Conditions = make([]metav1.Condition, 0) + } + meta.SetStatusCondition(&d.Conditions, condition) +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status + +// Organization is the Schema for the organizations API +type Organization struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec OrganizationSpec `json:"spec,omitempty"` + Status OrganizationStatus `json:"status,omitempty"` +} + +func (d *Organization) IsBeingDeleted() bool { + return !d.DeletionTimestamp.IsZero() +} + +func (d *Organization) IsReady() bool { + return meta.IsStatusConditionTrue(d.Status.Conditions, ConditionTypeReady) +} + +func (d *Organization) ZitadelClusterRef(_ context.Context, _ *RefResolver) (*ZitadelClusterRef, error) { + return &d.Spec.ZitadelClusterRef, nil +} + +//+kubebuilder:object:root=true + +// OrganizationList contains a list of Organization +type OrganizationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Organization `json:"items"` +} + +func init() { + SchemeBuilder.Register(&Organization{}, &OrganizationList{}) +} diff --git a/src/api/v1alpha1/project_types.go b/src/api/v1alpha1/project_types.go new file mode 100644 index 0000000..a84dd7d --- /dev/null +++ b/src/api/v1alpha1/project_types.go @@ -0,0 +1,126 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "context" + "fmt" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +type Role struct { + Key string `json:"key"` + DisplayName string `json:"displayName"` + Group string `json:"group"` +} +type Grant struct { + OrganizationRef OrganizationRef `json:"organizationRef"` + RoleKeys []string `json:"roleKeys"` +} + +// ProjectSpec defines the desired state of Project +type ProjectSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + // https://zitadel.com/docs/apis/resources/mgmt/management-service-add-project + // +kubebuilder:validation:Required + // +operator-sdk:csv:customresourcedefinitions:type=spec + OrganizationRef OrganizationRef `json:"organizationRef"` + // +optional + Roles []Role `json:"roles"` + // +optional + Grants []Grant `json:"grants"` + // +optional + ProjectRoleAssertion bool `json:"projectRoleAssertion,omitempty"` + // +optional + ProjectRoleCheck bool `json:"projectRoleCheck,omitempty"` + // +optional + HasProjectCheck bool `json:"hasProjectCheck,omitempty"` +} + +// ProjectStatus defines the observed state of Project +type ProjectStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file + // Conditions for the 
Database object. + // +optional + // +operator-sdk:csv:customresourcedefinitions:type=status,xDescriptors={"urn:alm:descriptor:io.kubernetes.conditions"} + Conditions []metav1.Condition `json:"conditions,omitempty"` + // +kubebuilder:default="" + ProjectId string `json:"projectId"` +} + +func (d *ProjectStatus) SetCondition(condition metav1.Condition) { + if d.Conditions == nil { + d.Conditions = make([]metav1.Condition, 0) + } + meta.SetStatusCondition(&d.Conditions, condition) +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status + +// Project is the Schema for the projects API +type Project struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ProjectSpec `json:"spec,omitempty"` + Status ProjectStatus `json:"status,omitempty"` +} + +func (d *Project) IsBeingDeleted() bool { + return !d.DeletionTimestamp.IsZero() +} + +func (d *Project) IsReady() bool { + return meta.IsStatusConditionTrue(d.Status.Conditions, ConditionTypeReady) +} + +func (d *Project) ZitadelClusterRef(ctx context.Context, refresolver *RefResolver) (*ZitadelClusterRef, error) { + org, err := refresolver.OrganizationRef(ctx, &d.Spec.OrganizationRef, d.Namespace) + if err != nil { + return nil, err + } + if org.Status.OrgId == "" { + return nil, fmt.Errorf("Organization has not been created yet...") + } + + ref, err := org.ZitadelClusterRef(ctx, refresolver) + if err != nil { + return nil, err + } + return ref, nil +} + +//+kubebuilder:object:root=true + +// ProjectList contains a list of Project +type ProjectList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Project `json:"items"` +} + +func init() { + SchemeBuilder.Register(&Project{}, &ProjectList{}) +} diff --git a/src/api/v1alpha1/ref_types.go b/src/api/v1alpha1/ref_types.go new file mode 100644 index 0000000..1eedd2b --- /dev/null +++ b/src/api/v1alpha1/ref_types.go @@ -0,0 +1,40 @@ +package v1alpha1 + +import ( + corev1 
"k8s.io/api/core/v1" +) + +type OIDCAppRef struct { + // ObjectReference is a reference to a object. + // +kubebuilder:validation:Required + // +operator-sdk:csv:customresourcedefinitions:type=spec + corev1.ObjectReference `json:",inline"` +} + +type CrdbClusterRef struct { + // ObjectReference is a reference to a object. + // +kubebuilder:validation:Required + // +operator-sdk:csv:customresourcedefinitions:type=spec + corev1.ObjectReference `json:",inline"` +} + +type ZitadelClusterRef struct { + // ObjectReference is a reference to a object. + // +kubebuilder:validation:Required + // +operator-sdk:csv:customresourcedefinitions:type=spec + corev1.ObjectReference `json:",inline"` +} + +type OrganizationRef struct { + // ObjectReference is a reference to a object. + // +kubebuilder:validation:Required + // +operator-sdk:csv:customresourcedefinitions:type=spec + corev1.ObjectReference `json:",inline"` +} + +type ProjectRef struct { + // ObjectReference is a reference to a object. + // +kubebuilder:validation:Required + // +operator-sdk:csv:customresourcedefinitions:type=spec + corev1.ObjectReference `json:",inline"` +} diff --git a/src/api/v1alpha1/refresolver.go b/src/api/v1alpha1/refresolver.go new file mode 100644 index 0000000..5dfa87c --- /dev/null +++ b/src/api/v1alpha1/refresolver.go @@ -0,0 +1,144 @@ +package v1alpha1 + +import ( + "context" + "fmt" + crdbv1alpha1 "github.com/cockroachdb/cockroach-operator/apis/v1alpha1" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// +kubebuilder:object:generate=false +type RefResolver struct { + client client.Client +} + +func NewRefResolver(client client.Client) *RefResolver { + return &RefResolver{ + client: client, + } +} + +func (r *RefResolver) ZitadelCluster(ctx context.Context, ref *ZitadelClusterRef, + namespace string) (*ZitadelCluster, error) { + if ref.Kind != "" && ref.Kind != "ZitadelCluster" { + return nil, 
fmt.Errorf("Unsupported reference kind: '%s'", ref.Kind) + } + + key := types.NamespacedName{ + Name: ref.Name, + Namespace: namespace, + } + if ref.Namespace != "" { + key.Namespace = ref.Namespace + } + + var zitadel ZitadelCluster + if err := r.client.Get(ctx, key, &zitadel); err != nil { + return nil, err + } + return &zitadel, nil +} + +func (r *RefResolver) OIDCAppRef(ctx context.Context, ref *OIDCAppRef, + namespace string) (*OIDCApp, error) { + if ref.Kind != "" && ref.Kind != "OIDCApp" { + return nil, fmt.Errorf("Unsupported reference kind: '%s'", ref.Kind) + } + + key := types.NamespacedName{ + Name: ref.Name, + Namespace: namespace, + } + if ref.Namespace != "" { + key.Namespace = ref.Namespace + } + + var zitadel OIDCApp + if err := r.client.Get(ctx, key, &zitadel); err != nil { + return nil, err + } + return &zitadel, nil +} + +func (r *RefResolver) ProjectRef(ctx context.Context, ref *ProjectRef, + namespace string) (*Project, error) { + if ref.Kind != "" && ref.Kind != "Project" { + return nil, fmt.Errorf("Unsupported reference kind: '%s'", ref.Kind) + } + + key := types.NamespacedName{ + Name: ref.Name, + Namespace: namespace, + } + if ref.Namespace != "" { + key.Namespace = ref.Namespace + } + + var zitadel Project + if err := r.client.Get(ctx, key, &zitadel); err != nil { + return nil, err + } + return &zitadel, nil +} + +func (r *RefResolver) OrganizationRef(ctx context.Context, ref *OrganizationRef, + namespace string) (*Organization, error) { + if ref.Kind != "" && ref.Kind != "Organization" { + return nil, fmt.Errorf("Unsupported reference kind: '%s'", ref.Kind) + } + + key := types.NamespacedName{ + Name: ref.Name, + Namespace: namespace, + } + if ref.Namespace != "" { + key.Namespace = ref.Namespace + } + + var zitadel Organization + if err := r.client.Get(ctx, key, &zitadel); err != nil { + return nil, err + } + return &zitadel, nil +} + +func (r *RefResolver) CrdbClusterRef(ctx context.Context, ref *CrdbClusterRef, namespace string) 
(*crdbv1alpha1.CrdbCluster, error) { + if ref.Kind != "" && ref.Kind != "CrdbCluster" { + return nil, fmt.Errorf("Unsupported reference kind: '%s'", ref.Kind) + } + key := types.NamespacedName{ + Name: ref.Name, + Namespace: namespace, + } + if ref.Namespace != "" { + key.Namespace = ref.Namespace + } + + var crdb crdbv1alpha1.CrdbCluster + if err := r.client.Get(ctx, key, &crdb); err != nil { + return nil, err + } + return &crdb, nil +} + +func (r *RefResolver) SecretKeyRef(ctx context.Context, selector corev1.SecretKeySelector, + namespace string) (string, error) { + nn := types.NamespacedName{ + Name: selector.Name, + Namespace: namespace, + } + var secret v1.Secret + if err := r.client.Get(ctx, nn, &secret); err != nil { + return "", fmt.Errorf("error getting secret: %v", err) + } + + data, ok := secret.Data[selector.Key] + if !ok { + return "", fmt.Errorf("secret key \"%s\" not found", selector.Key) + } + + return string(data), nil +} diff --git a/src/api/v1alpha1/zitadelcluster_types.go b/src/api/v1alpha1/zitadelcluster_types.go new file mode 100644 index 0000000..c719b2f --- /dev/null +++ b/src/api/v1alpha1/zitadelcluster_types.go @@ -0,0 +1,151 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. 
Any new fields you add must have json tags for the fields to be serialized. + +type Image struct { + Name string `json:"name"` + Tag string `json:"tag"` +} + +type Password struct { + SecretKeyRef corev1.SecretKeySelector `json:"secretRef"` +} + +type SMTPConfig struct { + SenderAddress string `json:"senderAddress"` + SenderName string `json:"senderName"` + // +kubebuilder:default=true + TLS bool `json:"tls"` + Host string `json:"host"` + User *string `json:"user,omitempty"` + Password *Password `json:"password,omitempty"` + ReplyToAddress *string `json:"replyToAddress,omitempty"` +} + +type DomainSettings struct { + // +kubebuilder:default=true + UserLoginMustBeDomain bool `json:"userLoginMustBeDomain"` + // +kubebuilder:default=true + ValidateOrgDomains bool `json:"validateOrgDomains"` + // +kubebuilder:default=true + SMTPSenderAddressMatchesInstanceDomain bool `json:"smtpSenderAddressMatchesInstanceDomain"` +} + +// ZitadelClusterSpec defines the desired state of ZitadelCluster +type ZitadelClusterSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + // +kubebuilder:default="DEFAULT" + FirstOrgName string `json:"firstOrgName"` + DomainSettings DomainSettings `json:"domainSettings"` + SMTPConfig SMTPConfig `json:"smtpConfig"` + Host string `json:"host"` + // +kubebuilder:default=443 + ExternalPort int64 `json:"externalPort"` + // +kubebuilder:default=true + ExternalSecure bool `json:"externalSecure"` + Image Image `json:"image"` + Resources corev1.ResourceRequirements `json:"resources"` + CrdbClusterRef CrdbClusterRef `json:"crdbClusterRef"` + // +kubebuilder:validation:Enum=demo;trial;staging;productive;testing + Purpose string `json:"purpose"` + // PodAnnotations to add to the Pods metadata. 
+ // +optional + // +operator-sdk:csv:customresourcedefinitions:type=spec + PodAnnotations map[string]string `json:"podAnnotations,omitempty"` + // ServiceAnnotations to add to the service metadata. + // +optional + // +operator-sdk:csv:customresourcedefinitions:type=spec + ServiceAnnotations map[string]string `json:"serviceAnnotations,omitempty"` + // +kubebuilder:default=3 + // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors={"urn:alm:descriptor:com.tectonic.ui:podCount"} + Replicas int32 `json:"replicas,omitempty"` + RootTLSSecret corev1.SecretReference `json:"rootTLSSecret"` +} + +// ZitadelClusterStatus defines the observed state of ZitadelCluster +type ZitadelClusterStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file + Conditions []metav1.Condition `json:"conditions,omitempty"` + // +kubebuilder:default=3 + // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors={"urn:alm:descriptor:com.tectonic.ui:podCount"} + Replicas int32 `json:"replicas,omitempty"` + // +kubebuilder:default="" + DefaultInstanceId string `json:"defaultInstanceId"` + // +kubebuilder:default="" + SMTPProviderId string `json:"smtpProviderId"` +} + +// SetCondition sets a status condition +func (s *ZitadelClusterStatus) SetCondition(condition metav1.Condition) { + if s.Conditions == nil { + s.Conditions = make([]metav1.Condition, 0) + } + meta.SetStatusCondition(&s.Conditions, condition) +} + +func (s *ZitadelClusterStatus) IsReady() bool { + for _, c := range s.Conditions { + if c.Type == ConditionTypeReady && c.Status == metav1.ConditionTrue { + return true + } + } + return false +} + +func (s *ZitadelClusterStatus) FillWithDefaults(zitadel *ZitadelCluster) { + //(Haim ;^D ): No defaults yet +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status + +// ZitadelCluster is the Schema for the zitadelclusters API +type ZitadelCluster struct 
{ + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ZitadelClusterSpec `json:"spec,omitempty"` + Status ZitadelClusterStatus `json:"status,omitempty"` +} + +func (m *ZitadelCluster) SetDefaults() { + //(Haim ;^D ): No defaults yet +} + +//+kubebuilder:object:root=true + +// ZitadelClusterList contains a list of ZitadelCluster +type ZitadelClusterList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ZitadelCluster `json:"items"` +} + +func init() { + SchemeBuilder.Register(&ZitadelCluster{}, &ZitadelClusterList{}) +} diff --git a/src/api/v1alpha1/zz_generated.deepcopy.go b/src/api/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..4dcfe0e --- /dev/null +++ b/src/api/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,906 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *APIApp) DeepCopyInto(out *APIApp) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIApp. +func (in *APIApp) DeepCopy() *APIApp { + if in == nil { + return nil + } + out := new(APIApp) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *APIApp) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIAppList) DeepCopyInto(out *APIAppList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]APIApp, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIAppList. +func (in *APIAppList) DeepCopy() *APIAppList { + if in == nil { + return nil + } + out := new(APIAppList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *APIAppList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIAppSpec) DeepCopyInto(out *APIAppSpec) { + *out = *in + out.ProjectRef = in.ProjectRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIAppSpec. 
+func (in *APIAppSpec) DeepCopy() *APIAppSpec { + if in == nil { + return nil + } + out := new(APIAppSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIAppStatus) DeepCopyInto(out *APIAppStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIAppStatus. +func (in *APIAppStatus) DeepCopy() *APIAppStatus { + if in == nil { + return nil + } + out := new(APIAppStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CrdbClusterRef) DeepCopyInto(out *CrdbClusterRef) { + *out = *in + out.ObjectReference = in.ObjectReference +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrdbClusterRef. +func (in *CrdbClusterRef) DeepCopy() *CrdbClusterRef { + if in == nil { + return nil + } + out := new(CrdbClusterRef) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DomainSettings) DeepCopyInto(out *DomainSettings) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainSettings. +func (in *DomainSettings) DeepCopy() *DomainSettings { + if in == nil { + return nil + } + out := new(DomainSettings) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Grant) DeepCopyInto(out *Grant) { + *out = *in + out.OrganizationRef = in.OrganizationRef + if in.RoleKeys != nil { + in, out := &in.RoleKeys, &out.RoleKeys + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Grant. +func (in *Grant) DeepCopy() *Grant { + if in == nil { + return nil + } + out := new(Grant) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Image) DeepCopyInto(out *Image) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Image. +func (in *Image) DeepCopy() *Image { + if in == nil { + return nil + } + out := new(Image) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineUser) DeepCopyInto(out *MachineUser) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineUser. +func (in *MachineUser) DeepCopy() *MachineUser { + if in == nil { + return nil + } + out := new(MachineUser) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MachineUser) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MachineUserList) DeepCopyInto(out *MachineUserList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MachineUser, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineUserList. +func (in *MachineUserList) DeepCopy() *MachineUserList { + if in == nil { + return nil + } + out := new(MachineUserList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MachineUserList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineUserSpec) DeepCopyInto(out *MachineUserSpec) { + *out = *in + out.OrganizationRef = in.OrganizationRef + if in.UserGrants != nil { + in, out := &in.UserGrants, &out.UserGrants + *out = make([]UserGrant, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineUserSpec. +func (in *MachineUserSpec) DeepCopy() *MachineUserSpec { + if in == nil { + return nil + } + out := new(MachineUserSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MachineUserStatus) DeepCopyInto(out *MachineUserStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineUserStatus. +func (in *MachineUserStatus) DeepCopy() *MachineUserStatus { + if in == nil { + return nil + } + out := new(MachineUserStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OIDCApp) DeepCopyInto(out *OIDCApp) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OIDCApp. +func (in *OIDCApp) DeepCopy() *OIDCApp { + if in == nil { + return nil + } + out := new(OIDCApp) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OIDCApp) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OIDCAppList) DeepCopyInto(out *OIDCAppList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OIDCApp, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OIDCAppList. 
+func (in *OIDCAppList) DeepCopy() *OIDCAppList { + if in == nil { + return nil + } + out := new(OIDCAppList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OIDCAppList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OIDCAppRef) DeepCopyInto(out *OIDCAppRef) { + *out = *in + out.ObjectReference = in.ObjectReference +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OIDCAppRef. +func (in *OIDCAppRef) DeepCopy() *OIDCAppRef { + if in == nil { + return nil + } + out := new(OIDCAppRef) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OIDCAppSpec) DeepCopyInto(out *OIDCAppSpec) { + *out = *in + out.ProjectRef = in.ProjectRef + if in.RedirectUris != nil { + in, out := &in.RedirectUris, &out.RedirectUris + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ResponseTypes != nil { + in, out := &in.ResponseTypes, &out.ResponseTypes + *out = make([]ResponseType, len(*in)) + copy(*out, *in) + } + if in.GrantTypes != nil { + in, out := &in.GrantTypes, &out.GrantTypes + *out = make([]GrantType, len(*in)) + copy(*out, *in) + } + if in.PostLogoutRedirectUris != nil { + in, out := &in.PostLogoutRedirectUris, &out.PostLogoutRedirectUris + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ClockSkew != nil { + in, out := &in.ClockSkew, &out.ClockSkew + *out = new(v1.Duration) + **out = **in + } + if in.AdditionalOrigins != nil { + in, out := &in.AdditionalOrigins, &out.AdditionalOrigins + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the 
receiver, creating a new OIDCAppSpec. +func (in *OIDCAppSpec) DeepCopy() *OIDCAppSpec { + if in == nil { + return nil + } + out := new(OIDCAppSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OIDCAppStatus) DeepCopyInto(out *OIDCAppStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OIDCAppStatus. +func (in *OIDCAppStatus) DeepCopy() *OIDCAppStatus { + if in == nil { + return nil + } + out := new(OIDCAppStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Organization) DeepCopyInto(out *Organization) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Organization. +func (in *Organization) DeepCopy() *Organization { + if in == nil { + return nil + } + out := new(Organization) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Organization) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrganizationAdmin) DeepCopyInto(out *OrganizationAdmin) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrganizationAdmin. 
+func (in *OrganizationAdmin) DeepCopy() *OrganizationAdmin { + if in == nil { + return nil + } + out := new(OrganizationAdmin) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrganizationList) DeepCopyInto(out *OrganizationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Organization, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrganizationList. +func (in *OrganizationList) DeepCopy() *OrganizationList { + if in == nil { + return nil + } + out := new(OrganizationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OrganizationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrganizationRef) DeepCopyInto(out *OrganizationRef) { + *out = *in + out.ObjectReference = in.ObjectReference +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrganizationRef. +func (in *OrganizationRef) DeepCopy() *OrganizationRef { + if in == nil { + return nil + } + out := new(OrganizationRef) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OrganizationSpec) DeepCopyInto(out *OrganizationSpec) { + *out = *in + out.ZitadelClusterRef = in.ZitadelClusterRef + out.OrganizationAdmin = in.OrganizationAdmin +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrganizationSpec. +func (in *OrganizationSpec) DeepCopy() *OrganizationSpec { + if in == nil { + return nil + } + out := new(OrganizationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrganizationStatus) DeepCopyInto(out *OrganizationStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrganizationStatus. +func (in *OrganizationStatus) DeepCopy() *OrganizationStatus { + if in == nil { + return nil + } + out := new(OrganizationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Password) DeepCopyInto(out *Password) { + *out = *in + in.SecretKeyRef.DeepCopyInto(&out.SecretKeyRef) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Password. +func (in *Password) DeepCopy() *Password { + if in == nil { + return nil + } + out := new(Password) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Project) DeepCopyInto(out *Project) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Project. +func (in *Project) DeepCopy() *Project { + if in == nil { + return nil + } + out := new(Project) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Project) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectList) DeepCopyInto(out *ProjectList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Project, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectList. +func (in *ProjectList) DeepCopy() *ProjectList { + if in == nil { + return nil + } + out := new(ProjectList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProjectList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectRef) DeepCopyInto(out *ProjectRef) { + *out = *in + out.ObjectReference = in.ObjectReference +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectRef. 
+func (in *ProjectRef) DeepCopy() *ProjectRef { + if in == nil { + return nil + } + out := new(ProjectRef) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectSpec) DeepCopyInto(out *ProjectSpec) { + *out = *in + out.OrganizationRef = in.OrganizationRef + if in.Roles != nil { + in, out := &in.Roles, &out.Roles + *out = make([]Role, len(*in)) + copy(*out, *in) + } + if in.Grants != nil { + in, out := &in.Grants, &out.Grants + *out = make([]Grant, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectSpec. +func (in *ProjectSpec) DeepCopy() *ProjectSpec { + if in == nil { + return nil + } + out := new(ProjectSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectStatus) DeepCopyInto(out *ProjectStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectStatus. +func (in *ProjectStatus) DeepCopy() *ProjectStatus { + if in == nil { + return nil + } + out := new(ProjectStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Role) DeepCopyInto(out *Role) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Role. 
+func (in *Role) DeepCopy() *Role { + if in == nil { + return nil + } + out := new(Role) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SMTPConfig) DeepCopyInto(out *SMTPConfig) { + *out = *in + if in.User != nil { + in, out := &in.User, &out.User + *out = new(string) + **out = **in + } + if in.Password != nil { + in, out := &in.Password, &out.Password + *out = new(Password) + (*in).DeepCopyInto(*out) + } + if in.ReplyToAddress != nil { + in, out := &in.ReplyToAddress, &out.ReplyToAddress + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SMTPConfig. +func (in *SMTPConfig) DeepCopy() *SMTPConfig { + if in == nil { + return nil + } + out := new(SMTPConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserGrant) DeepCopyInto(out *UserGrant) { + *out = *in + out.ProjectRef = in.ProjectRef + if in.RoleKeys != nil { + in, out := &in.RoleKeys, &out.RoleKeys + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserGrant. +func (in *UserGrant) DeepCopy() *UserGrant { + if in == nil { + return nil + } + out := new(UserGrant) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ZitadelCluster) DeepCopyInto(out *ZitadelCluster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZitadelCluster. 
+func (in *ZitadelCluster) DeepCopy() *ZitadelCluster { + if in == nil { + return nil + } + out := new(ZitadelCluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ZitadelCluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ZitadelClusterList) DeepCopyInto(out *ZitadelClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ZitadelCluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZitadelClusterList. +func (in *ZitadelClusterList) DeepCopy() *ZitadelClusterList { + if in == nil { + return nil + } + out := new(ZitadelClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ZitadelClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ZitadelClusterRef) DeepCopyInto(out *ZitadelClusterRef) { + *out = *in + out.ObjectReference = in.ObjectReference +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZitadelClusterRef. +func (in *ZitadelClusterRef) DeepCopy() *ZitadelClusterRef { + if in == nil { + return nil + } + out := new(ZitadelClusterRef) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *ZitadelClusterSpec) DeepCopyInto(out *ZitadelClusterSpec) { + *out = *in + out.DomainSettings = in.DomainSettings + in.SMTPConfig.DeepCopyInto(&out.SMTPConfig) + out.Image = in.Image + in.Resources.DeepCopyInto(&out.Resources) + out.CrdbClusterRef = in.CrdbClusterRef + if in.PodAnnotations != nil { + in, out := &in.PodAnnotations, &out.PodAnnotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ServiceAnnotations != nil { + in, out := &in.ServiceAnnotations, &out.ServiceAnnotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + out.RootTLSSecret = in.RootTLSSecret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZitadelClusterSpec. +func (in *ZitadelClusterSpec) DeepCopy() *ZitadelClusterSpec { + if in == nil { + return nil + } + out := new(ZitadelClusterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ZitadelClusterStatus) DeepCopyInto(out *ZitadelClusterStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZitadelClusterStatus. +func (in *ZitadelClusterStatus) DeepCopy() *ZitadelClusterStatus { + if in == nil { + return nil + } + out := new(ZitadelClusterStatus) + in.DeepCopyInto(out) + return out +} diff --git a/src/cmd/main.go b/src/cmd/main.go new file mode 100644 index 0000000..5151801 --- /dev/null +++ b/src/cmd/main.go @@ -0,0 +1,156 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "flag" + "os" + "time" + + // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) + // to ensure that exec-entrypoint and run can make use of them. + crdbv1alpha1 "github.com/cockroachdb/cockroach-operator/apis/v1alpha1" + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + _ "k8s.io/client-go/plugin/pkg/client/auth" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/healthz" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + server "sigs.k8s.io/controller-runtime/pkg/metrics/server" + + zitadelv1alpha1 "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/api/v1alpha1" + "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/internal/controller" + "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/pkg/builder" + conditions "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/pkg/condition" + "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/pkg/controller/configmap" + "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/pkg/controller/secret" + "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/pkg/controller/service" + //+kubebuilder:scaffold:imports +) + +var ( + scheme = runtime.NewScheme() + setupLog = ctrl.Log.WithName("setup") +) + +func init() { + utilruntime.Must(clientgoscheme.AddToScheme(scheme)) + utilruntime.Must(crdbv1alpha1.AddToScheme(scheme)) + 
utilruntime.Must(zitadelv1alpha1.AddToScheme(scheme)) + //+kubebuilder:scaffold:scheme +} + +func main() { + var metricsAddr string + var enableLeaderElection bool + var probeAddr string + flag.StringVar(&metricsAddr, "metrics-bind-address", ":8080", "The address the metric endpoint binds to.") + flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.") + flag.BoolVar(&enableLeaderElection, "leader-elect", false, + "Enable leader election for controller manager. "+ + "Enabling this will ensure there is only one active controller manager.") + opts := zap.Options{ + Development: true, + } + opts.BindFlags(flag.CommandLine) + flag.Parse() + + ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts))) + + mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ + Scheme: scheme, + Metrics: server.Options{BindAddress: metricsAddr}, + HealthProbeBindAddress: probeAddr, + LeaderElection: enableLeaderElection, + LeaderElectionID: "88a0b43c.topmanage.com", + // LeaderElectionReleaseOnCancel defines if the leader should step down voluntarily + // when the Manager ends. This requires the binary to immediately end when the + // Manager is stopped, otherwise, this setting is unsafe. Setting this significantly + // speeds up voluntary leader transitions as the new leader don't have to wait + // LeaseDuration time first. + // + // In the default scaffold provided, the program ends immediately after + // the manager stops, so would be fine to enable this option. However, + // if you are doing or is intended to do any operation such as perform cleanups + // after the manager stops then its usage might be unsafe. 
+ // LeaderElectionReleaseOnCancel: true, + }) + if err != nil { + setupLog.Error(err, "unable to start manager") + os.Exit(1) + } + client := mgr.GetClient() + scheme := mgr.GetScheme() + builder := builder.NewBuilder(scheme) + secretReconciler := secret.NewSecretReconciler(client, builder) + configmapReconciler := configmap.NewConfigMapReconciler(client, builder) + serviceReconciler := service.NewServiceReconciler(client) + refResolver := zitadelv1alpha1.NewRefResolver(client) + conditionReady := conditions.NewReady() + requeueZitadel := 30 * time.Second + if err = (&controller.ZitadelClusterReconciler{ + Client: client, + Scheme: scheme, + ConditionReady: conditionReady, + Builder: builder, + RefResolver: refResolver, + SecretReconciler: secretReconciler, + ConfigMapReconciler: configmapReconciler, + ServiceReconciler: serviceReconciler, + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "ZitadelCluster") + os.Exit(1) + } + if err = controller.NewOrganizationReconciler(client, refResolver, conditionReady, requeueZitadel).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "Organization") + os.Exit(1) + } + + if err = controller.NewProjectReconciler(client, refResolver, conditionReady, requeueZitadel).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "Project") + os.Exit(1) + } + if err = controller.NewOIDCAppReconciler(client, refResolver, builder, conditionReady, requeueZitadel).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "OIDCApp") + os.Exit(1) + } + if err = controller.NewMachineUserReconciler(client, refResolver, builder, conditionReady, requeueZitadel).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "MachineUser") + os.Exit(1) + } + if err = controller.NewAPIAppReconciler(client, 
refResolver, builder, conditionReady, requeueZitadel).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "APIApp") + os.Exit(1) + } + //+kubebuilder:scaffold:builder + + if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { + setupLog.Error(err, "unable to set up health check") + os.Exit(1) + } + if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil { + setupLog.Error(err, "unable to set up ready check") + os.Exit(1) + } + + setupLog.Info("starting manager") + if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { + setupLog.Error(err, "problem running manager") + os.Exit(1) + } +} diff --git a/src/config/crd/bases/zitadel.topmanage.com_apiapps.yaml b/src/config/crd/bases/zitadel.topmanage.com_apiapps.yaml new file mode 100644 index 0000000..b5bf07b --- /dev/null +++ b/src/config/crd/bases/zitadel.topmanage.com_apiapps.yaml @@ -0,0 +1,177 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.11.1 + creationTimestamp: null + name: apiapps.zitadel.topmanage.com +spec: + group: zitadel.topmanage.com + names: + kind: APIApp + listKind: APIAppList + plural: apiapps + singular: apiapp + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: APIApp is the Schema for the apiapps API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: APIAppSpec defines the desired state of APIApp + properties: + authMethodType: + enum: + - API_AUTH_METHOD_TYPE_BASIC + - API_AUTH_METHOD_TYPE_PRIVATE_KEY_JWT + type: string + projectRef: + description: 'INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + Important: Run "make" to regenerate code after modifying this file' + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of + an entire object, this string should contain a valid JSON/Go + field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen + only to have some well-defined way of referencing a part of + an object. TODO: this design is not final and this field is + subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + x-kubernetes-map-type: atomic + required: + - authMethodType + - projectRef + type: object + status: + description: APIAppStatus defines the observed state of APIApp + properties: + appId: + default: "" + type: string + clientId: + default: "" + type: string + conditions: + description: 'INSERT ADDITIONAL STATUS FIELD - define observed state + of cluster Important: Run "make" to regenerate code after modifying + this file' + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. 
+ maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + keyId: + default: "" + type: string + required: + - appId + - clientId + - keyId + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/src/config/crd/bases/zitadel.topmanage.com_machineusers.yaml b/src/config/crd/bases/zitadel.topmanage.com_machineusers.yaml new file mode 100644 index 0000000..494c2ea --- /dev/null +++ b/src/config/crd/bases/zitadel.topmanage.com_machineusers.yaml @@ -0,0 +1,225 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.11.1 + creationTimestamp: null + name: machineusers.zitadel.topmanage.com +spec: + group: zitadel.topmanage.com + names: + kind: MachineUser + listKind: MachineUserList + plural: machineusers + singular: machineuser + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: MachineUser is the Schema for the machineusers API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: MachineUserSpec defines the desired state of MachineUser + properties: + accessTokenType: + enum: + - ACCESS_TOKEN_TYPE_BEARER + - ACCESS_TOKEN_TYPE_JWT + type: string + organizationRef: + description: 'INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + Important: Run "make" to regenerate code after modifying this file' + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of + an entire object, this string should contain a valid JSON/Go + field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen + only to have some well-defined way of referencing a part of + an object. TODO: this design is not final and this field is + subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + x-kubernetes-map-type: atomic + userGrants: + items: + properties: + projectRef: + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead + of an entire object, this string should contain a valid + JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container + within a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that + triggered the event) or if no container name is specified + "spec.containers[2]" (container with index 2 in this pod). + This syntax is chosen only to have some well-defined way + of referencing a part of an object. TODO: this design + is not final and this field is subject to change in the + future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + x-kubernetes-map-type: atomic + roleKeys: + items: + type: string + type: array + required: + - projectRef + type: object + type: array + required: + - accessTokenType + - organizationRef + type: object + status: + description: MachineUserStatus defines the observed state of MachineUser + properties: + conditions: + description: 'INSERT ADDITIONAL STATUS FIELD - define observed state + of cluster Important: Run "make" to regenerate code after modifying + this file' + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. 
For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + keyId: + default: "" + type: string + patId: + default: "" + type: string + userId: + default: "" + type: string + required: + - keyId + - patId + - userId + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/src/config/crd/bases/zitadel.topmanage.com_oidcapps.yaml b/src/config/crd/bases/zitadel.topmanage.com_oidcapps.yaml new file mode 100644 index 0000000..4e8c735 --- /dev/null +++ b/src/config/crd/bases/zitadel.topmanage.com_oidcapps.yaml @@ -0,0 +1,241 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.11.1 + creationTimestamp: null + name: oidcapps.zitadel.topmanage.com +spec: + group: zitadel.topmanage.com + names: + kind: OIDCApp + listKind: OIDCAppList + plural: oidcapps + singular: oidcapp + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: OIDCApp is the Schema for the oidcapps API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: OIDCAppSpec defines the desired state of OIDCApp + properties: + accessTokenRoleAssertion: + type: boolean + accessTokenType: + enum: + - OIDC_TOKEN_TYPE_BEARER + - OIDC_TOKEN_TYPE_JWT + type: string + additionalOrigins: + items: + type: string + type: array + appType: + enum: + - OIDC_APP_TYPE_WEB + - OIDC_APP_TYPE_USER_AGENT + - OIDC_APP_TYPE_NATIVE + type: string + authMethodType: + enum: + - OIDC_AUTH_METHOD_TYPE_BASIC + - OIDC_AUTH_METHOD_TYPE_POST + - OIDC_AUTH_METHOD_TYPE_NONE + - OIDC_AUTH_METHOD_TYPE_PRIVATE_KEY_JWT + type: string + clockSkew: + format: duration + type: string + devMode: + type: boolean + grantTypes: + items: + enum: + - OIDC_GRANT_TYPE_AUTHORIZATION_CODE + - OIDC_GRANT_TYPE_IMPLICIT + - OIDC_GRANT_TYPE_REFRESH_TOKEN + - OIDC_GRANT_TYPE_DEVICE_CODE + - OIDC_GRANT_TYPE_TOKEN_EXCHANGE + type: string + type: array + idTokenRoleAssertion: + type: boolean + idTokenUserinfoAssertion: + type: boolean + postLogoutRedirectUris: + items: + type: string + type: array + projectRef: + description: 'INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + Important: Run "make" to regenerate code after modifying this file' + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of + an entire object, this string should contain a valid JSON/Go + field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). 
This syntax is chosen + only to have some well-defined way of referencing a part of + an object. TODO: this design is not final and this field is + subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + x-kubernetes-map-type: atomic + redirectUris: + items: + type: string + type: array + responseTypes: + items: + enum: + - OIDC_RESPONSE_TYPE_CODE + - OIDC_RESPONSE_TYPE_ID_TOKEN + - OIDC_RESPONSE_TYPE_ID_TOKEN_TOKEN + type: string + type: array + skipNativeAppSuccessPage: + type: boolean + required: + - accessTokenRoleAssertion + - accessTokenType + - appType + - authMethodType + - clockSkew + - devMode + - grantTypes + - idTokenRoleAssertion + - idTokenUserinfoAssertion + - postLogoutRedirectUris + - projectRef + - redirectUris + - responseTypes + - skipNativeAppSuccessPage + type: object + status: + description: OIDCAppStatus defines the observed state of OIDCApp + properties: + appId: + default: "" + type: string + clientId: + default: "" + type: string + conditions: + description: 'INSERT ADDITIONAL STATUS FIELD - define observed state + of cluster Important: Run "make" to regenerate 
code after modifying + this file' + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. 
+ maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + required: + - appId + - clientId + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/src/config/crd/bases/zitadel.topmanage.com_organizations.yaml b/src/config/crd/bases/zitadel.topmanage.com_organizations.yaml new file mode 100644 index 0000000..748b880 --- /dev/null +++ b/src/config/crd/bases/zitadel.topmanage.com_organizations.yaml @@ -0,0 +1,184 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.11.1 + creationTimestamp: null + name: organizations.zitadel.topmanage.com +spec: + group: zitadel.topmanage.com + names: + kind: Organization + listKind: OrganizationList + plural: organizations + singular: organization + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: Organization is the Schema for the organizations API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. 
Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: OrganizationSpec defines the desired state of Organization + properties: + organizationAdmin: + properties: + email: + type: string + firstName: + type: string + lastName: + type: string + userName: + type: string + required: + - email + - firstName + - lastName + - userName + type: object + zitadelClusterRef: + description: 'INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + Important: Run "make" to regenerate code after modifying this file' + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of + an entire object, this string should contain a valid JSON/Go + field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen + only to have some well-defined way of referencing a part of + an object. TODO: this design is not final and this field is + subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + x-kubernetes-map-type: atomic + required: + - organizationAdmin + - zitadelClusterRef + type: object + status: + description: OrganizationStatus defines the observed state of Organization + properties: + adminId: + default: "" + type: string + conditions: + description: 'INSERT ADDITIONAL STATUS FIELD - define observed state + of cluster Important: Run "make" to regenerate code after modifying + this file Conditions for the Database object.' + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. 
// Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + orgId: + default: "" + type: string + required: + - adminId + - orgId + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/src/config/crd/bases/zitadel.topmanage.com_projects.yaml b/src/config/crd/bases/zitadel.topmanage.com_projects.yaml new file mode 100644 index 0000000..5a54387 --- /dev/null +++ b/src/config/crd/bases/zitadel.topmanage.com_projects.yaml @@ -0,0 +1,234 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.11.1 + creationTimestamp: null + name: projects.zitadel.topmanage.com +spec: + group: zitadel.topmanage.com + names: + kind: Project + listKind: ProjectList + plural: projects + singular: project + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: Project is the Schema for the projects API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. 
In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ProjectSpec defines the desired state of Project + properties: + grants: + items: + properties: + organizationRef: + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead + of an entire object, this string should contain a valid + JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container + within a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that + triggered the event) or if no container name is specified + "spec.containers[2]" (container with index 2 in this pod). + This syntax is chosen only to have some well-defined way + of referencing a part of an object. TODO: this design + is not final and this field is subject to change in the + future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + x-kubernetes-map-type: atomic + roleKeys: + items: + type: string + type: array + required: + - organizationRef + - roleKeys + type: object + type: array + hasProjectCheck: + type: boolean + organizationRef: + description: 'INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + Important: Run "make" to regenerate code after modifying this file + https://zitadel.com/docs/apis/resources/mgmt/management-service-add-project' + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of + an entire object, this string should contain a valid JSON/Go + field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen + only to have some well-defined way of referencing a part of + an object. TODO: this design is not final and this field is + subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + x-kubernetes-map-type: atomic + projectRoleAssertion: + type: boolean + projectRoleCheck: + type: boolean + roles: + items: + properties: + displayName: + type: string + group: + type: string + key: + type: string + required: + - displayName + - group + - key + type: object + type: array + required: + - organizationRef + type: object + status: + description: ProjectStatus defines the observed state of Project + properties: + conditions: + description: 'INSERT ADDITIONAL STATUS FIELD - define observed state + of cluster Important: Run "make" to regenerate code after modifying + this file Conditions for the Database object.' + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. 
+ format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + projectId: + default: "" + type: string + required: + - projectId + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/src/config/crd/bases/zitadel.topmanage.com_zitadelclusters.yaml b/src/config/crd/bases/zitadel.topmanage.com_zitadelclusters.yaml new file mode 100644 index 0000000..cc22bb0 --- /dev/null +++ b/src/config/crd/bases/zitadel.topmanage.com_zitadelclusters.yaml @@ -0,0 +1,348 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.11.1 + creationTimestamp: null + name: zitadelclusters.zitadel.topmanage.com +spec: + group: zitadel.topmanage.com + names: + kind: ZitadelCluster + listKind: ZitadelClusterList + plural: zitadelclusters + singular: zitadelcluster + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: ZitadelCluster is the Schema for the zitadelclusters API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ZitadelClusterSpec defines the desired state of ZitadelCluster + properties: + crdbClusterRef: + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of + an entire object, this string should contain a valid JSON/Go + field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen + only to have some well-defined way of referencing a part of + an object. TODO: this design is not final and this field is + subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + x-kubernetes-map-type: atomic + domainSettings: + properties: + smtpSenderAddressMatchesInstanceDomain: + default: true + type: boolean + userLoginMustBeDomain: + default: true + type: boolean + validateOrgDomains: + default: true + type: boolean + required: + - smtpSenderAddressMatchesInstanceDomain + - userLoginMustBeDomain + - validateOrgDomains + type: object + externalPort: + default: 443 + format: int64 + type: integer + externalSecure: + default: true + type: boolean + firstOrgName: + default: DEFAULT + description: 'INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + Important: Run "make" to regenerate code after modifying this file' + type: string + host: + type: string + image: + properties: + name: + type: string + tag: + type: string + required: + - name + - tag + type: object + podAnnotations: + additionalProperties: + type: string + description: PodAnnotations to add to the Pods metadata. + type: object + purpose: + enum: + - demo + - trial + - staging + - productive + - testing + type: string + replicas: + default: 3 + format: int32 + type: integer + resources: + description: ResourceRequirements describes the compute resource requirements. + properties: + claims: + description: "Claims lists the names of resources, defined in + spec.resourceClaims, that are used by this container. \n This + is an alpha field and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable. It can only be set + for containers." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims + of the Pod where this field is used. It makes that resource + available inside a container. 
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources + allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + rootTLSSecret: + description: SecretReference represents a Secret Reference. It has + enough information to retrieve secret in any namespace + properties: + name: + description: name is unique within a namespace to reference a + secret resource. + type: string + namespace: + description: namespace defines the space within which the secret + name must be unique. + type: string + type: object + x-kubernetes-map-type: atomic + serviceAnnotations: + additionalProperties: + type: string + description: ServiceAnnotations to add to the service metadata. + type: object + smtpConfig: + properties: + host: + type: string + password: + properties: + secretRef: + description: SecretKeySelector selects a key of a Secret. + properties: + key: + description: The key of the secret to select from. 
Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + required: + - secretRef + type: object + replyToAddress: + type: string + senderAddress: + type: string + senderName: + type: string + tls: + default: true + type: boolean + user: + type: string + required: + - host + - senderAddress + - senderName + - tls + type: object + required: + - crdbClusterRef + - domainSettings + - externalPort + - externalSecure + - firstOrgName + - host + - image + - purpose + - resources + - rootTLSSecret + - smtpConfig + type: object + status: + description: ZitadelClusterStatus defines the observed state of ZitadelCluster + properties: + conditions: + description: 'INSERT ADDITIONAL STATUS FIELD - define observed state + of cluster Important: Run "make" to regenerate code after modifying + this file' + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. 
This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + defaultInstanceId: + default: "" + type: string + replicas: + default: 3 + format: int32 + type: integer + smtpProviderId: + default: "" + type: string + required: + - defaultInstanceId + - smtpProviderId + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/src/config/crd/kustomization.yaml b/src/config/crd/kustomization.yaml new file mode 100644 index 0000000..08028de --- /dev/null +++ b/src/config/crd/kustomization.yaml @@ -0,0 +1,36 @@ +# This kustomization.yaml is not intended to be run by itself, +# since it depends on service name and namespace that are out of this kustomize package. +# It should be run by config/default +resources: +- bases/zitadel.topmanage.com_zitadelclusters.yaml +- bases/zitadel.topmanage.com_organizations.yaml +- bases/zitadel.topmanage.com_projects.yaml +- bases/zitadel.topmanage.com_oidcapps.yaml +- bases/zitadel.topmanage.com_machineusers.yaml +- bases/zitadel.topmanage.com_apiapps.yaml +#+kubebuilder:scaffold:crdkustomizeresource + +patchesStrategicMerge: +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. +# patches here are for enabling the conversion webhook for each CRD +#- patches/webhook_in_zitadelclusters.yaml +#- patches/webhook_in_organizations.yaml +#- patches/webhook_in_projects.yaml +#- patches/webhook_in_oidcapps.yaml +#- patches/webhook_in_machineusers.yaml +#- patches/webhook_in_apiapps.yaml +#+kubebuilder:scaffold:crdkustomizewebhookpatch + +# [CERTMANAGER] To enable cert-manager, uncomment all the sections with [CERTMANAGER] prefix. 
+# patches here are for enabling the CA injection for each CRD +#- patches/cainjection_in_zitadelclusters.yaml +#- patches/cainjection_in_organizations.yaml +#- patches/cainjection_in_projects.yaml +#- patches/cainjection_in_oidcapps.yaml +#- patches/cainjection_in_machineusers.yaml +#- patches/cainjection_in_apiapps.yaml +#+kubebuilder:scaffold:crdkustomizecainjectionpatch + +# the following config is for teaching kustomize how to do kustomization for CRDs. +configurations: +- kustomizeconfig.yaml diff --git a/src/config/crd/kustomizeconfig.yaml b/src/config/crd/kustomizeconfig.yaml new file mode 100644 index 0000000..ec5c150 --- /dev/null +++ b/src/config/crd/kustomizeconfig.yaml @@ -0,0 +1,19 @@ +# This file is for teaching kustomize how to substitute name and namespace reference in CRD +nameReference: +- kind: Service + version: v1 + fieldSpecs: + - kind: CustomResourceDefinition + version: v1 + group: apiextensions.k8s.io + path: spec/conversion/webhook/clientConfig/service/name + +namespace: +- kind: CustomResourceDefinition + version: v1 + group: apiextensions.k8s.io + path: spec/conversion/webhook/clientConfig/service/namespace + create: false + +varReference: +- path: metadata/annotations diff --git a/src/config/crd/patches/cainjection_in_apiapps.yaml b/src/config/crd/patches/cainjection_in_apiapps.yaml new file mode 100644 index 0000000..9332f3d --- /dev/null +++ b/src/config/crd/patches/cainjection_in_apiapps.yaml @@ -0,0 +1,7 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: CERTIFICATE_NAMESPACE/CERTIFICATE_NAME + name: apiapps.zitadel.topmanage.com diff --git a/src/config/crd/patches/cainjection_in_machineusers.yaml b/src/config/crd/patches/cainjection_in_machineusers.yaml new file mode 100644 index 0000000..49e3ea2 --- /dev/null +++ b/src/config/crd/patches/cainjection_in_machineusers.yaml 
@@ -0,0 +1,7 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: CERTIFICATE_NAMESPACE/CERTIFICATE_NAME + name: machineusers.zitadel.topmanage.com diff --git a/src/config/crd/patches/cainjection_in_oidcapps.yaml b/src/config/crd/patches/cainjection_in_oidcapps.yaml new file mode 100644 index 0000000..044872a --- /dev/null +++ b/src/config/crd/patches/cainjection_in_oidcapps.yaml @@ -0,0 +1,7 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: CERTIFICATE_NAMESPACE/CERTIFICATE_NAME + name: oidcapps.zitadel.topmanage.com diff --git a/src/config/crd/patches/cainjection_in_organizations.yaml b/src/config/crd/patches/cainjection_in_organizations.yaml new file mode 100644 index 0000000..96ad346 --- /dev/null +++ b/src/config/crd/patches/cainjection_in_organizations.yaml @@ -0,0 +1,7 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: CERTIFICATE_NAMESPACE/CERTIFICATE_NAME + name: organizations.zitadel.topmanage.com diff --git a/src/config/crd/patches/cainjection_in_projects.yaml b/src/config/crd/patches/cainjection_in_projects.yaml new file mode 100644 index 0000000..499c34e --- /dev/null +++ b/src/config/crd/patches/cainjection_in_projects.yaml @@ -0,0 +1,7 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: CERTIFICATE_NAMESPACE/CERTIFICATE_NAME + name: projects.zitadel.topmanage.com diff --git 
a/src/config/crd/patches/cainjection_in_zitadelclusters.yaml b/src/config/crd/patches/cainjection_in_zitadelclusters.yaml new file mode 100644 index 0000000..97750e2 --- /dev/null +++ b/src/config/crd/patches/cainjection_in_zitadelclusters.yaml @@ -0,0 +1,7 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: CERTIFICATE_NAMESPACE/CERTIFICATE_NAME + name: zitadelclusters.zitadel.topmanage.com diff --git a/src/config/crd/patches/webhook_in_apiapps.yaml b/src/config/crd/patches/webhook_in_apiapps.yaml new file mode 100644 index 0000000..04173a6 --- /dev/null +++ b/src/config/crd/patches/webhook_in_apiapps.yaml @@ -0,0 +1,16 @@ +# The following patch enables a conversion webhook for the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: apiapps.zitadel.topmanage.com +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + namespace: system + name: webhook-service + path: /convert + conversionReviewVersions: + - v1 diff --git a/src/config/crd/patches/webhook_in_machineusers.yaml b/src/config/crd/patches/webhook_in_machineusers.yaml new file mode 100644 index 0000000..f4ca8cc --- /dev/null +++ b/src/config/crd/patches/webhook_in_machineusers.yaml @@ -0,0 +1,16 @@ +# The following patch enables a conversion webhook for the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: machineusers.zitadel.topmanage.com +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + namespace: system + name: webhook-service + path: /convert + conversionReviewVersions: + - v1 diff --git a/src/config/crd/patches/webhook_in_oidcapps.yaml b/src/config/crd/patches/webhook_in_oidcapps.yaml new file mode 100644 index 0000000..1a7bc04 --- /dev/null +++ b/src/config/crd/patches/webhook_in_oidcapps.yaml @@ -0,0 
+1,16 @@ +# The following patch enables a conversion webhook for the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: oidcapps.zitadel.topmanage.com +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + namespace: system + name: webhook-service + path: /convert + conversionReviewVersions: + - v1 diff --git a/src/config/crd/patches/webhook_in_organizations.yaml b/src/config/crd/patches/webhook_in_organizations.yaml new file mode 100644 index 0000000..e61684e --- /dev/null +++ b/src/config/crd/patches/webhook_in_organizations.yaml @@ -0,0 +1,16 @@ +# The following patch enables a conversion webhook for the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: organizations.zitadel.topmanage.com +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + namespace: system + name: webhook-service + path: /convert + conversionReviewVersions: + - v1 diff --git a/src/config/crd/patches/webhook_in_projects.yaml b/src/config/crd/patches/webhook_in_projects.yaml new file mode 100644 index 0000000..666f241 --- /dev/null +++ b/src/config/crd/patches/webhook_in_projects.yaml @@ -0,0 +1,16 @@ +# The following patch enables a conversion webhook for the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: projects.zitadel.topmanage.com +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + namespace: system + name: webhook-service + path: /convert + conversionReviewVersions: + - v1 diff --git a/src/config/crd/patches/webhook_in_zitadelclusters.yaml b/src/config/crd/patches/webhook_in_zitadelclusters.yaml new file mode 100644 index 0000000..dd696dc --- /dev/null +++ b/src/config/crd/patches/webhook_in_zitadelclusters.yaml @@ -0,0 +1,16 @@ +# The following patch enables a conversion webhook for the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: 
zitadelclusters.zitadel.topmanage.com +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + namespace: system + name: webhook-service + path: /convert + conversionReviewVersions: + - v1 diff --git a/src/config/default/kustomization.yaml b/src/config/default/kustomization.yaml new file mode 100644 index 0000000..7df9acc --- /dev/null +++ b/src/config/default/kustomization.yaml @@ -0,0 +1,144 @@ +# Adds namespace to all resources. +namespace: src-system + +# Value of this field is prepended to the +# names of all resources, e.g. a deployment named +# "wordpress" becomes "alices-wordpress". +# Note that it should also match with the prefix (text before '-') of the namespace +# field above. +namePrefix: src- + +# Labels to add to all resources and selectors. +#labels: +#- includeSelectors: true +# pairs: +# someName: someValue + +resources: +- ../crd +- ../rbac +- ../manager +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in +# crd/kustomization.yaml +#- ../webhook +# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required. +#- ../certmanager +# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'. +#- ../prometheus + +patchesStrategicMerge: +# Protect the /metrics endpoint by putting it behind auth. +# If you want your controller-manager to expose the /metrics +# endpoint w/o any authn/z, please comment the following line. +- manager_auth_proxy_patch.yaml + + + +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in +# crd/kustomization.yaml +#- manager_webhook_patch.yaml + +# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. +# Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks. 
+# 'CERTMANAGER' needs to be enabled to use ca injection +#- webhookcainjection_patch.yaml + +# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix. +# Uncomment the following replacements to add the cert-manager CA injection annotations +#replacements: +# - source: # Add cert-manager annotation to ValidatingWebhookConfiguration, MutatingWebhookConfiguration and CRDs +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert # this name should match the one in certificate.yaml +# fieldPath: .metadata.namespace # namespace of the certificate CR +# targets: +# - select: +# kind: ValidatingWebhookConfiguration +# fieldPaths: +# - .metadata.annotations.[cert-manager.io/inject-ca-from] +# options: +# delimiter: '/' +# index: 0 +# create: true +# - select: +# kind: MutatingWebhookConfiguration +# fieldPaths: +# - .metadata.annotations.[cert-manager.io/inject-ca-from] +# options: +# delimiter: '/' +# index: 0 +# create: true +# - select: +# kind: CustomResourceDefinition +# fieldPaths: +# - .metadata.annotations.[cert-manager.io/inject-ca-from] +# options: +# delimiter: '/' +# index: 0 +# create: true +# - source: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert # this name should match the one in certificate.yaml +# fieldPath: .metadata.name +# targets: +# - select: +# kind: ValidatingWebhookConfiguration +# fieldPaths: +# - .metadata.annotations.[cert-manager.io/inject-ca-from] +# options: +# delimiter: '/' +# index: 1 +# create: true +# - select: +# kind: MutatingWebhookConfiguration +# fieldPaths: +# - .metadata.annotations.[cert-manager.io/inject-ca-from] +# options: +# delimiter: '/' +# index: 1 +# create: true +# - select: +# kind: CustomResourceDefinition +# fieldPaths: +# - .metadata.annotations.[cert-manager.io/inject-ca-from] +# options: +# delimiter: '/' +# index: 1 +# create: true +# - source: # Add cert-manager annotation to the webhook Service +# kind: Service +# 
version: v1 +# name: webhook-service +# fieldPath: .metadata.name # namespace of the service +# targets: +# - select: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# fieldPaths: +# - .spec.dnsNames.0 +# - .spec.dnsNames.1 +# options: +# delimiter: '.' +# index: 0 +# create: true +# - source: +# kind: Service +# version: v1 +# name: webhook-service +# fieldPath: .metadata.namespace # namespace of the service +# targets: +# - select: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# fieldPaths: +# - .spec.dnsNames.0 +# - .spec.dnsNames.1 +# options: +# delimiter: '.' +# index: 1 +# create: true diff --git a/src/config/default/manager_auth_proxy_patch.yaml b/src/config/default/manager_auth_proxy_patch.yaml new file mode 100644 index 0000000..b751266 --- /dev/null +++ b/src/config/default/manager_auth_proxy_patch.yaml @@ -0,0 +1,55 @@ +# This patch inject a sidecar container which is a HTTP proxy for the +# controller manager, it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews. 
+apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system +spec: + template: + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/arch + operator: In + values: + - amd64 + - arm64 + - ppc64le + - s390x + - key: kubernetes.io/os + operator: In + values: + - linux + containers: + - name: kube-rbac-proxy + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - "ALL" + image: gcr.io/kubebuilder/kube-rbac-proxy:v0.13.1 + args: + - "--secure-listen-address=0.0.0.0:8443" + - "--upstream=http://127.0.0.1:8080/" + - "--logtostderr=true" + - "--v=0" + ports: + - containerPort: 8443 + protocol: TCP + name: https + resources: + limits: + cpu: 500m + memory: 128Mi + requests: + cpu: 5m + memory: 64Mi + - name: manager + args: + - "--health-probe-bind-address=:8081" + - "--metrics-bind-address=127.0.0.1:8080" + - "--leader-elect" diff --git a/src/config/default/manager_config_patch.yaml b/src/config/default/manager_config_patch.yaml new file mode 100644 index 0000000..f6f5891 --- /dev/null +++ b/src/config/default/manager_config_patch.yaml @@ -0,0 +1,10 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system +spec: + template: + spec: + containers: + - name: manager diff --git a/src/config/manager/kustomization.yaml b/src/config/manager/kustomization.yaml new file mode 100644 index 0000000..5c5f0b8 --- /dev/null +++ b/src/config/manager/kustomization.yaml @@ -0,0 +1,2 @@ +resources: +- manager.yaml diff --git a/src/config/manager/manager.yaml b/src/config/manager/manager.yaml new file mode 100644 index 0000000..b57a826 --- /dev/null +++ b/src/config/manager/manager.yaml @@ -0,0 +1,102 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + control-plane: controller-manager + app.kubernetes.io/name: namespace + app.kubernetes.io/instance: system + 
app.kubernetes.io/component: manager + app.kubernetes.io/created-by: src + app.kubernetes.io/part-of: src + app.kubernetes.io/managed-by: kustomize + name: system +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system + labels: + control-plane: controller-manager + app.kubernetes.io/name: deployment + app.kubernetes.io/instance: controller-manager + app.kubernetes.io/component: manager + app.kubernetes.io/created-by: src + app.kubernetes.io/part-of: src + app.kubernetes.io/managed-by: kustomize +spec: + selector: + matchLabels: + control-plane: controller-manager + replicas: 1 + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: manager + labels: + control-plane: controller-manager + spec: + # TODO(user): Uncomment the following code to configure the nodeAffinity expression + # according to the platforms which are supported by your solution. + # It is considered best practice to support multiple architectures. You can + # build your manager image using the makefile target docker-buildx. + # affinity: + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchExpressions: + # - key: kubernetes.io/arch + # operator: In + # values: + # - amd64 + # - arm64 + # - ppc64le + # - s390x + # - key: kubernetes.io/os + # operator: In + # values: + # - linux + securityContext: + runAsNonRoot: true + # TODO(user): For common cases that do not require escalating privileges + # it is recommended to ensure that all your Pods/Containers are restrictive. + # More info: https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted + # Please uncomment the following code if your project does NOT have to work on old Kubernetes + # versions < 1.19 or on vendors versions which do NOT support this field by default (i.e. Openshift < 4.11 ). 
+ # seccompProfile: + # type: RuntimeDefault + containers: + - command: + - /manager + args: + - --leader-elect + image: controller:latest + name: manager + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - "ALL" + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + # TODO(user): Configure the resources accordingly based on the project requirements. + # More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + resources: + limits: + cpu: 500m + memory: 128Mi + requests: + cpu: 10m + memory: 64Mi + serviceAccountName: controller-manager + terminationGracePeriodSeconds: 10 diff --git a/src/config/manifests/kustomization.yaml b/src/config/manifests/kustomization.yaml new file mode 100644 index 0000000..c803400 --- /dev/null +++ b/src/config/manifests/kustomization.yaml @@ -0,0 +1,28 @@ +# These resources constitute the fully configured set of manifests +# used to generate the 'manifests/' directory in a bundle. +resources: +- bases/src.clusterserviceversion.yaml +- ../default +- ../samples +- ../scorecard + +# [WEBHOOK] To enable webhooks, uncomment all the sections with [WEBHOOK] prefix. +# Do NOT uncomment sections with prefix [CERTMANAGER], as OLM does not support cert-manager. +# These patches remove the unnecessary "cert" volume and its manager container volumeMount. +#patchesJson6902: +#- target: +# group: apps +# version: v1 +# kind: Deployment +# name: controller-manager +# namespace: system +# patch: |- +# # Remove the manager container's "cert" volumeMount, since OLM will create and mount a set of certs. +# # Update the indices in this path if adding or removing containers/volumeMounts in the manager's Deployment. 
+# - op: remove + +# path: /spec/template/spec/containers/0/volumeMounts/0 +# # Remove the "cert" volume, since OLM will create and mount a set of certs. +# # Update the indices in this path if adding or removing volumes in the manager's Deployment. +# - op: remove +# path: /spec/template/spec/volumes/0 diff --git a/src/config/prometheus/kustomization.yaml b/src/config/prometheus/kustomization.yaml new file mode 100644 index 0000000..ed13716 --- /dev/null +++ b/src/config/prometheus/kustomization.yaml @@ -0,0 +1,2 @@ +resources: +- monitor.yaml diff --git a/src/config/prometheus/monitor.yaml b/src/config/prometheus/monitor.yaml new file mode 100644 index 0000000..ecad441 --- /dev/null +++ b/src/config/prometheus/monitor.yaml @@ -0,0 +1,26 @@ + +# Prometheus Monitor Service (Metrics) +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + labels: + control-plane: controller-manager + app.kubernetes.io/name: servicemonitor + app.kubernetes.io/instance: controller-manager-metrics-monitor + app.kubernetes.io/component: metrics + app.kubernetes.io/created-by: src + app.kubernetes.io/part-of: src + app.kubernetes.io/managed-by: kustomize + name: controller-manager-metrics-monitor + namespace: system +spec: + endpoints: + - path: /metrics + port: https + scheme: https + bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + tlsConfig: + insecureSkipVerify: true + selector: + matchLabels: + control-plane: controller-manager diff --git a/src/config/rbac/apiapp_editor_role.yaml b/src/config/rbac/apiapp_editor_role.yaml new file mode 100644 index 0000000..3bf43db --- /dev/null +++ b/src/config/rbac/apiapp_editor_role.yaml @@ -0,0 +1,31 @@ +# permissions for end users to edit apiapps. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: apiapp-editor-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: src + app.kubernetes.io/part-of: src + app.kubernetes.io/managed-by: kustomize + name: apiapp-editor-role +rules: +- apiGroups: + - zitadel.topmanage.com + resources: + - apiapps + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - zitadel.topmanage.com + resources: + - apiapps/status + verbs: + - get diff --git a/src/config/rbac/apiapp_viewer_role.yaml b/src/config/rbac/apiapp_viewer_role.yaml new file mode 100644 index 0000000..042b6f3 --- /dev/null +++ b/src/config/rbac/apiapp_viewer_role.yaml @@ -0,0 +1,27 @@ +# permissions for end users to view apiapps. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: apiapp-viewer-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: src + app.kubernetes.io/part-of: src + app.kubernetes.io/managed-by: kustomize + name: apiapp-viewer-role +rules: +- apiGroups: + - zitadel.topmanage.com + resources: + - apiapps + verbs: + - get + - list + - watch +- apiGroups: + - zitadel.topmanage.com + resources: + - apiapps/status + verbs: + - get diff --git a/src/config/rbac/auth_proxy_client_clusterrole.yaml b/src/config/rbac/auth_proxy_client_clusterrole.yaml new file mode 100644 index 0000000..e542bd6 --- /dev/null +++ b/src/config/rbac/auth_proxy_client_clusterrole.yaml @@ -0,0 +1,16 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: metrics-reader + app.kubernetes.io/component: kube-rbac-proxy + app.kubernetes.io/created-by: src + app.kubernetes.io/part-of: src + app.kubernetes.io/managed-by: kustomize + name: metrics-reader +rules: +- 
nonResourceURLs: + - "/metrics" + verbs: + - get diff --git a/src/config/rbac/auth_proxy_role.yaml b/src/config/rbac/auth_proxy_role.yaml new file mode 100644 index 0000000..5c8d16e --- /dev/null +++ b/src/config/rbac/auth_proxy_role.yaml @@ -0,0 +1,24 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: proxy-role + app.kubernetes.io/component: kube-rbac-proxy + app.kubernetes.io/created-by: src + app.kubernetes.io/part-of: src + app.kubernetes.io/managed-by: kustomize + name: proxy-role +rules: +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create +- apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create diff --git a/src/config/rbac/auth_proxy_role_binding.yaml b/src/config/rbac/auth_proxy_role_binding.yaml new file mode 100644 index 0000000..1d2f986 --- /dev/null +++ b/src/config/rbac/auth_proxy_role_binding.yaml @@ -0,0 +1,19 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/name: clusterrolebinding + app.kubernetes.io/instance: proxy-rolebinding + app.kubernetes.io/component: kube-rbac-proxy + app.kubernetes.io/created-by: src + app.kubernetes.io/part-of: src + app.kubernetes.io/managed-by: kustomize + name: proxy-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: proxy-role +subjects: +- kind: ServiceAccount + name: controller-manager + namespace: system diff --git a/src/config/rbac/auth_proxy_service.yaml b/src/config/rbac/auth_proxy_service.yaml new file mode 100644 index 0000000..62b13c0 --- /dev/null +++ b/src/config/rbac/auth_proxy_service.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + control-plane: controller-manager + app.kubernetes.io/name: service + app.kubernetes.io/instance: controller-manager-metrics-service + app.kubernetes.io/component: 
kube-rbac-proxy + app.kubernetes.io/created-by: src + app.kubernetes.io/part-of: src + app.kubernetes.io/managed-by: kustomize + name: controller-manager-metrics-service + namespace: system +spec: + ports: + - name: https + port: 8443 + protocol: TCP + targetPort: https + selector: + control-plane: controller-manager diff --git a/src/config/rbac/kustomization.yaml b/src/config/rbac/kustomization.yaml new file mode 100644 index 0000000..731832a --- /dev/null +++ b/src/config/rbac/kustomization.yaml @@ -0,0 +1,18 @@ +resources: +# All RBAC will be applied under this service account in +# the deployment namespace. You may comment out this resource +# if your manager will use a service account that exists at +# runtime. Be sure to update RoleBinding and ClusterRoleBinding +# subjects if changing service account names. +- service_account.yaml +- role.yaml +- role_binding.yaml +- leader_election_role.yaml +- leader_election_role_binding.yaml +# Comment the following 4 lines if you want to disable +# the auth proxy (https://github.com/brancz/kube-rbac-proxy) +# which protects your /metrics endpoint. +- auth_proxy_service.yaml +- auth_proxy_role.yaml +- auth_proxy_role_binding.yaml +- auth_proxy_client_clusterrole.yaml diff --git a/src/config/rbac/leader_election_role.yaml b/src/config/rbac/leader_election_role.yaml new file mode 100644 index 0000000..0d272be --- /dev/null +++ b/src/config/rbac/leader_election_role.yaml @@ -0,0 +1,44 @@ +# permissions to do leader election. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/name: role + app.kubernetes.io/instance: leader-election-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: src + app.kubernetes.io/part-of: src + app.kubernetes.io/managed-by: kustomize + name: leader-election-role +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch diff --git a/src/config/rbac/leader_election_role_binding.yaml b/src/config/rbac/leader_election_role_binding.yaml new file mode 100644 index 0000000..7d4eb79 --- /dev/null +++ b/src/config/rbac/leader_election_role_binding.yaml @@ -0,0 +1,19 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/name: rolebinding + app.kubernetes.io/instance: leader-election-rolebinding + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: src + app.kubernetes.io/part-of: src + app.kubernetes.io/managed-by: kustomize + name: leader-election-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: leader-election-role +subjects: +- kind: ServiceAccount + name: controller-manager + namespace: system diff --git a/src/config/rbac/machineuser_editor_role.yaml b/src/config/rbac/machineuser_editor_role.yaml new file mode 100644 index 0000000..e38d5c7 --- /dev/null +++ b/src/config/rbac/machineuser_editor_role.yaml @@ -0,0 +1,31 @@ +# permissions for end users to edit machineusers. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: machineuser-editor-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: src + app.kubernetes.io/part-of: src + app.kubernetes.io/managed-by: kustomize + name: machineuser-editor-role +rules: +- apiGroups: + - zitadel.topmanage.com + resources: + - machineusers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - zitadel.topmanage.com + resources: + - machineusers/status + verbs: + - get diff --git a/src/config/rbac/machineuser_viewer_role.yaml b/src/config/rbac/machineuser_viewer_role.yaml new file mode 100644 index 0000000..970fdb4 --- /dev/null +++ b/src/config/rbac/machineuser_viewer_role.yaml @@ -0,0 +1,27 @@ +# permissions for end users to view machineusers. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: machineuser-viewer-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: src + app.kubernetes.io/part-of: src + app.kubernetes.io/managed-by: kustomize + name: machineuser-viewer-role +rules: +- apiGroups: + - zitadel.topmanage.com + resources: + - machineusers + verbs: + - get + - list + - watch +- apiGroups: + - zitadel.topmanage.com + resources: + - machineusers/status + verbs: + - get diff --git a/src/config/rbac/oidcapp_editor_role.yaml b/src/config/rbac/oidcapp_editor_role.yaml new file mode 100644 index 0000000..2b8e3fd --- /dev/null +++ b/src/config/rbac/oidcapp_editor_role.yaml @@ -0,0 +1,31 @@ +# permissions for end users to edit oidcapps. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: oidcapp-editor-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: src + app.kubernetes.io/part-of: src + app.kubernetes.io/managed-by: kustomize + name: oidcapp-editor-role +rules: +- apiGroups: + - zitadel.topmanage.com + resources: + - oidcapps + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - zitadel.topmanage.com + resources: + - oidcapps/status + verbs: + - get diff --git a/src/config/rbac/oidcapp_viewer_role.yaml b/src/config/rbac/oidcapp_viewer_role.yaml new file mode 100644 index 0000000..8286752 --- /dev/null +++ b/src/config/rbac/oidcapp_viewer_role.yaml @@ -0,0 +1,27 @@ +# permissions for end users to view oidcapps. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: oidcapp-viewer-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: src + app.kubernetes.io/part-of: src + app.kubernetes.io/managed-by: kustomize + name: oidcapp-viewer-role +rules: +- apiGroups: + - zitadel.topmanage.com + resources: + - oidcapps + verbs: + - get + - list + - watch +- apiGroups: + - zitadel.topmanage.com + resources: + - oidcapps/status + verbs: + - get diff --git a/src/config/rbac/organization_editor_role.yaml b/src/config/rbac/organization_editor_role.yaml new file mode 100644 index 0000000..586654f --- /dev/null +++ b/src/config/rbac/organization_editor_role.yaml @@ -0,0 +1,31 @@ +# permissions for end users to edit organizations. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: organization-editor-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: src + app.kubernetes.io/part-of: src + app.kubernetes.io/managed-by: kustomize + name: organization-editor-role +rules: +- apiGroups: + - zitadel.topmanage.com + resources: + - organizations + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - zitadel.topmanage.com + resources: + - organizations/status + verbs: + - get diff --git a/src/config/rbac/organization_viewer_role.yaml b/src/config/rbac/organization_viewer_role.yaml new file mode 100644 index 0000000..b27740b --- /dev/null +++ b/src/config/rbac/organization_viewer_role.yaml @@ -0,0 +1,27 @@ +# permissions for end users to view organizations. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: organization-viewer-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: src + app.kubernetes.io/part-of: src + app.kubernetes.io/managed-by: kustomize + name: organization-viewer-role +rules: +- apiGroups: + - zitadel.topmanage.com + resources: + - organizations + verbs: + - get + - list + - watch +- apiGroups: + - zitadel.topmanage.com + resources: + - organizations/status + verbs: + - get diff --git a/src/config/rbac/project_editor_role.yaml b/src/config/rbac/project_editor_role.yaml new file mode 100644 index 0000000..c42c5e1 --- /dev/null +++ b/src/config/rbac/project_editor_role.yaml @@ -0,0 +1,31 @@ +# permissions for end users to edit projects. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: project-editor-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: src + app.kubernetes.io/part-of: src + app.kubernetes.io/managed-by: kustomize + name: project-editor-role +rules: +- apiGroups: + - zitadel.topmanage.com + resources: + - projects + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - zitadel.topmanage.com + resources: + - projects/status + verbs: + - get diff --git a/src/config/rbac/project_viewer_role.yaml b/src/config/rbac/project_viewer_role.yaml new file mode 100644 index 0000000..4270f83 --- /dev/null +++ b/src/config/rbac/project_viewer_role.yaml @@ -0,0 +1,27 @@ +# permissions for end users to view projects. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: project-viewer-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: src + app.kubernetes.io/part-of: src + app.kubernetes.io/managed-by: kustomize + name: project-viewer-role +rules: +- apiGroups: + - zitadel.topmanage.com + resources: + - projects + verbs: + - get + - list + - watch +- apiGroups: + - zitadel.topmanage.com + resources: + - projects/status + verbs: + - get diff --git a/src/config/rbac/role.yaml b/src/config/rbac/role.yaml new file mode 100644 index 0000000..8e3f4a9 --- /dev/null +++ b/src/config/rbac/role.yaml @@ -0,0 +1,328 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + name: manager-role +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - get + - list + - patch + - watch +- apiGroups: + - "" + resources: + - endpoints + verbs: + - create + - get + - list + - patch + - watch +- apiGroups: + - "" + resources: + - endpoints/restricted + verbs: + - create + - 
get + - list + - patch + - watch +- apiGroups: + - "" + resources: + - events + verbs: + - create + - list + - patch + - watch +- apiGroups: + - "" + resources: + - pods + verbs: + - delete + - get +- apiGroups: + - "" + resources: + - secrets + verbs: + - create + - list + - patch + - watch +- apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - list + - patch + - watch +- apiGroups: + - "" + resources: + - services + verbs: + - create + - list + - patch + - watch +- apiGroups: + - apps + resources: + - deployments + verbs: + - create + - list + - patch + - watch +- apiGroups: + - batch + resources: + - jobs + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - certificates.k8s.io + resources: + - certificatesigningrequests + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - certificates.k8s.io + resources: + - certificatesigningrequests/approval + verbs: + - update +- apiGroups: + - certificates.k8s.io + resources: + - certificatesigningrequests/status + verbs: + - get + - patch + - update +- apiGroups: + - crdb.cockroachlabs.com + resources: + - crdbclusters + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - crdb.cockroachlabs.com + resources: + - crdbclusters/finalizers + verbs: + - update +- apiGroups: + - crdb.cockroachlabs.com + resources: + - crdbclusters/status + verbs: + - get + - patch + - update +- apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - create + - list + - patch + - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterrolebindings + - rolebindings + - roles + verbs: + - create + - list + - patch + - watch +- apiGroups: + - zitadel.topmanage.com + resources: + - apiapps + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - zitadel.topmanage.com + resources: + - apiapps/finalizers + verbs: + - update +- apiGroups: + 
- zitadel.topmanage.com + resources: + - apiapps/status + verbs: + - get + - patch + - update +- apiGroups: + - zitadel.topmanage.com + resources: + - machineusers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - zitadel.topmanage.com + resources: + - machineusers/finalizers + verbs: + - update +- apiGroups: + - zitadel.topmanage.com + resources: + - machineusers/status + verbs: + - get + - patch + - update +- apiGroups: + - zitadel.topmanage.com + resources: + - oidcapps + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - zitadel.topmanage.com + resources: + - oidcapps/finalizers + verbs: + - update +- apiGroups: + - zitadel.topmanage.com + resources: + - oidcapps/status + verbs: + - get + - patch + - update +- apiGroups: + - zitadel.topmanage.com + resources: + - organizations + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - zitadel.topmanage.com + resources: + - organizations/finalizers + verbs: + - update +- apiGroups: + - zitadel.topmanage.com + resources: + - organizations/status + verbs: + - get + - patch + - update +- apiGroups: + - zitadel.topmanage.com + resources: + - projects + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - zitadel.topmanage.com + resources: + - projects/finalizers + verbs: + - update +- apiGroups: + - zitadel.topmanage.com + resources: + - projects/status + verbs: + - get + - patch + - update +- apiGroups: + - zitadel.topmanage.com + resources: + - zitadelclusters + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - zitadel.topmanage.com + resources: + - zitadelclusters/finalizers + verbs: + - update +- apiGroups: + - zitadel.topmanage.com + resources: + - zitadelclusters/status + verbs: + - get + - patch + - update diff --git a/src/config/rbac/role_binding.yaml b/src/config/rbac/role_binding.yaml new file 
mode 100644 index 0000000..4d224de --- /dev/null +++ b/src/config/rbac/role_binding.yaml @@ -0,0 +1,19 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/name: clusterrolebinding + app.kubernetes.io/instance: manager-rolebinding + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: src + app.kubernetes.io/part-of: src + app.kubernetes.io/managed-by: kustomize + name: manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: manager-role +subjects: +- kind: ServiceAccount + name: controller-manager + namespace: system diff --git a/src/config/rbac/service_account.yaml b/src/config/rbac/service_account.yaml new file mode 100644 index 0000000..a94f6f4 --- /dev/null +++ b/src/config/rbac/service_account.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/name: serviceaccount + app.kubernetes.io/instance: controller-manager-sa + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: src + app.kubernetes.io/part-of: src + app.kubernetes.io/managed-by: kustomize + name: controller-manager + namespace: system diff --git a/src/config/rbac/zitadelcluster_editor_role.yaml b/src/config/rbac/zitadelcluster_editor_role.yaml new file mode 100644 index 0000000..b8d3287 --- /dev/null +++ b/src/config/rbac/zitadelcluster_editor_role.yaml @@ -0,0 +1,31 @@ +# permissions for end users to edit zitadelclusters. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: zitadelcluster-editor-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: src + app.kubernetes.io/part-of: src + app.kubernetes.io/managed-by: kustomize + name: zitadelcluster-editor-role +rules: +- apiGroups: + - zitadel.topmanage.com + resources: + - zitadelclusters + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - zitadel.topmanage.com + resources: + - zitadelclusters/status + verbs: + - get diff --git a/src/config/rbac/zitadelcluster_viewer_role.yaml b/src/config/rbac/zitadelcluster_viewer_role.yaml new file mode 100644 index 0000000..5ad726d --- /dev/null +++ b/src/config/rbac/zitadelcluster_viewer_role.yaml @@ -0,0 +1,27 @@ +# permissions for end users to view zitadelclusters. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: zitadelcluster-viewer-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: src + app.kubernetes.io/part-of: src + app.kubernetes.io/managed-by: kustomize + name: zitadelcluster-viewer-role +rules: +- apiGroups: + - zitadel.topmanage.com + resources: + - zitadelclusters + verbs: + - get + - list + - watch +- apiGroups: + - zitadel.topmanage.com + resources: + - zitadelclusters/status + verbs: + - get diff --git a/src/config/samples/kustomization.yaml b/src/config/samples/kustomization.yaml new file mode 100644 index 0000000..fe16a01 --- /dev/null +++ b/src/config/samples/kustomization.yaml @@ -0,0 +1,9 @@ +## Append samples of your project ## +resources: +- zitadel_v1alpha1_zitadelcluster.yaml +- zitadel_v1alpha1_organization.yaml +- zitadel_v1alpha1_project.yaml +- zitadel_v1alpha1_oidcapp.yaml +- zitadel_v1alpha1_machineuser.yaml +- zitadel_v1alpha1_apiapp.yaml 
+#+kubebuilder:scaffold:manifestskustomizesamples diff --git a/src/config/samples/zitadel_v1alpha1_apiapp.yaml b/src/config/samples/zitadel_v1alpha1_apiapp.yaml new file mode 100644 index 0000000..3aea6ef --- /dev/null +++ b/src/config/samples/zitadel_v1alpha1_apiapp.yaml @@ -0,0 +1,12 @@ +apiVersion: zitadel.topmanage.com/v1alpha1 +kind: APIApp +metadata: + labels: + app.kubernetes.io/name: apiapp + app.kubernetes.io/instance: apiapp-sample + app.kubernetes.io/part-of: src + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/created-by: src + name: apiapp-sample +spec: + # TODO(user): Add fields here diff --git a/src/config/samples/zitadel_v1alpha1_machineuser.yaml b/src/config/samples/zitadel_v1alpha1_machineuser.yaml new file mode 100644 index 0000000..5625cfe --- /dev/null +++ b/src/config/samples/zitadel_v1alpha1_machineuser.yaml @@ -0,0 +1,12 @@ +apiVersion: zitadel.topmanage.com/v1alpha1 +kind: MachineUser +metadata: + labels: + app.kubernetes.io/name: machineuser + app.kubernetes.io/instance: machineuser-sample + app.kubernetes.io/part-of: src + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/created-by: src + name: machineuser-sample +spec: + # TODO(user): Add fields here diff --git a/src/config/samples/zitadel_v1alpha1_oidcapp.yaml b/src/config/samples/zitadel_v1alpha1_oidcapp.yaml new file mode 100644 index 0000000..f096e2c --- /dev/null +++ b/src/config/samples/zitadel_v1alpha1_oidcapp.yaml @@ -0,0 +1,12 @@ +apiVersion: zitadel.topmanage.com/v1alpha1 +kind: OIDCApp +metadata: + labels: + app.kubernetes.io/name: oidcapp + app.kubernetes.io/instance: oidcapp-sample + app.kubernetes.io/part-of: src + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/created-by: src + name: oidcapp-sample +spec: + # TODO(user): Add fields here diff --git a/src/config/samples/zitadel_v1alpha1_organization.yaml b/src/config/samples/zitadel_v1alpha1_organization.yaml new file mode 100644 index 0000000..bfeedd7 --- /dev/null +++ 
b/src/config/samples/zitadel_v1alpha1_organization.yaml @@ -0,0 +1,12 @@ +apiVersion: zitadel.topmanage.com/v1alpha1 +kind: Organization +metadata: + labels: + app.kubernetes.io/name: organization + app.kubernetes.io/instance: organization-sample + app.kubernetes.io/part-of: src + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/created-by: src + name: organization-sample +spec: + # TODO(user): Add fields here diff --git a/src/config/samples/zitadel_v1alpha1_project.yaml b/src/config/samples/zitadel_v1alpha1_project.yaml new file mode 100644 index 0000000..5be9337 --- /dev/null +++ b/src/config/samples/zitadel_v1alpha1_project.yaml @@ -0,0 +1,12 @@ +apiVersion: zitadel.topmanage.com/v1alpha1 +kind: Project +metadata: + labels: + app.kubernetes.io/name: project + app.kubernetes.io/instance: project-sample + app.kubernetes.io/part-of: src + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/created-by: src + name: project-sample +spec: + # TODO(user): Add fields here diff --git a/src/config/samples/zitadel_v1alpha1_zitadelcluster.yaml b/src/config/samples/zitadel_v1alpha1_zitadelcluster.yaml new file mode 100644 index 0000000..f9a06df --- /dev/null +++ b/src/config/samples/zitadel_v1alpha1_zitadelcluster.yaml @@ -0,0 +1,12 @@ +apiVersion: zitadel.topmanage.com/v1alpha1 +kind: ZitadelCluster +metadata: + labels: + app.kubernetes.io/name: zitadelcluster + app.kubernetes.io/instance: zitadelcluster-sample + app.kubernetes.io/part-of: src + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/created-by: src + name: zitadelcluster-sample +spec: + # TODO(user): Add fields here diff --git a/src/config/scorecard/bases/config.yaml b/src/config/scorecard/bases/config.yaml new file mode 100644 index 0000000..c770478 --- /dev/null +++ b/src/config/scorecard/bases/config.yaml @@ -0,0 +1,7 @@ +apiVersion: scorecard.operatorframework.io/v1alpha3 +kind: Configuration +metadata: + name: config +stages: +- parallel: true + tests: [] diff --git 
a/src/config/scorecard/kustomization.yaml b/src/config/scorecard/kustomization.yaml new file mode 100644 index 0000000..50cd2d0 --- /dev/null +++ b/src/config/scorecard/kustomization.yaml @@ -0,0 +1,16 @@ +resources: +- bases/config.yaml +patchesJson6902: +- path: patches/basic.config.yaml + target: + group: scorecard.operatorframework.io + version: v1alpha3 + kind: Configuration + name: config +- path: patches/olm.config.yaml + target: + group: scorecard.operatorframework.io + version: v1alpha3 + kind: Configuration + name: config +#+kubebuilder:scaffold:patchesJson6902 diff --git a/src/config/scorecard/patches/basic.config.yaml b/src/config/scorecard/patches/basic.config.yaml new file mode 100644 index 0000000..8aa4158 --- /dev/null +++ b/src/config/scorecard/patches/basic.config.yaml @@ -0,0 +1,10 @@ +- op: add + path: /stages/0/tests/- + value: + entrypoint: + - scorecard-test + - basic-check-spec + image: quay.io/operator-framework/scorecard-test:unknown + labels: + suite: basic + test: basic-check-spec-test diff --git a/src/config/scorecard/patches/olm.config.yaml b/src/config/scorecard/patches/olm.config.yaml new file mode 100644 index 0000000..47153a8 --- /dev/null +++ b/src/config/scorecard/patches/olm.config.yaml @@ -0,0 +1,50 @@ +- op: add + path: /stages/0/tests/- + value: + entrypoint: + - scorecard-test + - olm-bundle-validation + image: quay.io/operator-framework/scorecard-test:unknown + labels: + suite: olm + test: olm-bundle-validation-test +- op: add + path: /stages/0/tests/- + value: + entrypoint: + - scorecard-test + - olm-crds-have-validation + image: quay.io/operator-framework/scorecard-test:unknown + labels: + suite: olm + test: olm-crds-have-validation-test +- op: add + path: /stages/0/tests/- + value: + entrypoint: + - scorecard-test + - olm-crds-have-resources + image: quay.io/operator-framework/scorecard-test:unknown + labels: + suite: olm + test: olm-crds-have-resources-test +- op: add + path: /stages/0/tests/- + value: + entrypoint: + - 
scorecard-test + - olm-spec-descriptors + image: quay.io/operator-framework/scorecard-test:unknown + labels: + suite: olm + test: olm-spec-descriptors-test +- op: add + path: /stages/0/tests/- + value: + entrypoint: + - scorecard-test + - olm-status-descriptors + image: quay.io/operator-framework/scorecard-test:unknown + labels: + suite: olm + test: olm-status-descriptors-test diff --git a/src/go.mod b/src/go.mod new file mode 100644 index 0000000..7411773 --- /dev/null +++ b/src/go.mod @@ -0,0 +1,106 @@ +module bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src + +go 1.21 + +toolchain go1.21.5 + +require ( + github.com/cockroachdb/cockroach-operator v0.0.0-00010101000000-000000000000 + github.com/gorilla/schema v1.2.0 + github.com/hashicorp/go-multierror v1.1.1 + github.com/onsi/ginkgo/v2 v2.13.2 + github.com/onsi/gomega v1.29.0 + github.com/sethvargo/go-password v0.2.0 + github.com/zitadel/oidc v1.13.5 + github.com/zitadel/zitadel-go/v2 v2.2.3 + golang.org/x/exp v0.0.0-20240325151524-a685a6edb6d8 + golang.org/x/oauth2 v0.19.0 + google.golang.org/grpc v1.63.2 + google.golang.org/protobuf v1.33.0 + gopkg.in/square/go-jose.v2 v2.6.0 + k8s.io/api v0.29.0 + k8s.io/apimachinery v0.29.0 + k8s.io/client-go v9.0.0+incompatible + sigs.k8s.io/controller-runtime v0.16.3 +) + +replace ( + github.com/cockroachdb/cockroach-operator => github.com/HaimKortovich/cockroach-operator v0.0.0-20240314212554-9b6db51d3a78 + github.com/gin-gonic/gin v1.4.0 => github.com/gin-gonic/gin v1.7.0 + github.com/nats-io/nats-server/v2 v2.1.2 => github.com/nats-io/nats-server/v2 v2.2.0 + github.com/opencontainers/runc v1.0.0-rc91 => github.com/opencontainers/runc v1.0.2 + k8s.io/client-go => k8s.io/client-go v0.29.0 +) + +require ( + github.com/beorn7/perks v1.0.1 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/cockroachdb/errors v1.8.0 // indirect + github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f // indirect + 
github.com/cockroachdb/redact v1.0.6 // indirect + github.com/cockroachdb/sentry-go v0.6.1-cockroachdb.2 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect + github.com/envoyproxy/protoc-gen-validate v1.0.4 // indirect + github.com/evanphx/json-patch/v5 v5.6.0 // indirect + github.com/fsnotify/fsnotify v1.6.0 // indirect + github.com/go-logr/logr v1.4.1 // indirect + github.com/go-logr/zapr v1.2.4 // indirect + github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect + github.com/go-openapi/swag v0.22.3 // indirect + github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/google/gnostic-models v0.6.8 // indirect + github.com/google/go-cmp v0.6.0 // indirect + github.com/google/gofuzz v1.2.0 // indirect + github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/gorilla/securecookie v1.1.2 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 // indirect + github.com/hashicorp/errwrap v1.0.0 // indirect + github.com/imdario/mergo v0.3.12 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/kr/pretty v0.3.1 // indirect + github.com/kr/text v0.2.0 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/prometheus/client_golang v1.16.0 // indirect + 
github.com/prometheus/client_model v0.4.0 // indirect + github.com/prometheus/common v0.44.0 // indirect + github.com/prometheus/procfs v0.10.1 // indirect + github.com/rogpeppe/go-internal v1.11.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/stretchr/testify v1.9.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + go.uber.org/zap v1.25.0 // indirect + golang.org/x/crypto v0.21.0 // indirect + golang.org/x/net v0.23.0 // indirect + golang.org/x/sys v0.18.0 // indirect + golang.org/x/term v0.18.0 // indirect + golang.org/x/text v0.14.0 // indirect + golang.org/x/time v0.3.0 // indirect + golang.org/x/tools v0.19.0 // indirect + gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect + google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/apiextensions-apiserver v0.28.3 // indirect + k8s.io/component-base v0.28.3 // indirect + k8s.io/klog/v2 v2.110.1 // indirect + k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect + k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect + sigs.k8s.io/yaml v1.3.0 // indirect +) diff --git a/src/go.sum b/src/go.sum new file mode 100644 index 0000000..d30f3d8 --- /dev/null +++ b/src/go.sum @@ -0,0 +1,499 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +emperror.dev/errors v0.8.0 h1:4lycVEx0sdJkwDUfQ9pdu6SR0x7rgympt5f4+ok8jDk= +emperror.dev/errors v0.8.0/go.mod h1:YcRvLPh626Ubn2xqtoprejnA5nFha+TJ+2vew48kWuE= +github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod 
h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/CloudyKit/fastprinter v0.0.0-20170127035650-74b38d55f37a/go.mod h1:EFZQ978U7x8IRnstaskI3IysnWY5Ao3QgZUKOXlsAdw= +github.com/CloudyKit/jet v2.1.3-0.20180809161101-62edd43e4f88+incompatible/go.mod h1:HPYO+50pSWkPoj9Q/eq0aRGByCL6ScRlUmiEX5Zgm+w= +github.com/HaimKortovich/cockroach-operator v0.0.0-20240314212554-9b6db51d3a78 h1:rRSec2ojZGn0gklqK4gesawQb+IyIORtzDkPFlOcPmo= +github.com/HaimKortovich/cockroach-operator v0.0.0-20240314212554-9b6db51d3a78/go.mod h1:WhFXNu2OvcYz3zG3LpZI87EjKNLdk9JLbEcd8qDItTw= +github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY= +github.com/Joker/jade v1.0.1-0.20190614124447-d475f43051e7/go.mod h1:6E6s8o2AE4KhCrqr6GRJjdC/gNfTdxkIXvuGZZda2VM= +github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0= +github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= +github.com/banzaicloud/k8s-objectmatcher v1.8.0 h1:Nugn25elKtPMTA2br+JgHNeSQ04sc05MDPmpJnd1N2A= +github.com/banzaicloud/k8s-objectmatcher v1.8.0/go.mod h1:p2LSNAjlECf07fbhDyebTkPUIYnU05G+WfGgkTmgeMg= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= +github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/cenkalti/backoff v2.2.1+incompatible 
h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= +github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cockroachdb/datadriven v1.0.0/go.mod h1:5Ib8Meh+jk1RlHIXej6Pzevx/NLlNvQB9pmSBZErGA4= +github.com/cockroachdb/errors v1.6.1/go.mod h1:tm6FTP5G81vwJ5lC0SizQo374JNCOPrHyXGitRJoDqM= +github.com/cockroachdb/errors v1.8.0 h1:4IrYIc17U7TSuLYlol83tc7ZKmJIs8PbJ/YE+bzoyik= +github.com/cockroachdb/errors v1.8.0/go.mod h1:m/IWRCPXYZ6TvLLDuC0kfLR1pp/+BiZ0h16WHaBMRMM= +github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f h1:o/kfcElHqOiXqcou5a3rIlMc7oJbMQkeLk0VQJ7zgqY= +github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= +github.com/cockroachdb/redact v1.0.6 h1:W34uRRyNR4dlZFd0MibhNELsZSgMkl52uRV/tA1xToY= +github.com/cockroachdb/redact v1.0.6/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= +github.com/cockroachdb/sentry-go v0.6.1-cockroachdb.2 h1:IKgmqgMQlVJIZj19CdocBeSfSaiCbEBZGKODaixqtHM= +github.com/cockroachdb/sentry-go v0.6.1-cockroachdb.2/go.mod h1:8BT+cPK6xvFOcRlk0R8eg+OTkcqI6baNH4xAkpiYVvQ= +github.com/codegangsta/inject 
v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A= +github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod 
h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= +github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw= +github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= +github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww= +github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= +github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/flosch/pongo2 v0.0.0-20190707114632-bbf5a6c351f4/go.mod h1:T9YF2M40nIgbVgp3rreNmTged+9HrbNTIQf1PsaIiTA= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= +github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc= +github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= +github.com/gin-gonic/gin v1.7.0/go.mod h1:jD2toBW3GZUr5UMcdrwQA10I7RuaFOl/SGeDjXkfUtY= +github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= +github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w= +github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= +github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.1 
h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/zapr v1.2.4 h1:QHVo+6stLbfJmYGkQ7uGHUCu5hnAFAj6mDe6Ea0SeOo= +github.com/go-logr/zapr v1.2.4/go.mod h1:FyHWQIzQORZ0QVE1BtVHv3cKtNLuXsbNLtpuhNapBOA= +github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8= +github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= +github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= +github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= +github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= +github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= +github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a/go.mod 
h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= +github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/gogo/status v1.1.0/go.mod h1:BFv9nrluPLmrS0EmGVvLaPNmRosr9KapBYd5/hpY1WM= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= 
+github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/gomodule/redigo v1.7.1-0.20190724094224-574c33c3df38/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/schema v1.2.0 h1:YufUaxZYCKGFuAq3c96BOhjgd5nmXiOY9NGzF247Tsc= +github.com/gorilla/schema v1.2.0/go.mod 
h1:kgLaKoK1FELgZqMAVxx/5cbj0kT+57qxUrAlIO2eleU= +github.com/gorilla/securecookie v1.1.2 h1:YCIWL56dvtr73r6715mJs5ZvhtnY73hBvEF8kXD8ePA= +github.com/gorilla/securecookie v1.1.2/go.mod h1:NfCASbcHqRSY+3a8tlWJwsQap2VX5pwzwo4h3eOamfo= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 h1:/c3QmbOGMGTOumP2iT/rCwB7b0QDGLKzqOmktBjT+Is= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1/go.mod h1:5SN9VR2LTsRFsrEC6FHgRbTWrTHu6tqPeKxEQv15giM= +github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/hydrogen18/memlistener v0.0.0-20141126152155-54553eb933fb/go.mod h1:qEIFzExnS6016fRpRfxrExeVn2gbClQA99gQhnIcdhE= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= +github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imkira/go-interpol v1.1.0/go.mod h1:z0h2/2T3XF8kyEPpRgJ3kmNv+C43p+I/CoI+jC3w2iA= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/iris-contrib/blackfriday 
v2.0.0+incompatible/go.mod h1:UzZ2bDEoaSGPbkg6SAB4att1aAwTmVIx/5gCVqeyUdI= +github.com/iris-contrib/go.uuid v2.0.0+incompatible/go.mod h1:iz2lgM/1UnEf1kP0L/+fafWORmlnuysV2EMP8MW+qe0= +github.com/iris-contrib/i18n v0.0.0-20171121225848-987a633949d0/go.mod h1:pMCz62A0xJL6I+umB2YTlFRwWXaDFA0jy+5HzGiJjqI= +github.com/iris-contrib/schema v0.0.1/go.mod h1:urYA3uvUNG1TIIjOSCzHr9/LmbQo8LrOcOqfqxa4hXw= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/juju/errors v0.0.0-20181118221551-089d3ea4e4d5/go.mod h1:W54LbzXuIE0boCoNJfwqpmkKJ1O4TCTZMetAt6jGk7Q= +github.com/juju/loggo v0.0.0-20180524022052-584905176618/go.mod h1:vgyd7OREkbtVEN/8IXZe5Ooef3LQePvuBm9UWj6ZL8U= +github.com/juju/testing v0.0.0-20180920084828-472a3e8b2073/go.mod h1:63prj8cnj0tU0S9OHjGJn+b1h0ZghCndfnbQolrYTwA= +github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k= +github.com/kataras/golog v0.0.9/go.mod h1:12HJgwBIZFNGL0EJnMRhmvGA0PQGx8VFwrZtM4CqbAk= +github.com/kataras/iris/v12 v12.0.1/go.mod h1:udK4vLQKkdDqMGJJVd/msuMtN6hpYJhg/lSzuxjhO+U= +github.com/kataras/neffos v0.0.10/go.mod h1:ZYmJC07hQPW67eKuzlfY7SO3bC0mw83A3j6im82hfqw= +github.com/kataras/pio v0.0.0-20190103105442-ea782b38602d/go.mod h1:NV88laa9UiiDuX9AhMbDPkGYSPugBOV6yTZB1l2K9Z0= +github.com/kisielk/errcheck 
v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.8.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.9.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/labstack/echo/v4 v4.1.11/go.mod h1:i541M3Fj6f76NZtHSj7TXnyM8n2gaodfvfxNnFqi74g= +github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= +github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= 
+github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= +github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/mediocregopher/mediocre-go-lib v0.0.0-20181029021733-cb65787f37ed/go.mod h1:dSsfyI2zABAdhcbvkXqgxOxrCsbYeHCPgrZkku60dSg= +github.com/mediocregopher/radix/v3 v3.3.0/go.mod h1:EmfVyvspXz1uZEyPBMyGK+kjWiKQGvsUt6O3Pj+LDCQ= +github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= +github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/moul/http2curl v1.0.0/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ= +github.com/munnerz/goautoneg 
v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/nats-io/nats.go v1.8.1/go.mod h1:BrFz9vVn0fU3AcH9Vn4Kd7W0NpJ651tD5omQ3M8LwxM= +github.com/nats-io/nkeys v0.0.2/go.mod h1:dab7URMsZm6Z/jp9Z5UGa87Uutgc2mVpXLC4B7TDb/4= +github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.13.0/go.mod h1:+REjRxOmWfHCjfv9TTWB1jD1Frx4XydAD3zm1lskyM0= +github.com/onsi/ginkgo/v2 v2.13.2 h1:Bi2gGVkfn6gQcjNjZJVO8Gf0FHzMPf2phUei9tejVMs= +github.com/onsi/ginkgo/v2 v2.13.2/go.mod h1:XStQ8QcGwLyF4HdfcZB8SFOS/MWCgDuXMSBe6zrvLgM= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.29.0 h1:KIA/t2t5UBzoirT4H9tsML45GEbo3ouUnBHsCfD2tVg= +github.com/onsi/gomega v1.29.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= +github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.8.1/go.mod 
h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= +github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= +github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= +github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= +github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= +github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= +github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= +github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw= +github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/sethvargo/go-password v0.2.0 
h1:BTDl4CC/gjf/axHMaDQtw507ogrXLci6XRiLc7i/UHI= +github.com/sethvargo/go-password v0.2.0/go.mod h1:Ym4Mr9JXLBycr02MFuVQ/0JHidNetSgbzutTr3zsYXE= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasthttp v1.6.0/go.mod h1:FstJa9V+Pj9vQ7OJie2qMHdwemEDaDiSdBnvPM1Su9w= +github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= +github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0/go.mod h1:/LWChgwKmvncFJFHJ7Gvn9wZArjbV5/FppcK2fKk/tI= +github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg= +github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM= +github.com/yudai/pp v2.0.1+incompatible/go.mod 
h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/zitadel/oidc v1.13.5 h1:7jhh68NGZitLqwLiVU9Dtwa4IraJPFF1vS+4UupO93U= +github.com/zitadel/oidc v1.13.5/go.mod h1:rHs1DhU3Sv3tnI6bQRVlFa3u0lCwtR7S21WHY+yXgPA= +github.com/zitadel/zitadel-go/v2 v2.2.3 h1:A7XDaTQSkaMz4U/v0Il/c+POp946PowrotNDlOfQU1k= +github.com/zitadel/zitadel-go/v2 v2.2.3/go.mod h1:8cdeqkMW5nBqQLU8U3Ww5f6B92d9YkHeMd8Dhc+PpUs= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= +go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= +go.uber.org/zap v1.25.0 h1:4Hvk6GtkucQ790dqmj7l1eEnRdKm3k3ZUrUMS2d5+5c= +go.uber.org/zap v1.25.0/go.mod h1:JIAUzQIH94IC4fOJQm7gMmBJP5k7wQfdcnYdPoEXJYk= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod 
h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= +golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20240325151524-a685a6edb6d8 h1:aAcj0Da7eBAtrTp03QXWvm88pSyOt+UgdZw2BFZ+lEw= +golang.org/x/exp v0.0.0-20240325151524-a685a6edb6d8/go.mod h1:CQ1k9gNrJ50XIzaKCRR2hssIjF07kZFEiieALBM/ARQ= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190327091125-710a502c58a2/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net 
v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= +golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.19.0 h1:9+E/EZBCbTLNrbN35fHv/a/d/mOBatymz1zbtQrXpIg= +golang.org/x/oauth2 v0.19.0/go.mod h1:vYi7skDa1x015PmRRYZ7+s1cWyPgrPiSYRe4rnsexc8= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= +golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181221001348-537d06c36207/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190327201419-c70d86f8b7cf/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools 
v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.19.0 h1:tfGCXNR1OsFG+sVdLAitlpjAvD/I6dHDKnYrpEZUHkw= +golang.org/x/tools v0.19.0/go.mod h1:qoJWxmGSIBmAeriMx19ogtrEPrGtDbPK634QFIcLAhc= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= +gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180518175338-11a468237815/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de 
h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY= +google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo= +google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de h1:jFNzHPIeuzhdRwVhbZdiym9q0ory/xY3sA+v2wPg8I0= +google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:5iCWqnniDlqZHrd3neWVTOwvh/v6s3232omMecelax8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de h1:cZGRis4/ot9uVm639a+rHCUaG0JJHEsdyzSQTMX+suY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:H4O17MA/PE9BsGx3w+a+W2VOLLD1Qf7oJneAoU6WktY= +google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM= +google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.33.0 
h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= +gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI= +gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod 
h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +k8s.io/api v0.29.0 h1:NiCdQMY1QOp1H8lfRyeEf8eOwV6+0xA6XEE44ohDX2A= +k8s.io/api v0.29.0/go.mod h1:sdVmXoz2Bo/cb77Pxi71IPTSErEW32xa4aXwKH7gfBA= +k8s.io/apiextensions-apiserver v0.28.3 h1:Od7DEnhXHnHPZG+W9I97/fSQkVpVPQx2diy+2EtmY08= +k8s.io/apiextensions-apiserver v0.28.3/go.mod h1:NE1XJZ4On0hS11aWWJUTNkmVB03j9LM7gJSisbRt8Lc= +k8s.io/apimachinery v0.29.0 h1:+ACVktwyicPz0oc6MTMLwa2Pw3ouLAfAon1wPLtG48o= +k8s.io/apimachinery v0.29.0/go.mod h1:eVBxQ/cwiJxH58eK/jd/vAk4mrxmVlnpBH5J2GbMeis= +k8s.io/client-go v0.29.0 h1:KmlDtFcrdUzOYrBhXHgKw5ycWzc3ryPX5mQe0SkG3y8= +k8s.io/client-go v0.29.0/go.mod h1:yLkXH4HKMAywcrD82KMSmfYg2DlE8mepPR4JGSo5n38= +k8s.io/component-base v0.28.3 h1:rDy68eHKxq/80RiMb2Ld/tbH8uAE75JdCqJyi6lXMzI= +k8s.io/component-base v0.28.3/go.mod h1:fDJ6vpVNSk6cRo5wmDa6eKIG7UlIQkaFmZN2fYgIUD8= +k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= +k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/controller-runtime v0.16.3 h1:2TuvuokmfXvDUamSx1SuAOO3eTyye+47mJCigwG62c4= +sigs.k8s.io/controller-runtime v0.16.3/go.mod h1:j7bialYoSn142nv9sCOJmQgDXQXxnroFU4VnX/brVJ0= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod 
h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= +sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/src/hack/boilerplate.go.txt b/src/hack/boilerplate.go.txt new file mode 100644 index 0000000..ff72ff2 --- /dev/null +++ b/src/hack/boilerplate.go.txt @@ -0,0 +1,15 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ \ No newline at end of file diff --git a/src/internal/controller/apiapp_controller.go b/src/internal/controller/apiapp_controller.go new file mode 100644 index 0000000..f090878 --- /dev/null +++ b/src/internal/controller/apiapp_controller.go @@ -0,0 +1,281 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controller + +import ( + "context" + "encoding/json" + "fmt" + "strings" + "time" + + zitadelv1alpha1 "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/api/v1alpha1" + "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/pkg/builder" + condition "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/pkg/condition" + "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/pkg/controller/zitadel" + "github.com/zitadel/zitadel-go/v2/pkg/client/management" + "github.com/zitadel/zitadel-go/v2/pkg/client/middleware" + app "github.com/zitadel/zitadel-go/v2/pkg/client/zitadel/app" + "github.com/zitadel/zitadel-go/v2/pkg/client/zitadel/authn" + pb "github.com/zitadel/zitadel-go/v2/pkg/client/zitadel/management" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/workqueue" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + ctrlClient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" +) + +// APIAppReconciler reconciles a APIApp object +type APIAppReconciler struct { + client.Client + RefResolver *zitadelv1alpha1.RefResolver + ConditionReady *condition.Ready + RequeueInterval time.Duration + Builder *builder.Builder +} + +func NewAPIAppReconciler(client client.Client, refResolver *zitadelv1alpha1.RefResolver, builder *builder.Builder, conditionReady *condition.Ready, + requeueInterval time.Duration) *APIAppReconciler { + return &APIAppReconciler{ + Client: client, + RefResolver: refResolver, + ConditionReady: conditionReady, + RequeueInterval: requeueInterval, + Builder: builder, + } +} + +//+kubebuilder:rbac:groups=zitadel.topmanage.com,resources=apiapps,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=zitadel.topmanage.com,resources=apiapps/status,verbs=get;update;patch 
+//+kubebuilder:rbac:groups=zitadel.topmanage.com,resources=apiapps/finalizers,verbs=update + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. +func (r *APIAppReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + var APIApp zitadelv1alpha1.APIApp + if err := r.Get(ctx, req.NamespacedName, &APIApp); err != nil { + return ctrl.Result{}, client.IgnoreNotFound(err) + } + wr := newWrappedAPIAppReconciler(r.Client, r.RefResolver, r.Builder, &APIApp) + wf := newWrappedAPIAppFinalizer(r.Client, &APIApp, r.RefResolver) + tf := zitadel.NewZitadelFinalizer(r.Client, wf) + tr := zitadel.NewZitadelReconciler(r.Client, r.ConditionReady, wr, tf, r.RequeueInterval) + + result, err := tr.Reconcile(ctx, &APIApp) + if err != nil { + return result, fmt.Errorf("error reconciling in APIAppReconciler: %v", err) + } + return result, nil +} + +type wrappedAPIAppReconciler struct { + client.Client + refResolver *zitadelv1alpha1.RefResolver + APIApp *zitadelv1alpha1.APIApp + Builder *builder.Builder +} + +func newWrappedAPIAppReconciler(client client.Client, refResolver *zitadelv1alpha1.RefResolver, builder *builder.Builder, + APIApp *zitadelv1alpha1.APIApp) zitadel.WrappedReconciler { + return &wrappedAPIAppReconciler{ + Client: client, + refResolver: refResolver, + APIApp: APIApp, + Builder: builder, + } +} + +type apiAppReoncilePhase struct { + Name string + Reconcile func(context.Context, *management.Client) error +} + +func (wr *wrappedAPIAppReconciler) Reconcile(ctx context.Context, ztdClient *management.Client) error { + phases := []projectReconcilePhase{ + { + Name: "apiapp", + Reconcile: wr.reconcileApp, + }, + { + Name: "keys", + Reconcile: wr.reconcileKeys, + }, + } + for _, p := range phases { + err := p.Reconcile(ctx, ztdClient) + if err != nil { + return err + } + } + return nil +} + +func (wr *wrappedAPIAppReconciler) reconcileApp(ctx 
context.Context, ztdClient *management.Client) error { + org, err := wr.APIApp.Organization(ctx, wr.refResolver) + if err != nil { + return err + } + project, err := wr.APIApp.Project(ctx, wr.refResolver) + if err != nil { + return err + } + if wr.APIApp.Status.AppId != "" { + appResp, err := ztdClient.GetAppByID(middleware.SetOrgID(ctx, org.Status.OrgId), &pb.GetAppByIDRequest{ + ProjectId: project.Status.ProjectId, + AppId: string(wr.APIApp.Status.AppId), + }) + if err != nil { + return fmt.Errorf("Error getting APIApp: %v", err) + } + if appResp.App != nil { + _, err := ztdClient.UpdateAPIAppConfig(middleware.SetOrgID(ctx, org.Status.OrgId), &pb.UpdateAPIAppConfigRequest{ProjectId: project.Status.ProjectId, AppId: wr.APIApp.Status.AppId, + AuthMethodType: app.APIAuthMethodType(app.APIAuthMethodType_value[wr.APIApp.Spec.AuthMethodType]), + }) + if err != nil { + if !strings.Contains(err.Error(), "No changes") { + return fmt.Errorf("Error updating APIApp: %v", err) + } + } + return nil + } + } + + resp, err := ztdClient.AddAPIApp(middleware.SetOrgID(ctx, org.Status.OrgId), + &pb.AddAPIAppRequest{ + Name: wr.APIApp.Name, + ProjectId: project.Status.ProjectId, + AuthMethodType: app.APIAuthMethodType(app.APIAuthMethodType_value[wr.APIApp.Spec.AuthMethodType]), + }, + ) + if err != nil { + if strings.Contains(err.Error(), "AlreadyExists") { + return nil + } + return fmt.Errorf("error creating APIApp in Zitadel: %v", err) + } + key := types.NamespacedName{ + Name: wr.APIApp.Name + "-client-secret", + Namespace: wr.APIApp.Namespace, + } + + secretData := map[string][]byte{"client-secret": []byte(resp.ClientSecret)} + secret, err := wr.Builder.BuildSecret(builder.SecretOpts{Immutable: false, Zitadel: nil, Key: key, Data: secretData}, wr.APIApp) + if err != nil { + return fmt.Errorf("error building Secret: %v", err) + } + if err := wr.Create(ctx, secret); err != nil { + return fmt.Errorf("error creating Client-secret Secret: %v", err) + } + patch := 
ctrlClient.MergeFrom(wr.APIApp.DeepCopy()) + wr.APIApp.Status.AppId = resp.AppId + wr.APIApp.Status.ClientId = resp.ClientId + return wr.Client.Status().Patch(ctx, wr.APIApp, patch) +} + +type Key struct { + Type string `json:"type"` + KeyID string `json:"keyId"` + Key string `json:"key"` + AppID string `json:"appId"` + ClientID string `json:"clientId"` +} + +func (wr *wrappedAPIAppReconciler) reconcileKeys(ctx context.Context, ztdClient *management.Client) error { + if wr.APIApp.Spec.AuthMethodType == "API_AUTH_METHOD_TYPE_PRIVATE_KEY_JWT" { + org, err := wr.APIApp.Organization(ctx, wr.refResolver) + if err != nil { + return err + } + project, err := wr.APIApp.Project(ctx, wr.refResolver) + if err != nil { + return err + } + if wr.APIApp.Status.KeyId != "" { + appKey, err := ztdClient.GetAppKey(middleware.SetOrgID(ctx, org.Status.OrgId), &pb.GetAppKeyRequest{ + ProjectId: project.Status.ProjectId, + AppId: wr.APIApp.Status.AppId, + KeyId: wr.APIApp.Status.KeyId, + }) + if err != nil { + if !strings.Contains(err.Error(), "not found") { + return fmt.Errorf("Could not get key: %v", err) + } + } + if appKey.Key != nil { + return nil + } + } + resp, err := ztdClient.AddAppKey(middleware.SetOrgID(ctx, org.Status.OrgId), &pb.AddAppKeyRequest{ + ProjectId: project.Status.ProjectId, + AppId: wr.APIApp.Status.AppId, + Type: authn.KeyType_KEY_TYPE_JSON, + ExpirationDate: nil, + }) + + if err != nil { + return fmt.Errorf("Error adding Key to app: %v", err) + } + + key := types.NamespacedName{ + Name: wr.APIApp.Name + "-privatekey-secret", + Namespace: wr.APIApp.Namespace, + } + var jsonKey Key + if err = json.Unmarshal(resp.KeyDetails, &jsonKey); err != nil { + return fmt.Errorf("Could not unmarshal key details: %v", err) + } + secretData := map[string][]byte{ + "clientId": []byte(jsonKey.ClientID), + "type": []byte(jsonKey.Type), + "keyId": []byte(jsonKey.KeyID), + "appId": []byte(jsonKey.AppID), + "key": []byte(jsonKey.Key), + } + secret, err := 
wr.Builder.BuildSecret(builder.SecretOpts{Immutable: false, Zitadel: nil, Key: key, Data: secretData}, wr.APIApp) + if err != nil { + return fmt.Errorf("error building Secret: %v", err) + } + if err := wr.Create(ctx, secret); err != nil { + return fmt.Errorf("error creating private-key Secret: %v", err) + } + patch := ctrlClient.MergeFrom(wr.APIApp.DeepCopy()) + wr.APIApp.Status.KeyId = resp.Id + return wr.Client.Status().Patch(ctx, wr.APIApp, patch) + } + return nil +} + +func (wr *wrappedAPIAppReconciler) PatchStatus(ctx context.Context, patcher condition.Patcher) error { + patch := client.MergeFrom(wr.APIApp.DeepCopy()) + patcher(&wr.APIApp.Status) + + if err := wr.Client.Status().Patch(ctx, wr.APIApp, patch); err != nil { + return fmt.Errorf("error patching APIApp status: %v", err) + } + return nil +} + +// SetupWithManager sets up the controller with the Manager. +func (r *APIAppReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&zitadelv1alpha1.APIApp{}). + Owns(&corev1.Secret{}). + WithOptions(controller.Options{RateLimiter: workqueue.NewItemExponentialFailureRateLimiter(time.Millisecond*500, time.Minute*3)}). 
+ Complete(r) +} diff --git a/src/internal/controller/apiapp_controller_finalizer.go b/src/internal/controller/apiapp_controller_finalizer.go new file mode 100644 index 0000000..4ab174b --- /dev/null +++ b/src/internal/controller/apiapp_controller_finalizer.go @@ -0,0 +1,91 @@ +package controller + +import ( + "strings" + + zitadelv1alpha1 "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/api/v1alpha1" + "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/pkg/controller/zitadel" + + "context" + "fmt" + + "github.com/zitadel/zitadel-go/v2/pkg/client/management" + "github.com/zitadel/zitadel-go/v2/pkg/client/middleware" + pb "github.com/zitadel/zitadel-go/v2/pkg/client/zitadel/management" + "sigs.k8s.io/controller-runtime/pkg/client" + ctrlClient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" +) + +const ( + APIAppFinalizerName = "apiapp.zitadel.topmanage.com/apiapp" +) + +type wrappedAPIAppFinalizer struct { + client.Client + APIApp *zitadelv1alpha1.APIApp + refresolver *zitadelv1alpha1.RefResolver +} + +func newWrappedAPIAppFinalizer(client client.Client, APIApp *zitadelv1alpha1.APIApp, refresolver *zitadelv1alpha1.RefResolver) zitadel.WrappedFinalizer { + return &wrappedAPIAppFinalizer{ + Client: client, + APIApp: APIApp, + refresolver: refresolver, + } +} + +func (wf *wrappedAPIAppFinalizer) AddFinalizer(ctx context.Context) error { + if wf.ContainsFinalizer() { + return nil + } + return wf.patch(ctx, wf.APIApp, func(APIApp *zitadelv1alpha1.APIApp) { + controllerutil.AddFinalizer(APIApp, APIAppFinalizerName) + }) +} + +func (wf *wrappedAPIAppFinalizer) RemoveFinalizer(ctx context.Context) error { + if !wf.ContainsFinalizer() { + return nil + } + return wf.patch(ctx, wf.APIApp, func(APIApp *zitadelv1alpha1.APIApp) { + controllerutil.RemoveFinalizer(wf.APIApp, APIAppFinalizerName) + }) +} + +func (wr *wrappedAPIAppFinalizer) ContainsFinalizer() bool { + return 
controllerutil.ContainsFinalizer(wr.APIApp, APIAppFinalizerName) +} + +func (wf *wrappedAPIAppFinalizer) Reconcile(ctx context.Context, ztdClient *management.Client) error { + if wf.APIApp.Status.AppId == "" { + return nil + } + org, err := wf.APIApp.Organization(ctx, wf.refresolver) + if err != nil { + return err + } + project, err := wf.APIApp.Project(ctx, wf.refresolver) + if err != nil { + return err + } + _, err = ztdClient.RemoveApp(middleware.SetOrgID(ctx, org.Status.OrgId), &pb.RemoveAppRequest{ProjectId: project.Status.ProjectId, AppId: wf.APIApp.Status.AppId}) + if err != nil { + if strings.Contains(err.Error(), "doesn't exist") { + return nil + } + return err + } + return nil +} + +func (wr *wrappedAPIAppFinalizer) patch(ctx context.Context, APIApp *zitadelv1alpha1.APIApp, + patchFn func(*zitadelv1alpha1.APIApp)) error { + patch := ctrlClient.MergeFrom(APIApp.DeepCopy()) + patchFn(APIApp) + + if err := wr.Client.Patch(ctx, APIApp, patch); err != nil { + return fmt.Errorf("error patching APIApp finalizer: %v", err) + } + return nil +} diff --git a/src/internal/controller/machineuser_controller.go b/src/internal/controller/machineuser_controller.go new file mode 100644 index 0000000..7e81bd6 --- /dev/null +++ b/src/internal/controller/machineuser_controller.go @@ -0,0 +1,439 @@ +package controller + +import ( + "context" + "encoding/json" + "fmt" + "reflect" + "sort" + "strings" + "time" + + zitadelv1alpha1 "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/api/v1alpha1" + "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/pkg/builder" + condition "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/pkg/condition" + "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/pkg/controller/zitadel" + "github.com/zitadel/zitadel-go/v2/pkg/client/management" + "github.com/zitadel/zitadel-go/v2/pkg/client/middleware" + "github.com/zitadel/zitadel-go/v2/pkg/client/zitadel/authn" + pb 
"github.com/zitadel/zitadel-go/v2/pkg/client/zitadel/management" + object "github.com/zitadel/zitadel-go/v2/pkg/client/zitadel/object" + project "github.com/zitadel/zitadel-go/v2/pkg/client/zitadel/project" + user "github.com/zitadel/zitadel-go/v2/pkg/client/zitadel/user" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/workqueue" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + clientpkg "sigs.k8s.io/controller-runtime/pkg/client" + ctrlClient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" +) + +// MachineUserReconciler reconciles a MachineUser object +type MachineUserReconciler struct { + client.Client + RefResolver *zitadelv1alpha1.RefResolver + ConditionReady *condition.Ready + RequeueInterval time.Duration + Builder *builder.Builder +} + +func NewMachineUserReconciler(client client.Client, refResolver *zitadelv1alpha1.RefResolver, builder *builder.Builder, conditionReady *condition.Ready, + requeueInterval time.Duration) *MachineUserReconciler { + return &MachineUserReconciler{ + Client: client, + RefResolver: refResolver, + ConditionReady: conditionReady, + RequeueInterval: requeueInterval, + Builder: builder, + } +} + +//+kubebuilder:rbac:groups=zitadel.topmanage.com,resources=machineusers,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=zitadel.topmanage.com,resources=machineusers/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=zitadel.topmanage.com,resources=machineusers/finalizers,verbs=update + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. 
+func (r *MachineUserReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + var MachineUser zitadelv1alpha1.MachineUser + if err := r.Get(ctx, req.NamespacedName, &MachineUser); err != nil { + return ctrl.Result{}, client.IgnoreNotFound(err) + } + wr := newWrappedMachineUserReconciler(r.Client, r.RefResolver, r.Builder, &MachineUser) + wf := newWrappedMachineUserFinalizer(r.Client, &MachineUser) + tf := zitadel.NewZitadelFinalizer(r.Client, wf) + tr := zitadel.NewZitadelReconciler(r.Client, r.ConditionReady, wr, tf, r.RequeueInterval) + + result, err := tr.Reconcile(ctx, &MachineUser) + if err != nil { + return result, fmt.Errorf("error reconciling in MachineUserReconciler: %v", err) + } + return result, nil +} + +type wrappedMachineUserReconciler struct { + client.Client + refResolver *zitadelv1alpha1.RefResolver + MachineUser *zitadelv1alpha1.MachineUser + Builder *builder.Builder +} + +func newWrappedMachineUserReconciler(client client.Client, refResolver *zitadelv1alpha1.RefResolver, builder *builder.Builder, + MachineUser *zitadelv1alpha1.MachineUser) zitadel.WrappedReconciler { + return &wrappedMachineUserReconciler{ + Client: client, + refResolver: refResolver, + MachineUser: MachineUser, + Builder: builder, + } +} + +type machineUserReconcilePhase struct { + Name string + Reconcile func(context.Context, *management.Client) error +} + +func (wr *wrappedMachineUserReconciler) Reconcile(ctx context.Context, ztdClient *management.Client) error { + phases := []machineUserReconcilePhase{ + { + Name: "machineUser", + Reconcile: wr.reconcileMachineUser, + }, + { + Name: "usergrants", + Reconcile: wr.reconcileUserGrants, + }, + { + Name: "pat", + Reconcile: wr.reconcilePAT, + }, + { + Name: "jwt", + Reconcile: wr.reconcileJWT, + }, + } + for _, p := range phases { + err := p.Reconcile(ctx, ztdClient) + if err != nil { + return err + } + } + return nil +} + +func (wr *wrappedMachineUserReconciler) reconcileMachineUser(ctx 
context.Context, ztdClient *management.Client) error { + org, err := wr.refResolver.OrganizationRef(ctx, &wr.MachineUser.Spec.OrganizationRef, wr.MachineUser.Namespace) + if err != nil { + return err + } + zitadel, err := wr.refResolver.ZitadelCluster(ctx, &org.Spec.ZitadelClusterRef, wr.MachineUser.Namespace) + if err != nil { + return err + } + machineUser, err := ztdClient.GetUserByLoginNameGlobal(ctx, &pb.GetUserByLoginNameGlobalRequest{ + LoginName: strings.ToLower(fmt.Sprintf("%s@%s.%s", wr.MachineUser.Name, org.Name, zitadel.Spec.Host)), + }) + if err != nil { + if !strings.Contains(err.Error(), "could not be found") { + return fmt.Errorf("Error getting machineuser: %v", err) + } + } + + var userid string + if machineUser == nil { + resp, err := ztdClient.AddMachineUser(middleware.SetOrgID(ctx, org.Status.OrgId), + &pb.AddMachineUserRequest{ + Name: wr.MachineUser.Name, + UserName: wr.MachineUser.Name, + Description: wr.MachineUser.Name, + AccessTokenType: user.AccessTokenType(user.AccessTokenType_value[wr.MachineUser.Spec.AccessTokenType]), + }, + ) + if err != nil { + return fmt.Errorf("error creating MachineUser in Zitadel: %v", err) + } + userid = resp.UserId + } else { + _, err = ztdClient.UpdateMachine(middleware.SetOrgID(ctx, org.Status.OrgId), + &pb.UpdateMachineRequest{ + UserId: machineUser.User.Id, + Name: wr.MachineUser.Name, + Description: wr.MachineUser.Name, + AccessTokenType: user.AccessTokenType(user.AccessTokenType_value[wr.MachineUser.Spec.AccessTokenType]), + }) + if err != nil { + if !strings.Contains(err.Error(), "User.NotChanged ") { + return fmt.Errorf("Error updating MchineUser: %v", err) + } + } + userid = machineUser.User.Id + } + + patch := ctrlClient.MergeFrom(wr.MachineUser.DeepCopy()) + wr.MachineUser.Status.UserId = userid + return wr.Client.Status().Patch(ctx, wr.MachineUser, patch) +} + +func (wr *wrappedMachineUserReconciler) reconcilePAT(ctx context.Context, ztdClient *management.Client) error { + org, err := 
wr.refResolver.OrganizationRef(ctx, &wr.MachineUser.Spec.OrganizationRef, wr.MachineUser.Namespace) + if err != nil { + return err + } + ctx = middleware.SetOrgID(ctx, org.Status.OrgId) + + token, err := ztdClient.GetPersonalAccessTokenByIDs(ctx, &pb.GetPersonalAccessTokenByIDsRequest{ + UserId: wr.MachineUser.Status.UserId, + TokenId: wr.MachineUser.Status.PATId, + }) + if err != nil { + if !(strings.Contains(err.Error(), "NotFound") || strings.Contains(err.Error(), "length must be between 1 and 200 runes")) { + return fmt.Errorf("Error getting PAT: %v", err) + } + } + + if token == nil || !wr.MachineUser.Status.GetConditionStatus(zitadelv1alpha1.ConditionTypePATUpToDate) { + if token != nil { + if _, err = ztdClient.RemovePersonalAccessToken(ctx, &pb.RemovePersonalAccessTokenRequest{ + UserId: wr.MachineUser.Status.UserId, + TokenId: wr.MachineUser.Status.PATId, + }); err != nil { + return fmt.Errorf("Error removing PAT: %v", err) + } + } + resp, err := ztdClient.AddPersonalAccessToken(ctx, &pb.AddPersonalAccessTokenRequest{ + UserId: wr.MachineUser.Status.UserId, + }) + if err != nil { + return fmt.Errorf("Error adding PAT: %v", err) + } + key := types.NamespacedName{ + Name: wr.MachineUser.PatSecretName(), + Namespace: wr.MachineUser.Namespace, + } + desiredPatSecret, err := wr.Builder.BuildSecret(builder.SecretOpts{ + Key: key, + Immutable: false, + Data: map[string][]byte{ + "pat": []byte(resp.Token), + }, + }, wr.MachineUser) + + if err != nil { + return fmt.Errorf("error building PAT Secret: %v", err) + } + + { + var existingPatSecret corev1.Secret + if err := wr.Get(ctx, key, &existingPatSecret); err != nil { + if !errors.IsNotFound(err) { + return fmt.Errorf("error getting PAT Secret: %v", err) + } + if err := wr.Create(ctx, desiredPatSecret); err != nil { + return fmt.Errorf("error creating PAT Secret: %v", err) + } + } + + patch := clientpkg.MergeFrom(existingPatSecret.DeepCopy()) + existingPatSecret.Data = desiredPatSecret.Data + if err = wr.Patch(ctx, 
&existingPatSecret, patch); err != nil { + return err + } + + } + + if err = wr.PatchStatus(ctx, condition.SetPatUpToDate); err != nil { + return err + } + + patch := ctrlClient.MergeFrom(wr.MachineUser.DeepCopy()) + wr.MachineUser.Status.PATId = resp.TokenId + return wr.Client.Status().Patch(ctx, wr.MachineUser, patch) + } + return nil +} + +func (wr *wrappedMachineUserReconciler) reconcileJWT(ctx context.Context, ztdClient *management.Client) error { + org, err := wr.refResolver.OrganizationRef(ctx, &wr.MachineUser.Spec.OrganizationRef, wr.MachineUser.Namespace) + if err != nil { + return err + } + ctx = middleware.SetOrgID(ctx, org.Status.OrgId) + + token, err := ztdClient.GetMachineKeyByIDs(ctx, &pb.GetMachineKeyByIDsRequest{ + UserId: wr.MachineUser.Status.UserId, + KeyId: wr.MachineUser.Status.KeyId, + }) + if err != nil { + if !(strings.Contains(err.Error(), "NotFound") || strings.Contains(err.Error(), "length must be between 1 and 200 runes")) { + return fmt.Errorf("Error getting JWT: %v", err) + } + } + + if token == nil { + resp, err := ztdClient.AddMachineKey(ctx, &pb.AddMachineKeyRequest{ + UserId: wr.MachineUser.Status.UserId, + Type: authn.KeyType_KEY_TYPE_JSON, + }) + if err != nil { + return fmt.Errorf("Error adding JWT: %v", err) + } + key := types.NamespacedName{ + Name: wr.MachineUser.JWTSecretName(), + Namespace: wr.MachineUser.Namespace, + } + + var jsonKey Key + if err = json.Unmarshal(resp.KeyDetails, &jsonKey); err != nil { + return fmt.Errorf("Could not unmarshal key details: %v", err) + } + + secretData := map[string][]byte{ + "clientId": []byte(jsonKey.ClientID), + "type": []byte(jsonKey.Type), + "keyId": []byte(jsonKey.KeyID), + "appId": []byte(jsonKey.AppID), + "key": []byte(jsonKey.Key), + } + jwtSecret, err := wr.Builder.BuildSecret(builder.SecretOpts{ + Key: key, + Immutable: false, + Data: secretData, + }, wr.MachineUser) + + if err != nil { + return fmt.Errorf("error building machine key Secret: %v", err) + } + if err := 
wr.Create(ctx, jwtSecret); err != nil { + return fmt.Errorf("error creating machine key Secret: %v", err) + } + patch := ctrlClient.MergeFrom(wr.MachineUser.DeepCopy()) + wr.MachineUser.Status.KeyId = resp.KeyId + return wr.Client.Status().Patch(ctx, wr.MachineUser, patch) + } + return nil +} + +func (wr *wrappedMachineUserReconciler) reconcileUserGrants(ctx context.Context, ztdClient *management.Client) error { + org, err := wr.refResolver.OrganizationRef(ctx, &wr.MachineUser.Spec.OrganizationRef, wr.MachineUser.Namespace) + if err != nil { + return err + } + ctx = middleware.SetOrgID(ctx, org.Status.OrgId) + existingUserGrants, err := ztdClient.ListUserGrants(ctx, &pb.ListUserGrantRequest{ + Queries: []*user.UserGrantQuery{ + { + Query: &user.UserGrantQuery_UserIdQuery{ + UserIdQuery: &user.UserGrantUserIDQuery{ + UserId: wr.MachineUser.Status.UserId, + }, + }, + }, + { + Query: &user.UserGrantQuery_WithGrantedQuery{ + WithGrantedQuery: &user.UserGrantWithGrantedQuery{ + WithGranted: true, + }, + }, + }, + }, + }) + if err != nil { + return fmt.Errorf("Error listing MachineUser grants: %v", err) + } + for _, userGrant := range wr.MachineUser.DeepCopy().Spec.UserGrants { + userGrantedProject, err := wr.refResolver.ProjectRef(ctx, &userGrant.ProjectRef, wr.MachineUser.Namespace) + if err != nil { + return err + } + var existingUserGrant *user.UserGrant + for _, eGrant := range existingUserGrants.Result { + if eGrant.ProjectId == userGrantedProject.Status.ProjectId && eGrant.UserId == wr.MachineUser.Status.UserId { + existingUserGrant = eGrant + break + } + } + if existingUserGrant == nil { + grantedProjects, err := ztdClient.ListGrantedProjects(ctx, &pb.ListGrantedProjectsRequest{ + Queries: []*project.ProjectQuery{ + { + Query: &project.ProjectQuery_NameQuery{ + NameQuery: &project.ProjectNameQuery{ + Name: userGrantedProject.Name, + Method: object.TextQueryMethod_TEXT_QUERY_METHOD_EQUALS, + }, + }, + }, + }, + }) + if err != nil { + return fmt.Errorf("Error 
listing granted projects: %v", err) + } + + var existingProjectGrant *project.GrantedProject + for _, existingGrantedProject := range grantedProjects.Result { + if existingGrantedProject.ProjectId == userGrantedProject.Status.ProjectId { + existingProjectGrant = existingGrantedProject + break + } + } + if existingProjectGrant == nil { + return fmt.Errorf("Error no project granted to user organization") + } + + if err = wr.PatchStatus(ctx, condition.SetPatOutOfDate); err != nil { + return err + } + + _, err = ztdClient.AddUserGrant(ctx, &pb.AddUserGrantRequest{ + UserId: wr.MachineUser.Status.UserId, + RoleKeys: userGrant.RoleKeys, + ProjectId: existingProjectGrant.ProjectId, + ProjectGrantId: existingProjectGrant.GrantId, + }) + if err != nil { + return fmt.Errorf("Error Adding MachineUser grant: %v", err) + } + } else { + sort.Strings(existingUserGrant.RoleKeys) + sort.Strings(userGrant.RoleKeys) + if !reflect.DeepEqual(existingUserGrant.RoleKeys, userGrant.RoleKeys) { + + if err = wr.PatchStatus(ctx, condition.SetPatOutOfDate); err != nil { + return err + } + + _, err := ztdClient.UpdateUserGrant(ctx, &pb.UpdateUserGrantRequest{ + UserId: wr.MachineUser.Status.UserId, + GrantId: existingUserGrant.Id, + RoleKeys: userGrant.RoleKeys, + }) + if err != nil { + return fmt.Errorf("Error Updating MachineUser grant: %v", err) + } + } + } + } + return nil +} + +func (wr *wrappedMachineUserReconciler) PatchStatus(ctx context.Context, patcher condition.Patcher) error { + patch := client.MergeFrom(wr.MachineUser.DeepCopy()) + patcher(&wr.MachineUser.Status) + + if err := wr.Client.Status().Patch(ctx, wr.MachineUser, patch); err != nil { + return fmt.Errorf("error patching MachineUser status: %v", err) + } + return nil +} + +// SetupWithManager sets up the controller with the Manager. +func (r *MachineUserReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&zitadelv1alpha1.MachineUser{}). + Owns(&corev1.Secret{}). 
+ WithOptions(controller.Options{RateLimiter: workqueue.NewItemExponentialFailureRateLimiter(time.Millisecond*500, time.Minute*3)}). + Complete(r) +} diff --git a/src/internal/controller/machineuser_controller_finalizer.go b/src/internal/controller/machineuser_controller_finalizer.go new file mode 100644 index 0000000..7153821 --- /dev/null +++ b/src/internal/controller/machineuser_controller_finalizer.go @@ -0,0 +1,92 @@ +package controller + +import ( + "strings" + + zitadelv1alpha1 "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/api/v1alpha1" + "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/pkg/controller/zitadel" + + "context" + "fmt" + + "github.com/zitadel/zitadel-go/v2/pkg/client/management" + pb "github.com/zitadel/zitadel-go/v2/pkg/client/zitadel/management" + "sigs.k8s.io/controller-runtime/pkg/client" + ctrlClient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" +) + +const ( + machineuserFinalizerName = "machineuser.zitadel.topmanage.com/machineuser" +) + +type wrappedMachineUserFinalizer struct { + client.Client + machineuser *zitadelv1alpha1.MachineUser +} + +func newWrappedMachineUserFinalizer(client client.Client, machineuser *zitadelv1alpha1.MachineUser) zitadel.WrappedFinalizer { + return &wrappedMachineUserFinalizer{ + Client: client, + machineuser: machineuser, + } +} + +func (wf *wrappedMachineUserFinalizer) AddFinalizer(ctx context.Context) error { + if wf.ContainsFinalizer() { + return nil + } + return wf.patch(ctx, wf.machineuser, func(machineuser *zitadelv1alpha1.MachineUser) { + controllerutil.AddFinalizer(machineuser, machineuserFinalizerName) + }) +} + +func (wf *wrappedMachineUserFinalizer) RemoveFinalizer(ctx context.Context) error { + if !wf.ContainsFinalizer() { + return nil + } + return wf.patch(ctx, wf.machineuser, func(machineuser *zitadelv1alpha1.MachineUser) { + controllerutil.RemoveFinalizer(wf.machineuser, 
machineuserFinalizerName) + }) +} + +func (wr *wrappedMachineUserFinalizer) ContainsFinalizer() bool { + return controllerutil.ContainsFinalizer(wr.machineuser, machineuserFinalizerName) +} + +func (wf *wrappedMachineUserFinalizer) Reconcile(ctx context.Context, ztdClient *management.Client) error { + if wf.machineuser.Status.UserId == "" { + return nil + } + { + _, err := ztdClient.GetUserByID(ctx, &pb.GetUserByIDRequest{ + Id: wf.machineuser.Status.UserId, + }) + if err != nil { + if strings.Contains(err.Error(), `not be found`) { + return nil + } + return err + } + } + _, err := ztdClient.RemoveUser(ctx, &pb.RemoveUserRequest{ + Id: wf.machineuser.Status.UserId, + }) + if err != nil { + if !strings.Contains(err.Error(), "not be found") { + return err + } + } + return nil +} + +func (wr *wrappedMachineUserFinalizer) patch(ctx context.Context, machineuser *zitadelv1alpha1.MachineUser, + patchFn func(*zitadelv1alpha1.MachineUser)) error { + patch := ctrlClient.MergeFrom(machineuser.DeepCopy()) + patchFn(machineuser) + + if err := wr.Client.Patch(ctx, machineuser, patch); err != nil { + return fmt.Errorf("error patching MachineUser finalizer: %v", err) + } + return nil +} diff --git a/src/internal/controller/oidcapp_controller.go b/src/internal/controller/oidcapp_controller.go new file mode 100644 index 0000000..0a9a1ee --- /dev/null +++ b/src/internal/controller/oidcapp_controller.go @@ -0,0 +1,219 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controller + +import ( + "context" + "fmt" + "strings" + "time" + + zitadelv1alpha1 "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/api/v1alpha1" + "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/pkg/builder" + condition "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/pkg/condition" + "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/pkg/controller/zitadel" + "github.com/zitadel/zitadel-go/v2/pkg/client/management" + "github.com/zitadel/zitadel-go/v2/pkg/client/middleware" + app "github.com/zitadel/zitadel-go/v2/pkg/client/zitadel/app" + pb "github.com/zitadel/zitadel-go/v2/pkg/client/zitadel/management" + durationpb "google.golang.org/protobuf/types/known/durationpb" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/workqueue" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + ctrlClient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" +) + +// OIDCAppReconciler reconciles a OIDCApp object +type OIDCAppReconciler struct { + client.Client + RefResolver *zitadelv1alpha1.RefResolver + ConditionReady *condition.Ready + RequeueInterval time.Duration + Builder *builder.Builder +} + +func NewOIDCAppReconciler(client client.Client, refResolver *zitadelv1alpha1.RefResolver, builder *builder.Builder, conditionReady *condition.Ready, + requeueInterval time.Duration) *OIDCAppReconciler { + return &OIDCAppReconciler{ + Client: client, + RefResolver: refResolver, + ConditionReady: conditionReady, + RequeueInterval: requeueInterval, + Builder: builder, + } +} + +//+kubebuilder:rbac:groups=zitadel.topmanage.com,resources=oidcapps,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=zitadel.topmanage.com,resources=oidcapps/status,verbs=get;update;patch 
+//+kubebuilder:rbac:groups=zitadel.topmanage.com,resources=oidcapps/finalizers,verbs=update + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. +func (r *OIDCAppReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + var OIDCApp zitadelv1alpha1.OIDCApp + if err := r.Get(ctx, req.NamespacedName, &OIDCApp); err != nil { + return ctrl.Result{}, client.IgnoreNotFound(err) + } + wr := newWrappedOIDCAppReconciler(r.Client, r.RefResolver, r.Builder, &OIDCApp) + wf := newWrappedOIDCAppFinalizer(r.Client, &OIDCApp, r.RefResolver) + tf := zitadel.NewZitadelFinalizer(r.Client, wf) + tr := zitadel.NewZitadelReconciler(r.Client, r.ConditionReady, wr, tf, r.RequeueInterval) + + result, err := tr.Reconcile(ctx, &OIDCApp) + if err != nil { + return result, fmt.Errorf("error reconciling in OIDCAppReconciler: %v", err) + } + return result, nil +} + +type wrappedOIDCAppReconciler struct { + client.Client + refResolver *zitadelv1alpha1.RefResolver + OIDCApp *zitadelv1alpha1.OIDCApp + Builder *builder.Builder +} + +func newWrappedOIDCAppReconciler(client client.Client, refResolver *zitadelv1alpha1.RefResolver, builder *builder.Builder, + OIDCApp *zitadelv1alpha1.OIDCApp) zitadel.WrappedReconciler { + return &wrappedOIDCAppReconciler{ + Client: client, + refResolver: refResolver, + OIDCApp: OIDCApp, + Builder: builder, + } +} + +func (wr *wrappedOIDCAppReconciler) Reconcile(ctx context.Context, ztdClient *management.Client) error { + org, err := wr.OIDCApp.Organization(ctx, wr.refResolver) + if err != nil { + return err + } + project, err := wr.OIDCApp.Project(ctx, wr.refResolver) + if err != nil { + return err + } + responseTypes := []app.OIDCResponseType{} + for _, r := range wr.OIDCApp.Spec.ResponseTypes { + responseTypes = append(responseTypes, app.OIDCResponseType(app.OIDCResponseType_value[string(r)])) + } + grantTypes := []app.OIDCGrantType{} + for _, 
r := range wr.OIDCApp.Spec.GrantTypes {
		grantTypes = append(grantTypes, app.OIDCGrantType(app.OIDCGrantType_value[string(r)]))
	}

	if wr.OIDCApp.Status.AppId != "" {
		// An app ID is already recorded in status: try to update the
		// existing Zitadel app in place. If Zitadel no longer knows the
		// ID (e.g. the app was deleted out-of-band), fall through and
		// recreate it below instead of failing on every reconcile.
		appResp, err := ztdClient.GetAppByID(middleware.SetOrgID(ctx, org.Status.OrgId), &pb.GetAppByIDRequest{
			ProjectId: project.Status.ProjectId,
			AppId:     wr.OIDCApp.Status.AppId,
		})
		if err != nil {
			// NOTE(review): "not found" mirrors the substring matched by
			// the project reconciler's GetProjectByID handling — confirm
			// against Zitadel's actual not-found message for apps.
			if !strings.Contains(err.Error(), "not found") {
				return fmt.Errorf("Error getting OIDCApp: %v", err)
			}
		} else if appResp.App != nil {
			if _, err := ztdClient.UpdateOIDCAppConfig(middleware.SetOrgID(ctx, org.Status.OrgId), &pb.UpdateOIDCAppConfigRequest{
				ProjectId:                project.Status.ProjectId,
				AppId:                    wr.OIDCApp.Status.AppId,
				RedirectUris:             wr.OIDCApp.Spec.RedirectUris,
				ResponseTypes:            responseTypes,
				GrantTypes:               grantTypes,
				AppType:                  app.OIDCAppType(app.OIDCAppType_value[wr.OIDCApp.Spec.AppType]),
				AuthMethodType:           app.OIDCAuthMethodType(app.OIDCAuthMethodType_value[wr.OIDCApp.Spec.AuthMethodType]),
				PostLogoutRedirectUris:   wr.OIDCApp.Spec.PostLogoutRedirectUris,
				DevMode:                  wr.OIDCApp.Spec.DevMode,
				AccessTokenType:          app.OIDCTokenType(app.OIDCTokenType_value[wr.OIDCApp.Spec.AccessTokenType]),
				AccessTokenRoleAssertion: wr.OIDCApp.Spec.AccessTokenRoleAssertion,
				IdTokenRoleAssertion:     wr.OIDCApp.Spec.IdTokenRoleAssertion,
				IdTokenUserinfoAssertion: wr.OIDCApp.Spec.IdTokenUserinfoAssertion,
				ClockSkew:                durationpb.New(wr.OIDCApp.Spec.ClockSkew.Duration),
				AdditionalOrigins:        wr.OIDCApp.Spec.AdditionalOrigins,
				SkipNativeAppSuccessPage: wr.OIDCApp.Spec.SkipNativeAppSuccessPage,
			}); err != nil {
				// "No changes" means the app already matches the spec —
				// not an error for an idempotent reconcile.
				if !strings.Contains(err.Error(), "No changes") {
					return fmt.Errorf("Error updating OIDCApp: %v", err)
				}
			}
			return nil
		}
	}

	// No usable existing app: create it.
	resp, err := ztdClient.AddOIDCApp(middleware.SetOrgID(ctx, org.Status.OrgId),
		&pb.AddOIDCAppRequest{
			Name:          wr.OIDCApp.Name,
			ProjectId:     project.Status.ProjectId,
			RedirectUris:  wr.OIDCApp.Spec.RedirectUris,
			ResponseTypes: responseTypes,
			GrantTypes:    grantTypes,
			AppType: 
app.OIDCAppType(app.OIDCAppType_value[wr.OIDCApp.Spec.AppType]), + AuthMethodType: app.OIDCAuthMethodType(app.OIDCAuthMethodType_value[wr.OIDCApp.Spec.AuthMethodType]), + PostLogoutRedirectUris: wr.OIDCApp.Spec.PostLogoutRedirectUris, + Version: app.OIDCVersion_OIDC_VERSION_1_0, + DevMode: wr.OIDCApp.Spec.DevMode, + AccessTokenType: app.OIDCTokenType(app.OIDCTokenType_value[wr.OIDCApp.Spec.AccessTokenType]), + AccessTokenRoleAssertion: wr.OIDCApp.Spec.AccessTokenRoleAssertion, + IdTokenRoleAssertion: wr.OIDCApp.Spec.IdTokenRoleAssertion, + IdTokenUserinfoAssertion: wr.OIDCApp.Spec.IdTokenUserinfoAssertion, + ClockSkew: durationpb.New(wr.OIDCApp.Spec.ClockSkew.Duration), + AdditionalOrigins: wr.OIDCApp.Spec.AdditionalOrigins, + SkipNativeAppSuccessPage: wr.OIDCApp.Spec.SkipNativeAppSuccessPage, + }, + ) + if err != nil { + if strings.Contains(err.Error(), "AlreadyExists") { + return nil + } + return fmt.Errorf("error creating OIDCApp in Zitadel: %v", err) + } + key := types.NamespacedName{ + Name: wr.OIDCApp.ClientSecretName(), + Namespace: wr.OIDCApp.Namespace, + } + + secretData := map[string][]byte{"client-secret": []byte(resp.ClientSecret)} + secret, err := wr.Builder.BuildSecret(builder.SecretOpts{Immutable: false, Zitadel: nil, Key: key, Data: secretData}, wr.OIDCApp) + if err != nil { + return fmt.Errorf("error building Secret: %v", err) + } + if err := wr.Create(ctx, secret); err != nil { + return fmt.Errorf("error creating Client-secret Secret: %v", err) + } + patch := ctrlClient.MergeFrom(wr.OIDCApp.DeepCopy()) + wr.OIDCApp.Status.AppId = resp.AppId + wr.OIDCApp.Status.ClientId = resp.ClientId + return wr.Client.Status().Patch(ctx, wr.OIDCApp, patch) +} + +func (wr *wrappedOIDCAppReconciler) PatchStatus(ctx context.Context, patcher condition.Patcher) error { + patch := client.MergeFrom(wr.OIDCApp.DeepCopy()) + patcher(&wr.OIDCApp.Status) + + if err := wr.Client.Status().Patch(ctx, wr.OIDCApp, patch); err != nil { + return fmt.Errorf("error patching OIDCApp 
status: %v", err) + } + return nil +} + +// SetupWithManager sets up the controller with the Manager. +func (r *OIDCAppReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&zitadelv1alpha1.OIDCApp{}). + Owns(&corev1.Secret{}). + WithOptions(controller.Options{RateLimiter: workqueue.NewItemExponentialFailureRateLimiter(time.Millisecond*500, time.Minute*3)}). + Complete(r) +} diff --git a/src/internal/controller/oidcapp_controller_finalizer.go b/src/internal/controller/oidcapp_controller_finalizer.go new file mode 100644 index 0000000..bda3e00 --- /dev/null +++ b/src/internal/controller/oidcapp_controller_finalizer.go @@ -0,0 +1,91 @@ +package controller + +import ( + "strings" + + zitadelv1alpha1 "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/api/v1alpha1" + "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/pkg/controller/zitadel" + + "context" + "fmt" + + "github.com/zitadel/zitadel-go/v2/pkg/client/management" + "github.com/zitadel/zitadel-go/v2/pkg/client/middleware" + pb "github.com/zitadel/zitadel-go/v2/pkg/client/zitadel/management" + "sigs.k8s.io/controller-runtime/pkg/client" + ctrlClient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" +) + +const ( + OIDCAppFinalizerName = "oidcapp.zitadel.topmanage.com/oidcapp" +) + +type wrappedOIDCAppFinalizer struct { + client.Client + OIDCApp *zitadelv1alpha1.OIDCApp + refresolver *zitadelv1alpha1.RefResolver +} + +func newWrappedOIDCAppFinalizer(client client.Client, OIDCApp *zitadelv1alpha1.OIDCApp, refresolver *zitadelv1alpha1.RefResolver) zitadel.WrappedFinalizer { + return &wrappedOIDCAppFinalizer{ + Client: client, + OIDCApp: OIDCApp, + refresolver: refresolver, + } +} + +func (wf *wrappedOIDCAppFinalizer) AddFinalizer(ctx context.Context) error { + if wf.ContainsFinalizer() { + return nil + } + return wf.patch(ctx, wf.OIDCApp, func(OIDCApp 
*zitadelv1alpha1.OIDCApp) { + controllerutil.AddFinalizer(OIDCApp, OIDCAppFinalizerName) + }) +} + +func (wf *wrappedOIDCAppFinalizer) RemoveFinalizer(ctx context.Context) error { + if !wf.ContainsFinalizer() { + return nil + } + return wf.patch(ctx, wf.OIDCApp, func(OIDCApp *zitadelv1alpha1.OIDCApp) { + controllerutil.RemoveFinalizer(wf.OIDCApp, OIDCAppFinalizerName) + }) +} + +func (wr *wrappedOIDCAppFinalizer) ContainsFinalizer() bool { + return controllerutil.ContainsFinalizer(wr.OIDCApp, OIDCAppFinalizerName) +} + +func (wf *wrappedOIDCAppFinalizer) Reconcile(ctx context.Context, ztdClient *management.Client) error { + if wf.OIDCApp.Status.AppId == "" { + return nil + } + org, err := wf.OIDCApp.Organization(ctx, wf.refresolver) + if err != nil { + return err + } + project, err := wf.OIDCApp.Project(ctx, wf.refresolver) + if err != nil { + return err + } + _, err = ztdClient.RemoveApp(middleware.SetOrgID(ctx, org.Status.OrgId), &pb.RemoveAppRequest{ProjectId: project.Status.ProjectId, AppId: wf.OIDCApp.Status.AppId}) + if err != nil { + if strings.Contains(err.Error(), "doesn't exist") { + return nil + } + return err + } + return nil +} + +func (wr *wrappedOIDCAppFinalizer) patch(ctx context.Context, OIDCApp *zitadelv1alpha1.OIDCApp, + patchFn func(*zitadelv1alpha1.OIDCApp)) error { + patch := ctrlClient.MergeFrom(OIDCApp.DeepCopy()) + patchFn(OIDCApp) + + if err := wr.Client.Patch(ctx, OIDCApp, patch); err != nil { + return fmt.Errorf("error patching OIDCApp finalizer: %v", err) + } + return nil +} diff --git a/src/internal/controller/organization_controller.go b/src/internal/controller/organization_controller.go new file mode 100644 index 0000000..6b6634f --- /dev/null +++ b/src/internal/controller/organization_controller.go @@ -0,0 +1,228 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + "fmt" + "strings" + "time" + + zitadelv1alpha1 "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/api/v1alpha1" + condition "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/pkg/condition" + "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/pkg/controller/zitadel" + "github.com/zitadel/zitadel-go/v2/pkg/client/management" + "github.com/zitadel/zitadel-go/v2/pkg/client/middleware" + pb "github.com/zitadel/zitadel-go/v2/pkg/client/zitadel/management" + "k8s.io/client-go/util/workqueue" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + ctrlClient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" +) + +// OrganizationReconciler reconciles a Organization object +type OrganizationReconciler struct { + client.Client + RefResolver *zitadelv1alpha1.RefResolver + ConditionReady *condition.Ready + RequeueInterval time.Duration +} + +func NewOrganizationReconciler(client client.Client, refResolver *zitadelv1alpha1.RefResolver, conditionReady *condition.Ready, + requeueInterval time.Duration) *OrganizationReconciler { + return &OrganizationReconciler{ + Client: client, + RefResolver: refResolver, + ConditionReady: conditionReady, + RequeueInterval: requeueInterval, + } +} + +//+kubebuilder:rbac:groups=zitadel.topmanage.com,resources=organizations,verbs=get;list;watch;create;update;patch;delete 
+//+kubebuilder:rbac:groups=zitadel.topmanage.com,resources=organizations/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=zitadel.topmanage.com,resources=organizations/finalizers,verbs=update + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. +func (r *OrganizationReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + var organization zitadelv1alpha1.Organization + if err := r.Get(ctx, req.NamespacedName, &organization); err != nil { + return ctrl.Result{}, client.IgnoreNotFound(err) + } + wr := newWrappedOrganizationReconciler(r.Client, r.RefResolver, &organization) + wf := newWrappedOrganizationFinalizer(r.Client, &organization) + tf := zitadel.NewZitadelFinalizer(r.Client, wf) + tr := zitadel.NewZitadelReconciler(r.Client, r.ConditionReady, wr, tf, r.RequeueInterval) + + result, err := tr.Reconcile(ctx, &organization) + if err != nil { + return result, fmt.Errorf("error reconciling in OrganizationReconciler: %v", err) + } + return result, nil +} + +type wrappedOrganizationReconciler struct { + client.Client + refResolver *zitadelv1alpha1.RefResolver + organization *zitadelv1alpha1.Organization +} + +func newWrappedOrganizationReconciler(client client.Client, refResolver *zitadelv1alpha1.RefResolver, + organization *zitadelv1alpha1.Organization) zitadel.WrappedReconciler { + return &wrappedOrganizationReconciler{ + Client: client, + refResolver: refResolver, + organization: organization, + } +} + +type orgReconcilePhase struct { + Name string + Reconcile func(context.Context, *management.Client) error +} + +func (wr *wrappedOrganizationReconciler) Reconcile(ctx context.Context, ztdClient *management.Client) error { + phases := []orgReconcilePhase{ + { + Name: "organization", + Reconcile: wr.reconcileOrg, + }, + { + Name: "admin", + Reconcile: wr.reconcileInitialAdmin, + }, + } + for _, p := range phases { + err := 
p.Reconcile(ctx, ztdClient) + if err != nil { + return err + } + } + return nil +} + +func (wr *wrappedOrganizationReconciler) reconcileOrg(ctx context.Context, ztdClient *management.Client) error { + zitadelCluster, err := wr.refResolver.ZitadelCluster(ctx, &wr.organization.Spec.ZitadelClusterRef, wr.organization.Namespace) + if err != nil { + return err + } + orgRes, err := ztdClient.GetOrgByDomainGlobal(ctx, &pb.GetOrgByDomainGlobalRequest{ + Domain: strings.ToLower(fmt.Sprintf("%s.%s", wr.organization.Name, zitadelCluster.Spec.Host)), + }) + if err != nil { + if !strings.Contains(err.Error(), "not found") { + return fmt.Errorf("Error getting org: %v", err) + } + } + if orgRes == nil { + resp, err := ztdClient.AddOrg(ctx, &pb.AddOrgRequest{ + Name: strings.ToLower(wr.organization.Name), + }) + if err != nil { + return fmt.Errorf("error creating organization in Zitadel: %v", err) + } + patch := ctrlClient.MergeFrom(wr.organization.DeepCopy()) + wr.organization.Status.OrgId = resp.Id + return wr.Client.Status().Patch(ctx, wr.organization, patch) + } + patch := ctrlClient.MergeFrom(wr.organization.DeepCopy()) + wr.organization.Status.OrgId = orgRes.Org.Id + return wr.Client.Status().Patch(ctx, wr.organization, patch) +} + +func (wr *wrappedOrganizationReconciler) reconcileInitialAdmin(ctx context.Context, ztdClient *management.Client) error { + zitadelCluster, err := wr.refResolver.ZitadelCluster(ctx, &wr.organization.Spec.ZitadelClusterRef, wr.organization.Namespace) + if err != nil { + return err + } + adminUser, err := ztdClient.GetUserByLoginNameGlobal(ctx, &pb.GetUserByLoginNameGlobalRequest{ + LoginName: strings.ToLower(fmt.Sprintf("%s@%s.%s", wr.organization.Spec.OrganizationAdmin.UserName, wr.organization.Name, zitadelCluster.Spec.Host)), + }) + if err != nil { + if !strings.Contains(err.Error(), "could not be found") { + return fmt.Errorf("Error getting admin user: %v", err) + } + } + ctx = middleware.SetOrgID(ctx, wr.organization.Status.OrgId) + var 
userid string
	if adminUser == nil {
		// No admin user exists yet: create the human user described in
		// Spec.OrganizationAdmin and make it an ORG_OWNER.
		resp, err := ztdClient.AddHumanUser(ctx, &pb.AddHumanUserRequest{
			UserName: wr.organization.Spec.OrganizationAdmin.UserName,
			Profile: &pb.AddHumanUserRequest_Profile{
				FirstName: wr.organization.Spec.OrganizationAdmin.FirstName,
				LastName:  wr.organization.Spec.OrganizationAdmin.LastName,
			},
			Email: &pb.AddHumanUserRequest_Email{
				Email:           wr.organization.Spec.OrganizationAdmin.Email,
				IsEmailVerified: false,
			},
		})
		// Check the error before touching resp: on failure resp is nil
		// and reading resp.UserId would panic (previous code read it
		// before the error check).
		if err != nil {
			return fmt.Errorf("Error adding human user: %v", err)
		}
		userid = resp.UserId
		if _, err := ztdClient.AddOrgMember(ctx, &pb.AddOrgMemberRequest{
			UserId: userid,
			Roles: []string{
				"ORG_OWNER",
			},
		}); err != nil {
			// RolesNotChanged means membership is already as desired.
			if !strings.Contains(err.Error(), "Errors.Org.Member.RolesNotChanged") {
				return fmt.Errorf("Error adding org member: %v", err)
			}
		}
	} else {
		userid = adminUser.User.Id
	}

	// Idempotently enforce ORG_OWNER on the (pre-)existing member.
	if _, err := ztdClient.UpdateOrgMember(ctx, &pb.UpdateOrgMemberRequest{
		UserId: userid,
		Roles: []string{
			"ORG_OWNER",
		},
	}); err != nil {
		if !strings.Contains(err.Error(), "Errors.Org.Member.RolesNotChanged") {
			return fmt.Errorf("Error updating org member: %v", err)
		}
	}
	// Record the admin's user ID on the status subresource.
	patch := client.MergeFrom(wr.organization.DeepCopy())
	wr.organization.Status.AdminId = userid
	return wr.Status().Patch(ctx, wr.organization, patch)
}

// PatchStatus applies patcher to the Organization's status and persists the
// result as a merge patch against the status subresource.
func (wr *wrappedOrganizationReconciler) PatchStatus(ctx context.Context, patcher condition.Patcher) error {
	patch := client.MergeFrom(wr.organization.DeepCopy())
	patcher(&wr.organization.Status)

	if err := wr.Client.Status().Patch(ctx, wr.organization, patch); err != nil {
		return fmt.Errorf("error patching Organization status: %v", err)
	}
	return nil
}

// SetupWithManager sets up the controller with the Manager.
func (r *OrganizationReconciler) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&zitadelv1alpha1.Organization{}).
+ WithOptions(controller.Options{RateLimiter: workqueue.NewItemExponentialFailureRateLimiter(time.Millisecond*500, time.Minute*3)}). + Complete(r) +} diff --git a/src/internal/controller/organization_controller_finalizer.go b/src/internal/controller/organization_controller_finalizer.go new file mode 100644 index 0000000..2cc5982 --- /dev/null +++ b/src/internal/controller/organization_controller_finalizer.go @@ -0,0 +1,87 @@ +package controller + +import ( + "strings" + + zitadelv1alpha1 "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/api/v1alpha1" + "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/pkg/controller/zitadel" + + "context" + "fmt" + + "github.com/zitadel/zitadel-go/v2/pkg/client/management" + "github.com/zitadel/zitadel-go/v2/pkg/client/middleware" + pb "github.com/zitadel/zitadel-go/v2/pkg/client/zitadel/management" + "sigs.k8s.io/controller-runtime/pkg/client" + ctrlClient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" +) + +const ( + organizationFinalizerName = "organization.zitadel.topmanage.com/organization" +) + +type wrappedOrganizationFinalizer struct { + client.Client + organization *zitadelv1alpha1.Organization +} + +func newWrappedOrganizationFinalizer(client client.Client, organization *zitadelv1alpha1.Organization) zitadel.WrappedFinalizer { + return &wrappedOrganizationFinalizer{ + Client: client, + organization: organization, + } +} + +func (wf *wrappedOrganizationFinalizer) AddFinalizer(ctx context.Context) error { + if wf.ContainsFinalizer() { + return nil + } + return wf.patch(ctx, wf.organization, func(organization *zitadelv1alpha1.Organization) { + controllerutil.AddFinalizer(organization, organizationFinalizerName) + }) +} + +func (wf *wrappedOrganizationFinalizer) RemoveFinalizer(ctx context.Context) error { + if !wf.ContainsFinalizer() { + return nil + } + return wf.patch(ctx, wf.organization, func(organization 
*zitadelv1alpha1.Organization) { + controllerutil.RemoveFinalizer(wf.organization, organizationFinalizerName) + }) +} + +func (wr *wrappedOrganizationFinalizer) ContainsFinalizer() bool { + return controllerutil.ContainsFinalizer(wr.organization, organizationFinalizerName) +} + +func (wf *wrappedOrganizationFinalizer) Reconcile(ctx context.Context, ztdClient *management.Client) error { + if wf.organization.Status.OrgId == "" { + return nil + } + { + _, err := ztdClient.GetMyOrg(middleware.SetOrgID(ctx, wf.organization.Status.OrgId), &pb.GetMyOrgRequest{}) + if err != nil { + if strings.Contains(err.Error(), `Organisation doesn't exist`) { + return nil + } + return err + } + } + _, err := ztdClient.RemoveOrg(middleware.SetOrgID(ctx, wf.organization.Status.OrgId), &pb.RemoveOrgRequest{}) + if err != nil { + return err + } + return nil +} + +func (wr *wrappedOrganizationFinalizer) patch(ctx context.Context, organization *zitadelv1alpha1.Organization, + patchFn func(*zitadelv1alpha1.Organization)) error { + patch := ctrlClient.MergeFrom(organization.DeepCopy()) + patchFn(organization) + + if err := wr.Client.Patch(ctx, organization, patch); err != nil { + return fmt.Errorf("error patching Organization finalizer: %v", err) + } + return nil +} diff --git a/src/internal/controller/project_controller.go b/src/internal/controller/project_controller.go new file mode 100644 index 0000000..2297a64 --- /dev/null +++ b/src/internal/controller/project_controller.go @@ -0,0 +1,299 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + "fmt" + "reflect" + "sort" + "strings" + "time" + + zitadelv1alpha1 "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/api/v1alpha1" + condition "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/pkg/condition" + "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/pkg/controller/zitadel" + "github.com/zitadel/zitadel-go/v2/pkg/client/management" + "github.com/zitadel/zitadel-go/v2/pkg/client/middleware" + pb "github.com/zitadel/zitadel-go/v2/pkg/client/zitadel/management" + "github.com/zitadel/zitadel-go/v2/pkg/client/zitadel/project" + "golang.org/x/exp/maps" + "k8s.io/client-go/util/workqueue" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + ctrlClient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" +) + +// ProjectReconciler reconciles a Project object +type ProjectReconciler struct { + client.Client + RefResolver *zitadelv1alpha1.RefResolver + ConditionReady *condition.Ready + RequeueInterval time.Duration +} + +func NewProjectReconciler(client client.Client, refResolver *zitadelv1alpha1.RefResolver, conditionReady *condition.Ready, + requeueInterval time.Duration) *ProjectReconciler { + return &ProjectReconciler{ + Client: client, + RefResolver: refResolver, + ConditionReady: conditionReady, + RequeueInterval: requeueInterval, + } +} + +//+kubebuilder:rbac:groups=zitadel.topmanage.com,resources=projects,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=zitadel.topmanage.com,resources=projects/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=zitadel.topmanage.com,resources=projects/finalizers,verbs=update + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to 
the desired state. +func (r *ProjectReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + var project zitadelv1alpha1.Project + if err := r.Get(ctx, req.NamespacedName, &project); err != nil { + return ctrl.Result{}, client.IgnoreNotFound(err) + } + wr := newWrappedProjectReconciler(r.Client, r.RefResolver, &project) + wf := newWrappedProjectFinalizer(r.Client, &project, r.RefResolver) + tf := zitadel.NewZitadelFinalizer(r.Client, wf) + tr := zitadel.NewZitadelReconciler(r.Client, r.ConditionReady, wr, tf, r.RequeueInterval) + + result, err := tr.Reconcile(ctx, &project) + if err != nil { + return result, fmt.Errorf("error reconciling in ProjectReconciler: %v", err) + } + return result, nil +} + +type wrappedProjectReconciler struct { + client.Client + refResolver *zitadelv1alpha1.RefResolver + project *zitadelv1alpha1.Project +} + +func newWrappedProjectReconciler(client client.Client, refResolver *zitadelv1alpha1.RefResolver, + project *zitadelv1alpha1.Project) zitadel.WrappedReconciler { + return &wrappedProjectReconciler{ + Client: client, + refResolver: refResolver, + project: project, + } +} + +type projectReconcilePhase struct { + Name string + Reconcile func(context.Context, *management.Client) error +} + +func (wr *wrappedProjectReconciler) Reconcile(ctx context.Context, ztdClient *management.Client) error { + phases := []projectReconcilePhase{ + { + Name: "project", + Reconcile: wr.reconcileProject, + }, + { + Name: "roles", + Reconcile: wr.reconcileRoles, + }, + { + Name: "grants", + Reconcile: wr.reconcileGrants, + }, + } + for _, p := range phases { + err := p.Reconcile(ctx, ztdClient) + if err != nil { + return err + } + } + return nil +} + +func (wr *wrappedProjectReconciler) reconcileProject(ctx context.Context, ztdClient *management.Client) error { + org, err := wr.refResolver.OrganizationRef(ctx, &wr.project.Spec.OrganizationRef, wr.project.Namespace) + if err != nil { + return err + } + if 
wr.project.Status.ProjectId != "" {
		// A project ID is already recorded: try to update the existing
		// Zitadel project in place. If Zitadel no longer knows the ID
		// (deleted out-of-band), fall through and recreate it below.
		p, err := ztdClient.GetProjectByID(middleware.SetOrgID(ctx, org.Status.OrgId), &pb.GetProjectByIDRequest{Id: wr.project.Status.ProjectId})
		if err != nil {
			// Previous code inspected p before err; check err first so a
			// transport failure cannot reach the update path with a nil
			// or partial response.
			if !strings.Contains(err.Error(), "not found") {
				return fmt.Errorf("Error getting project: %v", err)
			}
		} else if p != nil {
			// NOTE(review): ProjectRoleCheck is populated from
			// Spec.ProjectRoleAssertion here and in AddProject below —
			// confirm this is intentional and not a copy-paste slip.
			if _, err := ztdClient.UpdateProject(middleware.SetOrgID(ctx, org.Status.OrgId),
				&pb.UpdateProjectRequest{
					Id:                   wr.project.Status.ProjectId,
					Name:                 wr.project.Name,
					ProjectRoleAssertion: wr.project.Spec.ProjectRoleAssertion,
					ProjectRoleCheck:     wr.project.Spec.ProjectRoleAssertion,
					HasProjectCheck:      wr.project.Spec.HasProjectCheck},
			); err != nil {
				// "No changes" means the project already matches the spec.
				if !strings.Contains(err.Error(), "No changes") {
					return fmt.Errorf("Error updating Project: %v", err)
				}
			}
			return nil
		}
	}

	// No usable existing project: create it.
	resp, err := ztdClient.AddProject(middleware.SetOrgID(ctx, org.Status.OrgId),
		&pb.AddProjectRequest{
			Name:                 wr.project.Name,
			ProjectRoleAssertion: wr.project.Spec.ProjectRoleAssertion,
			ProjectRoleCheck:     wr.project.Spec.ProjectRoleAssertion,
			HasProjectCheck:      wr.project.Spec.HasProjectCheck},
	)
	if err != nil {
		// A concurrent reconcile may have created the project already.
		if strings.Contains(err.Error(), "AlreadyExists") {
			return nil
		}
		return fmt.Errorf("error creating project in Zitadel: %v", err)
	}
	// Persist the new project ID on the status subresource.
	patch := ctrlClient.MergeFrom(wr.project.DeepCopy())
	wr.project.Status.ProjectId = resp.Id
	return wr.Client.Status().Patch(ctx, wr.project, patch)
}

// reconcileRoles makes the project's roles in Zitadel match Spec.Roles:
// missing roles are added, changed roles are replaced, stale roles removed.
func (wr *wrappedProjectReconciler) reconcileRoles(ctx context.Context, ztdClient *management.Client) error {
	org, err := wr.refResolver.OrganizationRef(ctx, &wr.project.Spec.OrganizationRef, wr.project.Namespace)
	if err != nil {
		return err
	}

	resp, err := ztdClient.ListProjectRoles(middleware.SetOrgID(ctx, org.Status.OrgId), &pb.ListProjectRolesRequest{
		ProjectId: wr.project.Status.ProjectId,
	})
	if err != nil {
		return fmt.Errorf("Could not list project roles: %v", err)
	}
	roles := 
map[string]*pb.BulkAddProjectRolesRequest_Role{} + deleteRolesKeys := []string{} + for _, role := range wr.project.Spec.Roles { + roles[role.Key] = &pb.BulkAddProjectRolesRequest_Role{ + Key: role.Key, + DisplayName: role.DisplayName, + Group: role.Group, + } + } + + for _, role := range resp.Result { + if r, ok := roles[role.Key]; ok { + if r.DisplayName != role.DisplayName || r.Group != role.Group { + deleteRolesKeys = append(deleteRolesKeys, role.Key) + } else { + delete(roles, role.Key) + } + } else { + deleteRolesKeys = append(deleteRolesKeys, role.Key) + } + } + + if len(deleteRolesKeys) > 0 { + for _, key := range deleteRolesKeys { + if _, err = ztdClient.RemoveProjectRole(middleware.SetOrgID(ctx, org.Status.OrgId), &pb.RemoveProjectRoleRequest{ + ProjectId: wr.project.Status.ProjectId, + RoleKey: key, + }); err != nil { + return fmt.Errorf("Error removing project role: %v", err) + } + } + } + + if len(roles) > 0 { + _, err = ztdClient.BulkAddProjectRoles(middleware.SetOrgID(ctx, org.Status.OrgId), &pb.BulkAddProjectRolesRequest{ + ProjectId: wr.project.Status.ProjectId, + Roles: maps.Values(roles)}) + if err != nil { + return fmt.Errorf("Could not add roles to project: %v", err) + } + } + return nil +} + +func (wr *wrappedProjectReconciler) reconcileGrants(ctx context.Context, ztdClient *management.Client) error { + org, err := wr.refResolver.OrganizationRef(ctx, &wr.project.Spec.OrganizationRef, wr.project.Namespace) + if err != nil { + return err + } + existingGrants, err := ztdClient.ListProjectGrants(ctx, &pb.ListProjectGrantsRequest{ + ProjectId: wr.project.Status.ProjectId, + }) + if err != nil { + return fmt.Errorf("Error listing project grants: %v", err) + } + ctx = middleware.SetOrgID(ctx, org.Status.OrgId) + for _, grant := range wr.project.DeepCopy().Spec.Grants { + grantedOrg, err := wr.refResolver.OrganizationRef(ctx, &grant.OrganizationRef, wr.project.Namespace) + if err != nil { + return err + } + var existingGrant *project.GrantedProject + 
for _, eGrant := range existingGrants.Result { + if eGrant.GrantedOrgId == grantedOrg.Status.OrgId { + existingGrant = eGrant + break + } + } + if existingGrant == nil { + _, err := ztdClient.AddProjectGrant(ctx, &pb.AddProjectGrantRequest{ + ProjectId: wr.project.Status.ProjectId, + GrantedOrgId: grantedOrg.Status.OrgId, + RoleKeys: grant.RoleKeys, + }) + if err != nil { + return fmt.Errorf("Error Adding project grant: %v", err) + } + } else { + sort.Strings(existingGrant.GrantedRoleKeys) + sort.Strings(grant.RoleKeys) + if !reflect.DeepEqual(existingGrant.GrantedRoleKeys, grant.RoleKeys) { + _, err := ztdClient.UpdateProjectGrant(ctx, &pb.UpdateProjectGrantRequest{ + ProjectId: wr.project.Status.ProjectId, + GrantId: existingGrant.GrantId, + RoleKeys: grant.RoleKeys, + }) + if err != nil { + return fmt.Errorf("Error Updating project grant: %v", err) + } + } + } + } + return nil +} + +func (wr *wrappedProjectReconciler) PatchStatus(ctx context.Context, patcher condition.Patcher) error { + patch := client.MergeFrom(wr.project.DeepCopy()) + patcher(&wr.project.Status) + + if err := wr.Client.Status().Patch(ctx, wr.project, patch); err != nil { + return fmt.Errorf("error patching Project status: %v", err) + } + return nil +} + +// SetupWithManager sets up the controller with the Manager. +func (r *ProjectReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&zitadelv1alpha1.Project{}). + WithOptions(controller.Options{RateLimiter: workqueue.NewItemExponentialFailureRateLimiter(time.Millisecond*500, time.Minute*3)}). 
+ Complete(r) +} diff --git a/src/internal/controller/project_controller_finalizer.go b/src/internal/controller/project_controller_finalizer.go new file mode 100644 index 0000000..7122bb3 --- /dev/null +++ b/src/internal/controller/project_controller_finalizer.go @@ -0,0 +1,94 @@ +package controller + +import ( + "strings" + + zitadelv1alpha1 "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/api/v1alpha1" + "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/pkg/controller/zitadel" + + "context" + "fmt" + + "github.com/zitadel/zitadel-go/v2/pkg/client/management" + "github.com/zitadel/zitadel-go/v2/pkg/client/middleware" + pb "github.com/zitadel/zitadel-go/v2/pkg/client/zitadel/management" + "sigs.k8s.io/controller-runtime/pkg/client" + ctrlClient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" +) + +const ( + projectFinalizerName = "project.zitadel.topmanage.com/project" +) + +type wrappedProjectFinalizer struct { + client.Client + project *zitadelv1alpha1.Project + refresolver *zitadelv1alpha1.RefResolver +} + +func newWrappedProjectFinalizer(client client.Client, project *zitadelv1alpha1.Project, refresolver *zitadelv1alpha1.RefResolver) zitadel.WrappedFinalizer { + return &wrappedProjectFinalizer{ + Client: client, + project: project, + refresolver: refresolver, + } +} + +func (wf *wrappedProjectFinalizer) AddFinalizer(ctx context.Context) error { + if wf.ContainsFinalizer() { + return nil + } + return wf.patch(ctx, wf.project, func(project *zitadelv1alpha1.Project) { + controllerutil.AddFinalizer(project, projectFinalizerName) + }) +} + +func (wf *wrappedProjectFinalizer) RemoveFinalizer(ctx context.Context) error { + if !wf.ContainsFinalizer() { + return nil + } + return wf.patch(ctx, wf.project, func(project *zitadelv1alpha1.Project) { + controllerutil.RemoveFinalizer(wf.project, projectFinalizerName) + }) +} + +func (wr *wrappedProjectFinalizer) 
ContainsFinalizer() bool { + return controllerutil.ContainsFinalizer(wr.project, projectFinalizerName) +} + +func (wf *wrappedProjectFinalizer) Reconcile(ctx context.Context, ztdClient *management.Client) error { + if wf.project.Status.ProjectId == "" { + return nil + } + org, err := wf.refresolver.OrganizationRef(ctx, &wf.project.Spec.OrganizationRef, wf.project.Namespace) + if err != nil { + return err + } + { + + _, err := ztdClient.GetProjectByID(middleware.SetOrgID(ctx, org.Status.OrgId), &pb.GetProjectByIDRequest{Id: wf.project.Status.ProjectId}) + if err != nil { + if strings.Contains(err.Error(), `doesn't exist`) { + return nil + } + return err + } + } + _, err = ztdClient.RemoveProject(middleware.SetOrgID(ctx, org.Status.OrgId), &pb.RemoveProjectRequest{Id: wf.project.Status.ProjectId}) + if err != nil { + return err + } + return nil +} + +func (wr *wrappedProjectFinalizer) patch(ctx context.Context, project *zitadelv1alpha1.Project, + patchFn func(*zitadelv1alpha1.Project)) error { + patch := ctrlClient.MergeFrom(project.DeepCopy()) + patchFn(project) + + if err := wr.Client.Patch(ctx, project, patch); err != nil { + return fmt.Errorf("error patching Project finalizer: %v", err) + } + return nil +} diff --git a/src/internal/controller/suite_test.go b/src/internal/controller/suite_test.go new file mode 100644 index 0000000..74607f5 --- /dev/null +++ b/src/internal/controller/suite_test.go @@ -0,0 +1,80 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controller + +import ( + "path/filepath" + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + + zitadelv1alpha1 "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/api/v1alpha1" + //+kubebuilder:scaffold:imports +) + +// These tests use Ginkgo (BDD-style Go testing framework). Refer to +// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. + +var cfg *rest.Config +var k8sClient client.Client +var testEnv *envtest.Environment + +func TestAPIs(t *testing.T) { + RegisterFailHandler(Fail) + + RunSpecs(t, "Controller Suite") +} + +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + + By("bootstrapping test environment") + testEnv = &envtest.Environment{ + CRDDirectoryPaths: []string{filepath.Join("..", "..", "config", "crd", "bases")}, + ErrorIfCRDPathMissing: true, + } + + var err error + // cfg is defined in this file globally. + cfg, err = testEnv.Start() + Expect(err).NotTo(HaveOccurred()) + Expect(cfg).NotTo(BeNil()) + + err = zitadelv1alpha1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + //+kubebuilder:scaffold:scheme + + k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) + Expect(err).NotTo(HaveOccurred()) + Expect(k8sClient).NotTo(BeNil()) + +}) + +var _ = AfterSuite(func() { + By("tearing down the test environment") + err := testEnv.Stop() + Expect(err).NotTo(HaveOccurred()) +}) diff --git a/src/internal/controller/zitadelcluster_controller.go b/src/internal/controller/zitadelcluster_controller.go new file mode 100644 index 0000000..0f95807 --- /dev/null +++ b/src/internal/controller/zitadelcluster_controller.go @@ -0,0 +1,601 @@ +/* +Copyright 2024. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + "crypto/x509" + "encoding/base64" + "encoding/json" + "encoding/pem" + "fmt" + "strings" + "time" + + zitadelv1alpha1 "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/api/v1alpha1" + builder "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/pkg/builder" + condition "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/pkg/condition" + "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/pkg/configuration" + configmap "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/pkg/controller/configmap" + secret "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/pkg/controller/secret" + "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/pkg/controller/service" + "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/pkg/deployment" + "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/pkg/machinekey" + "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/pkg/masterkey" + systemapiaccount "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/pkg/systemapi" + zitadelClient "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/pkg/zitadel" + "github.com/hashicorp/go-multierror" + "github.com/zitadel/zitadel-go/v2/pkg/client/system" + adm 
"github.com/zitadel/zitadel-go/v2/pkg/client/zitadel/admin" + authn "github.com/zitadel/zitadel-go/v2/pkg/client/zitadel/authn" + pb "github.com/zitadel/zitadel-go/v2/pkg/client/zitadel/system" + appsv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/workqueue" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/log" +) + +type reconcilePhase struct { + Name string + Reconcile func(context.Context, *zitadelv1alpha1.ZitadelCluster) (ctrl.Result, error) +} + +type patcher func(*zitadelv1alpha1.ZitadelClusterStatus) error + +// ZitadelClusterReconciler reconciles a ZitadelCluster object +type ZitadelClusterReconciler struct { + client.Client + Scheme *runtime.Scheme + ConditionReady *condition.Ready + Builder *builder.Builder + SecretReconciler *secret.SecretReconciler + ConfigMapReconciler *configmap.ConfigMapReconciler + ServiceReconciler *service.ServiceReconciler + RefResolver *zitadelv1alpha1.RefResolver +} + +// +kubebuilder:rbac:groups="",resources=configmaps,verbs=get;list;watch;create;patch +// +kubebuilder:rbac:groups="",resources=services,verbs=list;watch;create;patch +// +kubebuilder:rbac:groups="",resources=secrets,verbs=list;watch;create;patch +// +kubebuilder:rbac:groups="",resources=endpoints,verbs=create;patch;get;list;watch +// +kubebuilder:rbac:groups="",resources=endpoints/restricted,verbs=create;patch;get;list;watch +// +kubebuilder:rbac:groups="",resources=pods,verbs=get;delete +// +kubebuilder:rbac:groups="",resources=events,verbs=list;watch;create;patch +// +kubebuilder:rbac:groups="",resources=serviceaccounts,verbs=list;watch;create;patch +// +kubebuilder:rbac:groups=apps,resources=deployments,verbs=list;watch;create;patch +// 
+kubebuilder:rbac:groups=apps,resources=deployments,verbs=list;watch;create;patch +// +kubebuilder:rbac:groups=policy,resources=poddisruptionbudgets,verbs=list;watch;create;patch +// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=roles;rolebindings;clusterrolebindings,verbs=list;watch;create;patch +// +kubebuilder:rbac:groups=zitadel.topmanage.com,resources=zitadelclusters,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=zitadel.topmanage.com,resources=zitadelclusters/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=zitadel.topmanage.com,resources=zitadelclusters/finalizers,verbs=update +// +kubebuilder:rbac:groups=crdb.cockroachlabs.com,resources=crdbclusters,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=crdb.cockroachlabs.com,resources=crdbclusters/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=crdb.cockroachlabs.com,resources=crdbclusters/finalizers,verbs=update +// +kubebuilder:rbac:groups=certificates.k8s.io,resources=certificatesigningrequests,verbs=get;list;watch;create;patch;delete +// +kubebuilder:rbac:groups=certificates.k8s.io,resources=certificatesigningrequests/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=certificates.k8s.io,resources=certificatesigningrequests/approval,verbs=update +// +kubebuilder:rbac:groups=batch,resources=jobs,verbs=get;list;watch;create;update;patch;delete + +func (r *ZitadelClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + logger := log.FromContext(ctx) + logger.Info("Starting Reconcile") + + var zitadel zitadelv1alpha1.ZitadelCluster + + if err := r.Get(ctx, req.NamespacedName, &zitadel); err != nil { + return ctrl.Result{}, client.IgnoreNotFound(err) + } + + phases := []reconcilePhase{ + { + Name: "Spec", + Reconcile: r.setSpecDefaults, + }, + { + Name: "Status", + Reconcile: r.setStatusDefaults, + }, + { + Name: "MasterkeySecret", + Reconcile: r.reconcileMasterKeySecret, + }, 
+ { + Name: "ServiceAccount", + Reconcile: r.reconcileSystemAPIUser, + }, + { + Name: "Configuration", + Reconcile: r.reconcileConfig, + }, + { + Name: "InitJob", + Reconcile: r.reconcileInitJob, + }, + { + Name: "SetupJob", + Reconcile: r.reconcileSetupJob, + }, + { + Name: "Deployment", + Reconcile: r.reconcileDeployment, + }, + { + Name: "Service", + Reconcile: r.reconcileService, + }, + { + Name: "DefaultInstance", + Reconcile: r.reconcileDefaultInstance, + }, + { + Name: "SMTPConfig", + Reconcile: r.reconcileSMTPConfig, + }, + { + Name: "DomainPolicyConfig", + Reconcile: r.reconcileDomainPolicy, + }, + } + + for _, p := range phases { + result, err := p.Reconcile(ctx, &zitadel) + if err != nil { + if errors.IsNotFound(err) { + continue + } + + var errBundle *multierror.Error + errBundle = multierror.Append(errBundle, err) + + msg := fmt.Sprintf("Error reconciling %s: %v", p.Name, err) + patchErr := r.patchStatus(ctx, &zitadel, func(s *zitadelv1alpha1.ZitadelClusterStatus) error { + patcher := r.ConditionReady.PatcherFailed(msg) + patcher(s) + return nil + }) + if errors.IsNotFound(patchErr) { + errBundle = multierror.Append(errBundle, patchErr) + } + + if err := errBundle.ErrorOrNil(); err != nil { + return ctrl.Result{}, fmt.Errorf("error reconciling %s: %v", p.Name, err) + } + } + if !result.IsZero() { + return result, err + } + } + + if err := r.patchStatus(ctx, &zitadel, r.patcher(ctx, &zitadel)); err != nil && !errors.IsNotFound(err) { + return ctrl.Result{}, err + } + return ctrl.Result{RequeueAfter: 2 * time.Minute}, nil +} + +func (r *ZitadelClusterReconciler) setSpecDefaults(ctx context.Context, zitadel *zitadelv1alpha1.ZitadelCluster) (ctrl.Result, error) { + return ctrl.Result{}, r.patch(ctx, zitadel, func(zit *zitadelv1alpha1.ZitadelCluster) { + zit.SetDefaults() + }) +} + +func (r *ZitadelClusterReconciler) setStatusDefaults(ctx context.Context, zitadel *zitadelv1alpha1.ZitadelCluster) (ctrl.Result, error) { + return ctrl.Result{}, 
r.patchStatus(ctx, zitadel, func(status *zitadelv1alpha1.ZitadelClusterStatus) error { + status.FillWithDefaults(zitadel) + return nil + }) +} + +func (r *ZitadelClusterReconciler) reconcileMasterKeySecret(ctx context.Context, zitadel *zitadelv1alpha1.ZitadelCluster) (ctrl.Result, error) { + secretName := masterkey.MasterKeyName(zitadel) + key := types.NamespacedName{ + Name: secretName, + Namespace: zitadel.Namespace, + } + _, err := r.SecretReconciler.ReconcileRandomPassword(ctx, key, masterkey.Key, zitadel) + + if err != nil { + return ctrl.Result{}, err + } + return ctrl.Result{}, nil +} + +func (r *ZitadelClusterReconciler) reconcileSystemAPIUser(ctx context.Context, zitadel *zitadelv1alpha1.ZitadelCluster) (ctrl.Result, error) { + secretName := systemapiaccount.SystemAPIAccountName(zitadel) + key := types.NamespacedName{ + Name: secretName, + Namespace: zitadel.Namespace, + } + _, err := r.SecretReconciler.ReconcileRandomPrivateRSA(ctx, key, systemapiaccount.Key, zitadel) + + if err != nil { + return ctrl.Result{}, err + } + return ctrl.Result{}, nil +} + +func (r *ZitadelClusterReconciler) reconcileConfig(ctx context.Context, zitadel *zitadelv1alpha1.ZitadelCluster) (ctrl.Result, error) { + crdb, err := r.RefResolver.CrdbClusterRef(ctx, &zitadel.Spec.CrdbClusterRef, zitadel.Namespace) + if err != nil { + return ctrl.Result{}, err + } + configName := configuration.ConfigurationName(zitadel) + key := types.NamespacedName{ + Name: configName, + Namespace: zitadel.Namespace, + } + privateKeyData, err := r.RefResolver.SecretKeyRef(ctx, corev1.SecretKeySelector{LocalObjectReference: corev1.LocalObjectReference{Name: systemapiaccount.SystemAPIAccountName(zitadel)}, Key: systemapiaccount.Key}, zitadel.Namespace) + if err != nil { + return ctrl.Result{}, err + } + pemBlock, _ := pem.Decode([]byte(privateKeyData)) + if pemBlock == nil { + return ctrl.Result{}, fmt.Errorf("failed to decode PEM block") + } + privateKey, err := x509.ParsePKCS1PrivateKey(pemBlock.Bytes) + 
publicKeyBytes, err := x509.MarshalPKIXPublicKey(&privateKey.PublicKey) + if err != nil { + return ctrl.Result{}, err + } + publicKeyPem := pem.EncodeToMemory( + &pem.Block{ + Type: "RSA PUBLIC KEY", + Bytes: publicKeyBytes, + }, + ) + base64key := base64.StdEncoding.EncodeToString(publicKeyPem) + err = r.ConfigMapReconciler.ReconcileZitadelConfiguration(ctx, key, zitadel, crdb, base64key) + + if err != nil { + return ctrl.Result{}, err + } + return ctrl.Result{}, nil +} + +func (r *ZitadelClusterReconciler) reconcileInitJob(ctx context.Context, zitadel *zitadelv1alpha1.ZitadelCluster) (ctrl.Result, error) { + key := client.ObjectKeyFromObject(zitadel) + key.Name = "init-job-" + key.Name + desiredInitJob, err := r.Builder.BuildInitJob(zitadel, key) + if err != nil { + return ctrl.Result{}, fmt.Errorf("error building InitJob: %v", err) + } + var existingJob batchv1.Job + if err := r.Get(ctx, key, &existingJob); err != nil { + if !errors.IsNotFound(err) { + return ctrl.Result{}, fmt.Errorf("error getting InitJob: %v", err) + } + if err := r.Create(ctx, desiredInitJob); err != nil { + return ctrl.Result{}, fmt.Errorf("error creating InitJob: %v", err) + } + return ctrl.Result{}, nil + } + + patch := client.MergeFrom(existingJob.DeepCopy()) + existingJob.Spec.Template.Spec = desiredInitJob.Spec.Template.Spec + return ctrl.Result{}, r.Patch(ctx, &existingJob, patch) +} + +func (r *ZitadelClusterReconciler) reconcileSetupJob(ctx context.Context, zitadel *zitadelv1alpha1.ZitadelCluster) (ctrl.Result, error) { + key := client.ObjectKeyFromObject(zitadel) + key.Name = "setup-job-" + key.Name + desiredSetupjob, err := r.Builder.BuildSetupJob(zitadel, key) + if err != nil { + return ctrl.Result{}, fmt.Errorf("error building Setupjob: %v", err) + } + var existingJob batchv1.Job + if err := r.Get(ctx, key, &existingJob); err != nil { + if !errors.IsNotFound(err) { + return ctrl.Result{}, fmt.Errorf("error getting Setupjob: %v", err) + } + if err := r.Create(ctx, 
desiredSetupjob); err != nil { + return ctrl.Result{}, fmt.Errorf("error creating Setupjob: %v", err) + } + return ctrl.Result{}, nil + } + + patch := client.MergeFrom(existingJob.DeepCopy()) + existingJob.Spec.Template.Spec = desiredSetupjob.Spec.Template.Spec + return ctrl.Result{}, r.Patch(ctx, &existingJob, patch) +} + +func (r *ZitadelClusterReconciler) reconcileDeployment(ctx context.Context, zitadel *zitadelv1alpha1.ZitadelCluster) (ctrl.Result, error) { + // TODO: Reload on config changed + key := client.ObjectKeyFromObject(zitadel) + desiredSts, err := r.Builder.BuildDeployment(zitadel, key) + if err != nil { + return ctrl.Result{}, fmt.Errorf("error building Deployment: %v", err) + } + var existingDep appsv1.Deployment + if err := r.Get(ctx, key, &existingDep); err != nil { + if !errors.IsNotFound(err) { + return ctrl.Result{}, fmt.Errorf("error getting Deployment: %v", err) + } + if err := r.Create(ctx, desiredSts); err != nil { + return ctrl.Result{}, fmt.Errorf("error creating Deployment: %v", err) + } + return ctrl.Result{}, nil + } + + patch := client.MergeFrom(existingDep.DeepCopy()) + existingDep.Spec.Template = desiredSts.Spec.Template + existingDep.Spec.Replicas = desiredSts.Spec.Replicas + return ctrl.Result{}, r.Patch(ctx, &existingDep, patch) +} + +func (r *ZitadelClusterReconciler) reconcileService(ctx context.Context, zitadel *zitadelv1alpha1.ZitadelCluster) (ctrl.Result, error) { + return ctrl.Result{}, r.reconcileDefaultService(ctx, zitadel) +} + +func (r *ZitadelClusterReconciler) reconcileDefaultService(ctx context.Context, zitadel *zitadelv1alpha1.ZitadelCluster) error { + key := client.ObjectKeyFromObject(zitadel) + opts := builder.ServiceOpts{ + Ports: []corev1.ServicePort{ + { + Name: deployment.ZitadelName, + Port: deployment.ZitadelPort, + }, + }, + } + desiredSvc, err := r.Builder.BuildService(zitadel, key, opts) + if err != nil { + return fmt.Errorf("error building Service: %v", err) + } + return 
r.ServiceReconciler.Reconcile(ctx, desiredSvc) +} + +func (r *ZitadelClusterReconciler) reconcileDefaultInstance(ctx context.Context, zitadel *zitadelv1alpha1.ZitadelCluster) (ctrl.Result, error) { + // First create systemapi to get, delete and create instances + privateKeyData, err := r.RefResolver.SecretKeyRef(ctx, corev1.SecretKeySelector{LocalObjectReference: corev1.LocalObjectReference{Name: systemapiaccount.SystemAPIAccountName(zitadel)}, Key: systemapiaccount.Key}, zitadel.Namespace) + if err != nil { + return ctrl.Result{}, err + } + ztdClient, err := system.NewClient(GetIssuer(zitadel), GetAPI(zitadel), system.JWTProfileFromKey([]byte(privateKeyData), masterkey.OwnerName), system.WithInsecure()) + if err != nil { + return ctrl.Result{}, fmt.Errorf("Error creating sytem client: %v", err) + } + defer ztdClient.Connection.Close() + + // Delete all Instances that isn't the default + { + resp, err := ztdClient.ListInstances(ctx, &pb.ListInstancesRequest{}) + if err != nil { + return ctrl.Result{}, fmt.Errorf("Error listing instances: %v", err) + } + for _, instance := range resp.Result { + if instance.Id != zitadel.Status.DefaultInstanceId || instance.Id == "" { + fmt.Println("DELETING INSTANCE") + _, err := ztdClient.RemoveInstance(ctx, &pb.RemoveInstanceRequest{InstanceId: instance.Id}) + if err != nil { + return ctrl.Result{}, err + } + } + } + } + + // Check if instance already exists + _, err = ztdClient.GetInstance(ctx, &pb.GetInstanceRequest{InstanceId: zitadel.Status.DefaultInstanceId}) + if err != nil { + if strings.Contains(err.Error(), "Instance not found") { + // if Instance doesn't exist, then create and assign secrets + resp, err := ztdClient.CreateInstance(ctx, &pb.CreateInstanceRequest{ + InstanceName: zitadel.Spec.FirstOrgName, + FirstOrgName: zitadel.Spec.FirstOrgName, + CustomDomain: zitadel.Spec.Host, + Owner: &pb.CreateInstanceRequest_Machine_{Machine: &pb.CreateInstanceRequest_Machine{ + Name: "k8s-operator", + UserName: "k8s-operator", + 
MachineKey: &pb.CreateInstanceRequest_MachineKey{ + Type: authn.KeyType_KEY_TYPE_JSON}, + PersonalAccessToken: &pb.CreateInstanceRequest_PersonalAccessToken{}}, + }}) + if err != nil { + return ctrl.Result{}, fmt.Errorf("Error creating default instance: %v", err) + } + var machineKeyData zitadelClient.MachineKey + if err := json.Unmarshal(resp.MachineKey, &machineKeyData); err != nil { + return ctrl.Result{}, err + } + secretName := machinekey.MachineKeySecretName(zitadel) + key := types.NamespacedName{ + Name: secretName, + Namespace: zitadel.Namespace, + } + secretData := make(map[string][]byte) + jsonData, err := json.Marshal(machineKeyData) + if err != nil { + return ctrl.Result{}, err + } + secretData[machinekey.Key] = jsonData + secret, err := r.Builder.BuildSecret(builder.SecretOpts{Zitadel: zitadel, Key: key, Data: secretData}, zitadel) + if err != nil { + return ctrl.Result{}, fmt.Errorf("error building machinekey Secret: %v", err) + } + if err := r.Create(ctx, secret); err != nil { + return ctrl.Result{}, fmt.Errorf("error creating machinekey Secret: %v", err) + } + patch := client.MergeFrom(zitadel.DeepCopy()) + zitadel.Status.DefaultInstanceId = resp.InstanceId + return ctrl.Result{}, r.Status().Patch(ctx, zitadel, patch) + } else { + return ctrl.Result{}, fmt.Errorf("Error getting instance with id: %s: %v", zitadel.Status.DefaultInstanceId, err) + } + } + + return ctrl.Result{}, nil +} + +func (r *ZitadelClusterReconciler) reconcileSMTPConfig(ctx context.Context, zitadel *zitadelv1alpha1.ZitadelCluster) (ctrl.Result, error) { + adminClient, err := zitadelClient.NewAdminClient(ctx, zitadel, *r.RefResolver) + if err != nil { + return ctrl.Result{}, err + } + var smtpId string + resp, err := adminClient.GetSMTPConfig(ctx, &adm.GetSMTPConfigRequest{}) + if err != nil { + if !strings.Contains(err.Error(), "SMTP configuration not found") { + return ctrl.Result{}, fmt.Errorf("Error getting SMTP config: %v", err) + } + } + + if resp != nil && resp.SmtpConfig 
!= nil { + adminRequest := &adm.UpdateSMTPConfigRequest{ + SenderAddress: zitadel.Spec.SMTPConfig.SenderAddress, + SenderName: zitadel.Spec.SMTPConfig.SenderName, + Tls: zitadel.Spec.SMTPConfig.TLS, + Host: zitadel.Spec.SMTPConfig.Host, + Id: resp.SmtpConfig.Id, + Password: "test", + Description: "autogenerated by k8s-operator", + } + if zitadel.Spec.SMTPConfig.User != nil && zitadel.Spec.SMTPConfig.Password != nil { + + passwordSecret, err := r.RefResolver.SecretKeyRef(ctx, zitadel.Spec.SMTPConfig.Password.SecretKeyRef, zitadel.Namespace) + if err != nil { + return ctrl.Result{}, err + } + adminRequest.Password = passwordSecret + adminRequest.User = *zitadel.Spec.SMTPConfig.User + } + if zitadel.Spec.SMTPConfig.ReplyToAddress != nil { + adminRequest.ReplyToAddress = *zitadel.Spec.SMTPConfig.ReplyToAddress + } + + if _, err = adminClient.UpdateSMTPConfig(ctx, adminRequest); err != nil { + if !strings.Contains(err.Error(), "No changes") { + return ctrl.Result{}, fmt.Errorf("Could not update SMTP config: %v", err) + } + } + smtpId = resp.SmtpConfig.Id + } else { + adminRequest := &adm.AddSMTPConfigRequest{ + SenderAddress: zitadel.Spec.SMTPConfig.SenderAddress, + SenderName: zitadel.Spec.SMTPConfig.SenderName, + Tls: zitadel.Spec.SMTPConfig.TLS, + Host: zitadel.Spec.SMTPConfig.Host, + Description: "autogenerated by k8s-operator", + Password: "test", + } + if zitadel.Spec.SMTPConfig.User != nil && zitadel.Spec.SMTPConfig.Password != nil { + passwordSecret, err := r.RefResolver.SecretKeyRef(ctx, zitadel.Spec.SMTPConfig.Password.SecretKeyRef, zitadel.Namespace) + if err != nil { + return ctrl.Result{}, err + } + adminRequest.Password = passwordSecret + adminRequest.User = *zitadel.Spec.SMTPConfig.User + } + if zitadel.Spec.SMTPConfig.ReplyToAddress != nil { + adminRequest.ReplyToAddress = *zitadel.Spec.SMTPConfig.ReplyToAddress + } + + addRes, err := adminClient.AddSMTPConfig(ctx, adminRequest) + if err != nil { + return ctrl.Result{}, fmt.Errorf("Could not add SMTP 
config: %v", err) + } + smtpId = addRes.Id + } + + if _, err := adminClient.ActivateSMTPConfig(ctx, &adm.ActivateSMTPConfigRequest{ + Id: smtpId, + }); err != nil { + return ctrl.Result{}, fmt.Errorf("Error activating SMTP config: %v", err) + } + patch := client.MergeFrom(zitadel.DeepCopy()) + zitadel.Status.SMTPProviderId = smtpId + return ctrl.Result{}, r.Status().Patch(ctx, zitadel, patch) +} + +func (r *ZitadelClusterReconciler) reconcileDomainPolicy(ctx context.Context, zitadel *zitadelv1alpha1.ZitadelCluster) (ctrl.Result, error) { + adminClient, err := zitadelClient.NewAdminClient(ctx, zitadel, *r.RefResolver) + if err != nil { + return ctrl.Result{}, err + } + + if _, err = adminClient.UpdateDomainPolicy(ctx, &adm.UpdateDomainPolicyRequest{ + UserLoginMustBeDomain: zitadel.Spec.DomainSettings.UserLoginMustBeDomain, + ValidateOrgDomains: zitadel.Spec.DomainSettings.ValidateOrgDomains, + SmtpSenderAddressMatchesInstanceDomain: zitadel.Spec.DomainSettings.SMTPSenderAddressMatchesInstanceDomain, + }); err != nil { + if !strings.Contains(err.Error(), "not been changed") { + return ctrl.Result{}, fmt.Errorf("Could not update domain policy config: %v", err) + } + } + return ctrl.Result{}, nil +} + +func GetIssuer(zitadel *zitadelv1alpha1.ZitadelCluster) string { + scheme := "http" + if zitadel.Spec.ExternalSecure { + scheme = "https" + } + if zitadel.Spec.ExternalPort == 443 { + return fmt.Sprintf("%s://%s", scheme, zitadel.Spec.Host) + } + return fmt.Sprintf("%s://%s:%d", scheme, zitadel.Spec.Host, zitadel.Spec.ExternalPort) +} + +func GetAPI(zitadel *zitadelv1alpha1.ZitadelCluster) string { + return fmt.Sprintf("%s:%d", deployment.ServiceFQDN(zitadel.ObjectMeta), deployment.ZitadelPort) +} + +func (r *ZitadelClusterReconciler) patchStatus(ctx context.Context, zitadel *zitadelv1alpha1.ZitadelCluster, + patcher patcher) error { + patch := client.MergeFrom(zitadel.DeepCopy()) + if err := patcher(&zitadel.Status); err != nil { + return err + } + return 
r.Status().Patch(ctx, zitadel, patch) +} + +func (r *ZitadelClusterReconciler) patcher(ctx context.Context, zitadel *zitadelv1alpha1.ZitadelCluster) patcher { + return func(s *zitadelv1alpha1.ZitadelClusterStatus) error { + var sts appsv1.Deployment + if err := r.Get(ctx, client.ObjectKeyFromObject(zitadel), &sts); err != nil { + return err + } + zitadel.Status.Replicas = sts.Status.ReadyReplicas + + condition.SetReadyWithDeployment(&zitadel.Status, &sts, zitadel.Status.DefaultInstanceId) + return nil + } +} + +func (r *ZitadelClusterReconciler) patch(ctx context.Context, zitadel *zitadelv1alpha1.ZitadelCluster, + patcher func(*zitadelv1alpha1.ZitadelCluster)) error { + patch := client.MergeFrom(zitadel.DeepCopy()) + patcher(zitadel) + return r.Patch(ctx, zitadel, patch) +} + +// SetupWithManager sets up the controller with the Manager. +func (r *ZitadelClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&zitadelv1alpha1.ZitadelCluster{}). + Owns(&appsv1.Deployment{}). + Owns(&corev1.Service{}). + Owns(&corev1.ConfigMap{}). + Owns(&corev1.Secret{}). + Owns(&zitadelv1alpha1.Organization{}). + WithOptions(controller.Options{RateLimiter: workqueue.NewItemExponentialFailureRateLimiter(time.Millisecond*500, time.Minute*3)}). 
+ Complete(r) +} diff --git a/src/pkg/admin/admin.go b/src/pkg/admin/admin.go new file mode 100644 index 0000000..fc06c9a --- /dev/null +++ b/src/pkg/admin/admin.go @@ -0,0 +1,14 @@ +package admin + +import ( + zitadelv1alpha1 "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/api/v1alpha1" +) + +const ( + AccountName = "admin" + Key = "password" +) + +func AdminPasswordSecretName(zitadel *zitadelv1alpha1.ZitadelCluster) string { + return zitadel.Name + "-admin-password-secret" +} diff --git a/src/pkg/builder/builder.go b/src/pkg/builder/builder.go new file mode 100644 index 0000000..4a3d090 --- /dev/null +++ b/src/pkg/builder/builder.go @@ -0,0 +1,15 @@ +package builder + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +type Builder struct { + scheme *runtime.Scheme +} + +func NewBuilder(scheme *runtime.Scheme) *Builder { + return &Builder{ + scheme: scheme, + } +} diff --git a/src/pkg/builder/configmap_builder.go b/src/pkg/builder/configmap_builder.go new file mode 100644 index 0000000..793ebde --- /dev/null +++ b/src/pkg/builder/configmap_builder.go @@ -0,0 +1,39 @@ +package builder + +import ( + zitadelv1alpha1 "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/api/v1alpha1" + metadata "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/pkg/builder/metadata" + "fmt" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" +) + +type ConfigMapOpts struct { + Zitadel *zitadelv1alpha1.ZitadelCluster + Key types.NamespacedName + Data map[string]string + Labels map[string]string + Annotations map[string]string + Immutable bool +} + +func (b *Builder) BuildConfigMap(opts ConfigMapOpts, owner metav1.Object) (*corev1.ConfigMap, error) { + objMeta := + metadata.NewMetadataBuilder(opts.Key). + WithZitadel(opts.Zitadel). + WithLabels(opts.Labels). + WithAnnotations(opts.Annotations). 
+ Build() + + configMap := &corev1.ConfigMap{ + Data: opts.Data, + ObjectMeta: objMeta, + Immutable: &opts.Immutable, + } + if err := controllerutil.SetControllerReference(owner, configMap, b.scheme); err != nil { + return nil, fmt.Errorf("error setting controller reference in ConfigMap manifest: %v", err) + } + return configMap, nil +} diff --git a/src/pkg/builder/deployment_builder.go b/src/pkg/builder/deployment_builder.go new file mode 100644 index 0000000..40dc694 --- /dev/null +++ b/src/pkg/builder/deployment_builder.go @@ -0,0 +1,160 @@ +package builder + +import ( + "fmt" + + zitadelv1alpha1 "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/api/v1alpha1" + labels "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/pkg/builder/labels" + metadata "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/pkg/builder/metadata" + configuration "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/pkg/configuration" + deployment "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/pkg/deployment" + "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/pkg/masterkey" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" +) + +func (b *Builder) BuildDeployment(zitadel *zitadelv1alpha1.ZitadelCluster, key types.NamespacedName) (*appsv1.Deployment, error) { + replicas := zitadel.Spec.Replicas + objMeta := + metadata.NewMetadataBuilder(key). + WithZitadel(zitadel). + Build() + selectorLabels := + labels.NewLabelsBuilder(). + WithZitadelSelectorLabels(zitadel). 
+ Build() + podTemplate, err := b.buildDepPodTemplate(zitadel, selectorLabels) + if err != nil { + return nil, fmt.Errorf("error building pod template: %v", err) + } + + dep := &appsv1.Deployment{ + ObjectMeta: objMeta, + Spec: appsv1.DeploymentSpec{ + Replicas: &replicas, + Selector: &metav1.LabelSelector{ + MatchLabels: selectorLabels, + }, + Template: *podTemplate, + }} + if err := controllerutil.SetControllerReference(zitadel, dep, b.scheme); err != nil { + return nil, fmt.Errorf("error setting controller reference to Deployment: %v", err) + } + return dep, nil +} + +func (b *Builder) buildDepPodTemplate(zitadel *zitadelv1alpha1.ZitadelCluster, labels map[string]string) (*corev1.PodTemplateSpec, error) { + objMeta := + metadata.NewMetadataBuilder(client.ObjectKeyFromObject(zitadel)). + WithZitadel(zitadel). + WithLabels(labels). + WithAnnotations(zitadel.Spec.PodAnnotations). + Build() + group := int64(0) + + mode := int32(0444) + return &corev1.PodTemplateSpec{ + ObjectMeta: objMeta, + Spec: corev1.PodSpec{ + SecurityContext: &corev1.PodSecurityContext{FSGroup: &group}, + Containers: *b.buildDepContainers(zitadel), + Volumes: []corev1.Volume{ + {Name: "certs", VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: zitadel.Spec.RootTLSSecret.Name, + DefaultMode: &mode, + }, + }}, + {Name: "zitadel-config-yaml", VolumeSource: corev1.VolumeSource{ConfigMap: &corev1.ConfigMapVolumeSource{LocalObjectReference: corev1.LocalObjectReference{Name: configuration.ConfigurationName(zitadel)}}}}, + }, + }, + }, + nil +} + +func (b *Builder) buildDepContainers(zitadel *zitadelv1alpha1.ZitadelCluster) *[]corev1.Container { + readyProbeHandle := corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{HTTPHeaders: []corev1.HTTPHeader{}, + Port: intstr.FromInt(deployment.ZitadelPort), + Scheme: corev1.URISchemeHTTP, + Path: "/debug/ready", + }, + } + livenessProbeHandle := corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{HTTPHeaders: 
[]corev1.HTTPHeader{}, + Port: intstr.FromInt(deployment.ZitadelPort), + Scheme: corev1.URISchemeHTTP, + Path: "/debug/healthz", + }, + } + + return &[]corev1.Container{ + { + Name: "zitadel", + Image: zitadel.Spec.Image.Name + ":" + zitadel.Spec.Image.Tag, + Args: []string{ + "start", + "--config", "/config/zitadel-config-yaml", + "--masterkeyFromEnv", + }, + ImagePullPolicy: corev1.PullIfNotPresent, + Env: []corev1.EnvVar{ + { + Name: "ZITADEL_MASTERKEY", + ValueFrom: &corev1.EnvVarSource{SecretKeyRef: &corev1.SecretKeySelector{LocalObjectReference: corev1.LocalObjectReference{Name: masterkey.MasterKeyName(zitadel)}, Key: masterkey.Key}}, + }, + + { + Name: "ZITADEL_DATABASE_COCKROACH_ADMIN_SSL_ROOTCERT", + Value: "/certs/ca.crt", + }, + { + Name: "ZITADEL_DATABASE_COCKROACH_ADMIN_SSL_CERT", + Value: "/certs/tls.crt", + }, + { + Name: "ZITADEL_DATABASE_COCKROACH_ADMIN_SSL_KEY", + Value: "/certs/tls.key", + }, + + { + Name: "ZITADEL_DATABASE_COCKROACH_USER_SSL_ROOTCERT", + Value: "/certs/ca.crt", + }, + { + Name: "ZITADEL_DATABASE_COCKROACH_USER_SSL_CERT", + Value: "/certs/tls.crt", + }, + { + Name: "ZITADEL_DATABASE_COCKROACH_USER_SSL_KEY", + Value: "/certs/tls.key", + }, + }, + Ports: []corev1.ContainerPort{ + {Name: deployment.ZitadelName, ContainerPort: deployment.ZitadelPort}, + }, + LivenessProbe: &corev1.Probe{ + ProbeHandler: livenessProbeHandle, + FailureThreshold: 10, + InitialDelaySeconds: 0, + PeriodSeconds: 5, + }, + ReadinessProbe: &corev1.Probe{ + ProbeHandler: readyProbeHandle, + FailureThreshold: 3, + InitialDelaySeconds: 0, + PeriodSeconds: 5, + }, + Resources: zitadel.Spec.Resources, + VolumeMounts: []corev1.VolumeMount{ + {Name: "zitadel-config-yaml", MountPath: "/config"}, + {Name: "certs", MountPath: "/certs"}, + }, + }, + } +} diff --git a/src/pkg/builder/job_builder.go b/src/pkg/builder/job_builder.go new file mode 100644 index 0000000..d168970 --- /dev/null +++ b/src/pkg/builder/job_builder.go @@ -0,0 +1,193 @@ +package builder + +import ( 
+ "fmt" + + zitadelv1alpha1 "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/api/v1alpha1" + configuration "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/pkg/configuration" + "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/pkg/masterkey" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" +) + +func (b *Builder) BuildInitJob(zitadel *zitadelv1alpha1.ZitadelCluster, key types.NamespacedName) (*batchv1.Job, error) { + + backOffLimit := int32(5) + activeDeadlineSeconds := int64(300) + runAsNonRoot := true + enableServiceLinks := false + user := int64(1000) + mode := int32(0444) + initJob := &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: key.Name, + Namespace: key.Namespace, + }, + Spec: batchv1.JobSpec{ + BackoffLimit: &backOffLimit, + ActiveDeadlineSeconds: &activeDeadlineSeconds, + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyOnFailure, + SecurityContext: &corev1.PodSecurityContext{ + RunAsNonRoot: &runAsNonRoot, + RunAsUser: &user, + }, + EnableServiceLinks: &enableServiceLinks, + Volumes: []corev1.Volume{ + {Name: "certs", VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: zitadel.Spec.RootTLSSecret.Name, + DefaultMode: &mode, + }, + }}, + {Name: "zitadel-config-yaml", VolumeSource: corev1.VolumeSource{ConfigMap: &corev1.ConfigMapVolumeSource{LocalObjectReference: corev1.LocalObjectReference{Name: configuration.ConfigurationName(zitadel)}}}}, + }, + Containers: []corev1.Container{ + { + Name: "zitadel-init", + Image: zitadel.Spec.Image.Name + ":" + zitadel.Spec.Image.Tag, + Args: []string{ + "init", + "--config", "/config/zitadel-config-yaml", + }, + Env: []corev1.EnvVar{ + { + Name: "ZITADEL_DATABASE_COCKROACH_ADMIN_SSL_ROOTCERT", + Value: 
"/certs/ca.crt", + }, + { + Name: "ZITADEL_DATABASE_COCKROACH_ADMIN_SSL_CERT", + Value: "/certs/tls.crt", + }, + { + Name: "ZITADEL_DATABASE_COCKROACH_ADMIN_SSL_KEY", + Value: "/certs/tls.key", + }, + { + Name: "ZITADEL_DATABASE_COCKROACH_USER_SSL_ROOTCERT", + Value: "/certs/ca.crt", + }, + { + Name: "ZITADEL_DATABASE_COCKROACH_USER_SSL_CERT", + Value: "/certs/tls.crt", + }, + { + Name: "ZITADEL_DATABASE_COCKROACH_USER_SSL_KEY", + Value: "/certs/tls.key", + }, + }, + VolumeMounts: []corev1.VolumeMount{ + {Name: "zitadel-config-yaml", MountPath: "/config"}, + {Name: "certs", MountPath: "/certs"}, + }, + }, + }, + }, + }, + }, + } + if err := controllerutil.SetControllerReference(zitadel, initJob, b.scheme); err != nil { + return nil, fmt.Errorf("error setting controller reference to InitJob: %v", err) + } + return initJob, nil +} + +func (b *Builder) BuildSetupJob(zitadel *zitadelv1alpha1.ZitadelCluster, key types.NamespacedName) (*batchv1.Job, error) { + + backOffLimit := int32(5) + activeDeadlineSeconds := int64(300) + runAsNonRoot := true + enableServiceLinks := false + user := int64(1000) + mode := int32(0444) + setupJob := &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: key.Name, + Namespace: key.Namespace, + }, + Spec: batchv1.JobSpec{ + BackoffLimit: &backOffLimit, + ActiveDeadlineSeconds: &activeDeadlineSeconds, + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyOnFailure, + SecurityContext: &corev1.PodSecurityContext{ + RunAsNonRoot: &runAsNonRoot, + RunAsUser: &user, + }, + EnableServiceLinks: &enableServiceLinks, + Volumes: []corev1.Volume{ + {Name: "certs", VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: zitadel.Spec.RootTLSSecret.Name, + DefaultMode: &mode, + }, + }}, + {Name: "zitadel-config-yaml", VolumeSource: corev1.VolumeSource{ConfigMap: &corev1.ConfigMapVolumeSource{LocalObjectReference: corev1.LocalObjectReference{Name: 
configuration.ConfigurationName(zitadel)}}}}, + }, + Containers: []corev1.Container{ + { + Name: "zitadel-setup", + Image: zitadel.Spec.Image.Name + ":" + zitadel.Spec.Image.Tag, + Args: []string{ + "setup", + "--config", "/config/zitadel-config-yaml", + "--steps", "/config/zitadel-config-yaml", + "--masterkeyFromEnv", + "--init-projections=true", + }, + + Env: []corev1.EnvVar{ + { + Name: "ZITADEL_MASTERKEY", + ValueFrom: &corev1.EnvVarSource{SecretKeyRef: &corev1.SecretKeySelector{LocalObjectReference: corev1.LocalObjectReference{Name: masterkey.MasterKeyName(zitadel)}, Key: masterkey.Key}}, + }, + { + Name: "ZITADEL_FIRSTINSTANCE_MACHINEKEYPATH", + Value: "/machinekey/sa.json", + }, + { + Name: "ZITADEL_DATABASE_COCKROACH_ADMIN_SSL_ROOTCERT", + Value: "/certs/ca.crt", + }, + { + Name: "ZITADEL_DATABASE_COCKROACH_ADMIN_SSL_CERT", + Value: "/certs/tls.crt", + }, + { + Name: "ZITADEL_DATABASE_COCKROACH_ADMIN_SSL_KEY", + Value: "/certs/tls.key", + }, + + { + Name: "ZITADEL_DATABASE_COCKROACH_USER_SSL_ROOTCERT", + Value: "/certs/ca.crt", + }, + { + Name: "ZITADEL_DATABASE_COCKROACH_USER_SSL_CERT", + Value: "/certs/tls.crt", + }, + { + Name: "ZITADEL_DATABASE_COCKROACH_USER_SSL_KEY", + Value: "/certs/tls.key", + }, + }, + VolumeMounts: []corev1.VolumeMount{ + {Name: "zitadel-config-yaml", MountPath: "/config"}, + {Name: "certs", MountPath: "/certs"}, + }, + }, + }, + }, + }, + }, + } + if err := controllerutil.SetControllerReference(zitadel, setupJob, b.scheme); err != nil { + return nil, fmt.Errorf("error setting controller reference to SetupJob: %v", err) + } + return setupJob, nil +} diff --git a/src/pkg/builder/labels/labels.go b/src/pkg/builder/labels/labels.go new file mode 100644 index 0000000..cc45e34 --- /dev/null +++ b/src/pkg/builder/labels/labels.go @@ -0,0 +1,65 @@ +package builder + +import ( + zitadelv1alpha1 "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/api/v1alpha1" + deployment 
"bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/pkg/deployment" +) + +const ( + appLabel = "app.kubernetes.io/name" + instanceLabel = "app.kubernetes.io/instance" + deploymentPodName = "deployment.kubernetes.io/pod-name" + appZitadel = "zitadel" + appExporter = "exporter" +) + +type LabelsBuilder struct { + labels map[string]string +} + +func NewLabelsBuilder() *LabelsBuilder { + return &LabelsBuilder{ + labels: map[string]string{}, + } +} + +func (b *LabelsBuilder) WithApp(app string) *LabelsBuilder { + b.labels[appLabel] = app + return b +} + +func (b *LabelsBuilder) WithInstance(instance string) *LabelsBuilder { + b.labels[instanceLabel] = instance + return b +} + +func (b *LabelsBuilder) WithZitadel(zitadel *zitadelv1alpha1.ZitadelCluster) *LabelsBuilder { + return b.WithApp(appZitadel). + WithInstance(zitadel.Name) +} + +func (b *LabelsBuilder) WithDeploymentPod(zitadel *zitadelv1alpha1.ZitadelCluster, podIndex int) *LabelsBuilder { + b.labels[deploymentPodName] = deployment.PodName(zitadel.ObjectMeta, podIndex) + return b +} + +func (b *LabelsBuilder) WithLabels(labels map[string]string) *LabelsBuilder { + for k, v := range labels { + b.labels[k] = v + } + return b +} + +func (b *LabelsBuilder) WithZitadelSelectorLabels(zitadel *zitadelv1alpha1.ZitadelCluster) *LabelsBuilder { + b = b.WithZitadel(zitadel) + return b +} + +func (b *LabelsBuilder) WithMetricsSelectorLabels(zitadel *zitadelv1alpha1.ZitadelCluster) *LabelsBuilder { + return b.WithApp(appExporter). 
+ WithInstance(zitadel.Name) +} + +func (b *LabelsBuilder) Build() map[string]string { + return b.labels +} diff --git a/src/pkg/builder/metadata/metadata.go b/src/pkg/builder/metadata/metadata.go new file mode 100644 index 0000000..f13532b --- /dev/null +++ b/src/pkg/builder/metadata/metadata.go @@ -0,0 +1,48 @@ +package metadata + +import ( + zitadelv1alpha1 "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/api/v1alpha1" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +type MetadataBuilder struct { + objMeta metav1.ObjectMeta +} + +func NewMetadataBuilder(key types.NamespacedName) *MetadataBuilder { + return &MetadataBuilder{ + objMeta: metav1.ObjectMeta{ + Name: key.Name, + Namespace: key.Namespace, + Labels: map[string]string{}, + Annotations: map[string]string{}, + }, + } +} + +func (b *MetadataBuilder) WithZitadel(zitadel *zitadelv1alpha1.ZitadelCluster) *MetadataBuilder { + if zitadel == nil { + return b + } + return b +} + +func (b *MetadataBuilder) WithLabels(labels map[string]string) *MetadataBuilder { + for k, v := range labels { + b.objMeta.Labels[k] = v + } + return b +} + +func (b *MetadataBuilder) WithAnnotations(annotations map[string]string) *MetadataBuilder { + for k, v := range annotations { + b.objMeta.Annotations[k] = v + } + return b +} + +func (b *MetadataBuilder) Build() metav1.ObjectMeta { + return b.objMeta +} diff --git a/src/pkg/builder/organization_builder.go b/src/pkg/builder/organization_builder.go new file mode 100644 index 0000000..7126491 --- /dev/null +++ b/src/pkg/builder/organization_builder.go @@ -0,0 +1,42 @@ +package builder + +import ( + "fmt" + + zitadelv1alpha1 "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/api/v1alpha1" + metadata "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/pkg/builder/metadata" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + 
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" +) + +type OrganizationOpts struct { + Key types.NamespacedName + Zitadel *zitadelv1alpha1.ZitadelCluster +} + +func (b *Builder) BuildOrganization(opts OrganizationOpts, owner metav1.Object) (*zitadelv1alpha1.Organization, error) { + objMeta := + metadata.NewMetadataBuilder(opts.Key). + Build() + + org := &zitadelv1alpha1.Organization{ + ObjectMeta: objMeta, + Spec: zitadelv1alpha1.OrganizationSpec{ + ZitadelClusterRef: zitadelv1alpha1.ZitadelClusterRef{ + ObjectReference: corev1.ObjectReference{ + Kind: "ZitadelCluster", + Namespace: opts.Zitadel.Namespace, + Name: opts.Zitadel.Name, + APIVersion: "v1alpha1", + }, + }, + }, + } + + if err := controllerutil.SetControllerReference(owner, org, b.scheme); err != nil { + return nil, fmt.Errorf("error setting controller reference in Secret manifest: %v", err) + } + return org, nil +} diff --git a/src/pkg/builder/secret_builder.go b/src/pkg/builder/secret_builder.go new file mode 100644 index 0000000..73078f7 --- /dev/null +++ b/src/pkg/builder/secret_builder.go @@ -0,0 +1,38 @@ +package builder + +import ( + zitadelv1alpha1 "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/api/v1alpha1" + metadata "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/pkg/builder/metadata" + "fmt" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" +) + +type SecretOpts struct { + Zitadel *zitadelv1alpha1.ZitadelCluster + Key types.NamespacedName + Data map[string][]byte + Labels map[string]string + Annotations map[string]string + Immutable bool +} + +func (b *Builder) BuildSecret(opts SecretOpts, owner metav1.Object) (*corev1.Secret, error) { + objMeta := + metadata.NewMetadataBuilder(opts.Key). + WithZitadel(opts.Zitadel). + WithLabels(opts.Labels). + WithAnnotations(opts.Annotations). 
+ Build() + secret := &corev1.Secret{ + ObjectMeta: objMeta, + Data: opts.Data, + Immutable: &opts.Immutable, + } + if err := controllerutil.SetControllerReference(owner, secret, b.scheme); err != nil { + return nil, fmt.Errorf("error setting controller reference in Secret manifest: %v", err) + } + return secret, nil +} diff --git a/src/pkg/builder/service_builder.go b/src/pkg/builder/service_builder.go new file mode 100644 index 0000000..1230ad0 --- /dev/null +++ b/src/pkg/builder/service_builder.go @@ -0,0 +1,43 @@ +package builder + +import ( + "fmt" + + zitadelv1alpha1 "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/api/v1alpha1" + labels "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/pkg/builder/labels" + metadata "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/pkg/builder/metadata" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" +) + +type ServiceOpts struct { + Ports []corev1.ServicePort +} + +func (b *Builder) BuildService(zitadel *zitadelv1alpha1.ZitadelCluster, key types.NamespacedName, + opts ServiceOpts) (*corev1.Service, error) { + objMeta := + metadata.NewMetadataBuilder(key). + WithZitadel(zitadel). + Build() + + svc := &corev1.Service{ + ObjectMeta: objMeta, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeClusterIP, + Ports: opts.Ports, + Selector: serviceSelectorLabels(opts, zitadel), + }, + } + if err := controllerutil.SetControllerReference(zitadel, svc, b.scheme); err != nil { + return nil, fmt.Errorf("error setting controller reference to Service: %v", err) + } + return svc, nil +} + +func serviceSelectorLabels(opts ServiceOpts, zitadel *zitadelv1alpha1.ZitadelCluster) map[string]string { + return labels.NewLabelsBuilder(). + WithZitadelSelectorLabels(zitadel). 
+ Build() +} diff --git a/src/pkg/condition/condition.go b/src/pkg/condition/condition.go new file mode 100644 index 0000000..bacf590 --- /dev/null +++ b/src/pkg/condition/condition.go @@ -0,0 +1,68 @@ +package conditions + +import ( + "fmt" + "reflect" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type Conditioner interface { + SetCondition(condition metav1.Condition) +} + +type Patcher func(Conditioner) + +type Ready struct{} + +func NewReady() *Ready { + return &Ready{} +} + +func (p *Ready) PatcherFailed(msg string) Patcher { + return func(c Conditioner) { + SetReadyFailedWithMessage(c, msg) + } +} + +func (p *Ready) PatcherWithError(err error) Patcher { + return func(c Conditioner) { + if err == nil { + SetReadyCreated(c) + } else { + SetReadyFailed(c) + } + } +} + +func (p *Ready) PatcherRefResolver(err error, obj interface{}) Patcher { + return func(c Conditioner) { + if err == nil { + return + } + if apierrors.IsNotFound(err) { + SetReadyFailedWithMessage(c, fmt.Sprintf("%s not found", getType(obj))) + return + } + SetReadyFailedWithMessage(c, fmt.Sprintf("Error getting %s", getType(obj))) + } +} + +func (p *Ready) PatcherHealthy(err error) Patcher { + return func(c Conditioner) { + if err == nil { + SetReadyHealthty(c) + } else { + SetReadyUnhealthtyWithError(c, err) + } + } +} + +func getType(obj interface{}) string { + if t := reflect.TypeOf(obj); t.Kind() == reflect.Ptr { + return t.Elem().Name() + } else { + return t.Name() + } +} diff --git a/src/pkg/condition/pat.go b/src/pkg/condition/pat.go new file mode 100644 index 0000000..18ab0bc --- /dev/null +++ b/src/pkg/condition/pat.go @@ -0,0 +1,25 @@ +package conditions + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + zitadelv1alpha1 "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/api/v1alpha1" +) + +func SetPatOutOfDate(c Conditioner) { + c.SetCondition(metav1.Condition{ + Type: 
zitadelv1alpha1.ConditionTypePATUpToDate, + Status: metav1.ConditionFalse, + Reason: zitadelv1alpha1.ConditionReasonRolesChanged, + Message: "PAT out of date", + }) +} + +func SetPatUpToDate(c Conditioner) { + c.SetCondition(metav1.Condition{ + Type: zitadelv1alpha1.ConditionTypePATUpToDate, + Status: metav1.ConditionTrue, + Reason: zitadelv1alpha1.ConditionReasonPATUpToDate, + Message: "PAT up to date", + }) +} diff --git a/src/pkg/condition/ready.go b/src/pkg/condition/ready.go new file mode 100644 index 0000000..2a72b1f --- /dev/null +++ b/src/pkg/condition/ready.go @@ -0,0 +1,70 @@ +package conditions + +import ( + appsv1 "k8s.io/api/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + zitadelv1alpha1 "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/api/v1alpha1" +) + +func SetReadyHealthty(c Conditioner) { + c.SetCondition(metav1.Condition{ + Type: zitadelv1alpha1.ConditionTypeReady, + Status: metav1.ConditionTrue, + Reason: zitadelv1alpha1.ConditionReasonHealthy, + Message: "Healthy", + }) +} + +func SetReadyUnhealthtyWithError(c Conditioner, err error) { + c.SetCondition(metav1.Condition{ + Type: zitadelv1alpha1.ConditionTypeReady, + Status: metav1.ConditionFalse, + Reason: zitadelv1alpha1.ConditionReasonHealthy, + Message: err.Error(), + }) +} + +func SetReadyCreatedWithMessage(c Conditioner, message string) { + c.SetCondition(metav1.Condition{ + Type: zitadelv1alpha1.ConditionTypeReady, + Status: metav1.ConditionTrue, + Reason: zitadelv1alpha1.ConditionReasonCreated, + Message: message, + }) +} + +func SetReadyCreated(c Conditioner) { + SetReadyCreatedWithMessage(c, "Created") +} + +func SetReadyFailedWithMessage(c Conditioner, message string) { + c.SetCondition(metav1.Condition{ + Type: zitadelv1alpha1.ConditionTypeReady, + Status: metav1.ConditionFalse, + Reason: zitadelv1alpha1.ConditionReasonFailed, + Message: message, + }) +} + +func SetReadyFailed(c Conditioner) { + SetReadyFailedWithMessage(c, "Failed") +} + +func 
SetReadyWithDeployment(c Conditioner, sts *appsv1.Deployment, instanceId string) { + if sts.Status.Replicas == 0 || sts.Status.ReadyReplicas != sts.Status.Replicas || instanceId == "" { + c.SetCondition(metav1.Condition{ + Type: zitadelv1alpha1.ConditionTypeReady, + Status: metav1.ConditionFalse, + Reason: zitadelv1alpha1.ConditionReasonDeploymentNotReady, + Message: "Not ready", + }) + return + } + c.SetCondition(metav1.Condition{ + Type: zitadelv1alpha1.ConditionTypeReady, + Status: metav1.ConditionTrue, + Reason: zitadelv1alpha1.ConditionReasonDeploymentReady, + Message: "Running", + }) +} diff --git a/src/pkg/configuration/configuration.go b/src/pkg/configuration/configuration.go new file mode 100644 index 0000000..54eb46b --- /dev/null +++ b/src/pkg/configuration/configuration.go @@ -0,0 +1,9 @@ +package configuration + +import ( + zitadelv1alpha1 "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/api/v1alpha1" +) + +func ConfigurationName(zitadel *zitadelv1alpha1.ZitadelCluster) string { + return zitadel.Name + "-configuration-configmap" +} diff --git a/src/pkg/controller/configmap/controller.go b/src/pkg/controller/configmap/controller.go new file mode 100644 index 0000000..065af24 --- /dev/null +++ b/src/pkg/controller/configmap/controller.go @@ -0,0 +1,79 @@ +package configmap + +import ( + "context" + "fmt" + + zitadelv1alpha1 "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/api/v1alpha1" + builder "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/pkg/builder" + "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/pkg/deployment" + "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/pkg/masterkey" + crdbv1alpha1 "github.com/cockroachdb/cockroach-operator/apis/v1alpha1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type ConfigMapReconciler struct { + client.Client + Builder 
*builder.Builder +} + +func NewConfigMapReconciler(client client.Client, builder *builder.Builder) *ConfigMapReconciler { + return &ConfigMapReconciler{ + Client: client, + Builder: builder, + } +} + +func (r *ConfigMapReconciler) ReconcileZitadelConfiguration(ctx context.Context, key types.NamespacedName, zitadel *zitadelv1alpha1.ZitadelCluster, crdb *crdbv1alpha1.CrdbCluster, base64key string) error { + config := make(map[string]string) + config["zitadel-config-yaml"] = + fmt.Sprintf(` +Database: + Cockroach: + Host: %s + User: + Username: root + SSL: + Mode: verify-full + Admin: + SSL: + Mode: verify-full +ExternalDomain: %s +ExternalPort: %d +ExternalSecure: %t +TLS: + Enabled: false +SystemAPIUsers: + - %s: + KeyData: %s + Memberships: + - MemberType: System + Roles: + - "SYSTEM_OWNER" + - "IAM_OWNER" + - "ORG_OWNER" +`, deployment.ServiceFQDNWithService(crdb.ObjectMeta, crdb.Name), zitadel.Spec.Host, zitadel.Spec.ExternalPort, zitadel.Spec.ExternalSecure, masterkey.OwnerName, base64key) + + opts := builder.ConfigMapOpts{ + Zitadel: zitadel, + Key: key, + Immutable: true, + Data: config, + } + configmap, err := r.Builder.BuildConfigMap(opts, zitadel) + if err != nil { + return fmt.Errorf("error building replication password ConfigMap: %v", err) + } + var existingConfigMap corev1.ConfigMap + if err := r.Get(ctx, key, &existingConfigMap); err == nil { + patch := client.MergeFrom(existingConfigMap.DeepCopy()) + existingConfigMap.Data = configmap.Data + return r.Patch(ctx, &existingConfigMap, patch) + } + if err := r.Create(ctx, configmap); err != nil { + return fmt.Errorf("error creating replication password ConfigMap: %v", err) + } + return nil +} diff --git a/src/pkg/controller/secret/controller.go b/src/pkg/controller/secret/controller.go new file mode 100644 index 0000000..b0101e3 --- /dev/null +++ b/src/pkg/controller/secret/controller.go @@ -0,0 +1,93 @@ +package secret + +import ( + "context" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "encoding/pem" 
+ "fmt" + + zitadelv1alpha1 "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/api/v1alpha1" + builder "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/pkg/builder" + "github.com/sethvargo/go-password/password" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type SecretReconciler struct { + client.Client + Builder *builder.Builder +} + +func NewSecretReconciler(client client.Client, builder *builder.Builder) *SecretReconciler { + return &SecretReconciler{ + Client: client, + Builder: builder, + } +} + +func (r *SecretReconciler) ReconcileRandomPassword(ctx context.Context, key types.NamespacedName, secretKey string, + zitadel *zitadelv1alpha1.ZitadelCluster) (string, error) { + var existingSecret corev1.Secret + if err := r.Get(ctx, key, &existingSecret); err == nil { + return string(existingSecret.Data[secretKey]), nil + } + password, err := password.Generate(32, 4, 2, false, false) + if err != nil { + return "", fmt.Errorf("error generating replication password: %v", err) + } + opts := builder.SecretOpts{ + Zitadel: zitadel, + Key: key, + Immutable: true, + Data: map[string][]byte{ + secretKey: []byte(password), + }, + } + secret, err := r.Builder.BuildSecret(opts, zitadel) + if err != nil { + return "", fmt.Errorf("error building replication password Secret: %v", err) + } + if err := r.Create(ctx, secret); err != nil { + return "", fmt.Errorf("error creating replication password Secret: %v", err) + } + + return password, nil +} + +func (r *SecretReconciler) ReconcileRandomPrivateRSA(ctx context.Context, key types.NamespacedName, secretKey string, + zitadel *zitadelv1alpha1.ZitadelCluster) (string, error) { + var existingSecret corev1.Secret + if err := r.Get(ctx, key, &existingSecret); err == nil { + return string(existingSecret.Data[secretKey]), nil + } + privateKey, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + return "", 
fmt.Errorf("error generating replication private key: %v", err) + } + privkeyPem := pem.EncodeToMemory( + &pem.Block{ + Type: "RSA PRIVATE KEY", + Bytes: x509.MarshalPKCS1PrivateKey(privateKey), + }, + ) + opts := builder.SecretOpts{ + Zitadel: zitadel, + Key: key, + Immutable: true, + Data: map[string][]byte{ + secretKey: privkeyPem, + }, + } + secret, err := r.Builder.BuildSecret(opts, zitadel) + if err != nil { + return "", fmt.Errorf("error building replication password Secret: %v", err) + } + if err := r.Create(ctx, secret); err != nil { + return "", fmt.Errorf("error creating replication password Secret: %v", err) + } + + return string(privkeyPem), nil +} diff --git a/src/pkg/controller/service/controller.go b/src/pkg/controller/service/controller.go new file mode 100644 index 0000000..0b43bee --- /dev/null +++ b/src/pkg/controller/service/controller.go @@ -0,0 +1,48 @@ +package service + +import ( + "context" + "fmt" + + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type ServiceReconciler struct { + client.Client +} + +func NewServiceReconciler(client client.Client) *ServiceReconciler { + return &ServiceReconciler{ + Client: client, + } +} + +func (r *ServiceReconciler) Reconcile(ctx context.Context, desiredSvc *corev1.Service) error { + key := client.ObjectKeyFromObject(desiredSvc) + var existingSvc corev1.Service + if err := r.Get(ctx, key, &existingSvc); err != nil { + if !apierrors.IsNotFound(err) { + return fmt.Errorf("error getting Service: %v", err) + } + if err := r.Create(ctx, desiredSvc); err != nil { + return fmt.Errorf("error creating Service: %v", err) + } + return nil + } + + patch := client.MergeFrom(existingSvc.DeepCopy()) + existingSvc.Spec.Ports = desiredSvc.Spec.Ports + existingSvc.Spec.AllocateLoadBalancerNodePorts = desiredSvc.Spec.AllocateLoadBalancerNodePorts + existingSvc.Spec.Selector = desiredSvc.Spec.Selector + existingSvc.Spec.Type = 
desiredSvc.Spec.Type + for k, v := range desiredSvc.Annotations { + existingSvc.Annotations[k] = v + } + for k, v := range desiredSvc.Labels { + existingSvc.Labels[k] = v + } + + return r.Patch(ctx, &existingSvc, patch) +} diff --git a/src/pkg/controller/zitadel/controller.go b/src/pkg/controller/zitadel/controller.go new file mode 100644 index 0000000..438e416 --- /dev/null +++ b/src/pkg/controller/zitadel/controller.go @@ -0,0 +1,133 @@ +package zitadel + +import ( + "context" + "errors" + "fmt" + "time" + + zitadelv1alpha1 "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/api/v1alpha1" + condition "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/pkg/condition" + health "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/pkg/health" + zitadelClient "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/pkg/zitadel" + "github.com/hashicorp/go-multierror" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" +) + +type ZitadelReconciler struct { + Client client.Client + RefResolver *zitadelv1alpha1.RefResolver + ConditionReady *condition.Ready + + WrappedReconciler WrappedReconciler + Finalizer Finalizer + RequeueInterval time.Duration +} + +func NewZitadelReconciler(client client.Client, cr *condition.Ready, wr WrappedReconciler, f Finalizer, + requeueInterval time.Duration) Reconciler { + return &ZitadelReconciler{ + Client: client, + RefResolver: zitadelv1alpha1.NewRefResolver(client), + ConditionReady: cr, + WrappedReconciler: wr, + Finalizer: f, + RequeueInterval: requeueInterval, + } +} + +func (r *ZitadelReconciler) Reconcile(ctx context.Context, resource Resource) (ctrl.Result, error) { + if resource.IsBeingDeleted() { + if err := r.Finalizer.Finalize(ctx, resource); err != nil { + return ctrl.Result{}, fmt.Errorf("error finalizing %s: %v", resource.GetName(), err) + } + return ctrl.Result{}, nil + } + 
zitadelRef, err := resource.ZitadelClusterRef(ctx, r.RefResolver) + if err != nil { + return ctrl.Result{}, err + } + zitadel, err := r.RefResolver.ZitadelCluster(ctx, zitadelRef, resource.GetNamespace()) + if err != nil { + var errBundle *multierror.Error + errBundle = multierror.Append(errBundle, err) + + err = r.WrappedReconciler.PatchStatus(ctx, r.ConditionReady.PatcherRefResolver(err, zitadel)) + errBundle = multierror.Append(errBundle, err) + + return ctrl.Result{}, fmt.Errorf("error getting ZitadelCluster: %v", errBundle) + } + + if err := waitForZitadelCluster(ctx, r.Client, resource, zitadel); err != nil { + var errBundle *multierror.Error + errBundle = multierror.Append(errBundle, err) + + err := r.WrappedReconciler.PatchStatus(ctx, r.ConditionReady.PatcherWithError(err)) + errBundle = multierror.Append(errBundle, err) + + return ctrl.Result{}, fmt.Errorf("error waiting for Zitadel: %v", errBundle) + } + + ztdClient, err := zitadelClient.NewClient(ctx, zitadel, *r.RefResolver) + if err != nil { + var errBundle *multierror.Error + errBundle = multierror.Append(errBundle, err) + + msg := fmt.Sprintf("Error connecting to Zitadel: %v", err) + err = r.WrappedReconciler.PatchStatus(ctx, r.ConditionReady.PatcherFailed(msg)) + errBundle = multierror.Append(errBundle, err) + + return r.retryResult(ctx, resource, errBundle) + } + defer ztdClient.Connection.Close() + err = r.WrappedReconciler.Reconcile(ctx, ztdClient) + var errBundle *multierror.Error + errBundle = multierror.Append(errBundle, err) + + if err := errBundle.ErrorOrNil(); err != nil { + msg := fmt.Sprintf("Error creating %s: %v", resource.GetName(), err) + err = r.WrappedReconciler.PatchStatus(ctx, r.ConditionReady.PatcherFailed(msg)) + errBundle = multierror.Append(errBundle, err) + + return r.retryResult(ctx, resource, errBundle) + } + + if err = r.Finalizer.AddFinalizer(ctx); err != nil { + errBundle = multierror.Append(errBundle, fmt.Errorf("error adding finalizer to %s: %v", resource.GetName(), 
err)) + } + + err = r.WrappedReconciler.PatchStatus(ctx, r.ConditionReady.PatcherWithError(errBundle.ErrorOrNil())) + errBundle = multierror.Append(errBundle, err) + + if err := errBundle.ErrorOrNil(); err != nil { + return ctrl.Result{}, err + } + return r.requeueResult(ctx, resource) +} + +func (r *ZitadelReconciler) retryResult(ctx context.Context, resource Resource, err error) (ctrl.Result, error) { + return ctrl.Result{}, err +} + +func (r *ZitadelReconciler) requeueResult(ctx context.Context, resource Resource) (ctrl.Result, error) { + if r.RequeueInterval > 0 { + log.FromContext(ctx).V(1).Info("Requeuing ZITADEL resource") + return ctrl.Result{RequeueAfter: r.RequeueInterval}, nil + } + return ctrl.Result{}, nil +} + +func waitForZitadelCluster(ctx context.Context, client client.Client, resource Resource, + zitadel *zitadelv1alpha1.ZitadelCluster) error { + var zitadelErr *multierror.Error + healthy, err := health.IsZitadelClusterHealthy(ctx, client, zitadel) + if err != nil { + zitadelErr = multierror.Append(zitadelErr, err) + } + if !healthy { + zitadelErr = multierror.Append(zitadelErr, errors.New("Zitadel not healthy")) + } + return zitadelErr.ErrorOrNil() +} diff --git a/src/pkg/controller/zitadel/finalizer.go b/src/pkg/controller/zitadel/finalizer.go new file mode 100644 index 0000000..c125f8f --- /dev/null +++ b/src/pkg/controller/zitadel/finalizer.go @@ -0,0 +1,76 @@ +package zitadel + +import ( + "context" + "fmt" + + zitadelv1alpha1 "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/api/v1alpha1" + zitadelClient "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/pkg/zitadel" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type ZitadelFinalizer struct { + Client client.Client + RefResolver *zitadelv1alpha1.RefResolver + + WrappedFinalizer WrappedFinalizer +} + +func NewZitadelFinalizer(client client.Client, wf WrappedFinalizer) Finalizer { + return 
&ZitadelFinalizer{ + Client: client, + RefResolver: zitadelv1alpha1.NewRefResolver(client), + WrappedFinalizer: wf, + } +} + +func (tf *ZitadelFinalizer) AddFinalizer(ctx context.Context) error { + if tf.WrappedFinalizer.ContainsFinalizer() { + return nil + } + if err := tf.WrappedFinalizer.AddFinalizer(ctx); err != nil { + return fmt.Errorf("error adding finalizer in TemplateFinalizer: %v", err) + } + return nil +} + +func (tf *ZitadelFinalizer) Finalize(ctx context.Context, resource Resource) error { + if !tf.WrappedFinalizer.ContainsFinalizer() { + return nil + } + + zitadelRef, err := resource.ZitadelClusterRef(ctx, tf.RefResolver) + if err != nil { + return err + } + zitadel, err := tf.RefResolver.ZitadelCluster(ctx, zitadelRef, resource.GetNamespace()) + if err != nil { + if apierrors.IsNotFound(err) { + if err := tf.WrappedFinalizer.RemoveFinalizer(ctx); err != nil { + return fmt.Errorf("error removing %s finalizer: %v", resource.GetName(), err) + } + return nil + } + return fmt.Errorf("error getting ZitadelCluster: %v", err) + } + + if err := waitForZitadelCluster(ctx, tf.Client, resource, zitadel); err != nil { + return fmt.Errorf("error waiting for ZitadelCluster: %v", err) + } + + ztdClient, err := zitadelClient.NewClient(ctx, zitadel, *tf.RefResolver) + if err != nil { + return fmt.Errorf("error connecting to ZitadelCluster: %v", err) + } + defer ztdClient.Connection.Close() + + if err := tf.WrappedFinalizer.Reconcile(ctx, ztdClient); err != nil { + return fmt.Errorf("error reconciling in TemplateFinalizer: %v", err) + } + + if err := tf.WrappedFinalizer.RemoveFinalizer(ctx); err != nil { + return fmt.Errorf("error removing finalizer in TemplateFinalizer: %v", err) + } + return nil +} diff --git a/src/pkg/controller/zitadel/types.go b/src/pkg/controller/zitadel/types.go new file mode 100644 index 0000000..c91859a --- /dev/null +++ b/src/pkg/controller/zitadel/types.go @@ -0,0 +1,39 @@ +package zitadel + +import ( + "context" + + zitadelv1alpha1 
"bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/api/v1alpha1" + "github.com/zitadel/zitadel-go/v2/pkg/client/management" + + condition "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/pkg/condition" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ctrl "sigs.k8s.io/controller-runtime" +) + +type Resource interface { + v1.Object + ZitadelClusterRef(context.Context, *zitadelv1alpha1.RefResolver) (*zitadelv1alpha1.ZitadelClusterRef, error) + IsBeingDeleted() bool +} + +type Reconciler interface { + Reconcile(ctx context.Context, resource Resource) (ctrl.Result, error) +} + +type WrappedReconciler interface { + Reconcile(context.Context, *management.Client) error + PatchStatus(context.Context, condition.Patcher) error +} + +type Finalizer interface { + AddFinalizer(context.Context) error + Finalize(context.Context, Resource) error +} + +type WrappedFinalizer interface { + AddFinalizer(context.Context) error + RemoveFinalizer(context.Context) error + ContainsFinalizer() bool + Reconcile(context.Context, *management.Client) error +} diff --git a/src/pkg/deployment/deployment.go b/src/pkg/deployment/deployment.go new file mode 100644 index 0000000..e43f50f --- /dev/null +++ b/src/pkg/deployment/deployment.go @@ -0,0 +1,48 @@ +package deployment + +import ( + "fmt" + "os" + "strconv" + "strings" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + ZitadelName = "zitadel" + ZitadelPort = 8080 + SecretMountPath = "/var/secrets/" +) + +func ServiceFQDNWithService(meta metav1.ObjectMeta, service string) string { + clusterName := os.Getenv("CLUSTER_NAME") + if clusterName == "" { + clusterName = "cluster.local" + } + return fmt.Sprintf("%s.%s.svc.%s", service, meta.Namespace, clusterName) +} + +func ServiceFQDN(meta metav1.ObjectMeta) string { + return ServiceFQDNWithService(meta, meta.Name) +} + +func PodName(meta metav1.ObjectMeta, podIndex int) string { + return fmt.Sprintf("%s-%d", meta.Name, podIndex) +} + +func 
PodFQDNWithService(meta metav1.ObjectMeta, podIndex int, service string) string { + return fmt.Sprintf("%s.%s", PodName(meta, podIndex), ServiceFQDNWithService(meta, service)) +} + +func PodIndex(podName string) (*int, error) { + parts := strings.Split(podName, "-") + if len(parts) == 0 { + return nil, fmt.Errorf("invalid Pod name: %v", podName) + } + index, err := strconv.Atoi(parts[len(parts)-1]) + if err != nil { + return nil, fmt.Errorf("invalid Pod name: %v, error: %v", podName, err) + } + return &index, nil +} diff --git a/src/pkg/health/health.go b/src/pkg/health/health.go new file mode 100644 index 0000000..a32ec34 --- /dev/null +++ b/src/pkg/health/health.go @@ -0,0 +1,37 @@ +package health + +import ( + "context" + + zitadelv1alpha1 "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/api/v1alpha1" + + "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/pkg/deployment" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" +) + +type EndpointPolicy string + +func IsZitadelClusterHealthy(ctx context.Context, client ctrlclient.Client, zitadel *zitadelv1alpha1.ZitadelCluster) (bool, error) { + key := ctrlclient.ObjectKeyFromObject(zitadel) + var dep appsv1.Deployment + if err := client.Get(ctx, key, &dep); err != nil { + return false, ctrlclient.IgnoreNotFound(err) + } + if dep.Status.ReadyReplicas != zitadel.Spec.Replicas { + return false, nil + } + var endpoints corev1.Endpoints + if err := client.Get(ctx, key, &endpoints); err != nil { + return false, ctrlclient.IgnoreNotFound(err) + } + for _, subset := range endpoints.Subsets { + for _, port := range subset.Ports { + if port.Port == deployment.ZitadelPort { + return len(subset.Addresses) > 0, nil + } + } + } + return false, nil +} diff --git a/src/pkg/machinekey/machinekey.go b/src/pkg/machinekey/machinekey.go new file mode 100644 index 0000000..a344810 --- /dev/null +++ b/src/pkg/machinekey/machinekey.go 
@@ -0,0 +1,13 @@ +package machinekey + +import ( + zitadelv1alpha1 "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/api/v1alpha1" +) + +const ( + Key = "machinekey.json" +) + +func MachineKeySecretName(zitadel *zitadelv1alpha1.ZitadelCluster) string { + return zitadel.Name + "-machinekey-secret" +} diff --git a/src/pkg/masterkey/masterkey.go b/src/pkg/masterkey/masterkey.go new file mode 100644 index 0000000..591bef5 --- /dev/null +++ b/src/pkg/masterkey/masterkey.go @@ -0,0 +1,14 @@ +package masterkey + +import ( + zitadelv1alpha1 "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/api/v1alpha1" +) + +const ( + OwnerName = "k8s-operator" + Key = "key" +) + +func MasterKeyName(zitadel *zitadelv1alpha1.ZitadelCluster) string { + return zitadel.Name + "-masterkey-secret" +} diff --git a/src/pkg/systemapi/systemapi_account.go b/src/pkg/systemapi/systemapi_account.go new file mode 100644 index 0000000..fae531c --- /dev/null +++ b/src/pkg/systemapi/systemapi_account.go @@ -0,0 +1,13 @@ +package systemapiaccount + +import ( + zitadelv1alpha1 "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/api/v1alpha1" +) + +const ( + Key = "private.pem" +) + +func SystemAPIAccountName(zitadel *zitadelv1alpha1.ZitadelCluster) string { + return zitadel.Name + "-systemapiaccount-secret" +} diff --git a/src/pkg/zitadel/zitadel.go b/src/pkg/zitadel/zitadel.go new file mode 100644 index 0000000..6acfc6c --- /dev/null +++ b/src/pkg/zitadel/zitadel.go @@ -0,0 +1,195 @@ +package zitadel + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "reflect" + "strings" + "time" + + zitadelv1alpha1 "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/api/v1alpha1" + "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/pkg/deployment" + "bitbucket.org/topmanage-software-engineering/zitadel-k8s-operator/src/pkg/machinekey" + "github.com/gorilla/schema" + "google.golang.org/grpc" + + 
"github.com/zitadel/oidc/pkg/client" + httphelper "github.com/zitadel/oidc/pkg/http" + "github.com/zitadel/oidc/pkg/oidc" + "github.com/zitadel/zitadel-go/v2/pkg/client/admin" + "github.com/zitadel/zitadel-go/v2/pkg/client/management" + "github.com/zitadel/zitadel-go/v2/pkg/client/zitadel" + "golang.org/x/oauth2" + "gopkg.in/square/go-jose.v2" + corev1 "k8s.io/api/core/v1" +) + +type MachineKey struct { + Type string `json:"type"` + KeyID string `json:"keyId"` + Key string `json:"key"` + UserID string `json:"userId"` +} + +func NewClient(ctx context.Context, zitadelCluster *zitadelv1alpha1.ZitadelCluster, refresolver zitadelv1alpha1.RefResolver) (*management.Client, error) { + machineKeyData, err := refresolver.SecretKeyRef(ctx, corev1.SecretKeySelector{LocalObjectReference: corev1.LocalObjectReference{Name: machinekey.MachineKeySecretName(zitadelCluster)}, Key: machinekey.Key}, zitadelCluster.Namespace) + if err != nil { + return nil, err + } + api, err := management.NewClient(GetIssuer(zitadelCluster), GetAPI(zitadelCluster), []string{oidc.ScopeOpenID, zitadel.ScopeZitadelAPI()}, zitadel.WithInsecure(), zitadel.WithJWTProfileTokenSource(Discover([]byte(machineKeyData), GetAPIUrl(zitadelCluster), GetAuthority(zitadelCluster), GetAPI(zitadelCluster))), + zitadel.WithDialOptions(grpc.WithAuthority(GetAuthority(zitadelCluster))), + ) + + if err != nil { + return nil, fmt.Errorf("ERROR CREATING CLIENT: %v", err) + } + return api, nil +} + +func NewAdminClient(ctx context.Context, zitadelCluster *zitadelv1alpha1.ZitadelCluster, refresolver zitadelv1alpha1.RefResolver) (*admin.Client, error) { + machineKeyData, err := refresolver.SecretKeyRef(ctx, corev1.SecretKeySelector{LocalObjectReference: corev1.LocalObjectReference{Name: machinekey.MachineKeySecretName(zitadelCluster)}, Key: machinekey.Key}, zitadelCluster.Namespace) + if err != nil { + return nil, err + } + api, err := admin.NewClient(GetIssuer(zitadelCluster), GetAPI(zitadelCluster), []string{oidc.ScopeOpenID, 
zitadel.ScopeZitadelAPI()}, zitadel.WithInsecure(), zitadel.WithJWTProfileTokenSource(Discover([]byte(machineKeyData), GetAPIUrl(zitadelCluster), GetAuthority(zitadelCluster), GetAPI(zitadelCluster))), + zitadel.WithDialOptions(grpc.WithAuthority(GetAuthority(zitadelCluster))), + ) + if err != nil { + return nil, fmt.Errorf("ERROR CREATING CLIENT: %v", err) + } + return api, nil +} + +func GetAuthority(zitadel *zitadelv1alpha1.ZitadelCluster) string { + return fmt.Sprintf("%s:%d", zitadel.Spec.Host, zitadel.Spec.ExternalPort) +} + +func GetIssuer(zitadel *zitadelv1alpha1.ZitadelCluster) string { + scheme := "http" + if zitadel.Spec.ExternalSecure { + scheme = "https" + } + return fmt.Sprintf("%s://%s:%d", scheme, zitadel.Spec.Host, zitadel.Spec.ExternalPort) +} + +func GetAPI(zitadel *zitadelv1alpha1.ZitadelCluster) string { + return fmt.Sprintf("%s:%d", deployment.ServiceFQDN(zitadel.ObjectMeta), deployment.ZitadelPort) +} + +func GetAPIUrl(zitadel *zitadelv1alpha1.ZitadelCluster) string { + return fmt.Sprintf("http://%s:%d", deployment.ServiceFQDN(zitadel.ObjectMeta), deployment.ZitadelPort) +} + +type jwtProfileTokenSource struct { + clientID string + audience []string + signer jose.Signer + scopes []string + httpClient *http.Client + tokenEndpoint string + host string +} + +func Discover(key []byte, discoverUrl string, host string, api string) func(issuer string, scopes []string) (oauth2.TokenSource, error) { + return func(issuer string, scopes []string) (oauth2.TokenSource, error) { + var machineKeyData MachineKey + if err := json.Unmarshal(key, &machineKeyData); err != nil { + return nil, err + } + signer, err := client.NewSignerFromPrivateKeyByte([]byte(machineKeyData.Key), machineKeyData.KeyID) + if err != nil { + return nil, err + } + source := &jwtProfileTokenSource{ + host: host, + clientID: machineKeyData.UserID, + audience: []string{issuer}, + signer: signer, + scopes: scopes, + httpClient: http.DefaultClient, + } + config, err := 
GetDiscoveryConfig(discoverUrl, http.DefaultClient, host, api) + if err != nil { + return nil, err + } + source.tokenEndpoint = config.TokenEndpoint + return source, nil + } +} + +func GetDiscoveryConfig(issuer string, httpClient *http.Client, host string, api string, wellKnownUrl ...string) (*oidc.DiscoveryConfiguration, error) { + wellKnown := strings.TrimSuffix(issuer, "/") + oidc.DiscoveryEndpoint + if len(wellKnownUrl) == 1 && wellKnownUrl[0] != "" { + wellKnown = wellKnownUrl[0] + } + req, err := http.NewRequest("GET", wellKnown, nil) + if err != nil { + return nil, err + } + req.Host = host + discoveryConfig := new(oidc.DiscoveryConfiguration) + err = httphelper.HttpRequest(httpClient, req, &discoveryConfig) + discoveryConfig.TokenEndpoint = replaceEndpoint(discoveryConfig.TokenEndpoint, host, api) + discoveryConfig.AuthorizationEndpoint = replaceEndpoint(discoveryConfig.AuthorizationEndpoint, host, api) + discoveryConfig.IntrospectionEndpoint = replaceEndpoint(discoveryConfig.IntrospectionEndpoint, host, api) + discoveryConfig.EndSessionEndpoint = replaceEndpoint(discoveryConfig.EndSessionEndpoint, host, api) + discoveryConfig.RevocationEndpoint = replaceEndpoint(discoveryConfig.RevocationEndpoint, host, api) + discoveryConfig.UserinfoEndpoint = replaceEndpoint(discoveryConfig.UserinfoEndpoint, host, api) + + if err != nil { + return nil, err + } + return discoveryConfig, nil +} + +func replaceEndpoint(endpoint string, host string, api string) string { + return strings.ReplaceAll(strings.ReplaceAll(endpoint, host, api), "https", "http") +} + +func (j *jwtProfileTokenSource) TokenEndpoint() string { + return j.tokenEndpoint +} + +func (j *jwtProfileTokenSource) HttpClient() *http.Client { + return j.httpClient +} + +func (j *jwtProfileTokenSource) Token() (*oauth2.Token, error) { + assertion, err := client.SignedJWTProfileAssertion(j.clientID, j.audience, time.Hour, j.signer) + if err != nil { + return nil, err + } + token, err := 
callTokenEndpoint(oidc.NewJWTProfileGrantRequest(assertion, j.scopes...), nil, j, j.host) + if err != nil { + return nil, err + } + return token, err +} + +var Encoder = func() httphelper.Encoder { + e := schema.NewEncoder() + e.RegisterEncoder(oidc.SpaceDelimitedArray{}, func(value reflect.Value) string { + return value.Interface().(oidc.SpaceDelimitedArray).Encode() + }) + return e +}() + +func callTokenEndpoint(request interface{}, authFn interface{}, caller client.TokenEndpointCaller, host string) (newToken *oauth2.Token, err error) { + req, err := httphelper.FormRequest(caller.TokenEndpoint(), request, Encoder, authFn) + if err != nil { + return nil, err + } + tokenRes := new(oidc.AccessTokenResponse) + req.Host = host + if err := httphelper.HttpRequest(caller.HttpClient(), req, &tokenRes); err != nil { + return nil, fmt.Errorf("Error calling token endpoint: %v", err) + } + return &oauth2.Token{ + AccessToken: tokenRes.AccessToken, + TokenType: tokenRes.TokenType, + RefreshToken: tokenRes.RefreshToken, + Expiry: time.Now().UTC().Add(time.Duration(tokenRes.ExpiresIn) * time.Second), + }, nil +}