This commit is contained in:
470
internal/controller/cluster_controller.go
Normal file
470
internal/controller/cluster_controller.go
Normal file
@@ -0,0 +1,470 @@
|
||||
/*
|
||||
Copyright 2024.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package controller
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/x509"
|
||||
"encoding/base64"
|
||||
"encoding/pem"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
zitadelv1alpha1 "gitea.corredorconect.com/software-engineering/zitadel-k8s-operator/api/v1alpha1"
|
||||
builder "gitea.corredorconect.com/software-engineering/zitadel-k8s-operator/pkg/builder"
|
||||
condition "gitea.corredorconect.com/software-engineering/zitadel-k8s-operator/pkg/condition"
|
||||
"gitea.corredorconect.com/software-engineering/zitadel-k8s-operator/pkg/configuration"
|
||||
configmap "gitea.corredorconect.com/software-engineering/zitadel-k8s-operator/pkg/controller/configmap"
|
||||
secret "gitea.corredorconect.com/software-engineering/zitadel-k8s-operator/pkg/controller/secret"
|
||||
"gitea.corredorconect.com/software-engineering/zitadel-k8s-operator/pkg/controller/service"
|
||||
"gitea.corredorconect.com/software-engineering/zitadel-k8s-operator/pkg/deployment"
|
||||
"gitea.corredorconect.com/software-engineering/zitadel-k8s-operator/pkg/masterkey"
|
||||
systemapiaccount "gitea.corredorconect.com/software-engineering/zitadel-k8s-operator/pkg/systemapi"
|
||||
"github.com/hashicorp/go-multierror"
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
batchv1 "k8s.io/api/batch/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/util/workqueue"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller"
|
||||
"sigs.k8s.io/controller-runtime/pkg/log"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
)
|
||||
|
||||
// reconcilePhase couples a human-readable phase name with the function that
// reconciles that phase of a Cluster. The name is used in error reporting.
type reconcilePhase struct {
	Name      string
	Reconcile func(context.Context, *zitadelv1alpha1.Cluster) (ctrl.Result, error)
}

// patcher mutates a ClusterStatus in place; used with patchStatus to apply
// status changes as a single merge patch.
type patcher func(*zitadelv1alpha1.ClusterStatus) error
|
||||
|
||||
// ClusterReconciler reconciles a Cluster object.
type ClusterReconciler struct {
	client.Client
	// Scheme is the runtime scheme used for object (de)serialization.
	Scheme *runtime.Scheme
	// ConditionReady produces Ready-condition patchers for the Cluster status.
	ConditionReady *condition.Ready
	// Builder constructs the dependent Kubernetes objects (jobs, deployment, service).
	Builder *builder.Builder
	// SecretReconciler manages generated secrets (master key, system-API private key).
	SecretReconciler *secret.SecretReconciler
	// ConfigMapReconciler manages the ZITADEL configuration ConfigMap.
	ConfigMapReconciler *configmap.ConfigMapReconciler
	// ServiceReconciler reconciles Services built by Builder.
	ServiceReconciler *service.ServiceReconciler
	// RefResolver resolves cross-resource references (PostgreSQL cluster, secret keys).
	RefResolver *zitadelv1alpha1.RefResolver
}
|
||||
|
||||
// +kubebuilder:rbac:groups="",resources=configmaps,verbs=get;list;watch;create;patch
|
||||
// +kubebuilder:rbac:groups="",resources=services,verbs=list;watch;create;patch
|
||||
// +kubebuilder:rbac:groups="",resources=secrets,verbs=list;watch;create;patch
|
||||
// +kubebuilder:rbac:groups="",resources=endpoints,verbs=create;patch;get;list;watch
|
||||
// +kubebuilder:rbac:groups="",resources=endpoints/restricted,verbs=create;patch;get;list;watch
|
||||
// +kubebuilder:rbac:groups="",resources=pods,verbs=get;delete
|
||||
// +kubebuilder:rbac:groups="",resources=events,verbs=list;watch;create;patch
|
||||
// +kubebuilder:rbac:groups="",resources=serviceaccounts,verbs=list;watch;create;patch
|
||||
// +kubebuilder:rbac:groups=apps,resources=deployments,verbs=list;watch;create;patch
||||
// +kubebuilder:rbac:groups=policy,resources=poddisruptionbudgets,verbs=list;watch;create;patch
|
||||
// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=roles;rolebindings;clusterrolebindings,verbs=list;watch;create;patch
|
||||
// +kubebuilder:rbac:groups=zitadel.github.com,resources=clusters,verbs=get;list;watch;create;update;patch;delete
|
||||
// +kubebuilder:rbac:groups=zitadel.github.com,resources=clusters/status,verbs=get;update;patch
|
||||
// +kubebuilder:rbac:groups=zitadel.github.com,resources=clusters/finalizers,verbs=update
|
||||
// +kubebuilder:rbac:groups=zitadel.github.com,resources=instances,verbs=get;list;watch;create;update;patch;delete
|
||||
// +kubebuilder:rbac:groups=zitadel.github.com,resources=instances/status,verbs=get;update;patch
|
||||
// +kubebuilder:rbac:groups=zitadel.github.com,resources=instances/finalizers,verbs=update
|
||||
// +kubebuilder:rbac:groups=postgresql.cnpg.io,resources=clusters,verbs=get;list;watch;create;update;patch;delete
|
||||
// +kubebuilder:rbac:groups=postgresql.cnpg.io,resources=clusters/status,verbs=get;update;patch
|
||||
// +kubebuilder:rbac:groups=postgresql.cnpg.io,resources=clusters/finalizers,verbs=update
|
||||
// +kubebuilder:rbac:groups=certificates.k8s.io,resources=certificatesigningrequests,verbs=get;list;watch;create;patch;delete
|
||||
// +kubebuilder:rbac:groups=certificates.k8s.io,resources=certificatesigningrequests/status,verbs=get;update;patch
|
||||
// +kubebuilder:rbac:groups=certificates.k8s.io,resources=certificatesigningrequests/approval,verbs=update
|
||||
// +kubebuilder:rbac:groups=batch,resources=jobs,verbs=get;list;watch;create;update;patch;delete
|
||||
|
||||
func (r *ClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
|
||||
logger := log.FromContext(ctx)
|
||||
logger.Info("Starting Reconcile")
|
||||
|
||||
var zitadel zitadelv1alpha1.Cluster
|
||||
|
||||
if err := r.Get(ctx, req.NamespacedName, &zitadel); err != nil {
|
||||
return ctrl.Result{}, client.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
phases := []reconcilePhase{
|
||||
{
|
||||
Name: "Spec",
|
||||
Reconcile: r.setSpecDefaults,
|
||||
},
|
||||
{
|
||||
Name: "Status",
|
||||
Reconcile: r.setStatusDefaults,
|
||||
},
|
||||
{
|
||||
Name: "MasterkeySecret",
|
||||
Reconcile: r.reconcileMasterKeySecret,
|
||||
},
|
||||
{
|
||||
Name: "ServiceAccount",
|
||||
Reconcile: r.reconcileSystemAPIUser,
|
||||
},
|
||||
{
|
||||
Name: "Configuration",
|
||||
Reconcile: r.reconcileConfig,
|
||||
},
|
||||
{
|
||||
Name: "InitJob",
|
||||
Reconcile: r.reconcileInitJob,
|
||||
},
|
||||
{
|
||||
Name: "SetupJob",
|
||||
Reconcile: r.reconcileSetupJob,
|
||||
},
|
||||
{
|
||||
Name: "Deployment",
|
||||
Reconcile: r.reconcileDeployment,
|
||||
},
|
||||
{
|
||||
Name: "Service",
|
||||
Reconcile: r.reconcileService,
|
||||
},
|
||||
}
|
||||
|
||||
for _, p := range phases {
|
||||
result, err := p.Reconcile(ctx, &zitadel)
|
||||
if err != nil {
|
||||
if errors.IsNotFound(err) {
|
||||
continue
|
||||
}
|
||||
|
||||
var errBundle *multierror.Error
|
||||
errBundle = multierror.Append(errBundle, err)
|
||||
|
||||
msg := fmt.Sprintf("Error reconciling %s: %v", p.Name, err)
|
||||
patchErr := r.patchStatus(ctx, &zitadel, func(s *zitadelv1alpha1.ClusterStatus) error {
|
||||
patcher := r.ConditionReady.PatcherFailed(msg)
|
||||
patcher(s)
|
||||
return nil
|
||||
})
|
||||
if errors.IsNotFound(patchErr) {
|
||||
errBundle = multierror.Append(errBundle, patchErr)
|
||||
}
|
||||
|
||||
if err := errBundle.ErrorOrNil(); err != nil {
|
||||
return ctrl.Result{}, fmt.Errorf("error reconciling %s: %v", p.Name, err)
|
||||
}
|
||||
}
|
||||
if !result.IsZero() {
|
||||
return result, err
|
||||
}
|
||||
}
|
||||
|
||||
if err := r.patchStatus(ctx, &zitadel, r.patcher(ctx, &zitadel)); err != nil && !errors.IsNotFound(err) {
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
return ctrl.Result{RequeueAfter: 15 * time.Minute}, nil
|
||||
}
|
||||
|
||||
func (r *ClusterReconciler) setSpecDefaults(ctx context.Context, zitadel *zitadelv1alpha1.Cluster) (ctrl.Result, error) {
|
||||
return ctrl.Result{}, r.patch(ctx, zitadel, func(zit *zitadelv1alpha1.Cluster) {
|
||||
zit.SetDefaults()
|
||||
})
|
||||
}
|
||||
|
||||
func (r *ClusterReconciler) setStatusDefaults(ctx context.Context, zitadel *zitadelv1alpha1.Cluster) (ctrl.Result, error) {
|
||||
return ctrl.Result{}, r.patchStatus(ctx, zitadel, func(status *zitadelv1alpha1.ClusterStatus) error {
|
||||
status.FillWithDefaults(zitadel)
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
func (r *ClusterReconciler) reconcileMasterKeySecret(ctx context.Context, zitadel *zitadelv1alpha1.Cluster) (ctrl.Result, error) {
|
||||
secretName := masterkey.MasterKeyName(zitadel)
|
||||
key := types.NamespacedName{
|
||||
Name: secretName,
|
||||
Namespace: zitadel.Namespace,
|
||||
}
|
||||
_, err := r.SecretReconciler.ReconcileRandomPassword(ctx, key, masterkey.Key, zitadel)
|
||||
|
||||
if err != nil {
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
func (r *ClusterReconciler) reconcileSystemAPIUser(ctx context.Context, zitadel *zitadelv1alpha1.Cluster) (ctrl.Result, error) {
|
||||
secretName := systemapiaccount.SystemAPIAccountName(zitadel)
|
||||
key := types.NamespacedName{
|
||||
Name: secretName,
|
||||
Namespace: zitadel.Namespace,
|
||||
}
|
||||
_, err := r.SecretReconciler.ReconcileRandomPrivateRSA(ctx, key, systemapiaccount.Key, zitadel)
|
||||
|
||||
if err != nil {
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
func (r *ClusterReconciler) reconcileConfig(ctx context.Context, zitadel *zitadelv1alpha1.Cluster) (ctrl.Result, error) {
|
||||
postgres, err := r.RefResolver.PostgreSQLClusterRef(ctx, &zitadel.Spec.PostgreSQLClusterRef, zitadel.Namespace)
|
||||
if err != nil {
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
configName := configuration.ConfigurationName(zitadel)
|
||||
key := types.NamespacedName{
|
||||
Name: configName,
|
||||
Namespace: zitadel.Namespace,
|
||||
}
|
||||
privateKeyData, err := r.RefResolver.SecretKeyRef(ctx, corev1.SecretKeySelector{LocalObjectReference: corev1.LocalObjectReference{Name: systemapiaccount.SystemAPIAccountName(zitadel)}, Key: systemapiaccount.Key}, zitadel.Namespace)
|
||||
if err != nil {
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
pemBlock, _ := pem.Decode([]byte(privateKeyData))
|
||||
if pemBlock == nil {
|
||||
return ctrl.Result{}, fmt.Errorf("failed to decode PEM block")
|
||||
}
|
||||
privateKey, err := x509.ParsePKCS1PrivateKey(pemBlock.Bytes)
|
||||
publicKeyBytes, err := x509.MarshalPKIXPublicKey(&privateKey.PublicKey)
|
||||
if err != nil {
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
publicKeyPem := pem.EncodeToMemory(
|
||||
&pem.Block{
|
||||
Type: "RSA PUBLIC KEY",
|
||||
Bytes: publicKeyBytes,
|
||||
},
|
||||
)
|
||||
base64key := base64.StdEncoding.EncodeToString(publicKeyPem)
|
||||
err = r.ConfigMapReconciler.ReconcileZitadelConfiguration(ctx, key, zitadel, postgres, base64key)
|
||||
|
||||
if err != nil {
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
func (r *ClusterReconciler) reconcileInitJob(ctx context.Context, zitadel *zitadelv1alpha1.Cluster) (ctrl.Result, error) {
|
||||
key := client.ObjectKeyFromObject(zitadel)
|
||||
key.Name = "init-job-" + key.Name
|
||||
|
||||
// Build the desired job
|
||||
desiredInitJob, err := r.Builder.BuildInitJob(zitadel, key)
|
||||
if err != nil {
|
||||
return ctrl.Result{}, fmt.Errorf("error building InitJob: %v", err)
|
||||
}
|
||||
|
||||
var existingJob batchv1.Job
|
||||
err = r.Get(ctx, key, &existingJob)
|
||||
if err != nil {
|
||||
if !errors.IsNotFound(err) {
|
||||
return ctrl.Result{}, fmt.Errorf("error getting InitJob: %v", err)
|
||||
}
|
||||
// If job is not found, create the job
|
||||
if err := r.Create(ctx, desiredInitJob); err != nil {
|
||||
return ctrl.Result{}, fmt.Errorf("error creating InitJob: %v", err)
|
||||
}
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
// Compare the image in the existing job with the desired image
|
||||
existingImage := existingJob.Spec.Template.Spec.Containers[0].Image
|
||||
desiredImage := desiredInitJob.Spec.Template.Spec.Containers[0].Image
|
||||
|
||||
// If the images don't match, delete the existing job and wait for deletion
|
||||
if existingImage != desiredImage {
|
||||
|
||||
if err := r.Delete(ctx, &existingJob); err != nil {
|
||||
return ctrl.Result{}, fmt.Errorf("error deleting existing InitJob: %v", err)
|
||||
}
|
||||
|
||||
// Wait for the job to be fully deleted before creating a new one
|
||||
for {
|
||||
err := r.Get(ctx, key, &existingJob)
|
||||
if errors.IsNotFound(err) {
|
||||
break // Job has been deleted, we can proceed
|
||||
}
|
||||
if err != nil {
|
||||
return ctrl.Result{}, fmt.Errorf("error checking if InitJob is deleted: %v", err)
|
||||
}
|
||||
// Sleep for a short interval to avoid tight loop
|
||||
time.Sleep(1 * time.Second)
|
||||
}
|
||||
|
||||
// Now create the new job
|
||||
if err := r.Create(ctx, desiredInitJob); err != nil {
|
||||
return ctrl.Result{}, fmt.Errorf("error creating new InitJob: %v", err)
|
||||
}
|
||||
}
|
||||
if err := r.Get(ctx, key, &existingJob); err != nil {
|
||||
return ctrl.Result{}, fmt.Errorf("error fetching existing InitJob status: %v", err)
|
||||
}
|
||||
|
||||
if existingJob.Status.Succeeded != 1 { // Replace with actual success condition
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
// If the job exists and the image matches, no action is needed
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
func (r *ClusterReconciler) reconcileSetupJob(ctx context.Context, zitadel *zitadelv1alpha1.Cluster) (ctrl.Result, error) {
|
||||
key := client.ObjectKeyFromObject(zitadel)
|
||||
key.Name = "setup-job-" + key.Name
|
||||
|
||||
// Build the desired job
|
||||
desiredSetupJob, err := r.Builder.BuildSetupJob(zitadel, key)
|
||||
if err != nil {
|
||||
return ctrl.Result{}, fmt.Errorf("error building SetupJob: %v", err)
|
||||
}
|
||||
|
||||
var existingJob batchv1.Job
|
||||
err = r.Get(ctx, key, &existingJob)
|
||||
if err != nil {
|
||||
if !errors.IsNotFound(err) {
|
||||
return ctrl.Result{}, fmt.Errorf("error getting SetupJob: %v", err)
|
||||
}
|
||||
// If job is not found, create the job
|
||||
if err := r.Create(ctx, desiredSetupJob); err != nil {
|
||||
return ctrl.Result{}, fmt.Errorf("error creating SetupJob: %v", err)
|
||||
}
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
// Compare the image in the existing job with the desired image
|
||||
existingImage := existingJob.Spec.Template.Spec.Containers[0].Image
|
||||
desiredImage := desiredSetupJob.Spec.Template.Spec.Containers[0].Image
|
||||
|
||||
// If the images don't match, delete the existing job and wait for deletion
|
||||
if existingImage != desiredImage {
|
||||
|
||||
if err := r.Delete(ctx, &existingJob); err != nil {
|
||||
return ctrl.Result{}, fmt.Errorf("error deleting existing SetupJob: %v", err)
|
||||
}
|
||||
|
||||
// Wait for the job to be fully deleted before creating a new one
|
||||
for {
|
||||
err := r.Get(ctx, key, &existingJob)
|
||||
if errors.IsNotFound(err) {
|
||||
break // Job has been deleted, we can proceed
|
||||
}
|
||||
if err != nil {
|
||||
return ctrl.Result{}, fmt.Errorf("error checking if SetupJob is deleted: %v", err)
|
||||
}
|
||||
// Sleep for a short interval to avoid tight loop
|
||||
time.Sleep(1 * time.Second)
|
||||
}
|
||||
|
||||
// Now create the new job
|
||||
if err := r.Create(ctx, desiredSetupJob); err != nil {
|
||||
return ctrl.Result{}, fmt.Errorf("error creating new SetupJob: %v", err)
|
||||
}
|
||||
}
|
||||
if err := r.Get(ctx, key, &existingJob); err != nil {
|
||||
return ctrl.Result{}, fmt.Errorf("error fetching existing SetupJob status: %v", err)
|
||||
}
|
||||
|
||||
if existingJob.Status.Succeeded != 1 { // Replace with actual success condition
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
// If the job exists and the image matches, no action is needed
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
func (r *ClusterReconciler) reconcileDeployment(ctx context.Context, zitadel *zitadelv1alpha1.Cluster) (ctrl.Result, error) {
|
||||
// TODO: Reload on config changed
|
||||
key := client.ObjectKeyFromObject(zitadel)
|
||||
desiredSts, err := r.Builder.BuildDeployment(zitadel, key)
|
||||
if err != nil {
|
||||
return ctrl.Result{}, fmt.Errorf("error building Deployment: %v", err)
|
||||
}
|
||||
var existingDep appsv1.Deployment
|
||||
if err := r.Get(ctx, key, &existingDep); err != nil {
|
||||
if !errors.IsNotFound(err) {
|
||||
return ctrl.Result{}, fmt.Errorf("error getting Deployment: %v", err)
|
||||
}
|
||||
if err := r.Create(ctx, desiredSts); err != nil {
|
||||
return ctrl.Result{}, fmt.Errorf("error creating Deployment: %v", err)
|
||||
}
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
patch := client.MergeFrom(existingDep.DeepCopy())
|
||||
existingDep.Spec.Template = desiredSts.Spec.Template
|
||||
existingDep.Spec.Replicas = desiredSts.Spec.Replicas
|
||||
return ctrl.Result{}, r.Patch(ctx, &existingDep, patch)
|
||||
}
|
||||
|
||||
// reconcileService reconciles the Cluster's default Service. It is a thin
// phase adapter around reconcileDefaultService and never requests a requeue.
func (r *ClusterReconciler) reconcileService(ctx context.Context, zitadel *zitadelv1alpha1.Cluster) (ctrl.Result, error) {
	return ctrl.Result{}, r.reconcileDefaultService(ctx, zitadel)
}
|
||||
|
||||
func (r *ClusterReconciler) reconcileDefaultService(ctx context.Context, zitadel *zitadelv1alpha1.Cluster) error {
|
||||
key := client.ObjectKeyFromObject(zitadel)
|
||||
opts := builder.ServiceOpts{
|
||||
Ports: []corev1.ServicePort{
|
||||
{
|
||||
Name: deployment.ZitadelName,
|
||||
Port: deployment.ZitadelPort,
|
||||
},
|
||||
},
|
||||
}
|
||||
desiredSvc, err := r.Builder.BuildService(zitadel, key, opts)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error building Service: %v", err)
|
||||
}
|
||||
return r.ServiceReconciler.Reconcile(ctx, desiredSvc)
|
||||
}
|
||||
|
||||
func (r *ClusterReconciler) patchStatus(ctx context.Context, zitadel *zitadelv1alpha1.Cluster,
|
||||
patcher patcher) error {
|
||||
patch := client.MergeFrom(zitadel.DeepCopy())
|
||||
if err := patcher(&zitadel.Status); err != nil {
|
||||
return err
|
||||
}
|
||||
return r.Status().Patch(ctx, zitadel, patch)
|
||||
}
|
||||
|
||||
func (r *ClusterReconciler) patcher(ctx context.Context, zitadel *zitadelv1alpha1.Cluster) patcher {
|
||||
return func(s *zitadelv1alpha1.ClusterStatus) error {
|
||||
var sts appsv1.Deployment
|
||||
if err := r.Get(ctx, client.ObjectKeyFromObject(zitadel), &sts); err != nil {
|
||||
return err
|
||||
}
|
||||
zitadel.Status.Replicas = sts.Status.ReadyReplicas
|
||||
|
||||
condition.SetReadyWithDeployment(&zitadel.Status, &sts)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func (r *ClusterReconciler) patch(ctx context.Context, zitadel *zitadelv1alpha1.Cluster,
|
||||
patcher func(*zitadelv1alpha1.Cluster)) error {
|
||||
patch := client.MergeFrom(zitadel.DeepCopy())
|
||||
patcher(zitadel)
|
||||
return r.Patch(ctx, zitadel, patch)
|
||||
}
|
||||
|
||||
// SetupWithManager sets up the controller with the Manager.
|
||||
func (r *ClusterReconciler) SetupWithManager(mgr ctrl.Manager) error {
|
||||
return ctrl.NewControllerManagedBy(mgr).
|
||||
For(&zitadelv1alpha1.Cluster{}).
|
||||
Owns(&appsv1.Deployment{}).
|
||||
Owns(&corev1.Service{}).
|
||||
Owns(&corev1.ConfigMap{}).
|
||||
Owns(&corev1.Secret{}).
|
||||
WithOptions(controller.Options{RateLimiter: workqueue.NewTypedItemExponentialFailureRateLimiter[reconcile.Request](time.Millisecond*500, time.Minute*3)}).
|
||||
Complete(r)
|
||||
}
|
||||
402
internal/controller/instance_controller.go
Normal file
402
internal/controller/instance_controller.go
Normal file
@@ -0,0 +1,402 @@
|
||||
/*
|
||||
Copyright 2024.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package controller
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
zitadelv1alpha1 "gitea.corredorconect.com/software-engineering/zitadel-k8s-operator/api/v1alpha1"
|
||||
condition "gitea.corredorconect.com/software-engineering/zitadel-k8s-operator/pkg/condition"
|
||||
"gitea.corredorconect.com/software-engineering/zitadel-k8s-operator/pkg/controller/service"
|
||||
"gitea.corredorconect.com/software-engineering/zitadel-k8s-operator/pkg/controller/system"
|
||||
"gitea.corredorconect.com/software-engineering/zitadel-k8s-operator/pkg/deployment"
|
||||
zitadelresourcesv1alpha1 "gitea.corredorconect.com/software-engineering/zitadel-resources-operator/api/v1alpha1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
ctrlClient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
systemClient "github.com/zitadel/zitadel-go/v3/pkg/client/system"
|
||||
authn "github.com/zitadel/zitadel-go/v3/pkg/client/zitadel/authn"
|
||||
pb "github.com/zitadel/zitadel-go/v3/pkg/client/zitadel/system"
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/util/workqueue"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
|
||||
"gitea.corredorconect.com/software-engineering/zitadel-k8s-operator/pkg/builder"
|
||||
)
|
||||
|
||||
// InstanceReconciler reconciles a Instance object.
type InstanceReconciler struct {
	client.Client
	// RefResolver resolves references from the Instance (e.g. its Cluster).
	RefResolver *zitadelv1alpha1.RefResolver
	// ConditionReady produces Ready-condition patchers for the status.
	ConditionReady *condition.Ready
	// RequeueInterval is forwarded to the system reconciler for periodic requeues.
	RequeueInterval time.Duration
	// Builder constructs dependent resources (secrets, connections, orgs, users).
	Builder *builder.Builder
	// ServiceReconciler reconciles Services; passed to the wrapped reconciler
	// (presumably for the login UI service — confirm in reconcileLoginUIService).
	ServiceReconciler *service.ServiceReconciler
}
|
||||
|
||||
func NewInstanceReconciler(client client.Client, refResolver *zitadelv1alpha1.RefResolver,
|
||||
builder *builder.Builder,
|
||||
conditionReady *condition.Ready,
|
||||
serviceReconciler *service.ServiceReconciler,
|
||||
requeueInterval time.Duration) *InstanceReconciler {
|
||||
return &InstanceReconciler{
|
||||
Client: client,
|
||||
RefResolver: refResolver,
|
||||
ConditionReady: conditionReady,
|
||||
RequeueInterval: requeueInterval,
|
||||
ServiceReconciler: serviceReconciler,
|
||||
Builder: builder,
|
||||
}
|
||||
}
|
||||
|
||||
//+kubebuilder:rbac:groups=zitadel.topmanage.com,resources=instances,verbs=get;list;watch;create;update;patch;delete
|
||||
//+kubebuilder:rbac:groups=zitadel.topmanage.com,resources=instances/status,verbs=get;update;patch
|
||||
//+kubebuilder:rbac:groups=zitadel.topmanage.com,resources=instances/finalizers,verbs=update
|
||||
// +kubebuilder:rbac:groups=zitadel.github.com,resources=machineusers,verbs=get;list;watch;create;update;patch;delete
|
||||
// +kubebuilder:rbac:groups=zitadel.github.com,resources=machineusers/status,verbs=get;update;patch
|
||||
// +kubebuilder:rbac:groups=zitadel.github.com,resources=machineusers/finalizers,verbs=update
|
||||
// +kubebuilder:rbac:groups=zitadel.github.com,resources=connections,verbs=get;list;watch;create;update;patch;delete
|
||||
// +kubebuilder:rbac:groups=zitadel.github.com,resources=connections/status,verbs=get;update;patch
|
||||
// +kubebuilder:rbac:groups=zitadel.github.com,resources=connections/finalizers,verbs=update
|
||||
// +kubebuilder:rbac:groups=zitadel.github.com,resources=organizations,verbs=get;list;watch;create;update;patch;delete
|
||||
// +kubebuilder:rbac:groups=zitadel.github.com,resources=organizations/status,verbs=get;update;patch
|
||||
// +kubebuilder:rbac:groups=zitadel.github.com,resources=organizations/finalizers,verbs=update
|
||||
|
||||
// Reconcile is part of the main kubernetes reconciliation loop which aims to
|
||||
// move the current state of the cluster closer to the desired state.
|
||||
func (r *InstanceReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
|
||||
var instance zitadelv1alpha1.Instance
|
||||
if err := r.Get(ctx, req.NamespacedName, &instance); err != nil {
|
||||
return ctrl.Result{}, client.IgnoreNotFound(err)
|
||||
}
|
||||
wr := newWrappedInstanceReconciler(r.Client, r.RefResolver, r.Builder, r.ServiceReconciler, &instance)
|
||||
wf := newWrappedInstanceFinalizer(r.Client, &instance)
|
||||
tf := system.NewSystemFinalizer(r.Client, wf)
|
||||
tr := system.NewSystemReconciler(r.Client, r.ConditionReady, wr, tf, r.RequeueInterval)
|
||||
|
||||
result, err := tr.Reconcile(ctx, &instance)
|
||||
if err != nil {
|
||||
return result, fmt.Errorf("error reconciling in InstanceReconciler: %v", err)
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// wrappedInstanceReconciler reconciles a single Instance against the ZITADEL
// system API; it is constructed per reconcile request around that Instance.
type wrappedInstanceReconciler struct {
	client.Client
	// refResolver resolves references from the Instance (e.g. its Cluster).
	refResolver *zitadelv1alpha1.RefResolver
	// instance is the Instance currently being reconciled.
	instance *zitadelv1alpha1.Instance
	// Builder constructs dependent resources (secrets, connections, orgs, users).
	Builder *builder.Builder
	// ServiceReconciler reconciles Services for this instance (login UI —
	// see the loginUIService phase).
	ServiceReconciler *service.ServiceReconciler
}
|
||||
|
||||
func newWrappedInstanceReconciler(client client.Client, refResolver *zitadelv1alpha1.RefResolver, builder *builder.Builder,
|
||||
serviceReconciler *service.ServiceReconciler,
|
||||
instance *zitadelv1alpha1.Instance) system.WrappedSystemReconciler {
|
||||
return &wrappedInstanceReconciler{
|
||||
Client: client,
|
||||
refResolver: refResolver,
|
||||
instance: instance,
|
||||
Builder: builder,
|
||||
ServiceReconciler: serviceReconciler,
|
||||
}
|
||||
}
|
||||
|
||||
// instanceReconcilePhase couples a phase name with the function that
// reconciles that phase against the ZITADEL system API client.
type instanceReconcilePhase struct {
	Name      string
	Reconcile func(context.Context, *systemClient.Client) error
}
|
||||
|
||||
func (wr *wrappedInstanceReconciler) Reconcile(ctx context.Context, ztdClient *systemClient.Client) error {
|
||||
phases := []instanceReconcilePhase{
|
||||
{
|
||||
Name: "instance",
|
||||
Reconcile: wr.reconcileInstance,
|
||||
},
|
||||
{
|
||||
Name: "connection",
|
||||
Reconcile: wr.reconcileConnection,
|
||||
},
|
||||
{
|
||||
Name: "organization",
|
||||
Reconcile: wr.reconcileFirstOrganization,
|
||||
},
|
||||
{
|
||||
Name: "loginUIMachineUser",
|
||||
Reconcile: wr.reconcileLoginUIMachineUser,
|
||||
},
|
||||
{
|
||||
Name: "loginUIDeployment",
|
||||
Reconcile: wr.reconcileLoginUIDeployment,
|
||||
},
|
||||
{
|
||||
Name: "loginUIService",
|
||||
Reconcile: wr.reconcileLoginUIService,
|
||||
},
|
||||
}
|
||||
for _, p := range phases {
|
||||
err := p.Reconcile(ctx, ztdClient)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (wr *wrappedInstanceReconciler) reconcileInstance(ctx context.Context, ztdClient *systemClient.Client) error {
|
||||
var instanceId *string
|
||||
if wr.instance.Status.InstanceId != nil {
|
||||
getInstanceRes, err := ztdClient.GetInstance(ctx, &pb.GetInstanceRequest{InstanceId: *wr.instance.Status.InstanceId})
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error getting Instance: %v", err)
|
||||
}
|
||||
if getInstanceRes.Instance != nil {
|
||||
instanceId = &getInstanceRes.Instance.Id
|
||||
}
|
||||
}
|
||||
if instanceId == nil {
|
||||
createInstanceRes, err := ztdClient.CreateInstance(ctx, &pb.CreateInstanceRequest{
|
||||
InstanceName: wr.instance.Spec.InstanceName,
|
||||
FirstOrgName: wr.instance.Spec.Org.Name,
|
||||
CustomDomain: wr.instance.Spec.CustomDomain,
|
||||
DefaultLanguage: wr.instance.Spec.DefaultLanguage,
|
||||
Owner: &pb.CreateInstanceRequest_Machine_{
|
||||
Machine: &pb.CreateInstanceRequest_Machine{
|
||||
UserName: wr.instance.MachineUserName(),
|
||||
Name: wr.instance.MachineName(),
|
||||
PersonalAccessToken: &pb.CreateInstanceRequest_PersonalAccessToken{
|
||||
ExpirationDate: nil,
|
||||
},
|
||||
MachineKey: &pb.CreateInstanceRequest_MachineKey{
|
||||
ExpirationDate: nil,
|
||||
Type: authn.KeyType_KEY_TYPE_JSON,
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error creating Instance: %v", err)
|
||||
}
|
||||
|
||||
key := types.NamespacedName{
|
||||
Name: wr.instance.MachineSecretName(),
|
||||
Namespace: wr.instance.Namespace,
|
||||
}
|
||||
secretData := map[string][]byte{
|
||||
"pat": []byte(createInstanceRes.Pat),
|
||||
"machinekey": createInstanceRes.MachineKey,
|
||||
}
|
||||
secret, err := wr.Builder.BuildSecret(builder.SecretOpts{Key: key, Data: secretData}, wr.instance)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error building instance machine Secret: %v", err)
|
||||
}
|
||||
|
||||
if err := wr.Create(ctx, secret); err != nil {
|
||||
return fmt.Errorf("error creating machinekey Secret: %v", err)
|
||||
}
|
||||
instanceId = &createInstanceRes.InstanceId
|
||||
}
|
||||
patch := ctrlClient.MergeFrom(wr.instance.DeepCopy())
|
||||
wr.instance.Status.InstanceId = instanceId
|
||||
return wr.Client.Status().Patch(ctx, wr.instance, patch)
|
||||
}
|
||||
|
||||
func (wr *wrappedInstanceReconciler) reconcileConnection(ctx context.Context, ztdClient *systemClient.Client) error {
|
||||
key := types.NamespacedName{
|
||||
Name: wr.instance.ConnectionObjectName(),
|
||||
Namespace: wr.instance.Namespace,
|
||||
}
|
||||
desiredConnection, err := wr.Builder.BuildConnection(key, wr.instance)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error building Initial Connectionanization: %v", err)
|
||||
}
|
||||
|
||||
var existingConnection zitadelresourcesv1alpha1.Connection
|
||||
if err := wr.Get(ctx, key, &existingConnection); err != nil {
|
||||
if !errors.IsNotFound(err) {
|
||||
return fmt.Errorf("error getting Initial Connectionanization: %v", err)
|
||||
}
|
||||
if err := wr.Create(ctx, desiredConnection); err != nil {
|
||||
return fmt.Errorf("error creating Initial Connectionanization: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
patch := client.MergeFrom(existingConnection.DeepCopy())
|
||||
existingConnection.Spec.Host = desiredConnection.Spec.Host
|
||||
existingConnection.Spec.Authentication = desiredConnection.Spec.Authentication
|
||||
return wr.Patch(ctx, &existingConnection, patch)
|
||||
}
|
||||
|
||||
func (wr *wrappedInstanceReconciler) reconcileFirstOrganization(ctx context.Context, ztdClient *systemClient.Client) error {
|
||||
key := types.NamespacedName{
|
||||
Name: wr.instance.FirstOrgObjectName(),
|
||||
Namespace: wr.instance.Namespace,
|
||||
}
|
||||
desiredOrg, err := wr.Builder.BuildOrganization(builder.OrganizationOpts{Key: key, Zitadel: wr.instance, OrganizationName: wr.instance.Spec.Org.Name}, wr.instance)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error building Initial Organization: %v", err)
|
||||
}
|
||||
|
||||
var existingOrg zitadelresourcesv1alpha1.Organization
|
||||
if err := wr.Get(ctx, key, &existingOrg); err != nil {
|
||||
if !errors.IsNotFound(err) {
|
||||
return fmt.Errorf("error getting Initial Organization: %v", err)
|
||||
}
|
||||
if err := wr.Create(ctx, desiredOrg); err != nil {
|
||||
return fmt.Errorf("error creating Initial Organization: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
patch := client.MergeFrom(existingOrg.DeepCopy())
|
||||
existingOrg.Spec.OrganzationName = desiredOrg.Spec.OrganzationName
|
||||
return wr.Patch(ctx, &existingOrg, patch)
|
||||
}
|
||||
|
||||
func (wr *wrappedInstanceReconciler) reconcileLoginUIMachineUser(ctx context.Context, ztdClient *systemClient.Client) error {
|
||||
key := types.NamespacedName{
|
||||
Name: wr.instance.LoginMachineUserName(),
|
||||
Namespace: wr.instance.Namespace,
|
||||
}
|
||||
|
||||
desiredMachineUser, err := wr.Builder.BuildMachineUser(key, builder.MachineUserOpts{Instance: wr.instance,
|
||||
InternalPermissions: []zitadelresourcesv1alpha1.InternalPermissions{
|
||||
{
|
||||
|
||||
Resource: zitadelresourcesv1alpha1.Resource{
|
||||
Instance: &zitadelresourcesv1alpha1.InstanceResource{},
|
||||
},
|
||||
Roles: []string{
|
||||
"IAM_LOGIN_CLIENT",
|
||||
},
|
||||
},
|
||||
},
|
||||
Username: "login-ui",
|
||||
}, wr.instance)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error building LoginUI MachineUser: %v", err)
|
||||
}
|
||||
|
||||
var existingMachineUser zitadelresourcesv1alpha1.MachineUser
|
||||
if err := wr.Get(ctx, key, &existingMachineUser); err != nil {
|
||||
if !errors.IsNotFound(err) {
|
||||
return fmt.Errorf("error getting MachineUser: %v", err)
|
||||
}
|
||||
if err := wr.Create(ctx, desiredMachineUser); err != nil {
|
||||
return fmt.Errorf("error creating MachineUser: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
patch := client.MergeFrom(existingMachineUser.DeepCopy())
|
||||
existingMachineUser.Spec.Authorizations = desiredMachineUser.Spec.Authorizations
|
||||
existingMachineUser.Spec.InternalPermissions = desiredMachineUser.Spec.InternalPermissions
|
||||
existingMachineUser.Spec.Metadata = desiredMachineUser.Spec.Metadata
|
||||
return wr.Patch(ctx, &existingMachineUser, patch)
|
||||
}
|
||||
|
||||
func (wr *wrappedInstanceReconciler) reconcileLoginUIDeployment(ctx context.Context, ztdClient *systemClient.Client) error {
|
||||
if wr.instance.Status.InstanceId != nil {
|
||||
return fmt.Errorf("Instance not ready...")
|
||||
}
|
||||
cluster, err := wr.refResolver.Cluster(ctx, &wr.instance.Spec.ClusterRef, wr.instance.Namespace)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
key := client.ObjectKeyFromObject(wr.instance)
|
||||
key.Name = key.Name + "-login-ui"
|
||||
|
||||
instanceRes, err := ztdClient.GetInstance(ctx, &pb.GetInstanceRequest{InstanceId: *wr.instance.Status.InstanceId})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var customDomain string
|
||||
for _, d := range instanceRes.Instance.Domains {
|
||||
if d.Primary {
|
||||
customDomain = d.Domain
|
||||
break
|
||||
}
|
||||
}
|
||||
desiredSts, err := wr.Builder.BuildLoginDeployment(cluster, wr.instance, customDomain, key)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error building Login UI Deployment: %v", err)
|
||||
}
|
||||
var existingDep appsv1.Deployment
|
||||
if err := wr.Get(ctx, key, &existingDep); err != nil {
|
||||
if !errors.IsNotFound(err) {
|
||||
return fmt.Errorf("error getting Login UI Deployment: %v", err)
|
||||
}
|
||||
if err := wr.Create(ctx, desiredSts); err != nil {
|
||||
return fmt.Errorf("error creating Login UI Deployment: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
patch := client.MergeFrom(existingDep.DeepCopy())
|
||||
existingDep.Spec.Template = desiredSts.Spec.Template
|
||||
existingDep.Spec.Replicas = desiredSts.Spec.Replicas
|
||||
return wr.Patch(ctx, &existingDep, patch)
|
||||
}
|
||||
|
||||
func (wr *wrappedInstanceReconciler) reconcileLoginUIService(ctx context.Context, ztdClient *systemClient.Client) error {
|
||||
key := client.ObjectKeyFromObject(wr.instance)
|
||||
key.Name = key.Name + "-login-ui"
|
||||
opts := builder.ServiceOpts{
|
||||
Ports: []corev1.ServicePort{
|
||||
{
|
||||
Name: deployment.LoginName,
|
||||
Port: deployment.LoginPort,
|
||||
},
|
||||
},
|
||||
}
|
||||
desiredSvc, err := wr.Builder.BuildLoginService(wr.instance, key, opts)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error building Service: %v", err)
|
||||
}
|
||||
return wr.ServiceReconciler.Reconcile(ctx, desiredSvc)
|
||||
}
|
||||
|
||||
func (wr *wrappedInstanceReconciler) PatchStatus(ctx context.Context, patcher condition.Patcher) error {
|
||||
patch := client.MergeFrom(wr.instance.DeepCopy())
|
||||
patcher(&wr.instance.Status)
|
||||
|
||||
if err := wr.Client.Status().Patch(ctx, wr.instance, patch); err != nil {
|
||||
return fmt.Errorf("error patching Instance status: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetupWithManager sets up the controller with the Manager.
|
||||
func (r *InstanceReconciler) SetupWithManager(mgr ctrl.Manager) error {
|
||||
return ctrl.NewControllerManagedBy(mgr).
|
||||
For(&zitadelv1alpha1.Instance{}).
|
||||
Owns(&corev1.Secret{}).
|
||||
Owns(&appsv1.Deployment{}).
|
||||
Owns(&corev1.Service{}).
|
||||
Owns(&zitadelresourcesv1alpha1.Connection{}).
|
||||
Owns(&zitadelresourcesv1alpha1.Organization{}).
|
||||
Owns(&zitadelresourcesv1alpha1.MachineUser{}).
|
||||
WithOptions(controller.Options{RateLimiter: workqueue.NewTypedItemExponentialFailureRateLimiter[reconcile.Request](time.Millisecond*500, time.Minute*3)}).
|
||||
Complete(r)
|
||||
}
|
||||
74
internal/controller/instance_controller_finalizer.go
Normal file
74
internal/controller/instance_controller_finalizer.go
Normal file
@@ -0,0 +1,74 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
zitadelv1alpha1 "gitea.corredorconect.com/software-engineering/zitadel-k8s-operator/api/v1alpha1"
|
||||
"gitea.corredorconect.com/software-engineering/zitadel-k8s-operator/pkg/controller/system"
|
||||
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
systemClient "github.com/zitadel/zitadel-go/v3/pkg/client/system"
|
||||
pb "github.com/zitadel/zitadel-go/v3/pkg/client/zitadel/system"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
ctrlClient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
)
|
||||
|
||||
const (
|
||||
instanceFinalizerName = "instance.zitadel.github.com/instance"
|
||||
)
|
||||
|
||||
// wrappedInstanceFinalizer bundles a Kubernetes client with the Instance
// whose finalizer lifecycle (add, remove, pre-delete cleanup) it manages.
type wrappedInstanceFinalizer struct {
	client.Client
	// instance is the Instance this wrapper adds/removes the finalizer on.
	instance *zitadelv1alpha1.Instance
}
|
||||
|
||||
func newWrappedInstanceFinalizer(client client.Client, instance *zitadelv1alpha1.Instance) system.WrappedSystemFinalizer {
|
||||
return &wrappedInstanceFinalizer{
|
||||
Client: client,
|
||||
instance: instance,
|
||||
}
|
||||
}
|
||||
|
||||
func (wf *wrappedInstanceFinalizer) AddFinalizer(ctx context.Context) error {
|
||||
if wf.ContainsFinalizer() {
|
||||
return nil
|
||||
}
|
||||
return wf.patch(ctx, wf.instance, func(instance *zitadelv1alpha1.Instance) {
|
||||
controllerutil.AddFinalizer(instance, instanceFinalizerName)
|
||||
})
|
||||
}
|
||||
|
||||
func (wf *wrappedInstanceFinalizer) RemoveFinalizer(ctx context.Context) error {
|
||||
if !wf.ContainsFinalizer() {
|
||||
return nil
|
||||
}
|
||||
return wf.patch(ctx, wf.instance, func(instance *zitadelv1alpha1.Instance) {
|
||||
controllerutil.RemoveFinalizer(wf.instance, instanceFinalizerName)
|
||||
})
|
||||
}
|
||||
|
||||
func (wr *wrappedInstanceFinalizer) ContainsFinalizer() bool {
|
||||
return controllerutil.ContainsFinalizer(wr.instance, instanceFinalizerName)
|
||||
}
|
||||
|
||||
func (wf *wrappedInstanceFinalizer) Reconcile(ctx context.Context, ztdClient *systemClient.Client) error {
|
||||
if wf.instance.Status.InstanceId != nil {
|
||||
_, err := ztdClient.RemoveInstance(ctx, &pb.RemoveInstanceRequest{InstanceId: *wf.instance.Status.InstanceId})
|
||||
if err != nil {
|
||||
return fmt.Errorf("error removing Instance: %v", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (wr *wrappedInstanceFinalizer) patch(ctx context.Context, instance *zitadelv1alpha1.Instance,
|
||||
patchFn func(*zitadelv1alpha1.Instance)) error {
|
||||
patch := ctrlClient.MergeFrom(instance.DeepCopy())
|
||||
patchFn(instance)
|
||||
|
||||
if err := wr.Client.Patch(ctx, instance, patch); err != nil {
|
||||
return fmt.Errorf("error patching Instance finalizer: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
80
internal/controller/suite_test.go
Normal file
80
internal/controller/suite_test.go
Normal file
@@ -0,0 +1,80 @@
|
||||
/*
|
||||
Copyright 2024.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package controller
|
||||
|
||||
import (
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
"k8s.io/client-go/rest"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/envtest"
|
||||
logf "sigs.k8s.io/controller-runtime/pkg/log"
|
||||
"sigs.k8s.io/controller-runtime/pkg/log/zap"
|
||||
|
||||
zitadelv1alpha1 "gitea.corredorconect.com/software-engineering/zitadel-k8s-operator/api/v1alpha1"
|
||||
//+kubebuilder:scaffold:imports
|
||||
)
|
||||
|
||||
// These tests use Ginkgo (BDD-style Go testing framework). Refer to
|
||||
// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
|
||||
|
||||
// cfg holds the REST configuration returned by the envtest environment.
var cfg *rest.Config

// k8sClient is the client the specs use against the test API server.
var k8sClient client.Client

// testEnv is the local envtest control plane started in BeforeSuite.
var testEnv *envtest.Environment
|
||||
|
||||
func TestAPIs(t *testing.T) {
|
||||
RegisterFailHandler(Fail)
|
||||
|
||||
RunSpecs(t, "Controller Suite")
|
||||
}
|
||||
|
||||
// BeforeSuite boots a local envtest control plane with the project CRDs,
// registers the v1alpha1 types on the shared scheme, and builds the
// k8sClient the specs use.
var _ = BeforeSuite(func() {
	logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))

	By("bootstrapping test environment")
	testEnv = &envtest.Environment{
		// CRDs come from the generated manifests under config/crd/bases.
		CRDDirectoryPaths:     []string{filepath.Join("..", "..", "config", "crd", "bases")},
		ErrorIfCRDPathMissing: true,
	}

	var err error
	// cfg is defined in this file globally.
	cfg, err = testEnv.Start()
	Expect(err).NotTo(HaveOccurred())
	Expect(cfg).NotTo(BeNil())

	err = zitadelv1alpha1.AddToScheme(scheme.Scheme)
	Expect(err).NotTo(HaveOccurred())

	//+kubebuilder:scaffold:scheme

	k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
	Expect(err).NotTo(HaveOccurred())
	Expect(k8sClient).NotTo(BeNil())

})
|
||||
|
||||
// AfterSuite stops the envtest control plane started in BeforeSuite.
var _ = AfterSuite(func() {
	By("tearing down the test environment")
	err := testEnv.Stop()
	Expect(err).NotTo(HaveOccurred())
})
|
||||
Reference in New Issue
Block a user