Skip to content

Commit 0f40af5

Browse files
committed
fix: Instability of cleanup process of old configmaps and secrets
CTLog, Rekor, and Fulcio have a problem where a newly created object could be deleted during the cleanup process for old objects.
1 parent d2a59e6 commit 0f40af5

File tree

4 files changed

+108
-47
lines changed

4 files changed

+108
-47
lines changed

internal/controller/ctlog/actions/server_config.go

+31-14
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,9 @@ package actions
22

33
import (
44
"context"
5+
"errors"
56
"fmt"
7+
"strconv"
68

79
rhtasv1alpha1 "github.com/securesign/operator/api/v1alpha1"
810
"github.com/securesign/operator/internal/controller/common/action"
@@ -131,10 +133,15 @@ func (i serverConfig) Handle(ctx context.Context, instance *rhtasv1alpha1.CTlog)
131133
},
132134
}
133135

136+
annotations := map[string]string{
137+
"rhtas.redhat.com/generation": strconv.FormatInt(instance.Generation, 10),
138+
}
139+
134140
if _, err = utils.CreateOrUpdate(ctx, i.Client,
135141
newConfig,
136142
ensure.ControllerReference[*corev1.Secret](instance, i.Client),
137143
ensure.Labels[*corev1.Secret](maps.Keys(configLabels), configLabels),
144+
ensure.Annotations[*corev1.Secret](maps.Keys(annotations), annotations),
138145
utils.EnsureSecretData(true, cfg),
139146
); err != nil {
140147
return i.Error(ctx, fmt.Errorf("could not create Server config: %w", err), instance,
@@ -147,13 +154,35 @@ func (i serverConfig) Handle(ctx context.Context, instance *rhtasv1alpha1.CTlog)
147154
})
148155
}
149156

150-
// try to discover existing config and clear them out
157+
defer i.cleanup(ctx, instance, newConfig.Name, configLabels)
158+
159+
instance.Status.ServerConfigRef = &rhtasv1alpha1.LocalObjectReference{Name: newConfig.Name}
160+
161+
i.Recorder.Eventf(instance, corev1.EventTypeNormal, "CTLogConfigCreated", "Secret with ctlog configuration created: %s", newConfig.Name)
162+
meta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{
163+
Type: ConfigCondition,
164+
Status: metav1.ConditionTrue,
165+
Reason: constants.Ready,
166+
Message: "Server config created",
167+
ObservedGeneration: instance.Generation,
168+
})
169+
return i.StatusUpdate(ctx, instance)
170+
}
171+
172+
func (i serverConfig) cleanup(ctx context.Context, instance *rhtasv1alpha1.CTlog, newSecretName string, configLabels map[string]string) {
173+
if newSecretName == "" {
174+
i.Logger.Error(errors.New("new Secret name is empty"), "unable to clean old objects", "namespace", instance.Namespace)
175+
return
176+
}
177+
178+
// try to discover existing secrets and clear them out
151179
partialConfigs, err := utils.ListSecrets(ctx, i.Client, instance.Namespace, labels2.SelectorFromSet(configLabels).String())
152180
if err != nil {
153181
i.Logger.Error(err, "problem with listing configmaps", "namespace", instance.Namespace)
182+
return
154183
}
155184
for _, partialConfig := range partialConfigs.Items {
156-
if partialConfig.Name == newConfig.Name {
185+
if partialConfig.Name == newSecretName {
157186
continue
158187
}
159188

@@ -166,18 +195,6 @@ func (i serverConfig) Handle(ctx context.Context, instance *rhtasv1alpha1.CTlog)
166195
i.Logger.Info("Remove invalid Secret with ctlog configuration", "Name", partialConfig.Name)
167196
i.Recorder.Eventf(instance, corev1.EventTypeNormal, "CTLogConfigDeleted", "Secret with ctlog configuration deleted: %s", partialConfig.Name)
168197
}
169-
170-
instance.Status.ServerConfigRef = &rhtasv1alpha1.LocalObjectReference{Name: newConfig.Name}
171-
172-
i.Recorder.Eventf(instance, corev1.EventTypeNormal, "CTLogConfigCreated", "Secret with ctlog configuration created: %s", newConfig.Name)
173-
meta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{
174-
Type: ConfigCondition,
175-
Status: metav1.ConditionTrue,
176-
Reason: constants.Ready,
177-
Message: "Server config created",
178-
ObservedGeneration: instance.Generation,
179-
})
180-
return i.StatusUpdate(ctx, instance)
181198
}
182199

183200
func (i serverConfig) handlePrivateKey(instance *rhtasv1alpha1.CTlog) (*ctlogUtils.KeyConfig, error) {

internal/controller/fulcio/actions/server_config.go

+36-17
Original file line numberDiff line numberDiff line change
@@ -2,8 +2,10 @@ package actions
22

33
import (
44
"context"
5+
"errors"
56
"fmt"
67
"reflect"
8+
"strconv"
79

810
rhtasv1alpha1 "github.com/securesign/operator/api/v1alpha1"
911
"github.com/securesign/operator/internal/controller/common/action"
@@ -115,10 +117,16 @@ func (i serverConfig) Handle(ctx context.Context, instance *rhtasv1alpha1.Fulcio
115117
Namespace: instance.Namespace,
116118
},
117119
}
120+
121+
annotations := map[string]string{
122+
"rhtas.redhat.com/generation": strconv.FormatInt(instance.Generation, 10),
123+
}
124+
118125
if _, err = kubernetes.CreateOrUpdate(ctx, i.Client,
119126
newConfig,
120127
ensure.ControllerReference[*v1.ConfigMap](instance, i.Client),
121128
ensure.Labels[*v1.ConfigMap](maps.Keys(configLabel), configLabel),
129+
ensure.Annotations[*v1.ConfigMap](maps.Keys(annotations), annotations),
122130
kubernetes.EnsureConfigMapData(
123131
true,
124132
map[string]string{
@@ -129,13 +137,35 @@ func (i serverConfig) Handle(ctx context.Context, instance *rhtasv1alpha1.Fulcio
129137
return i.Error(ctx, fmt.Errorf("could not create Server config: %w", err), instance)
130138
}
131139

140+
defer i.cleanup(ctx, instance, newConfig.Name, configLabel)
141+
142+
i.Recorder.Eventf(instance, v1.EventTypeNormal, "FulcioConfigUpdated", "Fulcio config updated: %s", newConfig.Name)
143+
instance.Status.ServerConfigRef = &rhtasv1alpha1.LocalObjectReference{Name: newConfig.Name}
144+
145+
meta.SetStatusCondition(&instance.Status.Conditions,
146+
metav1.Condition{
147+
Type: constants.Ready,
148+
Status: metav1.ConditionFalse,
149+
Reason: constants.Creating,
150+
Message: "Server config created"},
151+
)
152+
return i.StatusUpdate(ctx, instance)
153+
}
154+
155+
func (i serverConfig) cleanup(ctx context.Context, instance *rhtasv1alpha1.Fulcio, newConfigName string, configLabels map[string]string) {
156+
if newConfigName == "" {
157+
i.Logger.Error(errors.New("new ConfigMap name is empty"), "unable to clean old objects", "namespace", instance.Namespace)
158+
return
159+
}
160+
132161
// remove old server configmaps
133-
partialConfigs, err := kubernetes.ListConfigMaps(ctx, i.Client, instance.Namespace, labels2.SelectorFromSet(configLabel).String())
162+
partialConfigs, err := kubernetes.ListConfigMaps(ctx, i.Client, instance.Namespace, labels2.SelectorFromSet(configLabels).String())
134163
if err != nil {
135164
i.Logger.Error(err, "problem with finding configmap")
165+
return
136166
}
137167
for _, partialConfig := range partialConfigs.Items {
138-
if partialConfig.Name == newConfig.Name {
168+
if partialConfig.Name == newConfigName {
139169
continue
140170
}
141171

@@ -147,21 +177,10 @@ func (i serverConfig) Handle(ctx context.Context, instance *rhtasv1alpha1.Fulcio
147177
})
148178
if err != nil {
149179
i.Logger.Error(err, "problem with deleting configmap", "name", partialConfig.Name)
150-
} else {
151-
i.Logger.Info("Remove invalid ConfigMap with rekor-server configuration", "name", partialConfig.Name)
152-
i.Recorder.Eventf(instance, v1.EventTypeNormal, "FulcioConfigDeleted", "Fulcio config deleted: %s", partialConfig.Name)
180+
i.Recorder.Eventf(instance, v1.EventTypeWarning, "FulcioConfigDeleted", "Unable to delete secret: %s", partialConfig.Name)
181+
continue
153182
}
183+
i.Logger.Info("Remove invalid ConfigMap with Fulcio configuration", "name", partialConfig.Name)
184+
i.Recorder.Eventf(instance, v1.EventTypeNormal, "FulcioConfigDeleted", "Fulcio config deleted: %s", partialConfig.Name)
154185
}
155-
156-
i.Recorder.Eventf(instance, v1.EventTypeNormal, "FulcioConfigUpdated", "Fulcio config updated: %s", newConfig.Name)
157-
instance.Status.ServerConfigRef = &rhtasv1alpha1.LocalObjectReference{Name: newConfig.Name}
158-
159-
meta.SetStatusCondition(&instance.Status.Conditions,
160-
metav1.Condition{
161-
Type: constants.Ready,
162-
Status: metav1.ConditionFalse,
163-
Reason: constants.Creating,
164-
Message: "Server config created"},
165-
)
166-
return i.StatusUpdate(ctx, instance)
167186
}

internal/controller/rekor/actions/server/sharding_config.go

+35-16
Original file line numberDiff line numberDiff line change
@@ -2,8 +2,10 @@ package server
22

33
import (
44
"context"
5+
"errors"
56
"fmt"
67
"reflect"
8+
"strconv"
79

810
rhtasv1alpha1 "github.com/securesign/operator/api/v1alpha1"
911
"github.com/securesign/operator/internal/controller/common/action"
@@ -88,22 +90,49 @@ func (i shardingConfig) Handle(ctx context.Context, instance *rhtasv1alpha1.Reko
8890
Namespace: instance.Namespace,
8991
},
9092
}
93+
94+
annotations := map[string]string{
95+
"rhtas.redhat.com/generation": strconv.FormatInt(instance.Generation, 10),
96+
}
97+
9198
if _, err = kubernetes.CreateOrUpdate(ctx, i.Client,
9299
newConfig,
93100
ensure.ControllerReference[*v1.ConfigMap](instance, i.Client),
94101
ensure.Labels[*v1.ConfigMap](maps.Keys(labels), labels),
102+
ensure.Annotations[*v1.ConfigMap](maps.Keys(annotations), annotations),
95103
kubernetes.EnsureConfigMapData(true, content),
96104
); err != nil {
97105
return i.Error(ctx, fmt.Errorf("could not create sharding config: %w", err), instance)
98106
}
99107

108+
defer i.cleanup(ctx, instance, newConfig.Name, labels)
109+
110+
i.Recorder.Eventf(instance, v1.EventTypeNormal, "ShardingConfigCreated", "ConfigMap with sharding configuration created: %s", newConfig.Name)
111+
instance.Status.ServerConfigRef = &rhtasv1alpha1.LocalObjectReference{Name: newConfig.Name}
112+
113+
meta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{
114+
Type: actions.ServerCondition,
115+
Status: metav1.ConditionFalse,
116+
Reason: constants.Creating,
117+
Message: "Sharding config created",
118+
})
119+
return i.StatusUpdate(ctx, instance)
120+
}
121+
122+
func (i shardingConfig) cleanup(ctx context.Context, instance *rhtasv1alpha1.Rekor, newConfigName string, configLabels map[string]string) {
123+
if newConfigName == "" {
124+
i.Logger.Error(errors.New("new ConfigMap name is empty"), "unable to clean old objects", "namespace", instance.Namespace)
125+
return
126+
}
127+
100128
// remove old server configmaps
101-
partialConfigs, err := kubernetes.ListConfigMaps(ctx, i.Client, instance.Namespace, labels2.SelectorFromSet(labels).String())
129+
partialConfigs, err := kubernetes.ListConfigMaps(ctx, i.Client, instance.Namespace, labels2.SelectorFromSet(configLabels).String())
102130
if err != nil {
103131
i.Logger.Error(err, "problem with finding configmap")
132+
return
104133
}
105134
for _, partialConfig := range partialConfigs.Items {
106-
if partialConfig.Name == newConfig.Name {
135+
if partialConfig.Name == newConfigName {
107136
continue
108137
}
109138

@@ -115,22 +144,12 @@ func (i shardingConfig) Handle(ctx context.Context, instance *rhtasv1alpha1.Reko
115144
})
116145
if err != nil {
117146
i.Logger.Error(err, "problem with deleting configmap", "name", partialConfig.Name)
118-
} else {
119-
i.Logger.Info("Remove invalid ConfigMap with rekor-sharding configuration", "name", partialConfig.Name)
120-
i.Recorder.Eventf(instance, v1.EventTypeNormal, "ShardingConfigDeleted", "ConfigMap with sharding configuration deleted: %s", partialConfig.Name)
147+
i.Recorder.Eventf(instance, v1.EventTypeWarning, "ShardingConfigDeleted", "Unable to delete secret: %s", partialConfig.Name)
148+
continue
121149
}
150+
i.Logger.Info("Remove invalid ConfigMap with rekor-sharding configuration", "name", partialConfig.Name)
151+
i.Recorder.Eventf(instance, v1.EventTypeNormal, "ShardingConfigDeleted", "ConfigMap with sharding configuration deleted: %s", partialConfig.Name)
122152
}
123-
124-
i.Recorder.Eventf(instance, v1.EventTypeNormal, "ShardingConfigCreated", "ConfigMap with sharding configuration created: %s", newConfig.Name)
125-
instance.Status.ServerConfigRef = &rhtasv1alpha1.LocalObjectReference{Name: newConfig.Name}
126-
127-
meta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{
128-
Type: actions.ServerCondition,
129-
Status: metav1.ConditionFalse,
130-
Reason: constants.Creating,
131-
Message: "Sharding config created",
132-
})
133-
return i.StatusUpdate(ctx, instance)
134153
}
135154

136155
func createShardingConfigData(sharding []rhtasv1alpha1.RekorLogRange) (map[string]string, error) {

test/e2e/support/common.go

+6
Original file line numberDiff line numberDiff line change
@@ -16,6 +16,7 @@ import (
1616
monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
1717
v12 "k8s.io/api/apps/v1"
1818
v13 "k8s.io/api/batch/v1"
19+
"k8s.io/apimachinery/pkg/runtime/schema"
1920
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
2021
"k8s.io/client-go/kubernetes"
2122
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
@@ -124,6 +125,10 @@ func DumpNamespace(ctx context.Context, cli client.Client, ns string) {
124125
// Example usage with mock data
125126
k8s := map[string]logTarget{}
126127

128+
secretList := &metav1.PartialObjectMetadataList{}
129+
gvk := schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Secret"}
130+
secretList.SetGroupVersionKind(gvk)
131+
127132
toDump := map[string]client.ObjectList{
128133
"securesign.yaml": &v1alpha1.SecuresignList{},
129134
"fulcio.yaml": &v1alpha1.FulcioList{},
@@ -138,6 +143,7 @@ func DumpNamespace(ctx context.Context, cli client.Client, ns string) {
138143
"job.yaml": &v13.JobList{},
139144
"cronjob.yaml": &v13.CronJobList{},
140145
"event.yaml": &v1.EventList{},
146+
"secret.yaml": secretList,
141147
}
142148

143149
core.GinkgoWriter.Println("----------------------- Dumping namespace " + ns + " -----------------------")

0 commit comments

Comments
 (0)