fix: allow the keycloak instances to stop gracefully on deletion (#36655)

Also reuse the existing curl pod when no labels are provided.

closes: #34868 #28244

Signed-off-by: Steve Hawkins <shawkins@redhat.com>
This commit is contained in:
Steven Hawkins 2025-01-28 12:27:13 -05:00 committed by GitHub
parent ece56095a7
commit b841788343
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
2 changed files with 15 additions and 30 deletions

View File

@ -86,7 +86,6 @@ import java.util.LinkedHashMap;
import java.util.List;
import java.util.Optional;
import java.util.UUID;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;
import java.util.stream.Collectors;
@ -298,37 +297,21 @@ public class BaseOperatorTest implements QuarkusTestAfterEachCallback {
// Test teardown: delete the Keycloak root custom resources, then remove any
// operator-created dependents (StatefulSet, Secret, Service, Pod, Job) that the
// operator may recreate before it observes the root deletion.
//
// NOTE(review): this span looks like a rendered commit diff — both the removed
// and the added variant of some statements appear on consecutive lines (the
// paired roots.forEach(...) deletes and the paired throw statements). As plain
// Java the back-to-back throws would be unreachable code; confirm which variant
// is live against the actual source file before relying on this text.
public void cleanup() {
Log.info("Deleting Keycloak CR");
// due to https://github.com/operator-framework/java-operator-sdk/issues/2314 we
// try to ensure that the operator has processed the delete event from root objects
// this can be simplified to just the root deletion after we pick up the fix
// it can be further simplified after https://github.com/fabric8io/kubernetes-client/issues/5838
// to just a timed foreground deletion
var roots = List.of(Keycloak.class, KeycloakRealmImport.class);
var dependents = List.of(StatefulSet.class, Secret.class, Service.class, Pod.class, Job.class);
// Future that completes once no root resources remain (informer condition List::isEmpty).
var rootsDeleted = CompletableFuture.allOf(roots.stream()
.map(c -> k8sclient.resources(c).informOnCondition(List::isEmpty)).toArray(CompletableFuture[]::new));
// NOTE(review): diff pair — withGracePeriod(10) is presumably the added graceful
// delete (per the commit message), the bare delete() the removed variant.
roots.forEach(c -> k8sclient.resources(c).withGracePeriod(10).delete());
roots.forEach(c -> k8sclient.resources(c).delete());
// enforce that at least the statefulset / pods are gone
try {
// Wait up to 1 minute for the root informer condition, then up to 20s for
// the labeled StatefulSets in this namespace to disappear.
rootsDeleted.get(1, TimeUnit.MINUTES);
k8sclient
.apps()
.statefulSets()
.inNamespace(namespace)
.withLabels(Constants.DEFAULT_LABELS).informOnCondition(List::isEmpty).get(20, TimeUnit.SECONDS);
} catch (Exception e) {
// delete event should have arrived quickly because this is a background delete
// NOTE(review): diff pair — launderThrowable appears to be the added variant
// (keeps KubernetesClientException semantics); the raw RuntimeException wrap
// the removed one.
throw new RuntimeException(e);
throw KubernetesClientException.launderThrowable(e);
}
// Gracefully delete every labeled dependent resource the operator created.
dependents.stream().map(c -> k8sclient.resources(c).withLabels(Constants.DEFAULT_LABELS))
.forEach(r -> r.withGracePeriod(10).delete());
// enforce that the dependents are gone
// Poll for 5s: if dependents reappear (operator recreated them because it has
// not yet processed the Keycloak deletion), force-delete with grace period 0
// and keep waiting until a full quiet interval passes.
Awaitility.await().during(5, TimeUnit.SECONDS).until(() -> {
if (dependents.stream().anyMatch(
c -> !k8sclient.resources(c).withLabels(Constants.DEFAULT_LABELS).list().getItems().isEmpty())) {
// the operator must have recreated because it hasn't gotten the keycloak
// deleted event, keep cleaning
dependents.stream().map(c -> k8sclient.resources(c).withLabels(Constants.DEFAULT_LABELS))
.forEach(r -> r.withGracePeriod(0).delete());
return false;
}
return true;
});
}
@Override

View File

@ -128,7 +128,7 @@ public final class K8sUtils {
public static CurlResult inClusterCurl(KubernetesClient k8sClient, String namespace, Map<String, String> labels, String... args) {
Log.infof("Starting cURL in namespace '%s' with labels '%s'", namespace, labels);
var podName = "curl-pod-" + UUID.randomUUID();
var podName = "curl-pod" + (labels.isEmpty()?"":("-" + UUID.randomUUID()));
try {
var builder = new PodBuilder();
builder.withNewMetadata()
@ -142,20 +142,22 @@ public final class K8sUtils {
try {
k8sClient.resource(curlPod).create();
} catch (KubernetesClientException e) {
if (e.getCode() != HttpURLConnection.HTTP_CONFLICT) {
if (!labels.isEmpty() || e.getCode() != HttpURLConnection.HTTP_CONFLICT) {
throw e;
}
}
ByteArrayOutputStream output = new ByteArrayOutputStream();
try (ExecWatch watch = k8sClient.pods().resource(curlPod).withReadyWaitTimeout(60000)
try (ExecWatch watch = k8sClient.pods().resource(curlPod).withReadyWaitTimeout(15000)
.writingOutput(output)
.exec(Stream.concat(Stream.of("curl"), Stream.of(args)).toArray(String[]::new))) {
var exitCode = watch.exitCode().get(15, TimeUnit.SECONDS);
var exitCode = watch.exitCode().get(5, TimeUnit.SECONDS);
return new CurlResult(exitCode, output.toString(StandardCharsets.UTF_8));
} finally {
k8sClient.resource(curlPod).delete();
if (!labels.isEmpty()) {
k8sClient.resource(curlPod).delete();
}
}
} catch (Exception ex) {
throw KubernetesClientException.launderThrowable(ex);