Remove CACHE-EMBEDDED-REMOTE-STORE experimental feature

Closes #34160

Signed-off-by: Michal Hajas <mhajas@redhat.com>
Michal Hajas 2025-04-16 14:01:55 +02:00 committed by GitHub
parent 263e7fbf45
commit 4dc4de7c12
136 changed files with 323 additions and 8665 deletions

View File

@ -125,8 +125,6 @@ public class Profile {
PASSKEYS("Passkeys", Type.PREVIEW),
CACHE_EMBEDDED_REMOTE_STORE("Support for remote-store in embedded Infinispan caches", Type.EXPERIMENTAL),
USER_EVENT_METRICS("Collect metrics based on user events", Type.DEFAULT),
IPA_TUURA_FEDERATION("IPA-Tuura user federation provider", Type.EXPERIMENTAL),
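For context, entries of this enum are feature flags that code elsewhere queries through the `Profile` API; a minimal sketch of the usual guard pattern, assuming the existing `Profile.isFeatureEnabled` accessor and shown with a feature that survives this commit:

import org.keycloak.common.Profile;

class FeatureGuardExample {
    // Illustrative only: with CACHE_EMBEDDED_REMOTE_STORE removed, any guard that
    // referenced it must be deleted as well; PASSKEYS stands in as a surviving flag.
    static boolean passkeysEnabled() {
        return Profile.isFeatureEnabled(Profile.Feature.PASSKEYS);
    }
}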

View File

@ -13,6 +13,9 @@ include::topics/templates/document-attributes.adoc[]
:release_header_latest_link: {releasenotes_link_latest}
include::topics/templates/release-header.adoc[]
== {project_name_full} 26.3.0
include::topics/26_3_0.adoc[leveloffset=2]
== {project_name_full} 26.2.0
include::topics/26_2_0.adoc[leveloffset=2]

View File

@ -0,0 +1,7 @@
= Usage of remote stores for embedded caches is restricted
Previously, the use of remote stores was discouraged, and the experimental feature `cache-embedded-remote-store` had to be enabled to run in such a setup.
In this release, the experimental feature has been removed and the use of remote stores is restricted.
If you previously used the `cache-embedded-remote-store` feature, see the link:{upgradingguide_link}[{upgradingguide_name}] for migration steps.
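For reference, the setup being restricted is an embedded cache backed by a remote store; a minimal sketch of the programmatic configuration, using only builder calls taken from the `configureRemoteCacheStore` method removed later in this commit (host and port are placeholders):

import org.infinispan.configuration.cache.Configuration;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.persistence.remote.configuration.RemoteStoreConfigurationBuilder;

class RemoteStoreConfigSketch {
    // The kind of embedded-cache-plus-remote-store configuration that the removed
    // experimental feature used to allow.
    static Configuration build(String cacheName) {
        ConfigurationBuilder builder = new ConfigurationBuilder();
        builder.persistence()
                .addStore(RemoteStoreConfigurationBuilder.class)
                .remoteCacheName(cacheName)
                .shared(true)
                .rawValues(true)
                .addServer()
                    .host("127.0.0.1") // placeholder
                    .port(11222);      // placeholder
        return builder.build();
    }
}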

View File

@ -120,9 +120,6 @@
:jdgserver_name: Infinispan
:jdgserver_version: 9.4.19
:jdgserver_version_latest: 11.0.9
:jdgserver_crossdcdocs_link: https://infinispan.org/docs/11.0.x/titles/xsite/xsite.html
:jdgserver_version_latest: 11.0.8
:subsystem_undertow_xml_urn: urn:jboss:domain:undertow:12.0
:subsystem_infinispan_xml_urn: urn:jboss:domain:infinispan:12.0

View File

@ -0,0 +1,17 @@
== Breaking changes
Breaking changes are identified as requiring changes from existing users to their configurations.
== Notable changes
Notable changes where an internal behavior changed to prevent common misconfigurations, fix bugs or simplify running {project_name}.
=== Usage of remote stores for embedded caches is restricted
The experimental feature `cache-embedded-remote-store` was removed in this release and usage of remote stores for embedded caches was restricted.
Consider the following cases and recommended migration steps.
. If you are using remote stores for running {project_name} in multiple data centers, follow link:{highavailabilityguide_link}[{highavailabilityguide_name}] for deploying {project_name}.
. If you are using remote stores to keep user sessions available after a {project_name} restart, use the `persistent-user-session` feature, which is enabled by default.
WARNING: {project_name} will refuse to start if the `persistent-user-session` feature is disabled and a remote store is configured for any of the user session caches.
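A hypothetical sketch of the startup guard described by the warning; this is not the actual Keycloak implementation, and both parameters are illustrative stand-ins for the real feature and cache checks:

class UserSessionRemoteStoreGuard {
    // Illustrative only: mirrors the documented behavior, not Keycloak's real code path.
    static void validate(boolean persistentUserSessionsEnabled, boolean remoteStoreOnSessionCaches) {
        if (!persistentUserSessionsEnabled && remoteStoreOnSessionCaches) {
            throw new IllegalStateException(
                    "Remote store is configured for a user session cache but the persistent-user-session feature is disabled");
        }
    }
}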

View File

@ -1,6 +1,10 @@
[[migration-changes]]
== Migration Changes
=== Migrating to 26.3.0
include::changes-26_3_0.adoc[leveloffset=2]
=== Migrating to 26.2.0
include::changes-26_2_0.adoc[leveloffset=2]

View File

@ -1,98 +0,0 @@
/*
* Copyright 2016 Red Hat, Inc. and/or its affiliates
* and other contributors as indicated by the @author tags.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.keycloak.cluster.infinispan;
import java.util.Set;
import org.infinispan.Cache;
import org.infinispan.client.hotrod.Flag;
import org.infinispan.client.hotrod.RemoteCache;
import org.infinispan.commons.api.BasicCache;
import org.infinispan.persistence.remote.RemoteStore;
import org.jboss.logging.Logger;
/**
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
*/
abstract class CrossDCAwareCacheFactory {
protected static final Logger logger = Logger.getLogger(CrossDCAwareCacheFactory.class);
abstract BasicCache<String, Object> getCache();
static CrossDCAwareCacheFactory getFactory(Cache<String, Object> workCache, Set<RemoteStore> remoteStores) {
if (remoteStores.isEmpty()) {
logger.debugf("No configured remoteStore available. Cross-DC scenario is not used");
return new InfinispanCacheWrapperFactory(workCache);
} else {
logger.debugf("RemoteStore is available. Cross-DC scenario will be used");
if (remoteStores.size() > 1) {
logger.warnf("More remoteStores configured for work cache. Will use just the first one");
}
// For cross-DC scenario, we need to return underlying remoteCache for atomic operations to work properly
RemoteStore remoteStore = remoteStores.iterator().next();
RemoteCache remoteCache = remoteStore.getRemoteCache();
if (remoteCache == null) {
String cacheName = remoteStore.getConfiguration().remoteCacheName();
throw new IllegalStateException("Remote cache '" + cacheName + "' is not available.");
}
return new RemoteCacheWrapperFactory(remoteCache);
}
}
// We don't have external JDG configured. No cross-DC.
private static class InfinispanCacheWrapperFactory extends CrossDCAwareCacheFactory {
private final Cache<String, Object> workCache;
InfinispanCacheWrapperFactory(Cache<String, Object> workCache) {
this.workCache = workCache;
}
@Override
BasicCache<String, Object> getCache() {
return workCache;
}
}
// We have external JDG configured. Cross-DC should be enabled
private static class RemoteCacheWrapperFactory extends CrossDCAwareCacheFactory {
private final RemoteCache<String, Object> remoteCache;
RemoteCacheWrapperFactory(RemoteCache<String, Object> remoteCache) {
this.remoteCache = remoteCache;
}
@Override
BasicCache<String, Object> getCache() {
// Flags are per-invocation!
return remoteCache.withFlags(Flag.FORCE_RETURN_VALUE);
}
}
}

View File

@ -24,12 +24,15 @@ import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.infinispan.Cache;
import org.jboss.logging.Logger;
import org.keycloak.cluster.ClusterEvent;
import org.keycloak.cluster.ClusterListener;
import org.keycloak.cluster.ClusterProvider;
import org.keycloak.cluster.ExecutionResult;
import org.keycloak.common.util.Retry;
import org.keycloak.common.util.Time;
import org.keycloak.models.sessions.infinispan.CacheDecorators;
/**
*
@ -44,15 +47,15 @@ public class InfinispanClusterProvider implements ClusterProvider {
private final int clusterStartupTime;
private final String myAddress;
private final CrossDCAwareCacheFactory crossDCAwareCacheFactory;
private final Cache<String, Object> workCache;
private final InfinispanNotificationsManager notificationsManager; // Just to extract notifications related stuff to separate class
private final ExecutorService localExecutor;
public InfinispanClusterProvider(int clusterStartupTime, String myAddress, CrossDCAwareCacheFactory crossDCAwareCacheFactory, InfinispanNotificationsManager notificationsManager, ExecutorService localExecutor) {
public InfinispanClusterProvider(int clusterStartupTime, String myAddress, Cache<String, Object> workCache, InfinispanNotificationsManager notificationsManager, ExecutorService localExecutor) {
this.myAddress = myAddress;
this.clusterStartupTime = clusterStartupTime;
this.crossDCAwareCacheFactory = crossDCAwareCacheFactory;
this.workCache = workCache;
this.notificationsManager = notificationsManager;
this.localExecutor = localExecutor;
}
@ -138,7 +141,7 @@ public class InfinispanClusterProvider implements ClusterProvider {
private boolean tryLock(String cacheKey, int taskTimeoutInSeconds) {
LockEntry myLock = new LockEntry(myAddress);
LockEntry existingLock = InfinispanClusterProviderFactory.putIfAbsentWithRetries(crossDCAwareCacheFactory, cacheKey, myLock, taskTimeoutInSeconds);
LockEntry existingLock = (LockEntry) workCache.putIfAbsent(cacheKey, myLock, Time.toMillis(taskTimeoutInSeconds), TimeUnit.MILLISECONDS);
if (existingLock != null) {
if (logger.isTraceEnabled()) {
logger.tracef("Task %s in progress already by node %s. Ignoring task.", cacheKey, existingLock.node());
@ -157,7 +160,7 @@ public class InfinispanClusterProvider implements ClusterProvider {
// More attempts to send the message (it may fail if some node fails in the meantime)
Retry.executeWithBackoff((int iteration) -> {
crossDCAwareCacheFactory.getCache().remove(cacheKey);
CacheDecorators.ignoreReturnValues(workCache).remove(cacheKey);
if (logger.isTraceEnabled()) {
logger.tracef("Task %s removed from the cache", cacheKey);
}
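The replacement lock path relies on Infinispan's atomic `putIfAbsent` with a lifespan, invoked directly on the embedded work cache instead of the removed cross-DC wrapper; a minimal sketch of the pattern (cache, key and lock objects are placeholders):

import java.util.concurrent.TimeUnit;
import org.infinispan.Cache;

class WorkCacheLockSketch {
    // Returns true when this node won the race for the key; the entry expires after
    // the task timeout, so a crashed holder cannot block the key forever.
    static boolean tryLock(Cache<String, Object> workCache, String cacheKey,
                           Object myLock, int taskTimeoutInSeconds) {
        Object existing = workCache.putIfAbsent(cacheKey, myLock,
                taskTimeoutInSeconds * 1000L, TimeUnit.MILLISECONDS);
        return existing == null;
    }
}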

View File

@ -22,12 +22,9 @@ import java.util.Collection;
import java.util.Set;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import java.util.stream.Collectors;
import org.infinispan.Cache;
import org.infinispan.client.hotrod.exceptions.HotRodClientException;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.context.Flag;
import org.infinispan.lifecycle.ComponentStatus;
@ -36,15 +33,12 @@ import org.infinispan.notifications.cachemanagerlistener.annotation.Merged;
import org.infinispan.notifications.cachemanagerlistener.annotation.ViewChanged;
import org.infinispan.notifications.cachemanagerlistener.event.MergeEvent;
import org.infinispan.notifications.cachemanagerlistener.event.ViewChangedEvent;
import org.infinispan.persistence.remote.RemoteStore;
import org.infinispan.remoting.transport.Address;
import org.jboss.logging.Logger;
import org.keycloak.Config;
import org.keycloak.cluster.ClusterProvider;
import org.keycloak.cluster.ClusterProviderFactory;
import org.keycloak.common.Profile;
import org.keycloak.common.util.MultiSiteUtils;
import org.keycloak.common.util.Retry;
import org.keycloak.common.util.Time;
import org.keycloak.connections.infinispan.DefaultInfinispanConnectionProviderFactory;
import org.keycloak.connections.infinispan.InfinispanConnectionProvider;
@ -66,12 +60,7 @@ public class InfinispanClusterProviderFactory implements ClusterProviderFactory,
// Infinispan cache
private volatile Cache<String, Object> workCache;
// Ensure that atomic operations (like putIfAbsent) must work correctly in any of: non-clustered, clustered or cross-Data-Center (cross-DC) setups
private CrossDCAwareCacheFactory crossDCAwareCacheFactory;
private int clusterStartupTime;
// Just to extract notifications related stuff to separate class
private InfinispanNotificationsManager notificationsManager;
@ -87,7 +76,7 @@ public class InfinispanClusterProviderFactory implements ClusterProviderFactory,
public ClusterProvider create(KeycloakSession session) {
lazyInit(session);
String myAddress = InfinispanUtil.getTopologyInfo(session).getMyNodeName();
return new InfinispanClusterProvider(clusterStartupTime, myAddress, crossDCAwareCacheFactory, notificationsManager, localExecutor);
return new InfinispanClusterProvider(clusterStartupTime, myAddress, workCache, notificationsManager, localExecutor);
}
private void lazyInit(KeycloakSession session) {
@ -100,17 +89,13 @@ public class InfinispanClusterProviderFactory implements ClusterProviderFactory,
workCacheListener = new ViewChangeListener();
workCache.getCacheManager().addListener(workCacheListener);
// See if we have RemoteStore (external JDG) configured for cross-Data-Center scenario
Set<RemoteStore> remoteStores = InfinispanUtil.getRemoteStores(workCache);
crossDCAwareCacheFactory = CrossDCAwareCacheFactory.getFactory(workCache, remoteStores);
clusterStartupTime = initClusterStartupTime(session);
TopologyInfo topologyInfo = InfinispanUtil.getTopologyInfo(session);
String myAddress = topologyInfo.getMyNodeName();
String mySite = topologyInfo.getMySiteName();
notificationsManager = InfinispanNotificationsManager.create(session, workCache, myAddress, mySite, remoteStores);
notificationsManager = InfinispanNotificationsManager.create(workCache, myAddress, mySite);
}
}
}
@ -118,7 +103,7 @@ public class InfinispanClusterProviderFactory implements ClusterProviderFactory,
protected int initClusterStartupTime(KeycloakSession session) {
Integer existingClusterStartTime = (Integer) crossDCAwareCacheFactory.getCache().get(InfinispanClusterProvider.CLUSTER_STARTUP_TIME_KEY);
Integer existingClusterStartTime = (Integer) workCache.get(InfinispanClusterProvider.CLUSTER_STARTUP_TIME_KEY);
if (existingClusterStartTime != null) {
logger.debugf("Loaded cluster startup time: %s", Time.toDate(existingClusterStartTime).toString());
return existingClusterStartTime;
@ -126,7 +111,7 @@ public class InfinispanClusterProviderFactory implements ClusterProviderFactory,
// clusterStartTime not yet initialized. Let's try to put our startupTime
int serverStartTime = (int) (session.getKeycloakSessionFactory().getServerStartupTimestamp() / 1000);
existingClusterStartTime = putIfAbsentWithRetries(crossDCAwareCacheFactory, InfinispanClusterProvider.CLUSTER_STARTUP_TIME_KEY, serverStartTime, -1);
existingClusterStartTime = (Integer) workCache.putIfAbsent(InfinispanClusterProvider.CLUSTER_STARTUP_TIME_KEY, serverStartTime);
if (existingClusterStartTime == null) {
logger.debugf("Initialized cluster startup time to %s", Time.toDate(serverStartTime).toString());
return serverStartTime;
@ -137,38 +122,6 @@ public class InfinispanClusterProviderFactory implements ClusterProviderFactory,
}
}
// Will retry few times for the case when backup site not available in cross-dc environment.
// The site might be taken offline automatically if "take-offline" properly configured
static <V> V putIfAbsentWithRetries(CrossDCAwareCacheFactory crossDCAwareCacheFactory, String key, V value, int taskTimeoutInSeconds) {
AtomicReference<V> resultRef = new AtomicReference<>();
Retry.executeWithBackoff(iteration -> {
try {
V result;
if (taskTimeoutInSeconds > 0) {
long lifespanMs = InfinispanUtil.toHotrodTimeMs(crossDCAwareCacheFactory.getCache(), Time.toMillis(taskTimeoutInSeconds));
result = (V) crossDCAwareCacheFactory.getCache().putIfAbsent(key, value, lifespanMs, TimeUnit.MILLISECONDS);
} else {
result = (V) crossDCAwareCacheFactory.getCache().putIfAbsent(key, value);
}
resultRef.set(result);
} catch (HotRodClientException re) {
logger.warnf(re, "Failed to write key '%s' and value '%s' in iteration '%d' . Retrying", key, value, iteration);
// Rethrow the exception. Retry will take care of handle the exception and eventually retry the operation.
throw re;
}
}, 10, 10);
return resultRef.get();
}
@Override
public void init(Config.Scope config) {
}

View File

@ -17,27 +17,7 @@
package org.keycloak.cluster.infinispan;
import java.util.Collection;
import java.util.List;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;
import org.infinispan.Cache;
import org.infinispan.client.hotrod.RemoteCache;
import org.infinispan.client.hotrod.annotation.ClientCacheEntryCreated;
import org.infinispan.client.hotrod.annotation.ClientCacheEntryModified;
import org.infinispan.client.hotrod.annotation.ClientCacheEntryRemoved;
import org.infinispan.client.hotrod.annotation.ClientListener;
import org.infinispan.client.hotrod.event.ClientCacheEntryCreatedEvent;
import org.infinispan.client.hotrod.event.ClientCacheEntryModifiedEvent;
import org.infinispan.client.hotrod.event.ClientCacheEntryRemovedEvent;
import org.infinispan.client.hotrod.exceptions.HotRodClientException;
import org.infinispan.context.Flag;
import org.infinispan.notifications.Listener;
import org.infinispan.notifications.cachelistener.annotation.CacheEntryCreated;
@ -46,16 +26,19 @@ import org.infinispan.notifications.cachelistener.annotation.CacheEntryRemoved;
import org.infinispan.notifications.cachelistener.event.CacheEntryCreatedEvent;
import org.infinispan.notifications.cachelistener.event.CacheEntryModifiedEvent;
import org.infinispan.notifications.cachelistener.event.CacheEntryRemovedEvent;
import org.infinispan.persistence.remote.RemoteStore;
import org.jboss.logging.Logger;
import org.keycloak.cluster.ClusterEvent;
import org.keycloak.cluster.ClusterListener;
import org.keycloak.cluster.ClusterProvider;
import org.keycloak.common.util.ConcurrentMultivaluedHashMap;
import org.keycloak.common.util.Retry;
import org.keycloak.connections.infinispan.DefaultInfinispanConnectionProviderFactory;
import org.keycloak.executors.ExecutorsProvider;
import org.keycloak.models.KeycloakSession;
import org.keycloak.models.sessions.infinispan.CacheDecorators;
import java.util.Collection;
import java.util.List;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.TimeUnit;
import static org.keycloak.cluster.infinispan.InfinispanClusterProvider.TASK_KEY_PREFIX;
@ -68,58 +51,31 @@ public class InfinispanNotificationsManager {
protected static final Logger logger = Logger.getLogger(InfinispanNotificationsManager.class);
private static final int BACKOFF_BASE_MILLIS = 10;
private static final int MAX_BACKOFF_RETRIES = 10;
private final ConcurrentMultivaluedHashMap<String, ClusterListener> listeners = new ConcurrentMultivaluedHashMap<>();
private final ConcurrentMap<String, TaskCallback> taskCallbacks = new ConcurrentHashMap<>();
private final Cache<String, Object> workCache;
private final RemoteCache<String, Object> workRemoteCache;
private final String myAddress;
private final String mySite;
private final ExecutorService listenersExecutor;
protected InfinispanNotificationsManager(Cache<String, Object> workCache, RemoteCache<String, Object> workRemoteCache, String myAddress, String mySite, ExecutorService listenersExecutor) {
protected InfinispanNotificationsManager(Cache<String, Object> workCache, String myAddress, String mySite) {
this.workCache = workCache;
this.workRemoteCache = workRemoteCache;
this.myAddress = myAddress;
this.mySite = mySite;
this.listenersExecutor = listenersExecutor;
}
// Create and init manager including all listeners etc
public static InfinispanNotificationsManager create(KeycloakSession session, Cache<String, Object> workCache, String myAddress, String mySite, Set<RemoteStore> remoteStores) {
RemoteCache<String, Object> workRemoteCache = null;
if (!remoteStores.isEmpty()) {
RemoteStore remoteStore = remoteStores.iterator().next();
workRemoteCache = remoteStore.getRemoteCache();
if (mySite == null) {
throw new IllegalStateException("Multiple datacenters available, but site name is not configured! Check your configuration");
}
}
ExecutorService listenersExecutor = workRemoteCache==null ? null : session.getProvider(ExecutorsProvider.class).getExecutor("work-cache-event-listener");
InfinispanNotificationsManager manager = new InfinispanNotificationsManager(workCache, workRemoteCache, myAddress, mySite, listenersExecutor);
public static InfinispanNotificationsManager create(Cache<String, Object> workCache, String myAddress, String mySite) {
InfinispanNotificationsManager manager = new InfinispanNotificationsManager(workCache, myAddress, mySite);
// We need CacheEntryListener for communication within current DC
workCache.addListener(manager.new CacheEntryListener());
logger.debugf("Added listener for infinispan cache: %s", workCache.getName());
// Added listener for remoteCache to notify other DCs
if (workRemoteCache != null) {
workRemoteCache.addClientListener(manager.new HotRodListener(workRemoteCache));
logger.debugf("Added listener for HotRod remoteStore cache: %s", workRemoteCache.getName());
}
return manager;
}
@ -152,30 +108,8 @@ public class InfinispanNotificationsManager {
logger.tracef("Sending event with key %s: %s", eventKey, events);
}
if (dcNotify == ClusterProvider.DCNotify.LOCAL_DC_ONLY || workRemoteCache == null) {
// Just put it to workCache, but skip notifying remoteCache
workCache.getAdvancedCache().withFlags(Flag.IGNORE_RETURN_VALUES, Flag.SKIP_CACHE_STORE)
.put(eventKey, wrappedEvent, 120, TimeUnit.SECONDS);
} else {
// Add directly to remoteCache. Will notify remote listeners on all nodes in all DCs
Retry.executeWithBackoff((int iteration) -> {
try {
DefaultInfinispanConnectionProviderFactory.runWithReadLockOnCacheManager(() ->
workRemoteCache.put(eventKey, wrappedEvent, 120, TimeUnit.SECONDS)
);
} catch (HotRodClientException re) {
if (logger.isDebugEnabled()) {
logger.debugf(re, "Failed sending notification to remote cache '%s'. Key: '%s', iteration '%s'. Will try to retry the task",
workRemoteCache.getName(), eventKey, iteration);
}
// Rethrow the exception. Retry will take care of handle the exception and eventually retry the operation.
throw re;
}
}, 10, 10);
}
CacheDecorators.ignoreReturnValues(workCache)
.put(eventKey, wrappedEvent, 120, TimeUnit.SECONDS);
}
@ -196,72 +130,6 @@ public class InfinispanNotificationsManager {
public void cacheEntryRemoved(CacheEntryRemovedEvent<String, Object> event) {
taskFinished(event.getKey());
}
}
@ClientListener
public class HotRodListener {
private final RemoteCache<String, Object> remoteCache;
public HotRodListener(RemoteCache<String, Object> remoteCache) {
this.remoteCache = remoteCache;
}
@ClientCacheEntryCreated
public void created(ClientCacheEntryCreatedEvent<String> event) {
hotrodEventReceived(event.getKey());
}
@ClientCacheEntryModified
public void updated(ClientCacheEntryModifiedEvent<String> event) {
hotrodEventReceived(event.getKey());
}
@ClientCacheEntryRemoved
public void removed(ClientCacheEntryRemovedEvent<String> event) {
taskFinished(event.getKey());
}
private void hotrodEventReceived(String key) {
// TODO: Look at CacheEventConverter stuff to possibly include value in the event and avoid additional remoteCache request
try {
listenersExecutor.submit(() -> {
Supplier<Object> fetchEvent = () -> remoteCache.get(key);
Object event = DefaultInfinispanConnectionProviderFactory.runWithReadLockOnCacheManager(fetchEvent);
int iteration = 0;
// Event might have been generated from a node which is more up-to-date, so the fetch might return null.
// Retry until we find a node that is up-to-date and has the entry.
while (event == null && iteration < MAX_BACKOFF_RETRIES) {
++iteration;
try {
Thread.sleep(Retry.computeBackoffInterval(BACKOFF_BASE_MILLIS, iteration));
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
return;
}
event = DefaultInfinispanConnectionProviderFactory.runWithReadLockOnCacheManager(fetchEvent);
}
eventReceived(key, event);
});
} catch (RejectedExecutionException ree) {
// server is shutting down or pool was terminated - don't throw errors
if (ree.getMessage() != null && (ree.getMessage().contains("Terminated") || ree.getMessage().contains("Shutting down"))) {
logger.warnf("Rejected submitting of the event for key: %s because server is shutting down or pool was terminated.", key);
logger.debug(ree);
} else {
// avoid touching the cache when creating a log message to avoid a deadlock in Infinispan 12.1.7.Final
logger.errorf("Rejected submitting of the event for key: %s. Server going to shutdown or pool exhausted. Pool: %s", key, listenersExecutor.toString());
throw ree;
}
}
}
}
private void eventReceived(String key, Object obj) {

View File

@ -37,12 +37,10 @@ import org.infinispan.util.concurrent.BlockingManager;
public class DefaultInfinispanConnectionProvider implements InfinispanConnectionProvider {
private final EmbeddedCacheManager cacheManager;
private final RemoteCacheProvider remoteCacheProvider;
private final TopologyInfo topologyInfo;
public DefaultInfinispanConnectionProvider(EmbeddedCacheManager cacheManager, RemoteCacheProvider remoteCacheProvider, TopologyInfo topologyInfo) {
public DefaultInfinispanConnectionProvider(EmbeddedCacheManager cacheManager, TopologyInfo topologyInfo) {
this.cacheManager = cacheManager;
this.remoteCacheProvider = remoteCacheProvider;
this.topologyInfo = topologyInfo;
}
@ -61,7 +59,7 @@ public class DefaultInfinispanConnectionProvider implements InfinispanConnection
@Override
public <K, V> RemoteCache<K, V> getRemoteCache(String cacheName) {
return remoteCacheProvider.getRemoteCache(cacheName);
throw new IllegalStateException("Remote stores cannot be used with Embedded Infinispan.");
}
@Override
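With embedded Infinispan, `getRemoteCache` now fails fast; a hedged sketch of how a caller might branch instead, assuming the `InfinispanUtils.isRemoteInfinispan()` check that appears elsewhere in this commit (the null fallback is illustrative):

import org.infinispan.client.hotrod.RemoteCache;
import org.keycloak.connections.infinispan.InfinispanConnectionProvider;
import org.keycloak.infinispan.util.InfinispanUtils;

class RemoteCacheAccessSketch {
    // Only ask for a remote cache when the external-Infinispan feature set is active;
    // with embedded Infinispan the call now throws IllegalStateException.
    static <K, V> RemoteCache<K, V> remoteCacheOrNull(InfinispanConnectionProvider provider, String cacheName) {
        if (!InfinispanUtils.isRemoteInfinispan()) {
            return null; // embedded Infinispan: no remote caches available anymore
        }
        return provider.getRemoteCache(cacheName);
    }
}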

View File

@ -25,9 +25,7 @@ import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.function.Supplier;
import org.infinispan.client.hotrod.ProtocolVersion;
import org.infinispan.client.hotrod.RemoteCacheManager;
import org.infinispan.commons.dataconversion.MediaType;
import org.infinispan.configuration.cache.CacheMode;
@ -37,7 +35,6 @@ import org.infinispan.configuration.global.GlobalConfigurationBuilder;
import org.infinispan.eviction.EvictionStrategy;
import org.infinispan.manager.DefaultCacheManager;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.persistence.remote.configuration.RemoteStoreConfigurationBuilder;
import org.infinispan.transaction.LockingMode;
import org.infinispan.transaction.TransactionMode;
import org.infinispan.transaction.lookup.EmbeddedTransactionManagerLookup;
@ -106,8 +103,6 @@ public class DefaultInfinispanConnectionProviderFactory implements InfinispanCon
private volatile EmbeddedCacheManager cacheManager;
private volatile RemoteCacheProvider remoteCacheProvider;
protected volatile boolean containerManaged;
private volatile TopologyInfo topologyInfo;
@ -120,7 +115,7 @@ public class DefaultInfinispanConnectionProviderFactory implements InfinispanCon
return InfinispanUtils.isRemoteInfinispan() ?
new RemoteInfinispanConnectionProvider(cacheManager, remoteCacheManager, topologyInfo) :
new DefaultInfinispanConnectionProvider(cacheManager, remoteCacheProvider, topologyInfo);
new DefaultInfinispanConnectionProvider(cacheManager, topologyInfo);
}
@ -140,16 +135,6 @@ public class DefaultInfinispanConnectionProviderFactory implements InfinispanCon
}
}
public static <T> T runWithReadLockOnCacheManager(Supplier<T> task) {
Lock lock = DefaultInfinispanConnectionProviderFactory.READ_WRITE_LOCK.readLock();
lock.lock();
try {
return task.get();
} finally {
lock.unlock();
}
}
public static void runWithWriteLockOnCacheManager(Runnable task) {
Lock lock = DefaultInfinispanConnectionProviderFactory.READ_WRITE_LOCK.writeLock();
lock.lock();
@ -167,9 +152,6 @@ public class DefaultInfinispanConnectionProviderFactory implements InfinispanCon
if (cacheManager != null) {
cacheManager.stop();
}
if (remoteCacheProvider != null) {
remoteCacheProvider.stop();
}
});
}
@ -230,7 +212,6 @@ public class DefaultInfinispanConnectionProviderFactory implements InfinispanCon
logger.infof(topologyInfo.toString());
remoteCacheProvider = new RemoteCacheProvider(config, localCacheManager);
// only set the cache manager attribute at the very end to avoid passing a half-initialized entry callers
cacheManager = localCacheManager;
remoteCacheManager = rcm;
@ -398,9 +379,6 @@ public class DefaultInfinispanConnectionProviderFactory implements InfinispanCon
// copy base configuration
var builder = createCacheConfigurationBuilder();
builder.read(baseConfiguration);
if (config.getBoolean("remoteStoreEnabled", false)) {
configureRemoteCacheStore(builder, config.getBoolean("async", false), cacheName);
}
cacheManager.defineConfiguration(cacheName, builder.build());
cacheManager.getCache(cacheName);
}
@ -425,47 +403,6 @@ public class DefaultInfinispanConnectionProviderFactory implements InfinispanCon
return cb.build();
}
// Used for cross-data centers scenario. Usually integration with external JDG server, which itself handles communication between DCs.
private void configureRemoteCacheStore(ConfigurationBuilder builder, boolean async, String cacheName) {
String jdgServer = config.get("remoteStoreHost", "127.0.0.1");
Integer jdgPort = config.getInt("remoteStorePort", 11222);
// After upgrade to Infinispan 12.1.7.Final it's required that both remote store and embedded cache use
// the same key media type to allow segmentation. Also, the number of segments in an embedded cache needs to match number of segments in the remote store.
boolean segmented = config.getBoolean("segmented", false);
//noinspection removal
builder.persistence()
.passivation(false)
.addStore(RemoteStoreConfigurationBuilder.class)
.ignoreModifications(false)
.purgeOnStartup(false)
.preload(false)
.shared(true)
.remoteCacheName(cacheName)
.segmented(segmented)
.rawValues(true)
.forceReturnValues(false)
.protocolVersion(getHotrodVersion())
.addServer()
.host(jdgServer)
.port(jdgPort)
.async()
.enabled(async);
}
private ProtocolVersion getHotrodVersion() {
String hotrodVersionStr = config.get("hotrodProtocolVersion", ProtocolVersion.DEFAULT_PROTOCOL_VERSION.toString());
ProtocolVersion hotrodVersion = ProtocolVersion.parseVersion(hotrodVersionStr);
if (hotrodVersion == null) {
hotrodVersion = ProtocolVersion.DEFAULT_PROTOCOL_VERSION;
}
logger.debugf("HotRod protocol version: %s", hotrodVersion);
return hotrodVersion;
}
protected Configuration getKeysCacheConfig() {
ConfigurationBuilder cb = createCacheConfigurationBuilder();

View File

@ -17,10 +17,6 @@
package org.keycloak.connections.infinispan;
import org.infinispan.Cache;
import org.infinispan.client.hotrod.ProtocolVersion;
import org.infinispan.client.hotrod.RemoteCache;
import org.infinispan.commons.api.BasicCache;
import org.infinispan.commons.dataconversion.MediaType;
import org.infinispan.commons.time.TimeService;
import org.infinispan.commons.util.FileLookup;
@ -29,13 +25,10 @@ import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.configuration.global.GlobalConfigurationBuilder;
import org.infinispan.configuration.global.TransportConfigurationBuilder;
import org.infinispan.eviction.EvictionStrategy;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.factories.GlobalComponentRegistry;
import org.infinispan.factories.impl.BasicComponentRegistry;
import org.infinispan.factories.impl.ComponentRef;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.persistence.manager.PersistenceManager;
import org.infinispan.persistence.remote.RemoteStore;
import org.infinispan.remoting.transport.jgroups.JGroupsTransport;
import org.infinispan.util.EmbeddedTimeService;
import org.jboss.logging.Logger;
@ -44,7 +37,6 @@ import org.keycloak.common.util.Time;
import org.keycloak.models.KeycloakSession;
import java.time.Instant;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
@ -57,48 +49,11 @@ public class InfinispanUtil {
public static final int MAXIMUM_REPLACE_RETRIES = 25;
// See if we have RemoteStore (external JDG) configured for cross-Data-Center scenario
public static Set<RemoteStore> getRemoteStores(Cache<?, ?> ispnCache) {
return ComponentRegistry.componentOf(ispnCache, PersistenceManager.class).getStores(RemoteStore.class);
}
public static RemoteCache getRemoteCache(Cache<?, ?> ispnCache) {
Set<RemoteStore> remoteStores = getRemoteStores(ispnCache);
if (remoteStores.isEmpty()) {
return null;
} else {
return remoteStores.iterator().next().getRemoteCache();
}
}
public static TopologyInfo getTopologyInfo(KeycloakSession session) {
return session.getProvider(InfinispanConnectionProvider.class).getTopologyInfo();
}
/**
* Convert the given value to the proper value, which can be used when calling operations for the infinispan remoteCache.
*
* Infinispan HotRod protocol of versions older than 3.0 uses the "lifespan" or "maxIdle" as the normal expiration time when the value is 30 days or less.
* However for the bigger values, it assumes that the value is unix timestamp.
*
* @param ispnCache
* @param lifespanOrigMs
* @return
*/
public static long toHotrodTimeMs(BasicCache<?, ?> ispnCache, long lifespanOrigMs) {
if (ispnCache instanceof RemoteCache<?, ?> remoteCache && lifespanOrigMs > 2592000000L) {
ProtocolVersion protocolVersion = remoteCache.getRemoteCacheContainer().getConfiguration().version();
if (ProtocolVersion.PROTOCOL_VERSION_30.compareTo(protocolVersion) > 0) {
return Time.currentTimeMillis() + lifespanOrigMs;
}
}
return lifespanOrigMs;
}
private static final Object CHANNEL_INIT_SYNCHRONIZER = new Object();
public static void configureTransport(GlobalConfigurationBuilder gcb, String nodeName, String siteName, String jgroupsUdpMcastAddr,

View File

@ -1,209 +0,0 @@
/*
* Copyright 2017 Red Hat, Inc. and/or its affiliates
* and other contributors as indicated by the @author tags.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.keycloak.connections.infinispan;
import java.io.IOException;
import java.lang.reflect.Field;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import javax.security.auth.callback.Callback;
import javax.security.auth.callback.CallbackHandler;
import javax.security.auth.callback.NameCallback;
import javax.security.auth.callback.PasswordCallback;
import javax.security.auth.callback.UnsupportedCallbackException;
import javax.security.sasl.RealmCallback;
import org.infinispan.client.hotrod.RemoteCache;
import org.infinispan.client.hotrod.RemoteCacheManager;
import org.infinispan.client.hotrod.configuration.Configuration;
import org.infinispan.client.hotrod.configuration.ConfigurationBuilder;
import org.infinispan.commons.configuration.Combine;
import org.infinispan.manager.EmbeddedCacheManager;
import org.jboss.logging.Logger;
import org.keycloak.Config;
import org.keycloak.common.util.reflections.Reflections;
import java.util.stream.Collectors;
import org.infinispan.client.hotrod.exceptions.HotRodClientException;
/**
* Get either just remoteCache associated with remoteStore associated with infinispan cache of given name. If security is enabled, then
* return secured remoteCache based on the template provided by remoteStore configuration but with added "authentication" configuration
* of secured hotrod endpoint (RemoteStore doesn't yet allow to configure "security" of hotrod endpoints)
*
* TODO: Remove this class once we upgrade to infinispan version, which allows to configure security for remoteStore itself
*
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
*/
public class RemoteCacheProvider {
public static final String SCRIPT_CACHE_NAME = "___script_cache";
protected static final Logger logger = Logger.getLogger(RemoteCacheProvider.class);
private final Config.Scope config;
private final EmbeddedCacheManager cacheManager;
private final Map<String, RemoteCache> availableCaches = new HashMap<>();
// Enlist secured managers, which are managed by us and should be shutdown on stop
private final Map<String, RemoteCacheManager> managedManagers = new HashMap<>();
public RemoteCacheProvider(Config.Scope config, EmbeddedCacheManager cacheManager) {
this.config = config;
this.cacheManager = cacheManager;
}
public RemoteCache getRemoteCache(String cacheName) {
if (availableCaches.get(cacheName) == null) {
synchronized (this) {
if (availableCaches.get(cacheName) == null) {
RemoteCache remoteCache = loadRemoteCache(cacheName);
availableCaches.put(cacheName, remoteCache);
}
}
}
return availableCaches.get(cacheName);
}
public void stop() {
logger.debugf("Shutdown %d registered secured remoteCache managers", managedManagers.size());
for (RemoteCacheManager mgr : managedManagers.values()) {
mgr.stop();
}
}
protected synchronized RemoteCache loadRemoteCache(String cacheName) {
RemoteCache remoteCache = InfinispanUtil.getRemoteCache(cacheManager.getCache(cacheName));
if (remoteCache == null) {
return null;
}
logger.infof("Hotrod version for remoteCache %s: %s", remoteCache.getName(), remoteCache.getRemoteCacheManager().getConfiguration().version());
Boolean remoteStoreSecurity = config.getBoolean("remoteStoreSecurityEnabled");
if (remoteStoreSecurity == null) {
try {
logger.debugf("Detecting remote security settings of HotRod server, cache %s. Disable by explicitly setting \"remoteStoreSecurityEnabled\" property in spi=connectionsInfinispan/provider=default", cacheName);
remoteStoreSecurity = false;
final RemoteCache<Object, Object> scriptCache = remoteCache.getRemoteCacheManager().getCache(SCRIPT_CACHE_NAME);
if (scriptCache == null) {
logger.debug("Cannot detect remote security settings of HotRod server, disabling.");
} else {
scriptCache.containsKey("");
}
} catch (HotRodClientException ex) {
logger.debug("Seems that HotRod server requires authentication, enabling.");
remoteStoreSecurity = true;
}
}
if (remoteStoreSecurity) {
logger.infof("Remote store security for cache %s is enabled. Disable by setting \"remoteStoreSecurityEnabled\" property to \"false\" in spi=connectionsInfinispan/provider=default", cacheName);
RemoteCacheManager securedMgr = getOrCreateSecuredRemoteCacheManager(config, cacheName, remoteCache.getRemoteCacheManager());
return securedMgr.getCache(remoteCache.getName());
} else {
logger.infof("Remote store security for cache %s is disabled. If server fails to connect to remote JDG server, enable it.", cacheName);
return remoteCache;
}
}
protected RemoteCacheManager getOrCreateSecuredRemoteCacheManager(Config.Scope config, String cacheName, RemoteCacheManager origManager) {
String serverName = config.get("remoteStoreSecurityServerName", "keycloak-jdg-server");
String realm = config.get("remoteStoreSecurityRealm", "AllowScriptManager");
String username = config.get("remoteStoreSecurityUsername", "___script_manager");
String password = config.get("remoteStoreSecurityPassword", "not-so-secret-password");
// Create configuration template from the original configuration provided at remoteStore level
Configuration origConfig = origManager.getConfiguration();
ConfigurationBuilder cfgBuilder = new ConfigurationBuilder()
.read(origConfig, Combine.DEFAULT);
String securedHotRodEndpoint = origConfig.servers().stream()
.map(serverConfiguration -> serverConfiguration.host() + ":" + serverConfiguration.port())
.collect(Collectors.joining(";"));
if (managedManagers.containsKey(securedHotRodEndpoint)) {
return managedManagers.get(securedHotRodEndpoint);
}
logger.infof("Creating secured RemoteCacheManager for Server: '%s', Cache: '%s', Realm: '%s', Username: '%s', Secured HotRod endpoint: '%s'", serverName, cacheName, realm, username, securedHotRodEndpoint);
// Workaround as I need a way to override servers and it's not possible to remove existing :/
try {
Field serversField = cfgBuilder.getClass().getDeclaredField("servers");
Reflections.setAccessible(serversField);
List origServers = Reflections.getFieldValue(serversField, cfgBuilder, List.class);
origServers.clear();
} catch (NoSuchFieldException nsfe) {
throw new RuntimeException(nsfe);
}
// Create configuration based on the configuration template from remoteStore. Just add security and override secured endpoint
Configuration newConfig = cfgBuilder
.addServers(securedHotRodEndpoint)
.security()
.authentication()
.serverName(serverName) //define server name, should be specified in XML configuration on JDG side
.saslMechanism("DIGEST-MD5") // define SASL mechanism, in this example we use DIGEST with MD5 hash
.callbackHandler(new LoginHandler(username, password.toCharArray(), realm)) // define login handler, implementation defined
.enable()
.build();
final RemoteCacheManager remoteCacheManager = new RemoteCacheManager(newConfig);
managedManagers.put(securedHotRodEndpoint, remoteCacheManager);
return remoteCacheManager;
}
private static class LoginHandler implements CallbackHandler {
final private String login;
final private char[] password;
final private String realm;
private LoginHandler(String login, char[] password, String realm) {
this.login = login;
this.password = password;
this.realm = realm;
}
@Override
public void handle(Callback[] callbacks) throws IOException, UnsupportedCallbackException {
for (Callback callback : callbacks) {
if (callback instanceof NameCallback) {
((NameCallback) callback).setName(login);
} else if (callback instanceof PasswordCallback) {
((PasswordCallback) callback).setPassword(password);
} else if (callback instanceof RealmCallback) {
((RealmCallback) callback).setText(realm);
} else {
throw new UnsupportedCallbackException(callback);
}
}
}
}
}

View File

@ -47,7 +47,7 @@ public class TopologyInfo {
// name
private final String myNodeName;
// Used just if "site" is configured (typically in cross-dc environment). Otherwise null
// Used just if "site" is configured (typically in multi-site environment). Otherwise null
private final String mySiteName;
private final boolean isGeneratedNodeName;

View File

@ -38,7 +38,7 @@ public class InfinispanCacheCrlProvider implements CacheCrlProvider {
public void clearCache() {
crlCache.clear();
ClusterProvider cluster = session.getProvider(ClusterProvider.class);
cluster.notify(InfinispanCacheCrlProviderFactory.CRL_CLEAR_CACHE_EVENTS, ClearCacheEvent.getInstance(), true, ClusterProvider.DCNotify.ALL_DCS);
cluster.notify(InfinispanCacheCrlProviderFactory.CRL_CLEAR_CACHE_EVENTS, ClearCacheEvent.getInstance(), true);
}
@Override

View File

@ -38,7 +38,7 @@ public class InfinispanCachePublicKeyProvider implements CachePublicKeyProvider
public void clearCache() {
keys.clear();
ClusterProvider cluster = session.getProvider(ClusterProvider.class);
cluster.notify(InfinispanCachePublicKeyProviderFactory.KEYS_CLEAR_CACHE_EVENTS, ClearCacheEvent.getInstance(), true, ClusterProvider.DCNotify.ALL_DCS);
cluster.notify(InfinispanCachePublicKeyProviderFactory.KEYS_CLEAR_CACHE_EVENTS, ClearCacheEvent.getInstance(), true);
}
@Override

View File

@ -120,7 +120,7 @@ public class InfinispanPublicKeyStorageProvider implements PublicKeyStorageProvi
.peek(keys::remove)
.map(PublicKeyStorageInvalidationEvent::create)
.toList();
cluster.notify(InfinispanCachePublicKeyProviderFactory.PUBLIC_KEY_STORAGE_INVALIDATION_EVENT, events, true, ClusterProvider.DCNotify.ALL_DCS);
cluster.notify(InfinispanCachePublicKeyProviderFactory.PUBLIC_KEY_STORAGE_INVALIDATION_EVENT, events, true);
}
@Override

View File

@ -80,7 +80,6 @@ import org.keycloak.models.cache.infinispan.stream.InIdentityProviderPredicate;
import org.keycloak.models.cache.infinispan.stream.InRealmPredicate;
import org.keycloak.models.sessions.infinispan.changes.ReplaceFunction;
import org.keycloak.models.sessions.infinispan.changes.SessionEntityWrapper;
import org.keycloak.models.sessions.infinispan.changes.sessions.LastSessionRefreshEvent;
import org.keycloak.models.sessions.infinispan.changes.sessions.SessionData;
import org.keycloak.models.sessions.infinispan.entities.AuthenticatedClientSessionEntity;
import org.keycloak.models.sessions.infinispan.entities.AuthenticatedClientSessionStore;
@ -96,7 +95,6 @@ import org.keycloak.models.sessions.infinispan.entities.UserSessionEntity;
import org.keycloak.models.sessions.infinispan.events.RealmRemovedSessionEvent;
import org.keycloak.models.sessions.infinispan.events.RemoveAllUserLoginFailuresEvent;
import org.keycloak.models.sessions.infinispan.events.RemoveUserSessionsEvent;
import org.keycloak.models.sessions.infinispan.initializer.InitializerState;
import org.keycloak.models.sessions.infinispan.stream.AuthClientSessionSetMapper;
import org.keycloak.models.sessions.infinispan.stream.CollectionToStreamMapper;
import org.keycloak.models.sessions.infinispan.stream.GroupAndCountCollectorSupplier;
@ -150,14 +148,10 @@ import org.keycloak.storage.managers.UserStorageSyncManager;
ScopeUpdatedEvent.class,
ScopeRemovedEvent.class,
// models.sessions.infinispan.initializer package
InitializerState.class,
// models.sessions.infinispan.changes package
SessionEntityWrapper.class,
// models.sessions.infinispan.changes.sessions package
LastSessionRefreshEvent.class,
SessionData.class,
// models.cache.infinispan.authorization.stream package

View File

@ -215,7 +215,7 @@ public abstract class CacheManager {
public void sendInvalidationEvents(KeycloakSession session, Collection<InvalidationEvent> invalidationEvents, String eventKey) {
session.getProvider(ClusterProvider.class)
.notify(eventKey, invalidationEvents, true, ClusterProvider.DCNotify.ALL_DCS);
.notify(eventKey, invalidationEvents, true);
}

View File

@ -409,7 +409,7 @@ public class RealmAdapter implements CachedRealmModel {
@Override
protected void commitImpl() {
ClusterProvider cluster = session.getProvider(ClusterProvider.class);
cluster.notify(InfinispanUserCacheProviderFactory.USER_CLEAR_CACHE_EVENTS, ClearCacheEvent.getInstance(), false, ClusterProvider.DCNotify.ALL_DCS);
cluster.notify(InfinispanUserCacheProviderFactory.USER_CLEAR_CACHE_EVENTS, ClearCacheEvent.getInstance(), false);
}
@Override

View File

@ -22,10 +22,7 @@ import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.stream.Collectors;
import java.util.stream.Stream;
@ -186,7 +183,7 @@ public class RealmCacheSession implements CacheRealmProvider {
@Override
public void clear() {
ClusterProvider cluster = session.getProvider(ClusterProvider.class);
cluster.notify(InfinispanCacheRealmProviderFactory.REALM_CLEAR_CACHE_EVENTS, ClearCacheEvent.getInstance(), false, ClusterProvider.DCNotify.ALL_DCS);
cluster.notify(InfinispanCacheRealmProviderFactory.REALM_CLEAR_CACHE_EVENTS, ClearCacheEvent.getInstance(), false);
}
@Override

View File

@ -113,7 +113,7 @@ public class UserCacheSession implements UserCache, OnCreateComponent, OnUpdateC
public void clear() {
cache.clear();
ClusterProvider cluster = session.getProvider(ClusterProvider.class);
cluster.notify(InfinispanUserCacheProviderFactory.USER_CLEAR_CACHE_EVENTS, ClearCacheEvent.getInstance(), true, ClusterProvider.DCNotify.ALL_DCS);
cluster.notify(InfinispanUserCacheProviderFactory.USER_CLEAR_CACHE_EVENTS, ClearCacheEvent.getInstance(), true);
}
public UserProvider getDelegate() {

View File

@ -28,12 +28,10 @@ import org.keycloak.models.KeycloakSession;
import org.keycloak.models.RealmModel;
import org.keycloak.models.UserSessionModel;
import org.keycloak.models.session.UserSessionPersisterProvider;
import org.keycloak.models.sessions.infinispan.changes.SessionEntityWrapper;
import org.keycloak.models.sessions.infinispan.changes.ClientSessionUpdateTask;
import org.keycloak.models.sessions.infinispan.changes.SessionUpdateTask;
import org.keycloak.models.sessions.infinispan.changes.SessionsChangelogBasedTransaction;
import org.keycloak.models.sessions.infinispan.changes.Tasks;
import org.keycloak.models.sessions.infinispan.changes.sessions.CrossDCLastSessionRefreshChecker;
import org.keycloak.models.sessions.infinispan.entities.AuthenticatedClientSessionEntity;
import java.util.UUID;
@ -44,14 +42,13 @@ import java.util.UUID;
public class AuthenticatedClientSessionAdapter implements AuthenticatedClientSessionModel {
private final KeycloakSession kcSession;
private final SessionRefreshStore provider;
private AuthenticatedClientSessionEntity entity;
private final ClientModel client;
private final SessionsChangelogBasedTransaction<UUID, AuthenticatedClientSessionEntity> clientSessionUpdateTx;
private UserSessionModel userSession;
private boolean offline;
public AuthenticatedClientSessionAdapter(KeycloakSession kcSession, SessionRefreshStore provider,
public AuthenticatedClientSessionAdapter(KeycloakSession kcSession,
AuthenticatedClientSessionEntity entity, ClientModel client, UserSessionModel userSession,
SessionsChangelogBasedTransaction<UUID, AuthenticatedClientSessionEntity> clientSessionUpdateTx, boolean offline) {
if (userSession == null) {
@ -59,7 +56,6 @@ public class AuthenticatedClientSessionAdapter implements AuthenticatedClientSes
}
this.kcSession = kcSession;
this.provider = provider;
this.entity = entity;
this.userSession = userSession;
this.client = client;
@ -155,12 +151,6 @@ public class AuthenticatedClientSessionAdapter implements AuthenticatedClientSes
entity.setTimestamp(timestamp);
}
@Override
public CrossDCMessageStatus getCrossDCMessageStatus(SessionEntityWrapper<AuthenticatedClientSessionEntity> sessionWrapper) {
return new CrossDCLastSessionRefreshChecker(provider.getLastSessionRefreshStore(), provider.getOfflineLastSessionRefreshStore())
.shouldSaveClientSessionToRemoteCache(kcSession, client.getRealm(), sessionWrapper, userSession, offline, timestamp);
}
@Override
public boolean isOffline() {
return offline;

View File

@ -21,8 +21,6 @@ import org.infinispan.AdvancedCache;
import org.infinispan.Cache;
import org.infinispan.context.Flag;
import static org.keycloak.connections.infinispan.InfinispanUtil.getRemoteStores;
/**
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
*/
@ -38,35 +36,12 @@ public class CacheDecorators {
}
/**
* Adds {@link Flag#SKIP_CACHE_LOAD} and {@link Flag#SKIP_CACHE_STORE} flags to the cache.
* Adds {@link Flag#IGNORE_RETURN_VALUES} flag to the cache.
* @param cache
* @return Cache with the flags applied.
* @return Cache with the flag applied.
*/
public static <K, V> AdvancedCache<K, V> skipCacheLoadersIfRemoteStoreIsEnabled(Cache<K, V> cache) {
if (!getRemoteStores(cache).isEmpty()) {
// Disabling of the cache load and cache store is only needed when a remote store is used and handled separately.
return cache.getAdvancedCache().withFlags(Flag.SKIP_CACHE_LOAD, Flag.SKIP_CACHE_STORE);
} else {
// If there is no remote store, use write through for all stores of the cache.
// Mixing remote and non-remote caches is not supported.
return cache.getAdvancedCache().withFlags(Flag.SKIP_CACHE_LOAD);
}
}
/**
* Adds {@link Flag#SKIP_CACHE_STORE} flag to the cache.
* @param cache
* @return Cache with the flags applied.
*/
public static <K, V> AdvancedCache<K, V> skipCacheStoreIfRemoteCacheIsEnabled(Cache<K, V> cache) {
if (!getRemoteStores(cache).isEmpty()) {
// Disabling of the cache load and cache store is only needed when a remote store is used and handled separately.
return cache.getAdvancedCache().withFlags(Flag.SKIP_CACHE_STORE);
} else {
// If there is no remote store, use write through for all stores of the cache.
// Mixing remote and non-remote caches is not supported.
return cache.getAdvancedCache();
}
public static <K, V> AdvancedCache<K, V> ignoreReturnValues(Cache<K, V> cache) {
return cache.getAdvancedCache().withFlags(Flag.IGNORE_RETURN_VALUES);
}
}
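Elsewhere in this commit the new decorator replaces the remote-store-aware variants on write-only paths (for example when publishing work-cache events); a minimal usage sketch with placeholder key and value:

import java.util.concurrent.TimeUnit;
import org.infinispan.Cache;
import org.keycloak.models.sessions.infinispan.CacheDecorators;

class IgnoreReturnValuesUsage {
    // Write-only put: skip fetching the previous value, matching the event-publishing
    // call sites updated in this commit.
    static void publish(Cache<String, Object> workCache, String eventKey, Object wrappedEvent) {
        CacheDecorators.ignoreReturnValues(workCache)
                .put(eventKey, wrappedEvent, 120, TimeUnit.SECONDS);
    }
}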

View File

@ -37,7 +37,6 @@ import org.keycloak.models.sessions.infinispan.changes.Tasks;
import org.keycloak.models.sessions.infinispan.entities.RootAuthenticationSessionEntity;
import org.keycloak.models.sessions.infinispan.events.RealmRemovedSessionEvent;
import org.keycloak.models.sessions.infinispan.events.SessionEventsSenderTransaction;
import org.keycloak.models.sessions.infinispan.remotestore.RemoteCacheInvoker;
import org.keycloak.models.sessions.infinispan.stream.SessionWrapperPredicate;
import org.keycloak.models.sessions.infinispan.util.InfinispanKeyGenerator;
import org.keycloak.models.sessions.infinispan.util.SessionTimeouts;
@ -60,14 +59,14 @@ public class InfinispanAuthenticationSessionProvider implements AuthenticationSe
protected final InfinispanChangelogBasedTransaction<String, RootAuthenticationSessionEntity> sessionTx;
protected final SessionEventsSenderTransaction clusterEventsSenderTx;
public InfinispanAuthenticationSessionProvider(KeycloakSession session, RemoteCacheInvoker remoteCacheInvoker, InfinispanKeyGenerator keyGenerator,
Cache<String, SessionEntityWrapper<RootAuthenticationSessionEntity>> cache, int authSessionsLimit, SerializeExecutionsByKey<String> serializer) {
public InfinispanAuthenticationSessionProvider(KeycloakSession session, InfinispanKeyGenerator keyGenerator,
Cache<String, SessionEntityWrapper<RootAuthenticationSessionEntity>> cache, int authSessionsLimit, SerializeExecutionsByKey<String> serializer) {
this.session = session;
this.keyGenerator = keyGenerator;
this.authSessionsLimit = authSessionsLimit;
this.cache = cache;
this.sessionTx = new InfinispanChangelogBasedTransaction<>(session, cache, remoteCacheInvoker, SessionTimeouts::getAuthSessionLifespanMS, SessionTimeouts::getAuthSessionMaxIdleMS, serializer);
this.sessionTx = new InfinispanChangelogBasedTransaction<>(session, cache, SessionTimeouts::getAuthSessionLifespanMS, SessionTimeouts::getAuthSessionMaxIdleMS, serializer);
this.clusterEventsSenderTx = new SessionEventsSenderTransaction(session);
session.getTransactionManager().enlistAfterCompletion(sessionTx);
@ -118,8 +117,8 @@ public class InfinispanAuthenticationSessionProvider implements AuthenticationSe
public void onRealmRemoved(RealmModel realm) {
// Send message to all DCs. The remoteCache will notify client listeners on all DCs for remove authentication sessions
clusterEventsSenderTx.addEvent(
RealmRemovedSessionEvent.createEvent(RealmRemovedSessionEvent.class, InfinispanAuthenticationSessionProviderFactory.REALM_REMOVED_AUTHSESSION_EVENT, session, realm.getId(), false),
ClusterProvider.DCNotify.ALL_DCS);
RealmRemovedSessionEvent.createEvent(RealmRemovedSessionEvent.class, InfinispanAuthenticationSessionProviderFactory.REALM_REMOVED_AUTHSESSION_EVENT, session, realm.getId())
);
}
protected void onRealmRemovedEvent(String realmId) {

View File

@ -31,7 +31,6 @@ import org.keycloak.connections.infinispan.InfinispanConnectionProvider;
import org.keycloak.infinispan.util.InfinispanUtils;
import org.keycloak.models.KeycloakSession;
import org.keycloak.models.KeycloakSessionFactory;
import org.keycloak.models.KeycloakSessionTask;
import org.keycloak.models.cache.infinispan.events.AuthenticationSessionAuthNoteUpdateEvent;
import org.keycloak.models.sessions.infinispan.changes.SerializeExecutionsByKey;
import org.keycloak.models.sessions.infinispan.changes.SessionEntityWrapper;
@ -39,10 +38,6 @@ import org.keycloak.models.sessions.infinispan.entities.AuthenticationSessionEnt
import org.keycloak.models.sessions.infinispan.entities.RootAuthenticationSessionEntity;
import org.keycloak.models.sessions.infinispan.events.AbstractAuthSessionClusterListener;
import org.keycloak.models.sessions.infinispan.events.RealmRemovedSessionEvent;
import org.keycloak.models.sessions.infinispan.initializer.InfinispanCacheInitializer;
import org.keycloak.models.sessions.infinispan.initializer.InitializerState;
import org.keycloak.models.sessions.infinispan.remotestore.RemoteCacheInvoker;
import org.keycloak.models.sessions.infinispan.remotestore.RemoteCacheSessionsLoader;
import org.keycloak.models.sessions.infinispan.util.InfinispanKeyGenerator;
import org.keycloak.models.utils.KeycloakModelUtils;
import org.keycloak.models.utils.PostMigrationEvent;
@ -74,7 +69,6 @@ public class InfinispanAuthenticationSessionProviderFactory implements Authentic
public static final String REALM_REMOVED_AUTHSESSION_EVENT = "REALM_REMOVED_EVENT_AUTHSESSIONS";
private RemoteCacheInvoker remoteCacheInvoker;
SerializeExecutionsByKey<String> serializer = new SerializeExecutionsByKey<>();
@Override
@ -90,7 +84,6 @@ public class InfinispanAuthenticationSessionProviderFactory implements Authentic
@Override
public void postInit(KeycloakSessionFactory factory) {
this.remoteCacheInvoker = new RemoteCacheInvoker();
keyGenerator = new InfinispanKeyGenerator();
factory.register(new ProviderEventListener() {
@ -139,7 +132,7 @@ public class InfinispanAuthenticationSessionProviderFactory implements Authentic
InfinispanConnectionProvider connections = session.getProvider(InfinispanConnectionProvider.class);
Cache<String, SessionEntityWrapper<RootAuthenticationSessionEntity>> cache = connections.getCache(InfinispanConnectionProvider.AUTHENTICATION_SESSIONS_CACHE_NAME);
this.authSessionsCache = cache;
return new InfinispanAuthenticationSessionProvider(session, remoteCacheInvoker, keyGenerator, cache, authSessionsLimit, serializer);
return new InfinispanAuthenticationSessionProvider(session, keyGenerator, cache, authSessionsLimit, serializer);
}
private void updateAuthNotes(ClusterEvent clEvent) {

View File

@ -240,7 +240,7 @@ public class InfinispanKeycloakTransaction implements KeycloakTransaction {
}
}
// Ignore return values. Should have better performance within cluster / cross-dc env
// Ignore return values. Should have better performance within cluster
private static <K, V> BasicCache<K, V> decorateCache(BasicCache<K, V> cache) {
if (cache instanceof RemoteCache)
return cache;
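The hunk truncates decorateCache(), so the following is only a sketch of the idea behind it: remote caches are returned as-is (the part visible in the diff), while the embedded branch is assumed to suppress return values with Infinispan flags, matching the updated comment.

import org.infinispan.Cache;
import org.infinispan.client.hotrod.RemoteCache;
import org.infinispan.commons.api.BasicCache;
import org.infinispan.context.Flag;

class DecorateCacheSketch {

    static <K, V> BasicCache<K, V> decorateCache(BasicCache<K, V> cache) {
        if (cache instanceof RemoteCache) {
            return cache; // shown in the diff: remote caches are returned unchanged
        }
        // Assumed embedded branch: ignore return values for better performance within the cluster.
        return ((Cache<K, V>) cache).getAdvancedCache().withFlags(Flag.IGNORE_RETURN_VALUES);
    }
}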

View File

@ -19,13 +19,11 @@ package org.keycloak.models.sessions.infinispan;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;
import org.infinispan.client.hotrod.exceptions.HotRodClientException;
import org.infinispan.commons.api.BasicCache;
import org.jboss.logging.Logger;
import org.keycloak.common.util.Time;
import org.keycloak.connections.infinispan.InfinispanUtil;
import org.keycloak.models.KeycloakSession;
import org.keycloak.models.ModelException;
import org.keycloak.models.SingleUseObjectProvider;
@ -44,11 +42,11 @@ public class InfinispanSingleUseObjectProvider implements SingleUseObjectProvide
public static final Logger logger = Logger.getLogger(InfinispanSingleUseObjectProvider.class);
private final KeycloakSession session;
private final Supplier<BasicCache<String, SingleUseObjectValueEntity>> singleUseObjectCache;
private final BasicCache<String, SingleUseObjectValueEntity> singleUseObjectCache;
private final boolean persistRevokedTokens;
private final InfinispanKeycloakTransaction tx;
public InfinispanSingleUseObjectProvider(KeycloakSession session, Supplier<BasicCache<String, SingleUseObjectValueEntity>> singleUseObjectCache, boolean persistRevokedTokens) {
public InfinispanSingleUseObjectProvider(KeycloakSession session, BasicCache<String, SingleUseObjectValueEntity> singleUseObjectCache, boolean persistRevokedTokens) {
this.session = session;
this.singleUseObjectCache = singleUseObjectCache;
this.persistRevokedTokens = persistRevokedTokens;
@ -60,8 +58,7 @@ public class InfinispanSingleUseObjectProvider implements SingleUseObjectProvide
public void put(String key, long lifespanSeconds, Map<String, String> notes) {
SingleUseObjectValueEntity tokenValue = new SingleUseObjectValueEntity(notes);
try {
BasicCache<String, SingleUseObjectValueEntity> cache = singleUseObjectCache.get();
tx.put(cache, key, tokenValue, InfinispanUtil.toHotrodTimeMs(cache, Time.toMillis(lifespanSeconds)), TimeUnit.MILLISECONDS);
tx.put(singleUseObjectCache, key, tokenValue, Time.toMillis(lifespanSeconds), TimeUnit.MILLISECONDS);
} catch (HotRodClientException re) {
// No need to retry. The hotrod (remoteCache) has its own retries in case a random network error happens.
if (logger.isDebugEnabled()) {
@ -85,8 +82,7 @@ public class InfinispanSingleUseObjectProvider implements SingleUseObjectProvide
SingleUseObjectValueEntity singleUseObjectValueEntity;
BasicCache<String, SingleUseObjectValueEntity> cache = singleUseObjectCache.get();
singleUseObjectValueEntity = tx.get(cache, key);
singleUseObjectValueEntity = tx.get(singleUseObjectCache, key);
return singleUseObjectValueEntity != null ? singleUseObjectValueEntity.getNotes() : null;
}
@ -97,8 +93,7 @@ public class InfinispanSingleUseObjectProvider implements SingleUseObjectProvide
}
try {
BasicCache<String, SingleUseObjectValueEntity> cache = singleUseObjectCache.get();
SingleUseObjectValueEntity existing = cache.remove(key);
SingleUseObjectValueEntity existing = singleUseObjectCache.remove(key);
return existing == null ? null : existing.getNotes();
} catch (HotRodClientException re) {
// No need to retry. The hotrod (remoteCache) has its own retries in case a random network error happens.
@ -117,8 +112,7 @@ public class InfinispanSingleUseObjectProvider implements SingleUseObjectProvide
throw new ModelException("Revoked tokens can't be replaced");
}
BasicCache<String, SingleUseObjectValueEntity> cache = singleUseObjectCache.get();
return cache.replace(key, new SingleUseObjectValueEntity(notes)) != null;
return singleUseObjectCache.replace(key, new SingleUseObjectValueEntity(notes)) != null;
}
@Override
@ -128,11 +122,9 @@ public class InfinispanSingleUseObjectProvider implements SingleUseObjectProvide
}
SingleUseObjectValueEntity tokenValue = new SingleUseObjectValueEntity(null);
BasicCache<String, SingleUseObjectValueEntity> cache = singleUseObjectCache.get();
try {
long lifespanMs = InfinispanUtil.toHotrodTimeMs(cache, Time.toMillis(lifespanInSeconds));
SingleUseObjectValueEntity existing = cache.putIfAbsent(key, tokenValue, lifespanMs, TimeUnit.MILLISECONDS);
SingleUseObjectValueEntity existing = singleUseObjectCache.putIfAbsent(key, tokenValue, Time.toMillis(lifespanInSeconds), TimeUnit.MILLISECONDS);
return existing == null;
} catch (HotRodClientException re) {
// No need to retry. The hotrod (remoteCache) has its own retries in case a random network error happens.
@ -146,8 +138,7 @@ public class InfinispanSingleUseObjectProvider implements SingleUseObjectProvide
@Override
public boolean contains(String key) {
BasicCache<String, SingleUseObjectValueEntity> cache = singleUseObjectCache.get();
return cache.containsKey(key);
return singleUseObjectCache.containsKey(key);
}
@Override
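A generic sketch of the BasicCache operations the provider now calls directly, passing plain millisecond lifespans instead of the removed InfinispanUtil.toHotrodTimeMs conversion. The real code stores SingleUseObjectValueEntity values and uses org.keycloak.common.util.Time; String values are used here only to keep the example self-contained.

import java.util.concurrent.TimeUnit;
import org.infinispan.commons.api.BasicCache;

class SingleUseCacheOpsSketch {

    boolean exercise(BasicCache<String, String> cache, String key, long lifespanSeconds) {
        long lifespanMs = TimeUnit.SECONDS.toMillis(lifespanSeconds);

        // put with a lifespan in milliseconds; no Hot Rod time conversion is needed any more
        cache.put(key, "value", lifespanMs, TimeUnit.MILLISECONDS);

        // putIfAbsent returns the previous value, so null means this caller won the race
        boolean added = cache.putIfAbsent(key, "other", lifespanMs, TimeUnit.MILLISECONDS) == null;

        // replace only succeeds while the key is still present
        boolean replaced = cache.replace(key, "new-value") != null;

        // remove returns the removed value (or null); containsKey is a plain lookup
        String previous = cache.remove(key);
        return added || replaced || previous != null || cache.containsKey(key);
    }
}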

View File

@ -27,14 +27,11 @@ import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;
import org.infinispan.Cache;
import org.infinispan.client.hotrod.Flag;
import org.infinispan.client.hotrod.RemoteCache;
import org.infinispan.commons.api.BasicCache;
import org.jboss.logging.Logger;
import org.keycloak.Config;
import org.keycloak.common.util.Time;
import org.keycloak.connections.infinispan.InfinispanConnectionProvider;
import org.keycloak.connections.infinispan.InfinispanUtil;
import org.keycloak.infinispan.util.InfinispanUtils;
import org.keycloak.models.KeycloakSession;
import org.keycloak.models.KeycloakSessionFactory;
@ -62,7 +59,7 @@ public class InfinispanSingleUseObjectProviderFactory implements SingleUseObject
private static final Logger LOG = Logger.getLogger(InfinispanSingleUseObjectProviderFactory.class);
protected volatile Supplier<BasicCache<String, SingleUseObjectValueEntity>> singleUseObjectCache;
protected BasicCache<String, SingleUseObjectValueEntity> singleUseObjectCache;
private volatile boolean initialized;
private boolean persistRevokedTokens;
@ -81,16 +78,7 @@ public class InfinispanSingleUseObjectProviderFactory implements SingleUseObject
static Supplier<BasicCache<String, SingleUseObjectValueEntity>> getSingleUseObjectCache(KeycloakSession session) {
InfinispanConnectionProvider connections = session.getProvider(InfinispanConnectionProvider.class);
Cache cache = connections.getCache(InfinispanConnectionProvider.ACTION_TOKEN_CACHE);
RemoteCache remoteCache = InfinispanUtil.getRemoteCache(cache);
if (remoteCache != null) {
LOG.debugf("Having remote stores. Using remote cache '%s' for single-use cache of token", remoteCache.getName());
return () -> remoteCache.withFlags(Flag.FORCE_RETURN_VALUE);
} else {
LOG.debugf("Not having remote stores. Using basic cache '%s' for single-use cache of token", cache.getName());
return () -> cache;
}
return () -> cache;
}
@Override
@ -103,17 +91,16 @@ public class InfinispanSingleUseObjectProviderFactory implements SingleUseObject
synchronized (this) {
if (!initialized) {
RevokedTokenPersisterProvider provider = session.getProvider(RevokedTokenPersisterProvider.class);
BasicCache<String, SingleUseObjectValueEntity> cache = singleUseObjectCache.get();
if (cache.get(LOADED) == null) {
if (singleUseObjectCache.get(LOADED) == null) {
// in a cluster, multiple Keycloak instances might load the same data in parallel, but that wouldn't matter
provider.getAllRevokedTokens().forEach(revokedToken -> {
long lifespanSeconds = revokedToken.expiry() - Time.currentTime();
if (lifespanSeconds > 0) {
cache.put(revokedToken.tokenId() + SingleUseObjectProvider.REVOKED_KEY, new SingleUseObjectValueEntity(Collections.emptyMap()),
InfinispanUtil.toHotrodTimeMs(cache, Time.toMillis(lifespanSeconds)), TimeUnit.MILLISECONDS);
singleUseObjectCache.put(revokedToken.tokenId() + SingleUseObjectProvider.REVOKED_KEY, new SingleUseObjectValueEntity(Collections.emptyMap()),
Time.toMillis(lifespanSeconds), TimeUnit.MILLISECONDS);
}
});
cache.put(LOADED, new SingleUseObjectValueEntity(Collections.emptyMap()));
singleUseObjectCache.put(LOADED, new SingleUseObjectValueEntity(Collections.emptyMap()));
}
initialized = true;
}
@ -126,7 +113,8 @@ public class InfinispanSingleUseObjectProviderFactory implements SingleUseObject
// It is necessary to put the cache initialization here, otherwise the cache would be initialized lazily, which
// means listeners would start only after the first cache initialization - that would be too late
if (singleUseObjectCache == null) {
this.singleUseObjectCache = getSingleUseObjectCache(factory.create());
InfinispanConnectionProvider connections = factory.create().getProvider(InfinispanConnectionProvider.class);
singleUseObjectCache = connections.getCache(InfinispanConnectionProvider.ACTION_TOKEN_CACHE);
}
if (persistRevokedTokens) {
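Sketch of the simplified lookup in postInit() shown above: the action-token cache is always resolved from the embedded InfinispanConnectionProvider and the remote-cache branch of getSingleUseObjectCache is gone. The generic helper below mirrors the assignment in the diff and is illustrative only.

import org.infinispan.commons.api.BasicCache;
import org.keycloak.connections.infinispan.InfinispanConnectionProvider;
import org.keycloak.models.KeycloakSessionFactory;

class ActionTokenCacheLookupSketch {

    <V> BasicCache<String, V> lookup(KeycloakSessionFactory factory) {
        // factory.create() opens a session, exactly as in the diff; the embedded cache is fetched by name
        InfinispanConnectionProvider connections =
                factory.create().getProvider(InfinispanConnectionProvider.class);
        return connections.getCache(InfinispanConnectionProvider.ACTION_TOKEN_CACHE);
    }
}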

View File

@ -18,7 +18,6 @@ package org.keycloak.models.sessions.infinispan;
import org.infinispan.Cache;
import org.jboss.logging.Logger;
import org.keycloak.cluster.ClusterProvider;
import org.keycloak.models.KeycloakSession;
import org.keycloak.models.UserLoginFailureProvider;
import org.keycloak.models.RealmModel;
@ -33,7 +32,6 @@ import org.keycloak.models.sessions.infinispan.entities.LoginFailureEntity;
import org.keycloak.models.sessions.infinispan.entities.LoginFailureKey;
import org.keycloak.models.sessions.infinispan.events.RemoveAllUserLoginFailuresEvent;
import org.keycloak.models.sessions.infinispan.events.SessionEventsSenderTransaction;
import org.keycloak.models.sessions.infinispan.remotestore.RemoteCacheInvoker;
import org.keycloak.models.sessions.infinispan.stream.Mappers;
import org.keycloak.models.sessions.infinispan.stream.SessionWrapperPredicate;
import org.keycloak.models.sessions.infinispan.util.FuturesHelper;
@ -59,12 +57,11 @@ public class InfinispanUserLoginFailureProvider implements UserLoginFailureProvi
protected final SessionEventsSenderTransaction clusterEventsSenderTx;
public InfinispanUserLoginFailureProvider(KeycloakSession session,
RemoteCacheInvoker remoteCacheInvoker,
Cache<LoginFailureKey, SessionEntityWrapper<LoginFailureEntity>> loginFailureCache,
SerializeExecutionsByKey<LoginFailureKey> serializer) {
this.session = session;
this.loginFailureCache = loginFailureCache;
this.loginFailuresTx = new InfinispanChangelogBasedTransaction<>(session, loginFailureCache, remoteCacheInvoker, SessionTimeouts::getLoginFailuresLifespanMs, SessionTimeouts::getLoginFailuresMaxIdleMs, serializer);
this.loginFailuresTx = new InfinispanChangelogBasedTransaction<>(session, loginFailureCache, SessionTimeouts::getLoginFailuresLifespanMs, SessionTimeouts::getLoginFailuresMaxIdleMs, serializer);
this.clusterEventsSenderTx = new SessionEventsSenderTransaction(session);
session.getTransactionManager().enlistAfterCompletion(clusterEventsSenderTx);
@ -107,8 +104,8 @@ public class InfinispanUserLoginFailureProvider implements UserLoginFailureProvi
log.tracef("removeAllUserLoginFailures(%s)%s", realm, getShortStackTrace());
clusterEventsSenderTx.addEvent(
RemoveAllUserLoginFailuresEvent.createEvent(RemoveAllUserLoginFailuresEvent.class, InfinispanUserLoginFailureProviderFactory.REMOVE_ALL_LOGIN_FAILURES_EVENT, session, realm.getId(), true),
ClusterProvider.DCNotify.LOCAL_DC_ONLY);
RemoveAllUserLoginFailuresEvent.createEvent(RemoveAllUserLoginFailuresEvent.class, InfinispanUserLoginFailureProviderFactory.REMOVE_ALL_LOGIN_FAILURES_EVENT, session, realm.getId())
);
}
protected void removeAllLocalUserLoginFailuresEvent(String realmId) {
@ -118,9 +115,9 @@ public class InfinispanUserLoginFailureProvider implements UserLoginFailureProvi
Cache<LoginFailureKey, SessionEntityWrapper<LoginFailureEntity>> localCache = CacheDecorators.localCache(loginFailureCache);
Cache<LoginFailureKey, SessionEntityWrapper<LoginFailureEntity>> localCacheStoreIgnore = CacheDecorators.skipCacheLoadersIfRemoteStoreIsEnabled(localCache);
localCacheStoreIgnore
// Go through local cache data only
// entries from other nodes will be removed by each instance receiving the event
localCache
.entrySet()
.stream()
.filter(SessionWrapperPredicate.create(realmId))
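Sketch of the local-only removal pattern used above. The hunk cuts off after the filter, so the terminal operations below (removing by key) are one plausible way to finish the pipeline rather than the commit's exact code, and the package of CacheDecorators is assumed.

import java.util.Map;
import org.infinispan.Cache;
import org.keycloak.models.sessions.infinispan.CacheDecorators;
import org.keycloak.models.sessions.infinispan.changes.SessionEntityWrapper;
import org.keycloak.models.sessions.infinispan.entities.LoginFailureEntity;
import org.keycloak.models.sessions.infinispan.entities.LoginFailureKey;
import org.keycloak.models.sessions.infinispan.stream.SessionWrapperPredicate;

class LocalLoginFailureRemovalSketch {

    void removeLocal(Cache<LoginFailureKey, SessionEntityWrapper<LoginFailureEntity>> loginFailureCache,
                     String realmId) {
        // Work on this node's data only; other nodes remove their own entries
        // when they receive the same cluster event.
        Cache<LoginFailureKey, SessionEntityWrapper<LoginFailureEntity>> localCache =
                CacheDecorators.localCache(loginFailureCache);
        localCache.entrySet().stream()
                .filter(SessionWrapperPredicate.create(realmId))
                .map(Map.Entry::getKey) // the diff ends at the filter; removal by key is assumed
                .forEach(localCache::remove);
    }
}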

View File

@ -17,19 +17,13 @@
package org.keycloak.models.sessions.infinispan;
import org.infinispan.Cache;
import org.infinispan.client.hotrod.RemoteCache;
import org.infinispan.persistence.remote.RemoteStore;
import org.jboss.logging.Logger;
import org.keycloak.Config;
import org.keycloak.cluster.ClusterProvider;
import org.keycloak.common.util.Time;
import org.keycloak.connections.infinispan.InfinispanConnectionProvider;
import org.keycloak.connections.infinispan.InfinispanUtil;
import org.keycloak.infinispan.util.InfinispanUtils;
import org.keycloak.models.KeycloakSession;
import org.keycloak.models.KeycloakSessionFactory;
import org.keycloak.models.KeycloakSessionTask;
import org.keycloak.models.RealmModel;
import org.keycloak.models.UserLoginFailureProvider;
import org.keycloak.models.UserLoginFailureProviderFactory;
import org.keycloak.models.UserModel;
@ -37,36 +31,22 @@ import org.keycloak.models.sessions.infinispan.changes.SerializeExecutionsByKey;
import org.keycloak.models.sessions.infinispan.changes.SessionEntityWrapper;
import org.keycloak.models.sessions.infinispan.entities.LoginFailureEntity;
import org.keycloak.models.sessions.infinispan.entities.LoginFailureKey;
import org.keycloak.models.sessions.infinispan.entities.SessionEntity;
import org.keycloak.models.sessions.infinispan.events.AbstractUserSessionClusterListener;
import org.keycloak.models.sessions.infinispan.events.RealmRemovedSessionEvent;
import org.keycloak.models.sessions.infinispan.events.RemoveAllUserLoginFailuresEvent;
import org.keycloak.models.sessions.infinispan.initializer.InfinispanCacheInitializer;
import org.keycloak.models.sessions.infinispan.initializer.InitializerState;
import org.keycloak.models.sessions.infinispan.remotestore.RemoteCacheInvoker;
import org.keycloak.models.sessions.infinispan.remotestore.RemoteCacheSessionListener;
import org.keycloak.models.sessions.infinispan.remotestore.RemoteCacheSessionsLoader;
import org.keycloak.models.sessions.infinispan.util.SessionTimeouts;
import org.keycloak.models.utils.KeycloakModelUtils;
import org.keycloak.models.utils.PostMigrationEvent;
import org.keycloak.provider.EnvironmentDependentProviderFactory;
import java.util.Set;
/**
* @author <a href="mailto:mkanis@redhat.com">Martin Kanis</a>
*/
public class InfinispanUserLoginFailureProviderFactory implements UserLoginFailureProviderFactory<InfinispanUserLoginFailureProvider>, EnvironmentDependentProviderFactory {
private static final Logger log = Logger.getLogger(InfinispanUserLoginFailureProviderFactory.class);
public static final String REALM_REMOVED_SESSION_EVENT = "REALM_REMOVED_EVENT_SESSIONS";
public static final String REMOVE_ALL_LOGIN_FAILURES_EVENT = "REMOVE_ALL_LOGIN_FAILURES_EVENT";
private Config.Scope config;
private RemoteCacheInvoker remoteCacheInvoker;
SerializeExecutionsByKey<LoginFailureKey> serializer = new SerializeExecutionsByKey<>();
@Override
@ -74,25 +54,18 @@ public class InfinispanUserLoginFailureProviderFactory implements UserLoginFailu
InfinispanConnectionProvider connections = session.getProvider(InfinispanConnectionProvider.class);
Cache<LoginFailureKey, SessionEntityWrapper<LoginFailureEntity>> loginFailures = connections.getCache(InfinispanConnectionProvider.LOGIN_FAILURE_CACHE_NAME);
return new InfinispanUserLoginFailureProvider(session, remoteCacheInvoker, loginFailures, serializer);
return new InfinispanUserLoginFailureProvider(session, loginFailures, serializer);
}
@Override
public void init(Config.Scope config) {
this.config = config;
}
@Override
public void postInit(final KeycloakSessionFactory factory) {
this.remoteCacheInvoker = new RemoteCacheInvoker();
factory.register(event -> {
if (event instanceof PostMigrationEvent) {
KeycloakModelUtils.runJobInTransaction(factory, (KeycloakSession session) -> {
checkRemoteCaches(session);
registerClusterListeners(session);
loadLoginFailuresFromRemoteCaches(session);
});
KeycloakModelUtils.runJobInTransaction(factory, this::registerClusterListeners);
} else if (event instanceof UserModel.UserRemovedEvent userRemovedEvent) {
UserLoginFailureProvider provider = userRemovedEvent.getKeycloakSession().getProvider(UserLoginFailureProvider.class, getId());
provider.removeUserLoginFailure(userRemovedEvent.getRealm(), userRemovedEvent.getUser().getId());
@ -130,80 +103,6 @@ public class InfinispanUserLoginFailureProviderFactory implements UserLoginFailu
log.debug("Registered cluster listeners");
}
protected void checkRemoteCaches(KeycloakSession session) {
InfinispanConnectionProvider ispn = session.getProvider(InfinispanConnectionProvider.class);
Cache<LoginFailureKey, SessionEntityWrapper<LoginFailureEntity>> loginFailuresCache = ispn.getCache(InfinispanConnectionProvider.LOGIN_FAILURE_CACHE_NAME);
checkRemoteCache(session, loginFailuresCache, SessionTimeouts::getLoginFailuresLifespanMs, SessionTimeouts::getLoginFailuresMaxIdleMs);
}
private <K, V extends SessionEntity> RemoteCache checkRemoteCache(KeycloakSession session, Cache<K, SessionEntityWrapper<V>> ispnCache,
SessionFunction<V> lifespanMsLoader, SessionFunction<V> maxIdleTimeMsLoader) {
Set<RemoteStore> remoteStores = InfinispanUtil.getRemoteStores(ispnCache);
if (remoteStores.isEmpty()) {
log.debugf("No remote store configured for cache '%s'", ispnCache.getName());
return null;
} else {
log.infof("Remote store configured for cache '%s'", ispnCache.getName());
RemoteCache<K, SessionEntityWrapper<V>> remoteCache = (RemoteCache) remoteStores.iterator().next().getRemoteCache();
if (remoteCache == null) {
throw new IllegalStateException("No remote cache available for the infinispan cache: " + ispnCache.getName());
}
remoteCacheInvoker.addRemoteCache(ispnCache.getName(), remoteCache);
RemoteCacheSessionListener hotrodListener = RemoteCacheSessionListener.createListener(session, ispnCache, remoteCache, lifespanMsLoader, maxIdleTimeMsLoader, null);
remoteCache.addClientListener(hotrodListener);
return remoteCache;
}
}
// Max count of worker errors. Initialization will end with exception when this number is reached
private int getMaxErrors() {
return config.getInt("maxErrors", 20);
}
// Count of sessions to be computed in each segment
private int getSessionsPerSegment() {
return config.getInt("sessionsPerSegment", 64);
}
private void loadLoginFailuresFromRemoteCaches(KeycloakSession session) {
for (String cacheName : remoteCacheInvoker.getRemoteCacheNames()) {
loadLoginFailuresFromRemoteCaches(session.getKeycloakSessionFactory(), cacheName, getSessionsPerSegment(), getMaxErrors());
}
}
private int getStalledTimeoutInSeconds(int defaultTimeout) {
return config.getInt("stalledTimeoutInSeconds", defaultTimeout);
}
private void loadLoginFailuresFromRemoteCaches(final KeycloakSessionFactory sessionFactory, String cacheName, final int sessionsPerSegment, final int maxErrors) {
log.debugf("Check pre-loading sessions from remote cache '%s'", cacheName);
KeycloakModelUtils.runJobInTransaction(sessionFactory, new KeycloakSessionTask() {
@Override
public void run(KeycloakSession session) {
InfinispanConnectionProvider connections = session.getProvider(InfinispanConnectionProvider.class);
Cache<String, InitializerState> workCache = connections.getCache(InfinispanConnectionProvider.WORK_CACHE_NAME);
int defaultStateTransferTimeout = (int) (connections.getCache(InfinispanConnectionProvider.LOGIN_FAILURE_CACHE_NAME)
.getCacheConfiguration().clustering().stateTransfer().timeout() / 1000);
InfinispanCacheInitializer initializer = new InfinispanCacheInitializer(sessionFactory, workCache,
new RemoteCacheSessionsLoader(cacheName, sessionsPerSegment), "remoteCacheLoad::" + cacheName, maxErrors,
getStalledTimeoutInSeconds(defaultStateTransferTimeout));
initializer.loadSessions();
}
});
log.debugf("Pre-loading login failures from remote cache '%s' finished", cacheName);
}
@Override
public void close() {
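Sketch of the slimmed-down postInit() wiring visible above: on PostMigrationEvent the factory only registers its cluster listeners inside a transaction; remote-cache detection and login-failure preloading are gone. The empty method body stands in for code the commit leaves untouched.

import org.keycloak.models.KeycloakSession;
import org.keycloak.models.KeycloakSessionFactory;
import org.keycloak.models.utils.KeycloakModelUtils;
import org.keycloak.models.utils.PostMigrationEvent;

class LoginFailurePostInitSketch {

    void postInit(KeycloakSessionFactory factory) {
        factory.register(event -> {
            if (event instanceof PostMigrationEvent) {
                // single-step startup: just register the cluster listeners
                KeycloakModelUtils.runJobInTransaction(factory, this::registerClusterListeners);
            }
        });
    }

    void registerClusterListeners(KeycloakSession session) {
        // listener registration itself is unchanged by the commit
    }
}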

View File

@ -41,7 +41,6 @@ import org.infinispan.client.hotrod.RemoteCache;
import org.infinispan.client.hotrod.exceptions.HotRodClientException;
import org.infinispan.commons.api.BasicCache;
import org.infinispan.commons.util.concurrent.CompletionStages;
import org.infinispan.context.Flag;
import org.infinispan.stream.CacheCollectors;
import org.jboss.logging.Logger;
import org.keycloak.cluster.ClusterProvider;
@ -50,7 +49,6 @@ import org.keycloak.common.Profile.Feature;
import org.keycloak.common.util.Retry;
import org.keycloak.common.util.Time;
import org.keycloak.connections.infinispan.InfinispanConnectionProvider;
import org.keycloak.connections.infinispan.InfinispanUtil;
import org.keycloak.models.AuthenticatedClientSessionModel;
import org.keycloak.models.ClientModel;
import org.keycloak.models.KeycloakSession;
@ -67,7 +65,6 @@ import org.keycloak.models.sessions.infinispan.changes.SerializeExecutionsByKey;
import org.keycloak.models.sessions.infinispan.changes.SessionEntityWrapper;
import org.keycloak.models.sessions.infinispan.changes.SessionUpdateTask;
import org.keycloak.models.sessions.infinispan.changes.Tasks;
import org.keycloak.models.sessions.infinispan.changes.sessions.CrossDCLastSessionRefreshStore;
import org.keycloak.models.sessions.infinispan.changes.sessions.PersisterLastSessionRefreshStore;
import org.keycloak.models.sessions.infinispan.entities.AuthenticatedClientSessionEntity;
import org.keycloak.models.sessions.infinispan.entities.AuthenticatedClientSessionStore;
@ -76,7 +73,6 @@ import org.keycloak.models.sessions.infinispan.entities.UserSessionEntity;
import org.keycloak.models.sessions.infinispan.events.RealmRemovedSessionEvent;
import org.keycloak.models.sessions.infinispan.events.RemoveUserSessionsEvent;
import org.keycloak.models.sessions.infinispan.events.SessionEventsSenderTransaction;
import org.keycloak.models.sessions.infinispan.remotestore.RemoteCacheInvoker;
import org.keycloak.models.sessions.infinispan.stream.CollectionToStreamMapper;
import org.keycloak.models.sessions.infinispan.stream.GroupAndCountCollectorSupplier;
import org.keycloak.models.sessions.infinispan.stream.Mappers;
@ -110,8 +106,6 @@ public class InfinispanUserSessionProvider implements UserSessionProvider, Sessi
protected final SessionEventsSenderTransaction clusterEventsSenderTx;
protected final CrossDCLastSessionRefreshStore lastSessionRefreshStore;
protected final CrossDCLastSessionRefreshStore offlineLastSessionRefreshStore;
protected final PersisterLastSessionRefreshStore persisterLastSessionRefreshStore;
protected final InfinispanKeyGenerator keyGenerator;
@ -121,9 +115,6 @@ public class InfinispanUserSessionProvider implements UserSessionProvider, Sessi
protected final SessionFunction offlineClientSessionCacheEntryLifespanAdjuster;
public InfinispanUserSessionProvider(KeycloakSession session,
RemoteCacheInvoker remoteCacheInvoker,
CrossDCLastSessionRefreshStore lastSessionRefreshStore,
CrossDCLastSessionRefreshStore offlineLastSessionRefreshStore,
PersisterLastSessionRefreshStore persisterLastSessionRefreshStore,
InfinispanKeyGenerator keyGenerator,
Cache<String, SessionEntityWrapper<UserSessionEntity>> sessionCache,
@ -143,15 +134,13 @@ public class InfinispanUserSessionProvider implements UserSessionProvider, Sessi
this.offlineSessionCache = offlineSessionCache;
this.offlineClientSessionCache = offlineClientSessionCache;
this.sessionTx = new InfinispanChangelogBasedTransaction<>(session, sessionCache, remoteCacheInvoker, SessionTimeouts::getUserSessionLifespanMs, SessionTimeouts::getUserSessionMaxIdleMs, serializerSession);
this.offlineSessionTx = new InfinispanChangelogBasedTransaction<>(session, offlineSessionCache, remoteCacheInvoker, offlineSessionCacheEntryLifespanAdjuster, SessionTimeouts::getOfflineSessionMaxIdleMs, serializerOfflineSession);
this.clientSessionTx = new InfinispanChangelogBasedTransaction<>(session, clientSessionCache, remoteCacheInvoker, SessionTimeouts::getClientSessionLifespanMs, SessionTimeouts::getClientSessionMaxIdleMs, serializerClientSession);
this.offlineClientSessionTx = new InfinispanChangelogBasedTransaction<>(session, offlineClientSessionCache, remoteCacheInvoker, offlineClientSessionCacheEntryLifespanAdjuster, SessionTimeouts::getOfflineClientSessionMaxIdleMs, serializerOfflineClientSession);
this.sessionTx = new InfinispanChangelogBasedTransaction<>(session, sessionCache, SessionTimeouts::getUserSessionLifespanMs, SessionTimeouts::getUserSessionMaxIdleMs, serializerSession);
this.offlineSessionTx = new InfinispanChangelogBasedTransaction<>(session, offlineSessionCache, offlineSessionCacheEntryLifespanAdjuster, SessionTimeouts::getOfflineSessionMaxIdleMs, serializerOfflineSession);
this.clientSessionTx = new InfinispanChangelogBasedTransaction<>(session, clientSessionCache, SessionTimeouts::getClientSessionLifespanMs, SessionTimeouts::getClientSessionMaxIdleMs, serializerClientSession);
this.offlineClientSessionTx = new InfinispanChangelogBasedTransaction<>(session, offlineClientSessionCache, offlineClientSessionCacheEntryLifespanAdjuster, SessionTimeouts::getOfflineClientSessionMaxIdleMs, serializerOfflineClientSession);
this.clusterEventsSenderTx = new SessionEventsSenderTransaction(session);
this.lastSessionRefreshStore = lastSessionRefreshStore;
this.offlineLastSessionRefreshStore = offlineLastSessionRefreshStore;
this.persisterLastSessionRefreshStore = persisterLastSessionRefreshStore;
this.keyGenerator = keyGenerator;
this.offlineSessionCacheEntryLifespanAdjuster = offlineSessionCacheEntryLifespanAdjuster;
@ -180,16 +169,6 @@ public class InfinispanUserSessionProvider implements UserSessionProvider, Sessi
return offline ? offlineClientSessionTx : clientSessionTx;
}
@Override
public CrossDCLastSessionRefreshStore getLastSessionRefreshStore() {
return lastSessionRefreshStore;
}
@Override
public CrossDCLastSessionRefreshStore getOfflineLastSessionRefreshStore() {
return offlineLastSessionRefreshStore;
}
@Override
public PersisterLastSessionRefreshStore getPersisterLastSessionRefreshStore() {
return persisterLastSessionRefreshStore;
@ -207,7 +186,7 @@ public class InfinispanUserSessionProvider implements UserSessionProvider, Sessi
InfinispanChangelogBasedTransaction<String, UserSessionEntity> userSessionUpdateTx = getTransaction(false);
InfinispanChangelogBasedTransaction<UUID, AuthenticatedClientSessionEntity> clientSessionUpdateTx = getClientSessionTransaction(false);
AuthenticatedClientSessionAdapter adapter = new AuthenticatedClientSessionAdapter(session, this, entity, client, userSession, clientSessionUpdateTx, false);
AuthenticatedClientSessionAdapter adapter = new AuthenticatedClientSessionAdapter(session, entity, client, userSession, clientSessionUpdateTx, false);
// For now, the clientSession is considered transient in case the userSession was transient
UserSessionModel.SessionPersistenceState persistenceState = userSession.getPersistenceState() != null ?
@ -383,12 +362,9 @@ public class InfinispanUserSessionProvider implements UserSessionProvider, Sessi
throw new ModelException("For offline sessions, only lookup by userId and brokerUserId is supported");
}
Cache<String, SessionEntityWrapper<UserSessionEntity>> cache = getCache(offline);
cache = CacheDecorators.skipCacheLoadersIfRemoteStoreIsEnabled(cache);
// return a stream that 'wraps' the infinispan cache stream so that the cache stream's elements are read one by one
// and then mapped locally to avoid serialization issues when trying to manipulate the cache stream directly.
return StreamSupport.stream(cache.entrySet().stream().filter(predicate).map(Mappers.userSessionEntity()).spliterator(), false)
return StreamSupport.stream(getCache(offline).entrySet().stream().filter(predicate).map(Mappers.userSessionEntity()).spliterator(), false)
.map(entity -> this.wrap(realm, entity, offline))
.filter(Objects::nonNull).map(Function.identity());
}
@ -493,52 +469,7 @@ public class InfinispanUserSessionProvider implements UserSessionProvider, Sessi
return userSession;
}
// Try lookup userSession from remoteCache
Cache<String, SessionEntityWrapper<UserSessionEntity>> cache = getCache(offline);
RemoteCache remoteCache = InfinispanUtil.getRemoteCache(cache);
if (remoteCache != null) {
SessionEntityWrapper<UserSessionEntity> remoteSessionEntityWrapper = (SessionEntityWrapper<UserSessionEntity>) remoteCache.get(id);
if (remoteSessionEntityWrapper != null) {
UserSessionEntity remoteSessionEntity = remoteSessionEntityWrapper.getEntity();
log.debugf("getUserSessionWithPredicate(%s): remote cache contains session entity %s", id, remoteSessionEntity);
UserSessionModel remoteSessionAdapter = wrap(realm, remoteSessionEntity, offline);
if (predicate.test(remoteSessionAdapter)) {
InfinispanChangelogBasedTransaction<String, UserSessionEntity> tx = getTransaction(offline);
// Remote entity contains our predicate. Update local cache with the remote entity
SessionEntityWrapper<UserSessionEntity> sessionWrapper = remoteSessionEntity.mergeRemoteEntityWithLocalEntity(tx.get(id));
// Replace entity just in ispn cache. Skip remoteStore
cache.getAdvancedCache().withFlags(Flag.SKIP_CACHE_STORE, Flag.SKIP_CACHE_LOAD, Flag.IGNORE_RETURN_VALUES)
.replace(id, sessionWrapper);
tx.reloadEntityInCurrentTransaction(realm, id, sessionWrapper);
// Recursion. We should have it locally now
return getUserSessionWithPredicate(realm, id, offline, predicate);
} else {
log.debugf("getUserSessionWithPredicate(%s): found, but predicate doesn't pass", id);
return null;
}
} else {
log.debugf("getUserSessionWithPredicate(%s): not found", id);
// Session not available on remoteCache. Was already removed there. So removing locally too.
// TODO: Can be optimized to skip calling remoteCache.remove
removeUserSession(realm, userSession);
return null;
}
} else {
log.debugf("getUserSessionWithPredicate(%s): remote cache not available", id);
return null;
}
return null;
}
@ -555,9 +486,7 @@ public class InfinispanUserSessionProvider implements UserSessionProvider, Sessi
return persister.getUserSessionsCountsByClients(realm, true);
}
Cache<String, SessionEntityWrapper<UserSessionEntity>> cache = getCache(offline);
cache = CacheDecorators.skipCacheLoadersIfRemoteStoreIsEnabled(cache);
return cache.entrySet().stream()
return getCache(offline).entrySet().stream()
.filter(UserSessionPredicate.create(realm.getId()))
.map(Mappers.authClientSessionSetMapper())
.flatMap(CollectionToStreamMapper.getInstance())
@ -589,11 +518,10 @@ public class InfinispanUserSessionProvider implements UserSessionProvider, Sessi
}
protected void removeUserSessions(RealmModel realm, UserModel user, boolean offline) {
Cache<String, SessionEntityWrapper<UserSessionEntity>> cache = getCache(offline);
cache = CacheDecorators.skipCacheLoadersIfRemoteStoreIsEnabled(cache);
Iterator<UserSessionEntity> itr = cache.entrySet().stream().filter(UserSessionPredicate.create(realm.getId()).user(user.getId())).map(Mappers.userSessionEntity()).iterator();
Iterator<UserSessionEntity> itr = getCache(offline).entrySet().stream()
.filter(UserSessionPredicate.create(realm.getId()).user(user.getId()))
.map(Mappers.userSessionEntity())
.iterator();
while (itr.hasNext()) {
UserSessionEntity userSessionEntity = itr.next();
@ -618,8 +546,8 @@ public class InfinispanUserSessionProvider implements UserSessionProvider, Sessi
public void removeUserSessions(RealmModel realm) {
// Don't send message to all DCs, just to all cluster nodes in current DC. The remoteCache will notify client listeners for removed userSessions.
clusterEventsSenderTx.addEvent(
RemoveUserSessionsEvent.createEvent(RemoveUserSessionsEvent.class, InfinispanUserSessionProviderFactory.REMOVE_USER_SESSIONS_EVENT, session, realm.getId(), true),
ClusterProvider.DCNotify.LOCAL_DC_ONLY);
RemoveUserSessionsEvent.createEvent(RemoveUserSessionsEvent.class, InfinispanUserSessionProviderFactory.REMOVE_USER_SESSIONS_EVENT, session, realm.getId())
);
}
protected void onRemoveUserSessionsEvent(String realmId) {
@ -636,11 +564,9 @@ public class InfinispanUserSessionProvider implements UserSessionProvider, Sessi
Cache<UUID, SessionEntityWrapper<AuthenticatedClientSessionEntity>> clientSessionCache = getClientSessionCache(offline);
Cache<UUID, SessionEntityWrapper<AuthenticatedClientSessionEntity>> localClientSessionCache = CacheDecorators.localCache(clientSessionCache);
Cache<String, SessionEntityWrapper<UserSessionEntity>> localCacheStoreIgnore = CacheDecorators.skipCacheLoadersIfRemoteStoreIsEnabled(localCache);
final AtomicInteger userSessionsSize = new AtomicInteger();
localCacheStoreIgnore
localCache
.entrySet()
.stream()
.filter(SessionWrapperPredicate.create(realmId))
@ -672,8 +598,8 @@ public class InfinispanUserSessionProvider implements UserSessionProvider, Sessi
public void onRealmRemoved(RealmModel realm) {
// Don't send message to all DCs, just to all cluster nodes in current DC. The remoteCache will notify client listeners for removed userSessions.
clusterEventsSenderTx.addEvent(
RealmRemovedSessionEvent.createEvent(RealmRemovedSessionEvent.class, InfinispanUserSessionProviderFactory.REALM_REMOVED_SESSION_EVENT, session, realm.getId(), true),
ClusterProvider.DCNotify.LOCAL_DC_ONLY);
RealmRemovedSessionEvent.createEvent(RealmRemovedSessionEvent.class, InfinispanUserSessionProviderFactory.REALM_REMOVED_SESSION_EVENT, session, realm.getId())
);
UserSessionPersisterProvider sessionsPersister = session.getProvider(UserSessionPersisterProvider.class);
if (sessionsPersister != null) {
@ -766,7 +692,7 @@ public class InfinispanUserSessionProvider implements UserSessionProvider, Sessi
AuthenticatedClientSessionAdapter wrap(UserSessionModel userSession, ClientModel client, AuthenticatedClientSessionEntity entity, boolean offline) {
InfinispanChangelogBasedTransaction<UUID, AuthenticatedClientSessionEntity> clientSessionUpdateTx = getClientSessionTransaction(offline);
return entity != null ? new AuthenticatedClientSessionAdapter(session, this, entity, client, userSession, clientSessionUpdateTx, offline) : null;
return entity != null ? new AuthenticatedClientSessionAdapter(session, entity, client, userSession, clientSessionUpdateTx, offline) : null;
}
UserSessionEntity getUserSessionEntity(RealmModel realm, UserSessionModel userSession, boolean offline) {
@ -884,7 +810,7 @@ public class InfinispanUserSessionProvider implements UserSessionProvider, Sessi
.collect(Collectors.toMap(sessionEntityWrapper -> sessionEntityWrapper.getEntity().getId(), Function.identity()));
// Directly put all entities to the infinispan cache
Cache<String, SessionEntityWrapper<UserSessionEntity>> cache = CacheDecorators.skipCacheLoadersIfRemoteStoreIsEnabled(getCache(offline));
Cache<String, SessionEntityWrapper<UserSessionEntity>> cache = getCache(offline);
boolean importWithExpiration = sessionsById.size() == 1;
if (importWithExpiration) {
@ -897,39 +823,8 @@ public class InfinispanUserSessionProvider implements UserSessionProvider, Sessi
}, 10, 10);
}
// put all entities to the remoteCache (if exists)
RemoteCache remoteCache = InfinispanUtil.getRemoteCache(cache);
if (remoteCache != null) {
Map<String, SessionEntityWrapper<UserSessionEntity>> sessionsByIdForTransport = sessionsById.values().stream()
.map(SessionEntityWrapper::forTransport)
.collect(Collectors.toMap(sessionEntityWrapper -> sessionEntityWrapper.getEntity().getId(), Function.identity()));
if (importWithExpiration) {
importSessionsWithExpiration(sessionsByIdForTransport, remoteCache,
offline ? offlineSessionCacheEntryLifespanAdjuster : SessionTimeouts::getUserSessionLifespanMs,
offline ? SessionTimeouts::getOfflineSessionMaxIdleMs : SessionTimeouts::getUserSessionMaxIdleMs);
} else {
Retry.executeWithBackoff((int iteration) -> {
try {
remoteCache.putAll(sessionsByIdForTransport);
} catch (HotRodClientException re) {
if (log.isDebugEnabled()) {
log.debugf(re, "Failed to put import %d sessions to remoteCache. Iteration '%s'. Will try to retry the task",
sessionsByIdForTransport.size(), iteration);
}
// Rethrow the exception. Retry will take care of handle the exception and eventually retry the operation.
throw re;
}
}, 10, 10);
}
}
// Import client sessions
Cache<UUID, SessionEntityWrapper<AuthenticatedClientSessionEntity>> clientSessCache =
CacheDecorators.skipCacheLoadersIfRemoteStoreIsEnabled(offline ? offlineClientSessionCache : clientSessionCache);
Cache<UUID, SessionEntityWrapper<AuthenticatedClientSessionEntity>> clientSessCache = getClientSessionCache(offline);
if (importWithExpiration) {
importSessionsWithExpiration(clientSessionsById, clientSessCache,
@ -940,36 +835,6 @@ public class InfinispanUserSessionProvider implements UserSessionProvider, Sessi
clientSessCache.putAll(clientSessionsById);
}, 10, 10);
}
// put all entities to the remoteCache (if exists)
RemoteCache remoteCacheClientSessions = InfinispanUtil.getRemoteCache(clientSessCache);
if (remoteCacheClientSessions != null) {
Map<UUID, SessionEntityWrapper<AuthenticatedClientSessionEntity>> sessionsByIdForTransport = clientSessionsById.values().stream()
.map(SessionEntityWrapper::forTransport)
.collect(Collectors.toMap(sessionEntityWrapper -> sessionEntityWrapper.getEntity().getId(), Function.identity()));
if (importWithExpiration) {
importSessionsWithExpiration(sessionsByIdForTransport, remoteCacheClientSessions,
offline ? offlineClientSessionCacheEntryLifespanAdjuster : SessionTimeouts::getClientSessionLifespanMs,
offline ? SessionTimeouts::getOfflineClientSessionMaxIdleMs : SessionTimeouts::getClientSessionMaxIdleMs);
} else {
Retry.executeWithBackoff((int iteration) -> {
try {
remoteCacheClientSessions.putAll(sessionsByIdForTransport);
} catch (HotRodClientException re) {
if (log.isDebugEnabled()) {
log.debugf(re, "Failed to put import %d client sessions to remoteCache. Iteration '%s'. Will try to retry the task",
sessionsByIdForTransport.size(), iteration);
}
// Rethrow the exception. Retry will take care of handle the exception and eventually retry the operation.
throw re;
}
}, 10, 10);
}
}
}
private <T extends SessionEntity> void importSessionsWithExpiration(Map<? extends Object, SessionEntityWrapper<T>> sessionsById,
@ -1055,7 +920,7 @@ public class InfinispanUserSessionProvider implements UserSessionProvider, Sessi
SessionUpdateTask registerClientSessionTask = new RegisterClientSessionTask(clientSession.getClient().getId(), clientSessionId);
userSessionUpdateTx.addTask(sessionToImportInto.getId(), registerClientSessionTask);
return new AuthenticatedClientSessionAdapter(session, this, entity, clientSession.getClient(), sessionToImportInto, clientSessionUpdateTx, offline);
return new AuthenticatedClientSessionAdapter(session, entity, clientSession.getClient(), sessionToImportInto, clientSessionUpdateTx, offline);
}
@ -1097,10 +962,6 @@ public class InfinispanUserSessionProvider implements UserSessionProvider, Sessi
return CacheOperation.REPLACE;
}
@Override
public CrossDCMessageStatus getCrossDCMessageStatus(SessionEntityWrapper<UserSessionEntity> sessionWrapper) {
return CrossDCMessageStatus.SYNC;
}
}
}
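Sketch of the bulk-import path that remains once the remote-cache writes above are dropped: entries go straight into the embedded cache, wrapped in the same Retry.executeWithBackoff call seen in the diff. The two trailing arguments (10, 10) are copied from the diff without interpreting them further.

import java.util.Map;
import org.infinispan.Cache;
import org.keycloak.common.util.Retry;

class BulkImportSketch {

    <K, V> void importAll(Cache<K, V> cache, Map<K, V> entries) {
        Retry.executeWithBackoff((int iteration) -> {
            // the whole putAll is retried if Infinispan throws; 10 and 10 mirror the values in the diff
            cache.putAll(entries);
        }, 10, 10);
    }
}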

View File

@ -20,23 +20,17 @@ package org.keycloak.models.sessions.infinispan;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.TimeUnit;
import org.infinispan.Cache;
import org.infinispan.affinity.KeyGenerator;
import org.infinispan.client.hotrod.RemoteCache;
import org.infinispan.persistence.remote.RemoteStore;
import org.jboss.logging.Logger;
import org.keycloak.Config;
import org.keycloak.cluster.ClusterProvider;
import org.keycloak.common.util.Environment;
import org.keycloak.common.util.MultiSiteUtils;
import org.keycloak.common.util.Time;
import org.keycloak.connections.infinispan.InfinispanConnectionProvider;
import org.keycloak.connections.infinispan.InfinispanUtil;
import org.keycloak.infinispan.util.InfinispanUtils;
import org.keycloak.models.ClientModel;
import org.keycloak.models.KeycloakSession;
@ -50,21 +44,13 @@ import org.keycloak.models.sessions.infinispan.changes.PersistentSessionsWorker;
import org.keycloak.models.sessions.infinispan.changes.PersistentUpdate;
import org.keycloak.models.sessions.infinispan.changes.SerializeExecutionsByKey;
import org.keycloak.models.sessions.infinispan.changes.SessionEntityWrapper;
import org.keycloak.models.sessions.infinispan.changes.sessions.CrossDCLastSessionRefreshStore;
import org.keycloak.models.sessions.infinispan.changes.sessions.CrossDCLastSessionRefreshStoreFactory;
import org.keycloak.models.sessions.infinispan.changes.sessions.PersisterLastSessionRefreshStore;
import org.keycloak.models.sessions.infinispan.changes.sessions.PersisterLastSessionRefreshStoreFactory;
import org.keycloak.models.sessions.infinispan.entities.AuthenticatedClientSessionEntity;
import org.keycloak.models.sessions.infinispan.entities.SessionEntity;
import org.keycloak.models.sessions.infinispan.entities.UserSessionEntity;
import org.keycloak.models.sessions.infinispan.events.AbstractUserSessionClusterListener;
import org.keycloak.models.sessions.infinispan.events.RealmRemovedSessionEvent;
import org.keycloak.models.sessions.infinispan.events.RemoveUserSessionsEvent;
import org.keycloak.models.sessions.infinispan.initializer.InfinispanCacheInitializer;
import org.keycloak.models.sessions.infinispan.initializer.InitializerState;
import org.keycloak.models.sessions.infinispan.remotestore.RemoteCacheInvoker;
import org.keycloak.models.sessions.infinispan.remotestore.RemoteCacheSessionListener;
import org.keycloak.models.sessions.infinispan.remotestore.RemoteCacheSessionsLoader;
import org.keycloak.models.sessions.infinispan.util.InfinispanKeyGenerator;
import org.keycloak.models.sessions.infinispan.util.SessionTimeouts;
import org.keycloak.models.utils.KeycloakModelUtils;
@ -99,9 +85,6 @@ public class InfinispanUserSessionProviderFactory implements UserSessionProvider
private Config.Scope config;
private RemoteCacheInvoker remoteCacheInvoker;
private CrossDCLastSessionRefreshStore lastSessionRefreshStore;
private CrossDCLastSessionRefreshStore offlineLastSessionRefreshStore;
private PersisterLastSessionRefreshStore persisterLastSessionRefreshStore;
private InfinispanKeyGenerator keyGenerator;
SerializeExecutionsByKey<String> serializerSession = new SerializeExecutionsByKey<>();
@ -132,9 +115,6 @@ public class InfinispanUserSessionProviderFactory implements UserSessionProvider
if (MultiSiteUtils.isPersistentSessionsEnabled()) {
return new PersistentUserSessionProvider(
session,
remoteCacheInvoker,
lastSessionRefreshStore,
offlineLastSessionRefreshStore,
keyGenerator,
cache,
offlineSessionsCache,
@ -149,9 +129,6 @@ public class InfinispanUserSessionProviderFactory implements UserSessionProvider
}
return new InfinispanUserSessionProvider(
session,
remoteCacheInvoker,
lastSessionRefreshStore,
offlineLastSessionRefreshStore,
persisterLastSessionRefreshStore,
keyGenerator,
cache,
@ -196,19 +173,14 @@ public class InfinispanUserSessionProviderFactory implements UserSessionProvider
}
};
} else {
int preloadTransactionTimeout = getTimeoutForPreloadingSessionsSeconds();
log.debugf("Will preload sessions with transaction timeout %d seconds", preloadTransactionTimeout);
KeycloakModelUtils.runJobInTransactionWithTimeout(factory, (KeycloakSession session) -> {
KeycloakModelUtils.runJobInTransaction(factory, (KeycloakSession session) -> {
keyGenerator = new InfinispanKeyGenerator();
checkRemoteCaches(session);
if (!MultiSiteUtils.isPersistentSessionsEnabled()) {
initializeLastSessionRefreshStore(factory);
initializePersisterLastSessionRefreshStore(factory);
}
registerClusterListeners(session);
loadSessionsFromRemoteCaches(session);
}, preloadTransactionTimeout);
});
}
} else if (event instanceof UserModel.UserRemovedEvent) {
@ -227,12 +199,6 @@ public class InfinispanUserSessionProviderFactory implements UserSessionProvider
if (persisterLastSessionRefreshStore != null) {
persisterLastSessionRefreshStore.reset();
}
if (lastSessionRefreshStore != null) {
lastSessionRefreshStore.reset();
}
if (offlineLastSessionRefreshStore != null) {
offlineLastSessionRefreshStore.reset();
}
}
}
});
@ -244,26 +210,7 @@ public class InfinispanUserSessionProviderFactory implements UserSessionProvider
}
}
// Max count of worker errors. Initialization will end with exception when this number is reached
private int getMaxErrors() {
return config.getInt("maxErrors", 20);
}
// Count of sessions to be computed in each segment
private int getSessionsPerSegment() {
return config.getInt("sessionsPerSegment", 64);
}
private int getTimeoutForPreloadingSessionsSeconds() {
Integer timeout = config.getInt("sessionsPreloadTimeoutInSeconds", null);
return timeout != null ? timeout : Environment.getServerStartupTimeout();
}
private int getStalledTimeoutInSeconds(int defaultTimeout) {
return config.getInt("sessionPreloadStalledTimeoutInSeconds", defaultTimeout);
}
public void initializeLastSessionRefreshStore(final KeycloakSessionFactory sessionFactory) {
public void initializePersisterLastSessionRefreshStore(final KeycloakSessionFactory sessionFactory) {
KeycloakModelUtils.runJobInTransaction(sessionFactory, new KeycloakSessionTask() {
@Override
@ -310,65 +257,6 @@ public class InfinispanUserSessionProviderFactory implements UserSessionProvider
log.debug("Registered cluster listeners");
}
protected void checkRemoteCaches(KeycloakSession session) {
this.remoteCacheInvoker = new RemoteCacheInvoker();
InfinispanConnectionProvider ispn = session.getProvider(InfinispanConnectionProvider.class);
Cache<String, SessionEntityWrapper<UserSessionEntity>> sessionsCache = ispn.getCache(InfinispanConnectionProvider.USER_SESSION_CACHE_NAME);
RemoteCache sessionsRemoteCache = checkRemoteCache(session, sessionsCache, SessionTimeouts::getUserSessionLifespanMs, SessionTimeouts::getUserSessionMaxIdleMs);
if (sessionsRemoteCache != null) {
lastSessionRefreshStore = new CrossDCLastSessionRefreshStoreFactory().createAndInit(session, sessionsCache, false);
}
Cache<UUID, SessionEntityWrapper<AuthenticatedClientSessionEntity>> clientSessionsCache = ispn.getCache(InfinispanConnectionProvider.CLIENT_SESSION_CACHE_NAME);
checkRemoteCache(session, clientSessionsCache, SessionTimeouts::getClientSessionLifespanMs, SessionTimeouts::getClientSessionMaxIdleMs);
Cache<String, SessionEntityWrapper<UserSessionEntity>> offlineSessionsCache = ispn.getCache(InfinispanConnectionProvider.OFFLINE_USER_SESSION_CACHE_NAME);
RemoteCache offlineSessionsRemoteCache = checkRemoteCache(session, offlineSessionsCache, this::deriveOfflineSessionCacheEntryLifespanMs, SessionTimeouts::getOfflineSessionMaxIdleMs);
if (offlineSessionsRemoteCache != null) {
offlineLastSessionRefreshStore = new CrossDCLastSessionRefreshStoreFactory().createAndInit(session, offlineSessionsCache, true);
}
Cache<UUID, SessionEntityWrapper<AuthenticatedClientSessionEntity>> offlineClientSessionsCache = ispn.getCache(InfinispanConnectionProvider.OFFLINE_CLIENT_SESSION_CACHE_NAME);
checkRemoteCache(session, offlineClientSessionsCache, this::deriveOfflineClientSessionCacheEntryLifespanOverrideMs, SessionTimeouts::getOfflineClientSessionMaxIdleMs);
}
private <K, V extends SessionEntity> RemoteCache checkRemoteCache(KeycloakSession session, Cache<K, SessionEntityWrapper<V>> ispnCache,
SessionFunction<V> lifespanMsLoader, SessionFunction<V> maxIdleTimeMsLoader) {
Set<RemoteStore> remoteStores = InfinispanUtil.getRemoteStores(ispnCache);
if (remoteStores.isEmpty()) {
log.debugf("No remote store configured for cache '%s'", ispnCache.getName());
return null;
} else {
log.infof("Remote store configured for cache '%s'", ispnCache.getName());
RemoteCache<K, SessionEntityWrapper<V>> remoteCache = (RemoteCache) remoteStores.iterator().next().getRemoteCache();
if (remoteCache == null) {
throw new IllegalStateException("No remote cache available for the infinispan cache: " + ispnCache.getName());
}
remoteCacheInvoker.addRemoteCache(ispnCache.getName(), remoteCache);
Runnable onFailover = null;
if (useCaches && MultiSiteUtils.isPersistentSessionsEnabled()) {
// If persistent sessions are enabled, we want to clear the local caches when a failover of the listener on the remote store changes as we might have missed some of the remote store events
// which might have been triggered by another Keycloak site connected to the same remote Infinispan cluster.
// Due to this, we can be sure that we never have outdated information in our local cache. All entries will be re-loaded from the remote cache or the database as necessary lazily.
onFailover = ispnCache::clear;
}
RemoteCacheSessionListener hotrodListener = RemoteCacheSessionListener.createListener(session, ispnCache, remoteCache, lifespanMsLoader, maxIdleTimeMsLoader, onFailover);
remoteCache.addClientListener(hotrodListener);
return remoteCache;
}
}
protected Long deriveOfflineSessionCacheEntryLifespanMs(RealmModel realm, ClientModel client, UserSessionEntity entity) {
long configuredOfflineSessionLifespan = SessionTimeouts.getOfflineSessionLifespanMs(realm, client, entity);
@ -405,36 +293,6 @@ public class InfinispanUserSessionProviderFactory implements UserSessionProvider
return Math.min(TimeUnit.SECONDS.toMillis(offlineClientSessionCacheEntryLifespanOverride), configuredOfflineClientSessionLifespan);
}
private void loadSessionsFromRemoteCaches(KeycloakSession session) {
for (String cacheName : remoteCacheInvoker.getRemoteCacheNames()) {
loadSessionsFromRemoteCache(session.getKeycloakSessionFactory(), cacheName, getSessionsPerSegment(), getMaxErrors());
}
}
private void loadSessionsFromRemoteCache(final KeycloakSessionFactory sessionFactory, String cacheName, final int sessionsPerSegment, final int maxErrors) {
log.debugf("Check pre-loading sessions from remote cache '%s'", cacheName);
KeycloakModelUtils.runJobInTransaction(sessionFactory, new KeycloakSessionTask() {
@Override
public void run(KeycloakSession session) {
InfinispanConnectionProvider connections = session.getProvider(InfinispanConnectionProvider.class);
Cache<String, InitializerState> workCache = connections.getCache(InfinispanConnectionProvider.WORK_CACHE_NAME);
int defaultStateTransferTimeout = (int) (connections.getCache(InfinispanConnectionProvider.USER_SESSION_CACHE_NAME)
.getCacheConfiguration().clustering().stateTransfer().timeout() / 1000);
InfinispanCacheInitializer initializer = new InfinispanCacheInitializer(sessionFactory, workCache,
new RemoteCacheSessionsLoader(cacheName, sessionsPerSegment), "remoteCacheLoad::" + cacheName, maxErrors,
getStalledTimeoutInSeconds(defaultStateTransferTimeout));
initializer.loadSessions();
}
});
log.debugf("Pre-loading sessions from remote cache '%s' finished", cacheName);
}
@Override
public void close() {
if (persistentSessionsWorker != null) {
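Sketch of the user-session factory's post-migration startup after the change: a plain runJobInTransaction with no preload timeout, no remote-cache checks and no session preloading. The empty method bodies stand in for code the commit does not touch.

import org.keycloak.common.util.MultiSiteUtils;
import org.keycloak.models.KeycloakSession;
import org.keycloak.models.KeycloakSessionFactory;
import org.keycloak.models.sessions.infinispan.util.InfinispanKeyGenerator;
import org.keycloak.models.utils.KeycloakModelUtils;

class UserSessionPostMigrationSketch {

    private InfinispanKeyGenerator keyGenerator;

    void onPostMigration(KeycloakSessionFactory factory) {
        KeycloakModelUtils.runJobInTransaction(factory, (KeycloakSession session) -> {
            keyGenerator = new InfinispanKeyGenerator();
            if (!MultiSiteUtils.isPersistentSessionsEnabled()) {
                initializePersisterLastSessionRefreshStore(factory);
            }
            registerClusterListeners(session);
        });
    }

    void initializePersisterLastSessionRefreshStore(KeycloakSessionFactory factory) {
        // unchanged by the commit
    }

    void registerClusterListeners(KeycloakSession session) {
        // unchanged by the commit
    }
}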

View File

@ -52,7 +52,6 @@ import org.keycloak.common.util.MultiSiteUtils;
import org.keycloak.common.util.Retry;
import org.keycloak.common.util.Time;
import org.keycloak.connections.infinispan.InfinispanConnectionProvider;
import org.keycloak.connections.infinispan.InfinispanUtil;
import org.keycloak.models.AuthenticatedClientSessionModel;
import org.keycloak.models.ClientModel;
import org.keycloak.models.KeycloakSession;
@ -75,7 +74,6 @@ import org.keycloak.models.sessions.infinispan.changes.SessionUpdateTask;
import org.keycloak.models.sessions.infinispan.changes.SessionUpdatesList;
import org.keycloak.models.sessions.infinispan.changes.Tasks;
import org.keycloak.models.sessions.infinispan.changes.UserSessionPersistentChangelogBasedTransaction;
import org.keycloak.models.sessions.infinispan.changes.sessions.CrossDCLastSessionRefreshStore;
import org.keycloak.models.sessions.infinispan.changes.sessions.PersisterLastSessionRefreshStore;
import org.keycloak.models.sessions.infinispan.entities.AuthenticatedClientSessionEntity;
import org.keycloak.models.sessions.infinispan.entities.AuthenticatedClientSessionStore;
@ -84,7 +82,6 @@ import org.keycloak.models.sessions.infinispan.entities.UserSessionEntity;
import org.keycloak.models.sessions.infinispan.events.RealmRemovedSessionEvent;
import org.keycloak.models.sessions.infinispan.events.RemoveUserSessionsEvent;
import org.keycloak.models.sessions.infinispan.events.SessionEventsSenderTransaction;
import org.keycloak.models.sessions.infinispan.remotestore.RemoteCacheInvoker;
import org.keycloak.models.sessions.infinispan.stream.Mappers;
import org.keycloak.models.sessions.infinispan.stream.SessionWrapperPredicate;
import org.keycloak.models.sessions.infinispan.stream.UserSessionPredicate;
@ -116,15 +113,9 @@ public class PersistentUserSessionProvider implements UserSessionProvider, Sessi
protected final SessionEventsSenderTransaction clusterEventsSenderTx;
protected final CrossDCLastSessionRefreshStore lastSessionRefreshStore;
protected final CrossDCLastSessionRefreshStore offlineLastSessionRefreshStore;
protected final InfinispanKeyGenerator keyGenerator;
public PersistentUserSessionProvider(KeycloakSession session,
RemoteCacheInvoker remoteCacheInvoker,
CrossDCLastSessionRefreshStore lastSessionRefreshStore,
CrossDCLastSessionRefreshStore offlineLastSessionRefreshStore,
InfinispanKeyGenerator keyGenerator,
Cache<String, SessionEntityWrapper<UserSessionEntity>> sessionCache,
Cache<String, SessionEntityWrapper<UserSessionEntity>> offlineSessionCache,
@ -148,7 +139,6 @@ public class PersistentUserSessionProvider implements UserSessionProvider, Sessi
this.sessionTx = new UserSessionPersistentChangelogBasedTransaction(session,
sessionCache, offlineSessionCache,
remoteCacheInvoker,
SessionTimeouts::getUserSessionLifespanMs, SessionTimeouts::getUserSessionMaxIdleMs,
SessionTimeouts::getOfflineSessionLifespanMs, SessionTimeouts::getOfflineSessionMaxIdleMs,
asyncQueuePersistentUpdate,
@ -157,7 +147,6 @@ public class PersistentUserSessionProvider implements UserSessionProvider, Sessi
this.clientSessionTx = new ClientSessionPersistentChangelogBasedTransaction(session,
clientSessionCache, offlineClientSessionCache,
remoteCacheInvoker,
SessionTimeouts::getClientSessionLifespanMs, SessionTimeouts::getClientSessionMaxIdleMs,
SessionTimeouts::getOfflineClientSessionLifespanMs, SessionTimeouts::getOfflineClientSessionMaxIdleMs,
sessionTx,
@ -167,8 +156,6 @@ public class PersistentUserSessionProvider implements UserSessionProvider, Sessi
this.clusterEventsSenderTx = new SessionEventsSenderTransaction(session);
this.lastSessionRefreshStore = lastSessionRefreshStore;
this.offlineLastSessionRefreshStore = offlineLastSessionRefreshStore;
this.keyGenerator = keyGenerator;
session.getTransactionManager().enlistAfterCompletion(clusterEventsSenderTx);
@ -184,16 +171,6 @@ public class PersistentUserSessionProvider implements UserSessionProvider, Sessi
return offline ? offlineClientSessionCache : clientSessionCache;
}
@Override
public CrossDCLastSessionRefreshStore getLastSessionRefreshStore() {
return lastSessionRefreshStore;
}
@Override
public CrossDCLastSessionRefreshStore getOfflineLastSessionRefreshStore() {
return offlineLastSessionRefreshStore;
}
@Override
public PersisterLastSessionRefreshStore getPersisterLastSessionRefreshStore() {
throw new IllegalStateException("PersisterLastSessionRefreshStore is not supported in PersistentUserSessionProvider");
@ -218,7 +195,7 @@ public class PersistentUserSessionProvider implements UserSessionProvider, Sessi
entity.getNotes().put(AuthenticatedClientSessionModel.USER_SESSION_REMEMBER_ME_NOTE, "true");
}
AuthenticatedClientSessionAdapter adapter = new AuthenticatedClientSessionAdapter(session, this, entity, client, userSession, clientSessionTx, false);
AuthenticatedClientSessionAdapter adapter = new AuthenticatedClientSessionAdapter(session, entity, client, userSession, clientSessionTx, false);
if (userSession.isOffline()) {
// If this is an offline session, and the referred online session doesn't exist anymore, don't register the client session in the transaction.
@ -371,7 +348,7 @@ public class PersistentUserSessionProvider implements UserSessionProvider, Sessi
SessionEntityWrapper<AuthenticatedClientSessionEntity> clientSessionEntity = clientSessionTx.get(client.getRealm(), client, userSession, clientSessionUUID, offline);
if (clientSessionEntity != null) {
return new AuthenticatedClientSessionAdapter(session, this, clientSessionEntity.getEntity(), client, userSession, clientSessionTx, offline);
return new AuthenticatedClientSessionAdapter(session, clientSessionEntity.getEntity(), client, userSession, clientSessionTx, offline);
}
return null;
@ -482,8 +459,8 @@ public class PersistentUserSessionProvider implements UserSessionProvider, Sessi
public void removeUserSessions(RealmModel realm) {
// Send message to all DCs as each site might have different entries in the cache
clusterEventsSenderTx.addEvent(
RemoveUserSessionsEvent.createEvent(RemoveUserSessionsEvent.class, InfinispanUserSessionProviderFactory.REMOVE_USER_SESSIONS_EVENT, session, realm.getId(), true),
ClusterProvider.DCNotify.ALL_DCS);
RemoveUserSessionsEvent.createEvent(RemoveUserSessionsEvent.class, InfinispanUserSessionProviderFactory.REMOVE_USER_SESSIONS_EVENT, session, realm.getId())
);
session.getProvider(UserSessionPersisterProvider.class).removeUserSessions(realm);
}
@ -495,27 +472,19 @@ public class PersistentUserSessionProvider implements UserSessionProvider, Sessi
// public for usage in the testsuite
public void removeLocalUserSessions(String realmId, boolean offline) {
Cache<String, SessionEntityWrapper<UserSessionEntity>> cache = getCache(offline);
Cache<String, SessionEntityWrapper<UserSessionEntity>> localCache = CacheDecorators.localCache(cache);
Cache<String, SessionEntityWrapper<UserSessionEntity>> localCache = CacheDecorators.localCache(getCache(offline));
Cache<UUID, SessionEntityWrapper<AuthenticatedClientSessionEntity>> clientSessionCache = getClientSessionCache(offline);
Cache<UUID, SessionEntityWrapper<AuthenticatedClientSessionEntity>> localClientSessionCache = CacheDecorators.localCache(clientSessionCache);
Cache<String, SessionEntityWrapper<UserSessionEntity>> localCacheStoreIgnore = CacheDecorators.skipCacheLoadersIfRemoteStoreIsEnabled(localCache);
final AtomicInteger userSessionsSize = new AtomicInteger();
removeEntriesByRealm(realmId, localCacheStoreIgnore, userSessionsSize, localCache, localClientSessionCache);
// TODO: This now runs on each node on each site. Ideally it should run only once on each site.
removeEntriesByRealmRemote(realmId, InfinispanUtil.getRemoteCache(getCache(offline)), userSessionsSize, InfinispanUtil.getRemoteCache(getClientSessionCache(offline)));
removeEntriesByRealm(realmId, localCache, userSessionsSize, localClientSessionCache);
log.debugf("Removed %d sessions in realm %s. Offline: %b", (Object) userSessionsSize.get(), realmId, offline);
}
private static void removeEntriesByRealm(String realmId, Cache<String, SessionEntityWrapper<UserSessionEntity>> sessions, AtomicInteger userSessionsSize, Cache<String, SessionEntityWrapper<UserSessionEntity>> localCache, Cache<UUID, SessionEntityWrapper<AuthenticatedClientSessionEntity>> clientSessions) {
private static void removeEntriesByRealm(String realmId, Cache<String, SessionEntityWrapper<UserSessionEntity>> sessionsCache, AtomicInteger userSessionsSize, Cache<UUID, SessionEntityWrapper<AuthenticatedClientSessionEntity>> clientSessions) {
FuturesHelper futures = new FuturesHelper();
sessions
sessionsCache
.entrySet()
.stream()
.filter(SessionWrapperPredicate.create(realmId))
@ -524,7 +493,7 @@ public class PersistentUserSessionProvider implements UserSessionProvider, Sessi
userSessionsSize.incrementAndGet();
// Remove session from remoteCache too. Use removeAsync for better perf
Future<SessionEntityWrapper<UserSessionEntity>> future = localCache.removeAsync(userSessionEntity.getId());
Future<SessionEntityWrapper<UserSessionEntity>> future = sessionsCache.removeAsync(userSessionEntity.getId());
futures.addTask(future);
userSessionEntity.getAuthenticatedClientSessions().forEach((clientUUID, clientSessionId) -> {
Future<SessionEntityWrapper<AuthenticatedClientSessionEntity>> f = clientSessions.removeAsync(clientSessionId);
@ -535,40 +504,12 @@ public class PersistentUserSessionProvider implements UserSessionProvider, Sessi
futures.waitForAllToFinish();
}
private static void removeEntriesByRealmRemote(String realmId, RemoteCache<String, SessionEntityWrapper<UserSessionEntity>> sessions, AtomicInteger userSessionsSize, RemoteCache<UUID, SessionEntityWrapper<AuthenticatedClientSessionEntity>> clientSessions) {
if (sessions == null) {
return;
}
FuturesHelper futures = new FuturesHelper();
sessions
.entrySet()
.stream()
.filter(UserSessionPredicate.create(realmId))
.map(Mappers.userSessionEntity())
.forEach((Consumer<UserSessionEntity>) userSessionEntity -> {
userSessionsSize.incrementAndGet();
Future<SessionEntityWrapper<UserSessionEntity>> future = sessions.withFlags(org.infinispan.client.hotrod.Flag.SKIP_LISTENER_NOTIFICATION).removeAsync(userSessionEntity.getId());
futures.addTask(future);
if (clientSessions != null) {
userSessionEntity.getAuthenticatedClientSessions().forEach((clientUUID, clientSessionId) -> {
Future<SessionEntityWrapper<AuthenticatedClientSessionEntity>> f = clientSessions.withFlags(org.infinispan.client.hotrod.Flag.SKIP_LISTENER_NOTIFICATION).removeAsync(clientSessionId);
futures.addTask(f);
});
}
});
futures.waitForAllToFinish();
}
@Override
public void onRealmRemoved(RealmModel realm) {
// Send message to all DCs, as each DC might have different entries in their site cache
clusterEventsSenderTx.addEvent(
RealmRemovedSessionEvent.createEvent(RealmRemovedSessionEvent.class, InfinispanUserSessionProviderFactory.REALM_REMOVED_SESSION_EVENT, session, realm.getId(), true),
ClusterProvider.DCNotify.ALL_DCS);
RealmRemovedSessionEvent.createEvent(RealmRemovedSessionEvent.class, InfinispanUserSessionProviderFactory.REALM_REMOVED_SESSION_EVENT, session, realm.getId())
);
UserSessionPersisterProvider sessionsPersister = session.getProvider(UserSessionPersisterProvider.class);
if (sessionsPersister != null) {
@ -765,8 +706,7 @@ public class PersistentUserSessionProvider implements UserSessionProvider, Sessi
Map<String, SessionEntityWrapper<UserSessionEntity>> sessionsById =
Stream.of(wrappedUserSessionEntity).collect(Collectors.toMap(sessionEntityWrapper -> sessionEntityWrapper.getEntity().getId(), Function.identity()));
// Directly put all entities to the infinispan cache
Cache<String, SessionEntityWrapper<UserSessionEntity>> cache = CacheDecorators.skipCacheLoadersIfRemoteStoreIsEnabled(getCache(offline));
Cache<String, SessionEntityWrapper<UserSessionEntity>> cache = getCache(offline);
sessionsById = importSessionsWithExpiration(sessionsById, cache,
offline ? SessionTimeouts::getOfflineSessionLifespanMs : SessionTimeouts::getUserSessionLifespanMs,
@ -776,38 +716,13 @@ public class PersistentUserSessionProvider implements UserSessionProvider, Sessi
return null;
}
// put all entities to the remoteCache (if exists)
RemoteCache remoteCache = InfinispanUtil.getRemoteCache(cache);
if (remoteCache != null) {
Map<String, SessionEntityWrapper<UserSessionEntity>> sessionsByIdForTransport = Stream.of(wrappedUserSessionEntity)
.map(SessionEntityWrapper::forTransport)
.collect(Collectors.toMap(sessionEntityWrapper -> sessionEntityWrapper.getEntity().getId(), Function.identity()));
importSessionsWithExpiration(sessionsByIdForTransport, remoteCache,
offline ? SessionTimeouts::getOfflineSessionLifespanMs : SessionTimeouts::getUserSessionLifespanMs,
offline ? SessionTimeouts::getOfflineSessionMaxIdleMs : SessionTimeouts::getUserSessionMaxIdleMs);
}
// Import client sessions
Cache<UUID, SessionEntityWrapper<AuthenticatedClientSessionEntity>> clientSessCache =
CacheDecorators.skipCacheLoadersIfRemoteStoreIsEnabled(offline ? offlineClientSessionCache : clientSessionCache);
Cache<UUID, SessionEntityWrapper<AuthenticatedClientSessionEntity>> clientSessCache = getClientSessionCache(offline);
importSessionsWithExpiration(clientSessionsById, clientSessCache,
offline ? SessionTimeouts::getOfflineClientSessionLifespanMs : SessionTimeouts::getClientSessionLifespanMs,
offline ? SessionTimeouts::getOfflineClientSessionMaxIdleMs : SessionTimeouts::getClientSessionMaxIdleMs);
// put all entities to the remoteCache (if exists)
RemoteCache remoteCacheClientSessions = InfinispanUtil.getRemoteCache(clientSessCache);
if (remoteCacheClientSessions != null) {
Map<UUID, SessionEntityWrapper<AuthenticatedClientSessionEntity>> sessionsByIdForTransport = clientSessionsById.values().stream()
.map(SessionEntityWrapper::forTransport)
.collect(Collectors.toMap(sessionEntityWrapper -> sessionEntityWrapper.getEntity().getId(), Function.identity()));
importSessionsWithExpiration(sessionsByIdForTransport, remoteCacheClientSessions,
offline ? SessionTimeouts::getOfflineClientSessionLifespanMs : SessionTimeouts::getClientSessionLifespanMs,
offline ? SessionTimeouts::getOfflineClientSessionMaxIdleMs : SessionTimeouts::getClientSessionMaxIdleMs);
}
return sessionsById.entrySet().stream().findFirst().map(Map.Entry::getValue).orElse(null);
}
@ -902,7 +817,7 @@ public class PersistentUserSessionProvider implements UserSessionProvider, Sessi
SessionUpdateTask<UserSessionEntity> registerClientSessionTask = new ClientSessionPersistentChangelogBasedTransaction.RegisterClientSessionTask(clientSession.getClient().getId(), clientSessionId, true);
sessionTx.addTask(sessionToImportInto.getId(), registerClientSessionTask);
return new AuthenticatedClientSessionAdapter(session, this, entity, clientSession.getClient(), sessionToImportInto, clientSessionTx, true);
return new AuthenticatedClientSessionAdapter(session, entity, clientSession.getClient(), sessionToImportInto, clientSessionTx, true);
}
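For readers following the simplified code path: removeLocalUserSessions above now relies only on CacheDecorators.localCache(...) rather than the removed remote-store-aware helpers. A minimal sketch of such a local-only decorator, assuming nothing beyond the standard embedded Infinispan API (the real Keycloak implementation may differ), is:

import org.infinispan.Cache;
import org.infinispan.context.Flag;

// Minimal sketch, assuming only the standard embedded Infinispan API; the real
// CacheDecorators.localCache(...) in Keycloak may be implemented differently.
final class LocalCacheSketch {

    private LocalCacheSketch() {
    }

    // Operations on the returned view stay on the local node and are not replicated
    // to other cluster members, which is what the per-node realm cleanup above needs.
    static <K, V> Cache<K, V> localCache(Cache<K, V> cache) {
        return cache.getAdvancedCache().withFlags(Flag.CACHE_MODE_LOCAL);
    }
}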

View File

@ -17,13 +17,8 @@
package org.keycloak.models.sessions.infinispan;
import org.keycloak.models.sessions.infinispan.changes.sessions.CrossDCLastSessionRefreshStore;
import org.keycloak.models.sessions.infinispan.changes.sessions.PersisterLastSessionRefreshStore;
public interface SessionRefreshStore {
CrossDCLastSessionRefreshStore getLastSessionRefreshStore();
CrossDCLastSessionRefreshStore getOfflineLastSessionRefreshStore();
PersisterLastSessionRefreshStore getPersisterLastSessionRefreshStore();
}

View File

@ -26,11 +26,8 @@ import org.keycloak.models.UserModel;
import org.keycloak.models.UserSessionModel;
import org.keycloak.models.UserSessionProvider;
import org.keycloak.models.sessions.infinispan.changes.SessionsChangelogBasedTransaction;
import org.keycloak.models.sessions.infinispan.changes.sessions.CrossDCLastSessionRefreshChecker;
import org.keycloak.models.sessions.infinispan.changes.SessionEntityWrapper;
import org.keycloak.models.sessions.infinispan.changes.Tasks;
import org.keycloak.models.sessions.infinispan.changes.UserSessionUpdateTask;
import org.keycloak.models.sessions.infinispan.changes.sessions.CrossDCLastSessionRefreshListener;
import org.keycloak.models.sessions.infinispan.entities.AuthenticatedClientSessionEntity;
import org.keycloak.models.sessions.infinispan.entities.AuthenticatedClientSessionStore;
import org.keycloak.models.sessions.infinispan.entities.UserSessionEntity;
@ -234,10 +231,7 @@ public class UserSessionAdapter<T extends SessionRefreshStore & UserSessionProvi
if (!MultiSiteUtils.isPersistentSessionsEnabled() && offline) {
// Received the message from the other DC that we should update the lastSessionRefresh in local cluster. Don't update DB in that case.
// The other DC already did.
Boolean ignoreRemoteCacheUpdate = (Boolean) session.getAttribute(CrossDCLastSessionRefreshListener.IGNORE_REMOTE_CACHE_UPDATE);
if (ignoreRemoteCacheUpdate == null || !ignoreRemoteCacheUpdate) {
provider.getPersisterLastSessionRefreshStore().putLastSessionRefresh(session, entity.getId(), realm.getId(), lastSessionRefresh);
}
provider.getPersisterLastSessionRefreshStore().putLastSessionRefresh(session, entity.getId(), realm.getId(), lastSessionRefresh);
}
UserSessionUpdateTask task = new UserSessionUpdateTask() {
@ -250,12 +244,6 @@ public class UserSessionAdapter<T extends SessionRefreshStore & UserSessionProvi
entity.setLastSessionRefresh(lastSessionRefresh);
}
@Override
public CrossDCMessageStatus getCrossDCMessageStatus(SessionEntityWrapper<UserSessionEntity> sessionWrapper) {
return new CrossDCLastSessionRefreshChecker(provider.getLastSessionRefreshStore(), provider.getOfflineLastSessionRefreshStore())
.shouldSaveUserSessionToRemoteCache(UserSessionAdapter.this.session, UserSessionAdapter.this.realm, sessionWrapper, offline, lastSessionRefresh);
}
@Override
public boolean isOffline() {
return offline;

View File

@ -31,7 +31,6 @@ import org.keycloak.models.sessions.infinispan.UserSessionAdapter;
import org.keycloak.models.sessions.infinispan.entities.AuthenticatedClientSessionEntity;
import org.keycloak.models.sessions.infinispan.entities.AuthenticatedClientSessionStore;
import org.keycloak.models.sessions.infinispan.entities.UserSessionEntity;
import org.keycloak.models.sessions.infinispan.remotestore.RemoteCacheInvoker;
import org.keycloak.models.sessions.infinispan.util.SessionTimeouts;
import java.util.UUID;
@ -48,7 +47,6 @@ public class ClientSessionPersistentChangelogBasedTransaction extends Persistent
public ClientSessionPersistentChangelogBasedTransaction(KeycloakSession session,
Cache<UUID, SessionEntityWrapper<AuthenticatedClientSessionEntity>> cache,
Cache<UUID, SessionEntityWrapper<AuthenticatedClientSessionEntity>> offlineCache,
RemoteCacheInvoker remoteCacheInvoker,
SessionFunction<AuthenticatedClientSessionEntity> lifespanMsLoader,
SessionFunction<AuthenticatedClientSessionEntity> maxIdleTimeMsLoader,
SessionFunction<AuthenticatedClientSessionEntity> offlineLifespanMsLoader,
@ -57,7 +55,7 @@ public class ClientSessionPersistentChangelogBasedTransaction extends Persistent
ArrayBlockingQueue<PersistentUpdate> batchingQueue,
SerializeExecutionsByKey<UUID> serializerOnline,
SerializeExecutionsByKey<UUID> serializerOffline) {
super(session, CLIENT_SESSION_CACHE_NAME, cache, offlineCache, remoteCacheInvoker, lifespanMsLoader, maxIdleTimeMsLoader, offlineLifespanMsLoader, offlineMaxIdleTimeMsLoader, batchingQueue, serializerOnline, serializerOffline);
super(session, CLIENT_SESSION_CACHE_NAME, cache, offlineCache, lifespanMsLoader, maxIdleTimeMsLoader, offlineLifespanMsLoader, offlineMaxIdleTimeMsLoader, batchingQueue, serializerOnline, serializerOffline);
this.userSessionTx = userSessionTx;
}
@ -205,11 +203,6 @@ public class ClientSessionPersistentChangelogBasedTransaction extends Persistent
return CacheOperation.REPLACE;
}
@Override
public CrossDCMessageStatus getCrossDCMessageStatus(SessionEntityWrapper<UserSessionEntity> sessionWrapper) {
return CrossDCMessageStatus.SYNC;
}
@Override
public boolean isOffline() {
return offline;

View File

@ -29,9 +29,4 @@ public abstract class ClientSessionUpdateTask implements PersistentSessionUpdate
return CacheOperation.REPLACE;
}
@Override
public CrossDCMessageStatus getCrossDCMessageStatus(SessionEntityWrapper<AuthenticatedClientSessionEntity> sessionWrapper) {
return CrossDCMessageStatus.SYNC;
}
}

View File

@ -18,7 +18,6 @@
package org.keycloak.models.sessions.infinispan.changes;
import org.infinispan.Cache;
import org.infinispan.context.Flag;
import org.jboss.logging.Logger;
import org.keycloak.connections.infinispan.InfinispanUtil;
import org.keycloak.models.sessions.infinispan.CacheDecorators;
@ -50,19 +49,16 @@ public class EmbeddedCachesChangesPerformer<K, V extends SessionEntity> implemen
switch (operation) {
case REMOVE:
// Just remove it
CacheDecorators.skipCacheStoreIfRemoteCacheIsEnabled(cache)
.withFlags(Flag.IGNORE_RETURN_VALUES)
.remove(key);
CacheDecorators.ignoreReturnValues(cache).remove(key);
break;
case ADD:
CacheDecorators.skipCacheStoreIfRemoteCacheIsEnabled(cache)
.withFlags(Flag.IGNORE_RETURN_VALUES)
CacheDecorators.ignoreReturnValues(cache)
.put(key, sessionWrapper, task.getLifespanMs(), TimeUnit.MILLISECONDS, task.getMaxIdleTimeMs(), TimeUnit.MILLISECONDS);
LOG.tracef("Added entity '%s' to the cache '%s' . Lifespan: %d ms, MaxIdle: %d ms", key, cache.getName(), task.getLifespanMs(), task.getMaxIdleTimeMs());
break;
case ADD_IF_ABSENT:
SessionEntityWrapper<V> existing = CacheDecorators.skipCacheStoreIfRemoteCacheIsEnabled(cache).putIfAbsent(key, sessionWrapper, task.getLifespanMs(), TimeUnit.MILLISECONDS, task.getMaxIdleTimeMs(), TimeUnit.MILLISECONDS);
SessionEntityWrapper<V> existing = cache.putIfAbsent(key, sessionWrapper, task.getLifespanMs(), TimeUnit.MILLISECONDS, task.getMaxIdleTimeMs(), TimeUnit.MILLISECONDS);
if (existing != null) {
LOG.debugf("Existing entity in cache for key: %s . Will update it", key);
@ -89,10 +85,9 @@ public class EmbeddedCachesChangesPerformer<K, V extends SessionEntity> implemen
SessionEntityWrapper<V> returnValue = null;
int iteration = 0;
V session = oldVersion.getEntity();
var writeCache = CacheDecorators.skipCacheStoreIfRemoteCacheIsEnabled(cache);
while (iteration++ < InfinispanUtil.MAXIMUM_REPLACE_RETRIES) {
SessionEntityWrapper<V> newVersionEntity = generateNewVersionAndWrapEntity(session, oldVersion.getLocalMetadata());
returnValue = writeCache.computeIfPresent(key, new ReplaceFunction<>(oldVersion.getVersion(), newVersionEntity), lifespanMs, TimeUnit.MILLISECONDS, maxIdleTimeMs, TimeUnit.MILLISECONDS);
returnValue = cache.computeIfPresent(key, new ReplaceFunction<>(oldVersion.getVersion(), newVersionEntity), lifespanMs, TimeUnit.MILLISECONDS, maxIdleTimeMs, TimeUnit.MILLISECONDS);
if (returnValue == null) {
LOG.debugf("Entity %s not found. Maybe removed in the meantime. Replace task will be ignored", key);

View File

@ -19,13 +19,10 @@ package org.keycloak.models.sessions.infinispan.changes;
import java.util.HashMap;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.TimeUnit;
import org.infinispan.Cache;
import org.infinispan.context.Flag;
import org.jboss.logging.Logger;
import org.keycloak.common.Profile;
import org.keycloak.models.AbstractKeycloakTransaction;
import org.keycloak.models.KeycloakSession;
import org.keycloak.models.RealmModel;
@ -33,14 +30,8 @@ import org.keycloak.models.UserSessionModel;
import org.keycloak.models.sessions.infinispan.CacheDecorators;
import org.keycloak.models.sessions.infinispan.SessionFunction;
import org.keycloak.models.sessions.infinispan.entities.SessionEntity;
import org.keycloak.models.sessions.infinispan.remotestore.RemoteCacheInvoker;
import org.keycloak.connections.infinispan.InfinispanUtil;
import static org.keycloak.connections.infinispan.InfinispanConnectionProvider.CLIENT_SESSION_CACHE_NAME;
import static org.keycloak.connections.infinispan.InfinispanConnectionProvider.OFFLINE_CLIENT_SESSION_CACHE_NAME;
import static org.keycloak.connections.infinispan.InfinispanConnectionProvider.OFFLINE_USER_SESSION_CACHE_NAME;
import static org.keycloak.connections.infinispan.InfinispanConnectionProvider.USER_SESSION_CACHE_NAME;
/**
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
*/
@ -49,9 +40,7 @@ public class InfinispanChangelogBasedTransaction<K, V extends SessionEntity> ext
public static final Logger logger = Logger.getLogger(InfinispanChangelogBasedTransaction.class);
protected final KeycloakSession kcSession;
private final String cacheName;
protected final Cache<K, SessionEntityWrapper<V>> cache;
private final RemoteCacheInvoker remoteCacheInvoker;
protected final Map<K, SessionUpdatesList<V>> updates = new HashMap<>();
@ -59,12 +48,10 @@ public class InfinispanChangelogBasedTransaction<K, V extends SessionEntity> ext
protected final SessionFunction<V> maxIdleTimeMsLoader;
private final SerializeExecutionsByKey<K> serializer;
public InfinispanChangelogBasedTransaction(KeycloakSession kcSession, Cache<K, SessionEntityWrapper<V>> cache, RemoteCacheInvoker remoteCacheInvoker,
public InfinispanChangelogBasedTransaction(KeycloakSession kcSession, Cache<K, SessionEntityWrapper<V>> cache,
SessionFunction<V> lifespanMsLoader, SessionFunction<V> maxIdleTimeMsLoader, SerializeExecutionsByKey<K> serializer) {
this.kcSession = kcSession;
this.cacheName = cache.getName();
this.cache = cache;
this.remoteCacheInvoker = remoteCacheInvoker;
this.lifespanMsLoader = lifespanMsLoader;
this.maxIdleTimeMsLoader = maxIdleTimeMsLoader;
this.serializer = serializer;
@ -185,9 +172,6 @@ public class InfinispanChangelogBasedTransaction<K, V extends SessionEntity> ext
if (merged != null) {
// Now run the operation in our cluster
runOperationInCluster(entry.getKey(), merged, sessionWrapper);
// Check if we need to send message to second DC
remoteCacheInvoker.runTask(kcSession, realm, cacheName, entry.getKey(), merged, sessionWrapper);
}
}
}
@ -202,19 +186,16 @@ public class InfinispanChangelogBasedTransaction<K, V extends SessionEntity> ext
switch (operation) {
case REMOVE:
// Just remove it
CacheDecorators.skipCacheStoreIfRemoteCacheIsEnabled(cache)
.withFlags(Flag.IGNORE_RETURN_VALUES)
.remove(key);
CacheDecorators.ignoreReturnValues(cache).remove(key);
break;
case ADD:
CacheDecorators.skipCacheStoreIfRemoteCacheIsEnabled(cache)
.withFlags(Flag.IGNORE_RETURN_VALUES)
CacheDecorators.ignoreReturnValues(cache)
.put(key, sessionWrapper, task.getLifespanMs(), TimeUnit.MILLISECONDS, task.getMaxIdleTimeMs(), TimeUnit.MILLISECONDS);
logger.tracef("Added entity '%s' to the cache '%s' . Lifespan: %d ms, MaxIdle: %d ms", key, cache.getName(), task.getLifespanMs(), task.getMaxIdleTimeMs());
break;
case ADD_IF_ABSENT:
SessionEntityWrapper<V> existing = CacheDecorators.skipCacheStoreIfRemoteCacheIsEnabled(cache).putIfAbsent(key, sessionWrapper, task.getLifespanMs(), TimeUnit.MILLISECONDS, task.getMaxIdleTimeMs(), TimeUnit.MILLISECONDS);
SessionEntityWrapper<V> existing = cache.putIfAbsent(key, sessionWrapper, task.getLifespanMs(), TimeUnit.MILLISECONDS, task.getMaxIdleTimeMs(), TimeUnit.MILLISECONDS);
if (existing != null) {
logger.debugf("Existing entity in cache for key: %s . Will update it", key);
@ -241,17 +222,16 @@ public class InfinispanChangelogBasedTransaction<K, V extends SessionEntity> ext
SessionEntityWrapper<V> returnValue = null;
int iteration = 0;
V session = oldVersion.getEntity();
var writeCache = CacheDecorators.skipCacheStoreIfRemoteCacheIsEnabled(cache);
while (iteration++ < InfinispanUtil.MAXIMUM_REPLACE_RETRIES) {
if (session.shouldEvaluateRemoval() && task.shouldRemove(session)) {
logger.debugf("Entity %s removed after evaluation", key);
writeCache.withFlags(Flag.IGNORE_RETURN_VALUES).remove(key);
CacheDecorators.ignoreReturnValues(cache).remove(key);
return;
}
SessionEntityWrapper<V> newVersionEntity = generateNewVersionAndWrapEntity(session, oldVersion.getLocalMetadata());
returnValue = writeCache.computeIfPresent(key, new ReplaceFunction<>(oldVersion.getVersion(), newVersionEntity), lifespanMs, TimeUnit.MILLISECONDS, maxIdleTimeMs, TimeUnit.MILLISECONDS);
returnValue = cache.computeIfPresent(key, new ReplaceFunction<>(oldVersion.getVersion(), newVersionEntity), lifespanMs, TimeUnit.MILLISECONDS, maxIdleTimeMs, TimeUnit.MILLISECONDS);
if (returnValue == null) {
logger.debugf("Entity %s not found. Maybe removed in the meantime. Replace task will be ignored", key);

View File

@ -29,8 +29,4 @@ public abstract class LoginFailuresUpdateTask implements SessionUpdateTask<Login
return CacheOperation.REPLACE;
}
@Override
public CrossDCMessageStatus getCrossDCMessageStatus(SessionEntityWrapper<LoginFailureEntity> sessionWrapper) {
return CrossDCMessageStatus.SYNC;
}
}

View File

@ -33,13 +33,11 @@ public class MergedUpdate<S extends SessionEntity> implements SessionUpdateTask<
private final List<SessionUpdateTask<S>> childUpdates = new LinkedList<>();
private CacheOperation operation;
private CrossDCMessageStatus crossDCMessageStatus;
private final long lifespanMs;
private final long maxIdleTimeMs;
private MergedUpdate(CacheOperation operation, CrossDCMessageStatus crossDCMessageStatus, long lifespanMs, long maxIdleTimeMs) {
private MergedUpdate(CacheOperation operation, long lifespanMs, long maxIdleTimeMs) {
this.operation = operation;
this.crossDCMessageStatus = crossDCMessageStatus;
this.lifespanMs = lifespanMs;
this.maxIdleTimeMs = maxIdleTimeMs;
}
@ -66,11 +64,6 @@ public class MergedUpdate<S extends SessionEntity> implements SessionUpdateTask<
return operation;
}
@Override
public CrossDCMessageStatus getCrossDCMessageStatus(SessionEntityWrapper<S> sessionWrapper) {
return crossDCMessageStatus;
}
public long getLifespanMs() {
return lifespanMs;
}
@ -96,25 +89,16 @@ public class MergedUpdate<S extends SessionEntity> implements SessionUpdateTask<
logger.tracef("Entry '%s' is expired. Will remove it from the cache", sessionWrapper);
}
result = new MergedUpdate<>(operation, child.getCrossDCMessageStatus(sessionWrapper), lifespanMs, maxIdleTimeMs);
result = new MergedUpdate<>(operation, lifespanMs, maxIdleTimeMs);
result.childUpdates.add(child);
} else {
// Merge the operations.
result.operation = result.getOperation().merge(child.getOperation(), session);
// Check if we need to send message to other DCs and how critical it is
CrossDCMessageStatus currentDCStatus = result.getCrossDCMessageStatus(sessionWrapper);
// Optimization. If we already have SYNC, we don't need to retrieve childDCStatus
if (currentDCStatus != CrossDCMessageStatus.SYNC) {
CrossDCMessageStatus childDCStatus = child.getCrossDCMessageStatus(sessionWrapper);
result.crossDCMessageStatus = currentDCStatus.merge(childDCStatus);
}
// REMOVE is special case as other operations are not needed then.
if (result.operation == CacheOperation.REMOVE) {
result = new MergedUpdate<>(result.operation, result.crossDCMessageStatus, lifespanMs, maxIdleTimeMs);
result = new MergedUpdate<>(result.operation, lifespanMs, maxIdleTimeMs);
result.childUpdates.add(child);
return result;
}

View File

@ -25,7 +25,6 @@ import org.keycloak.models.RealmModel;
import org.keycloak.models.UserSessionModel;
import org.keycloak.models.sessions.infinispan.SessionFunction;
import org.keycloak.models.sessions.infinispan.entities.SessionEntity;
import org.keycloak.models.sessions.infinispan.remotestore.RemoteCacheInvoker;
import org.keycloak.models.utils.KeycloakModelUtils;
import java.util.HashMap;
@ -44,7 +43,6 @@ abstract public class PersistentSessionsChangelogBasedTransaction<K, V extends S
private final String cacheName;
private final Cache<K, SessionEntityWrapper<V>> cache;
private final Cache<K, SessionEntityWrapper<V>> offlineCache;
private final RemoteCacheInvoker remoteCacheInvoker;
private final SessionFunction<V> lifespanMsLoader;
private final SessionFunction<V> maxIdleTimeMsLoader;
private final SessionFunction<V> offlineLifespanMsLoader;
@ -57,7 +55,6 @@ abstract public class PersistentSessionsChangelogBasedTransaction<K, V extends S
String cacheName,
Cache<K, SessionEntityWrapper<V>> cache,
Cache<K, SessionEntityWrapper<V>> offlineCache,
RemoteCacheInvoker remoteCacheInvoker,
SessionFunction<V> lifespanMsLoader,
SessionFunction<V> maxIdleTimeMsLoader,
SessionFunction<V> offlineLifespanMsLoader,
@ -69,7 +66,6 @@ abstract public class PersistentSessionsChangelogBasedTransaction<K, V extends S
this.cacheName = cacheName;
this.cache = cache;
this.offlineCache = offlineCache;
this.remoteCacheInvoker = remoteCacheInvoker;
this.lifespanMsLoader = lifespanMsLoader;
this.maxIdleTimeMsLoader = maxIdleTimeMsLoader;
this.offlineLifespanMsLoader = offlineLifespanMsLoader;
@ -160,12 +156,6 @@ abstract public class PersistentSessionsChangelogBasedTransaction<K, V extends S
return !entity.isOffline();
}
});
changesPerformers.add(new RemoteCachesChangesPerformer<>(kcSession, cache, remoteCacheInvoker) {
@Override
public boolean shouldConsumeChange(V entity) {
return !entity.isOffline();
}
});
}
if (offlineCache != null) {
@ -175,12 +165,6 @@ abstract public class PersistentSessionsChangelogBasedTransaction<K, V extends S
return entity.isOffline();
}
});
changesPerformers.add(new RemoteCachesChangesPerformer<>(kcSession, offlineCache, remoteCacheInvoker) {
@Override
public boolean shouldConsumeChange(V entity) {
return entity.isOffline();
}
});
}
return changesPerformers;

View File

@ -1,53 +0,0 @@
/*
* Copyright 2024 Red Hat, Inc. and/or its affiliates
* and other contributors as indicated by the @author tags.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.keycloak.models.sessions.infinispan.changes;
import org.infinispan.Cache;
import org.keycloak.models.KeycloakSession;
import org.keycloak.models.sessions.infinispan.entities.SessionEntity;
import org.keycloak.models.sessions.infinispan.remotestore.RemoteCacheInvoker;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
public class RemoteCachesChangesPerformer<K, V extends SessionEntity> implements SessionChangesPerformer<K, V> {
private final KeycloakSession session;
private final Cache<K, SessionEntityWrapper<V>> cache;
private final RemoteCacheInvoker remoteCacheInvoker;
private final List<Runnable> changes = new LinkedList<>();
public RemoteCachesChangesPerformer(KeycloakSession session, Cache<K, SessionEntityWrapper<V>> cache, RemoteCacheInvoker remoteCacheInvoker) {
this.session = session;
this.cache = cache;
this.remoteCacheInvoker = remoteCacheInvoker;
}
@Override
public void registerChange(Map.Entry<K, SessionUpdatesList<V>> entry, MergedUpdate<V> merged) {
SessionUpdatesList<V> updates = entry.getValue();
changes.add(() -> remoteCacheInvoker.runTask(session, updates.getRealm(), cache.getName(), entry.getKey(), merged, updates.getEntityWrapper()));
}
@Override
public void applyChanges() {
changes.forEach(Runnable::run);
}
}

View File

@ -29,8 +29,4 @@ public abstract class RootAuthenticationSessionUpdateTask implements SessionUpda
return CacheOperation.REPLACE;
}
@Override
public CrossDCMessageStatus getCrossDCMessageStatus(SessionEntityWrapper<RootAuthenticationSessionEntity> sessionWrapper) {
return CrossDCMessageStatus.SYNC;
}
}

View File

@ -32,8 +32,6 @@ public interface SessionUpdateTask<S extends SessionEntity> {
CacheOperation getOperation();
CrossDCMessageStatus getCrossDCMessageStatus(SessionEntityWrapper<S> sessionWrapper);
enum CacheOperation {
ADD,
@ -58,27 +56,4 @@ public interface SessionUpdateTask<S extends SessionEntity> {
return REPLACE;
}
}
enum CrossDCMessageStatus {
SYNC,
//ASYNC,
// QUEUE,
NOT_NEEDED;
CrossDCMessageStatus merge(CrossDCMessageStatus other) {
if (this == SYNC || other == SYNC) {
return SYNC;
}
/*if (this == ASYNC || other == ASYNC) {
return ASYNC;
}*/
return NOT_NEEDED;
}
}
}

View File

@ -17,7 +17,6 @@
package org.keycloak.models.sessions.infinispan.changes;
import org.keycloak.models.sessions.infinispan.changes.SessionUpdateTask.CacheOperation;
import org.keycloak.models.sessions.infinispan.changes.SessionUpdateTask.CrossDCMessageStatus;
import org.keycloak.models.sessions.infinispan.entities.SessionEntity;
/**
@ -36,10 +35,6 @@ public class Tasks {
return CacheOperation.ADD_IF_ABSENT;
}
@Override
public CrossDCMessageStatus getCrossDCMessageStatus(SessionEntityWrapper<SessionEntity> sessionWrapper) {
return CrossDCMessageStatus.SYNC;
}
};
private static final SessionUpdateTask<? extends SessionEntity> REMOVE_SYNC = new PersistentSessionUpdateTask<SessionEntity>() {
@ -52,11 +47,6 @@ public class Tasks {
return CacheOperation.REMOVE;
}
@Override
public CrossDCMessageStatus getCrossDCMessageStatus(SessionEntityWrapper<SessionEntity> sessionWrapper) {
return CrossDCMessageStatus.SYNC;
}
@Override
public boolean isOffline() {
return false;
@ -73,11 +63,6 @@ public class Tasks {
return CacheOperation.REMOVE;
}
@Override
public CrossDCMessageStatus getCrossDCMessageStatus(SessionEntityWrapper<SessionEntity> sessionWrapper) {
return CrossDCMessageStatus.SYNC;
}
@Override
public boolean isOffline() {
return true;
@ -85,8 +70,7 @@ public class Tasks {
};
/**
* Returns a typed task of type {@link CacheOperation#ADD_IF_ABSENT} that does no other update. This operation has DC message
* status {@link CrossDCMessageStatus#SYNC}.
* Returns a typed task of type {@link CacheOperation#ADD_IF_ABSENT} that does no other update.
* @param <S>
* @return
*/
@ -95,8 +79,7 @@ public class Tasks {
}
/**
* Returns a typed task of type {@link CacheOperation#REMOVE} that does no other update. This operation has DC message
* status {@link CrossDCMessageStatus#SYNC}.
* Returns a typed task of type {@link CacheOperation#REMOVE} that does no other update.
* @param <S>
* @return
*/
@ -105,8 +88,7 @@ public class Tasks {
}
/**
* Returns a typed task of type {@link CacheOperation#REMOVE} that does no other update. This operation has DC message
* status {@link CrossDCMessageStatus#SYNC}.
* Returns a typed task of type {@link CacheOperation#REMOVE} that does no other update.
*
* @param offline whether the operation should be performed on offline or non-offline session
* @param <S>

View File

@ -28,7 +28,6 @@ import org.keycloak.models.sessions.infinispan.PersistentUserSessionProvider;
import org.keycloak.models.sessions.infinispan.SessionFunction;
import org.keycloak.models.sessions.infinispan.entities.SessionEntity;
import org.keycloak.models.sessions.infinispan.entities.UserSessionEntity;
import org.keycloak.models.sessions.infinispan.remotestore.RemoteCacheInvoker;
import java.util.concurrent.ArrayBlockingQueue;
@ -41,7 +40,6 @@ public class UserSessionPersistentChangelogBasedTransaction extends PersistentSe
public UserSessionPersistentChangelogBasedTransaction(KeycloakSession session,
Cache<String, SessionEntityWrapper<UserSessionEntity>> cache,
Cache<String, SessionEntityWrapper<UserSessionEntity>> offlineCache,
RemoteCacheInvoker remoteCacheInvoker,
SessionFunction<UserSessionEntity> lifespanMsLoader,
SessionFunction<UserSessionEntity> maxIdleTimeMsLoader,
SessionFunction<UserSessionEntity> offlineLifespanMsLoader,
@ -49,7 +47,7 @@ public class UserSessionPersistentChangelogBasedTransaction extends PersistentSe
ArrayBlockingQueue<PersistentUpdate> batchingQueue,
SerializeExecutionsByKey<String> serializerOnline,
SerializeExecutionsByKey<String> serializerOffline) {
super(session, USER_SESSION_CACHE_NAME, cache, offlineCache, remoteCacheInvoker, lifespanMsLoader, maxIdleTimeMsLoader, offlineLifespanMsLoader, offlineMaxIdleTimeMsLoader, batchingQueue, serializerOnline, serializerOffline);
super(session, USER_SESSION_CACHE_NAME, cache, offlineCache, lifespanMsLoader, maxIdleTimeMsLoader, offlineLifespanMsLoader, offlineMaxIdleTimeMsLoader, batchingQueue, serializerOnline, serializerOffline);
}
public SessionEntityWrapper<UserSessionEntity> get(RealmModel realm, String key, UserSessionModel userSession, boolean offline) {

View File

@ -29,8 +29,4 @@ public abstract class UserSessionUpdateTask implements PersistentSessionUpdateTa
return CacheOperation.REPLACE;
}
@Override
public CrossDCMessageStatus getCrossDCMessageStatus(SessionEntityWrapper<UserSessionEntity> sessionWrapper) {
return CrossDCMessageStatus.SYNC;
}
}

View File

@ -1,134 +0,0 @@
/*
* Copyright 2017 Red Hat, Inc. and/or its affiliates
* and other contributors as indicated by the @author tags.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.keycloak.models.sessions.infinispan.changes.sessions;
import java.util.UUID;
import org.jboss.logging.Logger;
import org.keycloak.models.KeycloakSession;
import org.keycloak.models.RealmModel;
import org.keycloak.models.UserSessionModel;
import org.keycloak.models.sessions.infinispan.changes.SessionEntityWrapper;
import org.keycloak.models.sessions.infinispan.changes.SessionUpdateTask;
import org.keycloak.models.sessions.infinispan.entities.AuthenticatedClientSessionEntity;
import org.keycloak.models.sessions.infinispan.entities.UserSessionEntity;
/**
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
*/
public class CrossDCLastSessionRefreshChecker {
public static final Logger logger = Logger.getLogger(CrossDCLastSessionRefreshChecker.class);
private final CrossDCLastSessionRefreshStore store;
private final CrossDCLastSessionRefreshStore offlineStore;
public CrossDCLastSessionRefreshChecker(CrossDCLastSessionRefreshStore store, CrossDCLastSessionRefreshStore offlineStore) {
this.store = store;
this.offlineStore = offlineStore;
}
public SessionUpdateTask.CrossDCMessageStatus shouldSaveUserSessionToRemoteCache(
KeycloakSession kcSession, RealmModel realm, SessionEntityWrapper<UserSessionEntity> sessionWrapper, boolean offline, int newLastSessionRefresh) {
SessionUpdateTask.CrossDCMessageStatus baseChecks = baseChecks(kcSession, realm ,offline);
if (baseChecks != null) {
return baseChecks;
}
String userSessionId = sessionWrapper.getEntity().getId();
if (offline) {
Integer lsrr = sessionWrapper.getLocalMetadataNoteInt(UserSessionEntity.LAST_SESSION_REFRESH_REMOTE);
if (lsrr == null) {
lsrr = sessionWrapper.getEntity().getStarted();
}
if (lsrr + (realm.getOfflineSessionIdleTimeout() / 2) <= newLastSessionRefresh) {
logger.debugf("We are going to write remotely userSession %s. Remote last session refresh: %d, New last session refresh: %d",
userSessionId, lsrr, newLastSessionRefresh);
return SessionUpdateTask.CrossDCMessageStatus.SYNC;
}
}
if (logger.isDebugEnabled()) {
logger.debugf("Skip writing last session refresh to the remoteCache. Session %s newLastSessionRefresh %d", userSessionId, newLastSessionRefresh);
}
CrossDCLastSessionRefreshStore storeToUse = offline ? offlineStore : store;
storeToUse.putLastSessionRefresh(kcSession, userSessionId, realm.getId(), newLastSessionRefresh);
return SessionUpdateTask.CrossDCMessageStatus.NOT_NEEDED;
}
public SessionUpdateTask.CrossDCMessageStatus shouldSaveClientSessionToRemoteCache(
KeycloakSession kcSession, RealmModel realm, SessionEntityWrapper<AuthenticatedClientSessionEntity> sessionWrapper, UserSessionModel userSession, boolean offline, int newTimestamp) {
SessionUpdateTask.CrossDCMessageStatus baseChecks = baseChecks(kcSession, realm ,offline);
if (baseChecks != null) {
return baseChecks;
}
UUID clientSessionId = sessionWrapper.getEntity().getId();
if (offline) {
Integer lsrr = sessionWrapper.getLocalMetadataNoteInt(AuthenticatedClientSessionEntity.LAST_TIMESTAMP_REMOTE);
if (lsrr == null) {
lsrr = userSession.getStarted();
}
if (lsrr + (realm.getOfflineSessionIdleTimeout() / 2) <= newTimestamp) {
logger.debugf("We are going to write remotely for clientSession %s. Remote timestamp: %d, New timestamp: %d",
clientSessionId, lsrr, newTimestamp);
return SessionUpdateTask.CrossDCMessageStatus.SYNC;
}
}
if (logger.isDebugEnabled()) {
logger.debugf("Skip writing timestamp to the remoteCache. ClientSession %s timestamp %d", clientSessionId, newTimestamp);
}
return SessionUpdateTask.CrossDCMessageStatus.NOT_NEEDED;
}
private SessionUpdateTask.CrossDCMessageStatus baseChecks(KeycloakSession kcSession, RealmModel realm, boolean offline) {
// revokeRefreshToken always writes everything to remoteCache immediately
if (realm.isRevokeRefreshToken()) {
return SessionUpdateTask.CrossDCMessageStatus.SYNC;
}
// We're likely not in cross-dc environment. Doesn't matter what we return
CrossDCLastSessionRefreshStore storeToUse = offline ? offlineStore : store;
if (storeToUse == null) {
return SessionUpdateTask.CrossDCMessageStatus.SYNC;
}
// Received the message from the other DC that we should update the lastSessionRefresh in local cluster
Boolean ignoreRemoteCacheUpdate = (Boolean) kcSession.getAttribute(CrossDCLastSessionRefreshListener.IGNORE_REMOTE_CACHE_UPDATE);
if (ignoreRemoteCacheUpdate != null && ignoreRemoteCacheUpdate) {
return SessionUpdateTask.CrossDCMessageStatus.NOT_NEEDED;
}
return null;
}
}

View File

@ -1,101 +0,0 @@
/*
* Copyright 2017 Red Hat, Inc. and/or its affiliates
* and other contributors as indicated by the @author tags.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.keycloak.models.sessions.infinispan.changes.sessions;
import java.util.Map;
import org.infinispan.Cache;
import org.jboss.logging.Logger;
import org.keycloak.cluster.ClusterEvent;
import org.keycloak.cluster.ClusterListener;
import org.keycloak.connections.infinispan.TopologyInfo;
import org.keycloak.models.KeycloakSession;
import org.keycloak.models.KeycloakSessionFactory;
import org.keycloak.models.RealmModel;
import org.keycloak.models.UserSessionModel;
import org.keycloak.models.sessions.infinispan.changes.SessionEntityWrapper;
import org.keycloak.models.sessions.infinispan.entities.UserSessionEntity;
import org.keycloak.connections.infinispan.InfinispanUtil;
import org.keycloak.models.utils.KeycloakModelUtils;
/**
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
*/
public class CrossDCLastSessionRefreshListener implements ClusterListener {
public static final Logger logger = Logger.getLogger(CrossDCLastSessionRefreshListener.class);
public static final String IGNORE_REMOTE_CACHE_UPDATE = "IGNORE_REMOTE_CACHE_UPDATE";
private final boolean offline;
private final KeycloakSessionFactory sessionFactory;
private final Cache<String, SessionEntityWrapper<UserSessionEntity>> cache;
private final TopologyInfo topologyInfo;
public CrossDCLastSessionRefreshListener(KeycloakSession session, Cache<String, SessionEntityWrapper<UserSessionEntity>> cache, boolean offline) {
this.sessionFactory = session.getKeycloakSessionFactory();
this.cache = cache;
this.offline = offline;
this.topologyInfo = InfinispanUtil.getTopologyInfo(session);
}
@Override
public void eventReceived(ClusterEvent event) {
Map<String, SessionData> lastSessionRefreshes = ((LastSessionRefreshEvent) event).getLastSessionRefreshes();
if (logger.isDebugEnabled()) {
logger.debugf("Received refreshes. Offline %b, refreshes: %s", offline, lastSessionRefreshes);
}
lastSessionRefreshes.entrySet().stream().forEach((entry) -> {
String sessionId = entry.getKey();
String realmId = entry.getValue().realmId();
int lastSessionRefresh = entry.getValue().lastSessionRefresh();
// All nodes will receive the message. So ensure that each node updates just lastSessionRefreshes owned by him.
if (shouldUpdateLocalCache(sessionId)) {
KeycloakModelUtils.runJobInTransaction(sessionFactory, (kcSession) -> {
RealmModel realm = kcSession.realms().getRealm(realmId);
UserSessionModel userSession = offline ? kcSession.sessions().getOfflineUserSession(realm, sessionId) : kcSession.sessions().getUserSession(realm, sessionId);
if (userSession == null) {
logger.debugf("User session '%s' not available on node '%s' offline '%b'", sessionId, topologyInfo.getMyNodeName(), offline);
} else {
// Update just if lastSessionRefresh from event is bigger than ours
if (lastSessionRefresh > userSession.getLastSessionRefresh()) {
// Ensure that remoteCache won't be updated due to this
kcSession.setAttribute(IGNORE_REMOTE_CACHE_UPDATE, true);
userSession.setLastSessionRefresh(lastSessionRefresh);
}
}
});
}
});
}
// For distributed caches, ensure that local modification is executed just on owner
protected boolean shouldUpdateLocalCache(String key) {
return topologyInfo.amIOwner(cache, key);
}
}

View File

@ -1,58 +0,0 @@
/*
* Copyright 2017 Red Hat, Inc. and/or its affiliates
* and other contributors as indicated by the @author tags.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.keycloak.models.sessions.infinispan.changes.sessions;
import java.util.Map;
import org.jboss.logging.Logger;
import org.keycloak.cluster.ClusterProvider;
import org.keycloak.models.KeycloakSession;
/**
* Cross-DC based CrossDCLastSessionRefreshStore
*
* Tracks the queue of lastSessionRefreshes, which were updated on this host. Those will be sent to the second DC in bulk, so second DC can update
* lastSessionRefreshes on it's side. Message is sent either periodically or if there are lots of stored lastSessionRefreshes.
*
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
*/
public class CrossDCLastSessionRefreshStore extends AbstractLastSessionRefreshStore {
protected static final Logger logger = Logger.getLogger(CrossDCLastSessionRefreshStore.class);
private final String eventKey;
protected CrossDCLastSessionRefreshStore(int maxIntervalBetweenMessagesSeconds, int maxCount, String eventKey) {
super(maxIntervalBetweenMessagesSeconds, maxCount);
this.eventKey = eventKey;
}
protected void sendMessage(KeycloakSession kcSession, Map<String, SessionData> refreshesToSend) {
LastSessionRefreshEvent event = new LastSessionRefreshEvent(refreshesToSend);
if (logger.isDebugEnabled()) {
logger.debugf("Sending lastSessionRefreshes for key '%s'. Refreshes: %s", eventKey, event.getLastSessionRefreshes().toString());
}
// Don't notify local DC about the lastSessionRefreshes. They were processed here already
ClusterProvider cluster = kcSession.getProvider(ClusterProvider.class);
cluster.notify(eventKey, event, true, ClusterProvider.DCNotify.ALL_BUT_LOCAL_DC);
}
}

View File

@ -1,63 +0,0 @@
/*
* Copyright 2017 Red Hat, Inc. and/or its affiliates
* and other contributors as indicated by the @author tags.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.keycloak.models.sessions.infinispan.changes.sessions;
import org.infinispan.Cache;
import org.keycloak.cluster.ClusterProvider;
import org.keycloak.models.KeycloakSession;
import org.keycloak.models.sessions.infinispan.changes.SessionEntityWrapper;
import org.keycloak.models.sessions.infinispan.entities.UserSessionEntity;
/**
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
*/
public class CrossDCLastSessionRefreshStoreFactory extends AbstractLastSessionRefreshStoreFactory {
// Name of periodic tasks to send events to the other DCs
public static final String LSR_PERIODIC_TASK_NAME = "lastSessionRefreshes";
public static final String LSR_OFFLINE_PERIODIC_TASK_NAME = "lastSessionRefreshes-offline";
public CrossDCLastSessionRefreshStore createAndInit(KeycloakSession kcSession, Cache<String, SessionEntityWrapper<UserSessionEntity>> cache, boolean offline) {
return createAndInit(kcSession, cache, DEFAULT_TIMER_INTERVAL_MS, DEFAULT_MAX_INTERVAL_BETWEEN_MESSAGES_SECONDS, DEFAULT_MAX_COUNT, offline);
}
public CrossDCLastSessionRefreshStore createAndInit(KeycloakSession kcSession, Cache<String, SessionEntityWrapper<UserSessionEntity>> cache,
long timerIntervalMs, int maxIntervalBetweenMessagesSeconds, int maxCount, boolean offline) {
String eventKey = offline ? LSR_OFFLINE_PERIODIC_TASK_NAME : LSR_PERIODIC_TASK_NAME;
CrossDCLastSessionRefreshStore store = createStoreInstance(maxIntervalBetweenMessagesSeconds, maxCount, eventKey);
// Register listener
ClusterProvider cluster = kcSession.getProvider(ClusterProvider.class);
cluster.registerListener(eventKey, new CrossDCLastSessionRefreshListener(kcSession, cache, offline));
// Setup periodic timer check
setupPeriodicTimer(kcSession, store, timerIntervalMs, eventKey);
return store;
}
protected CrossDCLastSessionRefreshStore createStoreInstance(int maxIntervalBetweenMessagesSeconds, int maxCount, String eventKey) {
return new CrossDCLastSessionRefreshStore(maxIntervalBetweenMessagesSeconds, maxCount, eventKey);
}
}

View File

@ -1,57 +0,0 @@
/*
* Copyright 2017 Red Hat, Inc. and/or its affiliates
* and other contributors as indicated by the @author tags.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.keycloak.models.sessions.infinispan.changes.sessions;
import org.infinispan.protostream.annotations.ProtoFactory;
import org.infinispan.protostream.annotations.ProtoField;
import org.infinispan.protostream.annotations.ProtoTypeId;
import org.keycloak.cluster.ClusterEvent;
import org.keycloak.marshalling.Marshalling;
import java.util.HashMap;
import java.util.Map;
/**
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
*/
@ProtoTypeId(Marshalling.LAST_SESSION_REFRESH_EVENT)
public class LastSessionRefreshEvent implements ClusterEvent {
private final Map<String, SessionData> lastSessionRefreshes;
@ProtoFactory
public LastSessionRefreshEvent(Map<String, SessionData> lastSessionRefreshes) {
this.lastSessionRefreshes = lastSessionRefreshes;
}
@ProtoField(value = 1, mapImplementation = HashMap.class)
public Map<String, SessionData> getLastSessionRefreshes() {
return lastSessionRefreshes;
}
@Override
public boolean equals(Object o) {
return o instanceof LastSessionRefreshEvent;
}
@Override
public int hashCode() {
return 1;
}
}

View File

@ -22,6 +22,7 @@ import org.keycloak.models.KeycloakSession;
/**
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
*/
// TODO: mhajas Do not delete; this is still used as an optimization for updating offline sessions in the database when persistent_user_sessions is disabled
public class PersisterLastSessionRefreshStoreFactory extends AbstractLastSessionRefreshStoreFactory {
// Name of periodic task to update DB with lastSessionRefreshes
@ -32,7 +33,7 @@ public class PersisterLastSessionRefreshStoreFactory extends AbstractLastSession
}
private PersisterLastSessionRefreshStore createAndInit(KeycloakSession kcSession,
public PersisterLastSessionRefreshStore createAndInit(KeycloakSession kcSession,
long timerIntervalMs, int maxIntervalBetweenMessagesSeconds, int maxCount, boolean offline) {
PersisterLastSessionRefreshStore store = createStoreInstance(maxIntervalBetweenMessagesSeconds, maxCount, offline);
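As the TODO above notes, the persister-based store still batches lastSessionRefresh updates before writing them to the database. A hypothetical, much-simplified model of that batching idea (names and types are illustrative, not the real AbstractLastSessionRefreshStore API):

import java.util.HashMap;
import java.util.Map;
import java.util.function.Consumer;

// Hypothetical model: lastSessionRefresh values are collected in memory and flushed in
// bulk, either when a maximum count is reached or when a periodic task fires
// (maxCount and maxIntervalBetweenMessagesSeconds in the factory above).
final class LastSessionRefreshBatchSketch {

    private final int maxCount;
    private final Consumer<Map<String, Integer>> flusher;
    private final Map<String, Integer> pending = new HashMap<>();

    LastSessionRefreshBatchSketch(int maxCount, Consumer<Map<String, Integer>> flusher) {
        this.maxCount = maxCount;
        this.flusher = flusher;
    }

    // Called on every refreshed session; only the newest timestamp per session is kept.
    synchronized void putLastSessionRefresh(String sessionId, int lastSessionRefresh) {
        pending.merge(sessionId, lastSessionRefresh, Math::max);
        if (pending.size() >= maxCount) {
            flush();
        }
    }

    // Also invoked by the periodic task registered by the factory.
    synchronized void flush() {
        if (pending.isEmpty()) {
            return;
        }
        flusher.accept(new HashMap<>(pending)); // e.g. one bulk update against the database
        pending.clear();
    }
}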

View File

@ -23,10 +23,9 @@ import org.keycloak.common.util.MultiSiteUtils;
import org.keycloak.models.sessions.infinispan.changes.SessionEntityWrapper;
/**
* Represents an entity containing data about a session, i.e. an object that is stored in infinispan cache and can be
* potentially shared across DCs. Due to conflict management in {@code RemoteCacheInvoker} and
* {@code InfinispanChangelogBasedTransaction} that use Infinispan's {@code replace()} method, overriding {@link #hashCode()}
* and {@link #equals(java.lang.Object)} is <b>mandatory</b> in descendants.
* Represents an entity containing data about a session, i.e. an object that is stored in infinispan cache.
* Due to conflict management in {@code InfinispanChangelogBasedTransaction} that uses Infinispan's {@code replace()}
* method, overriding {@link #hashCode()} and {@link #equals(java.lang.Object)} is <b>mandatory</b> in descendants.
*
* @author <a href="mailto:sthorger@redhat.com">Stian Thorgersen</a>
*/
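The updated javadoc keeps the rule that descendants must override hashCode() and equals(). An illustrative, non-Keycloak example of why that matters for Infinispan's conditional replace():

import java.util.Objects;
import org.infinispan.Cache;

// Illustrative only, not a Keycloak class: Cache.replace(key, expected, updated) succeeds
// only when the stored value equals(expected), so entity classes used this way need an
// explicit equals()/hashCode() instead of Object identity.
final class SessionEntitySketch {

    final String id;
    final int lastSessionRefresh;

    SessionEntitySketch(String id, int lastSessionRefresh) {
        this.id = id;
        this.lastSessionRefresh = lastSessionRefresh;
    }

    @Override
    public boolean equals(Object o) {
        return o instanceof SessionEntitySketch other
                && lastSessionRefresh == other.lastSessionRefresh
                && Objects.equals(id, other.id);
    }

    @Override
    public int hashCode() {
        return Objects.hash(id, lastSessionRefresh);
    }

    static boolean optimisticReplace(Cache<String, SessionEntitySketch> cache, String key,
                                     SessionEntitySketch expected, SessionEntitySketch updated) {
        // Returns false when another node already stored a non-equal value; callers
        // re-read the entry and retry, as the changelog transactions above do.
        return cache.replace(key, expected, updated);
    }
}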

View File

@ -20,11 +20,8 @@ package org.keycloak.models.sessions.infinispan.events;
import org.jboss.logging.Logger;
import org.keycloak.cluster.ClusterEvent;
import org.keycloak.cluster.ClusterListener;
import org.keycloak.cluster.ClusterProvider;
import org.keycloak.connections.infinispan.TopologyInfo;
import org.keycloak.models.KeycloakSession;
import org.keycloak.models.KeycloakSessionFactory;
import org.keycloak.connections.infinispan.InfinispanUtil;
import org.keycloak.models.utils.KeycloakModelUtils;
import org.keycloak.provider.Provider;
@ -51,34 +48,13 @@ public abstract class AbstractUserSessionClusterListener<SE extends SessionClust
T provider = session.getProvider(providerClazz);
SE sessionEvent = (SE) event;
boolean shouldResendEvent = shouldResendEvent(session, sessionEvent);
if (log.isDebugEnabled()) {
log.debugf("Received user session event '%s'. Should resend event: %b", sessionEvent.toString(), shouldResendEvent);
log.debugf("Received user session event '%s'.", sessionEvent.toString());
}
eventReceived(provider, sessionEvent);
if (shouldResendEvent) {
session.getProvider(ClusterProvider.class).notify(sessionEvent.getEventKey(), event, true, ClusterProvider.DCNotify.ALL_BUT_LOCAL_DC);
}
});
}
protected abstract void eventReceived(T provider, SE sessionEvent);
private boolean shouldResendEvent(KeycloakSession session, SessionClusterEvent event) {
if (!event.isResendingEvent()) {
return false;
}
// Just the initiator will re-send the event after receiving it
TopologyInfo topology = InfinispanUtil.getTopologyInfo(session);
String myNode = topology.getMyNodeName();
String mySite = topology.getMySiteName();
return (event.getNodeId() != null && event.getNodeId().equals(myNode) && event.getSiteId() != null && event.getSiteId().equals(mySite));
}
}

View File

@ -20,6 +20,7 @@ package org.keycloak.models.sessions.infinispan.events;
import java.util.Objects;
import org.infinispan.protostream.annotations.ProtoField;
import org.infinispan.protostream.annotations.ProtoReserved;
import org.keycloak.cluster.ClusterEvent;
import org.keycloak.connections.infinispan.InfinispanUtil;
import org.keycloak.connections.infinispan.TopologyInfo;
@ -28,19 +29,19 @@ import org.keycloak.models.KeycloakSession;
/**
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
*/
@ProtoReserved(numbers = {3}, names = {"resendingEvent"})
public abstract class SessionClusterEvent implements ClusterEvent {
private String realmId;
private String eventKey;
private boolean resendingEvent;
private String siteId;
private String nodeId;
public static <T extends SessionClusterEvent> T createEvent(Class<T> eventClass, String eventKey, KeycloakSession session, String realmId, boolean resendingEvent) {
public static <T extends SessionClusterEvent> T createEvent(Class<T> eventClass, String eventKey, KeycloakSession session, String realmId) {
try {
T event = eventClass.getDeclaredConstructor().newInstance();
event.setData(session, eventKey, realmId, resendingEvent);
event.setData(session, eventKey, realmId);
return event;
} catch (Exception e) {
throw new RuntimeException(e);
@ -48,10 +49,9 @@ public abstract class SessionClusterEvent implements ClusterEvent {
}
void setData(KeycloakSession session, String eventKey, String realmId, boolean resendingEvent) {
void setData(KeycloakSession session, String eventKey, String realmId) {
this.realmId = realmId;
this.eventKey = eventKey;
this.resendingEvent = resendingEvent;
TopologyInfo topology = InfinispanUtil.getTopologyInfo(session);
this.siteId = topology.getMySiteName();
this.nodeId = topology.getMyNodeName();
@ -76,15 +76,6 @@ public abstract class SessionClusterEvent implements ClusterEvent {
this.eventKey = eventKey;
}
@ProtoField(3)
public boolean isResendingEvent() {
return resendingEvent;
}
void setResendingEvent(boolean resendingEvent) {
this.resendingEvent = resendingEvent;
}
@ProtoField(4)
public String getSiteId() {
return siteId;

View File

@ -36,22 +36,21 @@ public class SessionEventsSenderTransaction extends AbstractKeycloakTransaction
private final KeycloakSession session;
private final Map<EventGroup, List<ClusterEvent>> sessionEvents = new HashMap<>();
private final Map<String, List<ClusterEvent>> sessionEvents = new HashMap<>();
public SessionEventsSenderTransaction(KeycloakSession session) {
this.session = session;
}
public void addEvent(SessionClusterEvent event, ClusterProvider.DCNotify dcNotify) {
var group = new EventGroup(event.getEventKey(), dcNotify);
sessionEvents.computeIfAbsent(group, eventGroup -> new ArrayList<>()).add(event);
public void addEvent(SessionClusterEvent event) {
sessionEvents.computeIfAbsent(event.getEventKey(), eventGroup -> new ArrayList<>()).add(event);
}
@Override
protected void commitImpl() {
var cluster = session.getProvider(ClusterProvider.class);
for (var entry : sessionEvents.entrySet()) {
cluster.notify(entry.getKey().eventKey(), entry.getValue(), false, entry.getKey().dcNotify());
cluster.notify(entry.getKey(), entry.getValue(), false);
}
}
@ -61,5 +60,4 @@ public class SessionEventsSenderTransaction extends AbstractKeycloakTransaction
sessionEvents.clear();
}
private record EventGroup(String eventKey, ClusterProvider.DCNotify dcNotify) {}
}
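
The change above keys the queued events by eventKey only and sends one notification per key on commit. As a rough, self-contained illustration of that pattern (group events per key while the transaction is open, notify once per key on commit), consider the sketch below; BatchingEventSender and its notifier callback are hypothetical stand-ins for illustration, not the Keycloak classes.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.BiConsumer;

// Simplified stand-in for the transaction above: events are grouped by key while
// the transaction is open and delivered in one notification per key on commit.
public class BatchingEventSender<E> {

    private final Map<String, List<E>> pending = new HashMap<>();
    private final BiConsumer<String, List<E>> notifier; // stands in for ClusterProvider.notify()

    public BatchingEventSender(BiConsumer<String, List<E>> notifier) {
        this.notifier = notifier;
    }

    public void addEvent(String eventKey, E event) {
        pending.computeIfAbsent(eventKey, k -> new ArrayList<>()).add(event);
    }

    public void commit() {
        pending.forEach(notifier);
        pending.clear();
    }

    public void rollback() {
        pending.clear();
    }

    public static void main(String[] args) {
        BatchingEventSender<String> tx = new BatchingEventSender<>(
                (key, events) -> System.out.println(key + " -> " + events));
        tx.addEvent("realm1/removeUserSessions", "event-a");
        tx.addEvent("realm1/removeUserSessions", "event-b");
        tx.addEvent("realm2/removeExpired", "event-c");
        tx.commit(); // two notifications, one per event key
    }
}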

View File

@ -1,107 +0,0 @@
/*
* Copyright 2016 Red Hat, Inc. and/or its affiliates
* and other contributors as indicated by the @author tags.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.keycloak.models.sessions.infinispan.initializer;
import org.infinispan.Cache;
import org.infinispan.context.Flag;
import org.infinispan.lifecycle.ComponentStatus;
import org.jboss.logging.Logger;
import org.keycloak.models.KeycloakSessionFactory;
/**
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
*/
public abstract class BaseCacheInitializer extends CacheInitializer {
private static final String STATE_KEY_PREFIX = "distributed::";
private static final Logger log = Logger.getLogger(BaseCacheInitializer.class);
protected final KeycloakSessionFactory sessionFactory;
protected final Cache<String, InitializerState> workCache;
protected final SessionLoader<SessionLoader.LoaderContext, SessionLoader.WorkerContext, SessionLoader.WorkerResult> sessionLoader;
protected final String stateKey;
public BaseCacheInitializer(KeycloakSessionFactory sessionFactory, Cache<String, InitializerState> workCache, SessionLoader<SessionLoader.LoaderContext, SessionLoader.WorkerContext, SessionLoader.WorkerResult> sessionLoader, String stateKeySuffix) {
this.sessionFactory = sessionFactory;
this.workCache = workCache;
this.sessionLoader = sessionLoader;
this.stateKey = STATE_KEY_PREFIX + stateKeySuffix;
}
@Override
protected boolean isFinished() {
InitializerState state = getStateFromCache();
return state != null && state.isFinished();
}
@Override
protected boolean isCoordinator() {
return workCache.getCacheManager().isCoordinator();
}
@Override
protected int getProgressIndicator() {
InitializerState state = getStateFromCache();
return state == null ? 0 : state.getProgressIndicator();
}
protected InitializerState getStateFromCache() {
// We ignore the cacheStore for now, so that in the Cross-DC scenario (with RemoteStore enabled) the remoteStore is ignored.
return workCache.getAdvancedCache()
.withFlags(Flag.SKIP_CACHE_STORE, Flag.SKIP_CACHE_LOAD)
.get(stateKey);
}
protected void saveStateToCache(InitializerState state) {
// 3 attempts to send the message (it may fail if some node fails in the meantime)
retry(3, () -> {
// Save this synchronously to ensure all nodes read correct state
// We ignore the cacheStore for now, so that in the Cross-DC scenario (with RemoteStore enabled) the remoteStore is ignored.
BaseCacheInitializer.this.workCache.getAdvancedCache().
withFlags(Flag.IGNORE_RETURN_VALUES, Flag.FORCE_SYNCHRONOUS, Flag.SKIP_CACHE_STORE, Flag.SKIP_CACHE_LOAD)
.put(stateKey, state);
});
}
private void retry(int retry, Runnable runnable) {
while (true) {
try {
runnable.run();
return;
} catch (RuntimeException e) {
ComponentStatus status = workCache.getStatus();
if (status.isStopping() || status.isTerminated()) {
log.warn("Failed to put initializerState to the cache. Cache is already terminating");
log.debug(e.getMessage(), e);
return;
}
retry--;
if (retry == 0) {
throw e;
}
}
}
}
}

View File

@ -1,88 +0,0 @@
/*
* Copyright 2016 Red Hat, Inc. and/or its affiliates
* and other contributors as indicated by the @author tags.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.keycloak.models.sessions.infinispan.initializer;
import java.time.Instant;
import java.util.concurrent.TimeUnit;
import org.jboss.logging.Logger;
/**
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
*/
public abstract class CacheInitializer {
private static final Logger log = Logger.getLogger(CacheInitializer.class);
public void loadSessions() {
Instant loadingMustContinueBy = Instant.now().plusSeconds(getStalledTimeoutInSeconds());
boolean loadingStalledInPreviousStep = false;
int lastProgressIndicator = 0;
while (!isFinished()) {
if (!isCoordinator()) {
try {
TimeUnit.SECONDS.sleep(1);
final int progressIndicator = getProgressIndicator();
final boolean loadingStalled = lastProgressIndicator == progressIndicator;
if (loadingStalled) {
if (loadingStalledInPreviousStep) {
if (Instant.now().isAfter(loadingMustContinueBy)) {
throw new RuntimeException("Loading sessions has stalled for " + getStalledTimeoutInSeconds() + " seconds, possibly caused by split-brain");
}
log.tracef("Loading sessions stalled. Waiting until %s", loadingMustContinueBy);
} else {
loadingMustContinueBy = Instant.now().plusSeconds(getStalledTimeoutInSeconds());
loadingStalledInPreviousStep = true;
}
} else {
loadingStalledInPreviousStep = false;
}
lastProgressIndicator = progressIndicator;
} catch (InterruptedException ie) {
log.error("Interrupted", ie);
throw new RuntimeException("Loading sessions failed", ie);
}
} else {
startLoading();
}
}
}
protected abstract boolean isFinished();
protected abstract boolean isCoordinator();
/**
* Returns an integer which captures the current progress. If loading is progressing,
* this indicator must change most of the time so that it does not hit the stalled
* timeout limit.
* @see #getStalledTimeoutInSeconds()
* @return
*/
protected abstract int getProgressIndicator();
/**
* Only the coordinator will run this
*/
protected abstract void startLoading();
protected abstract int getStalledTimeoutInSeconds();
}

View File

@ -1,154 +0,0 @@
/*
* Copyright 2016 Red Hat, Inc. and/or its affiliates
* and other contributors as indicated by the @author tags.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.keycloak.models.sessions.infinispan.initializer;
import org.infinispan.Cache;
import org.jboss.logging.Logger;
import org.keycloak.models.KeycloakSession;
import org.keycloak.models.KeycloakSessionFactory;
import org.keycloak.models.KeycloakSessionTask;
import org.keycloak.models.utils.KeycloakModelUtils;
import java.util.List;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
/**
* Startup initialization for reading persistent userSessions and filling them into the Infinispan in-memory caches.
*
* The implementation is generic and does not contain any userSession-specific logic. All logic related to how sessions are loaded lives in the SessionLoader implementation.
*
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
*/
public class InfinispanCacheInitializer extends BaseCacheInitializer {
private static final Logger log = Logger.getLogger(InfinispanCacheInitializer.class);
private final int maxErrors;
// Effectively no timeout
private final int stalledTimeoutInSeconds;
public InfinispanCacheInitializer(KeycloakSessionFactory sessionFactory, Cache<String, InitializerState> workCache, SessionLoader sessionLoader, String stateKeySuffix, int maxErrors, int stalledTimeoutInSeconds) {
super(sessionFactory, workCache, sessionLoader, stateKeySuffix);
this.maxErrors = maxErrors;
this.stalledTimeoutInSeconds = stalledTimeoutInSeconds;
}
// Only the coordinator will run this
@Override
protected void startLoading() {
InitializerState state = getStateFromCache();
SessionLoader.LoaderContext[] ctx = new SessionLoader.LoaderContext[1];
if (state == null) {
KeycloakModelUtils.runJobInTransaction(sessionFactory, new KeycloakSessionTask() {
@Override
public void run(KeycloakSession session) {
ctx[0] = sessionLoader.computeLoaderContext();
}
});
state = new InitializerState(ctx[0].getSegmentsCount());
} else {
KeycloakModelUtils.runJobInTransaction(sessionFactory, new KeycloakSessionTask() {
@Override
public void run(KeycloakSession session) {
ctx[0] = sessionLoader.computeLoaderContext();
}
});
}
log.debugf("Start loading with loader: '%s', ctx: '%s' , state: %s",
sessionLoader.toString(), ctx[0].toString(), state.toString());
startLoadingImpl(state, ctx[0]);
}
@Override
protected int getStalledTimeoutInSeconds() {
return this.stalledTimeoutInSeconds;
}
protected void startLoadingImpl(InitializerState state, SessionLoader.LoaderContext loaderCtx) {
final int errors = 0;
int segmentToLoad = 0;
int distributedWorkersCount = 1;
while (segmentToLoad < state.getSegmentsCount()) {
log.debugf("Starting next iteration with %d workers", distributedWorkersCount);
List<Integer> segments = state.getSegmentsToLoad(segmentToLoad, distributedWorkersCount);
if (log.isTraceEnabled()) {
log.trace("unfinished segments for this iteration: " + segments);
}
Queue<SessionLoader.WorkerResult> results = new ConcurrentLinkedQueue<>();
for (Integer segment : segments) {
SessionLoader.WorkerContext workerCtx = sessionLoader.computeWorkerContext(segment);
SessionInitializerWorker worker = new SessionInitializerWorker();
worker.setWorkerEnvironment(loaderCtx, workerCtx, sessionLoader);
results.add(worker.apply(sessionFactory));
}
boolean anyFailure = false;
// Check the results
for (SessionLoader.WorkerResult result : results) {
if (result.success()) {
state.markSegmentFinished(result.segment());
if (result.segment() == segmentToLoad + distributedWorkersCount - 1) {
// last result for next iteration when complete
}
} else {
if (log.isTraceEnabled()) {
log.tracef("Segment %d failed to compute", result.segment());
}
anyFailure = true;
}
}
if (errors >= maxErrors) {
throw new RuntimeException("Maximum count of worker errors occurred. Limit was " + maxErrors + ". See server.log for details");
}
if (!anyFailure) {
// everything is OK, prepare the new row
segmentToLoad += distributedWorkersCount;
if (log.isTraceEnabled()) {
log.debugf("New initializer state is: %s", state);
}
}
}
// Push the state after computation is finished
saveStateToCache(state);
// Loader callback after the task is finished
this.sessionLoader.afterAllSessionsLoaded();
}
}

View File

@ -1,137 +0,0 @@
/*
* Copyright 2016 Red Hat, Inc. and/or its affiliates
* and other contributors as indicated by the @author tags.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.keycloak.models.sessions.infinispan.initializer;
import org.infinispan.protostream.annotations.ProtoFactory;
import org.infinispan.protostream.annotations.ProtoField;
import org.infinispan.protostream.annotations.ProtoTypeId;
import org.jboss.logging.Logger;
import org.keycloak.marshalling.Marshalling;
import org.keycloak.models.sessions.infinispan.entities.SessionEntity;
import java.util.BitSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Objects;
/**
* Note that this state is <b>NOT</b> thread safe. Currently it is only used from a single thread, so it's fine,
* but further optimizations might need to revisit this (see {@link InfinispanCacheInitializer}).
*
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
*/
@ProtoTypeId(Marshalling.INITIALIZER_STATE)
public class InitializerState extends SessionEntity {
private static final Logger log = Logger.getLogger(InitializerState.class);
private final int segmentsCount;
private final BitSet segments;
public InitializerState(int segmentsCount) {
this.segmentsCount = segmentsCount;
this.segments = new BitSet(segmentsCount);
log.debugf("segmentsCount: %d", segmentsCount);
}
@ProtoFactory
InitializerState(String realmId, int segmentsCount, BitSet segments) {
super(realmId);
this.segmentsCount = segmentsCount;
this.segments = segments;
log.debugf("segmentsCount: %d", segmentsCount);
}
/**
* Getter for the segments count.
* @return The number of segments of the state
*/
@ProtoField(2)
public int getSegmentsCount() {
return segmentsCount;
}
@ProtoField(3)
BitSet getSegments() {
return segments;
}
/** Return true only if the computation is entirely finished (all segments are marked finished) */
public boolean isFinished() {
return segments.cardinality() == segmentsCount;
}
/** Return indication of progress - changes upon progress */
public int getProgressIndicator() {
return segments.hashCode();
}
/** Return the unfinished segments in the next batch of segments.
* @param segmentToLoad The first segment of the batch
* @param maxSegmentCount The maximum number of segments to return
* @return The list of segments to work on in this step
*/
public List<Integer> getSegmentsToLoad(int segmentToLoad, int maxSegmentCount) {
List<Integer> result = new LinkedList<>();
for (int i = segmentToLoad; i < (segmentToLoad + maxSegmentCount) && i < segmentsCount; i++) {
if (!segments.get(i)) {
result.add(i);
}
}
return result;
}
public void markSegmentFinished(int index) {
segments.set(index);
}
@Override
public String toString() {
int finished = segments.cardinality();
int nonFinished = segmentsCount - finished;
return "finished segments count: " + finished
+ (", non-finished segments count: " + nonFinished);
}
@Override
public int hashCode() {
int hash = 3;
hash = 97 * hash + this.segmentsCount;
hash = 97 * hash + Objects.hashCode(this.segments);
return hash;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
InitializerState other = (InitializerState) obj;
return this.segmentsCount == other.segmentsCount && Objects.equals(this.segments, other.segments);
}
}
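
As a quick illustration of the BitSet bookkeeping above, the following self-contained sketch mimics how segments are handed out in batches, marked finished, and checked for completion, and how the progress indicator changes as bits flip. It is a simplified rework for illustration only, not the class shown here.

import java.util.ArrayList;
import java.util.BitSet;
import java.util.List;

// Minimal illustration of segment tracking with a BitSet: unfinished segments are
// handed out in batches, marked when done, and the job is finished once every bit is set.
public class SegmentTrackingExample {

    public static void main(String[] args) {
        int segmentsCount = 8;
        BitSet segments = new BitSet(segmentsCount);

        int batchStart = 0;
        int batchSize = 3;
        while (segments.cardinality() < segmentsCount) {
            List<Integer> toLoad = new ArrayList<>();
            for (int i = batchStart; i < batchStart + batchSize && i < segmentsCount; i++) {
                if (!segments.get(i)) {
                    toLoad.add(i);
                }
            }
            toLoad.forEach(segments::set);          // pretend each segment loaded successfully
            batchStart += batchSize;
            System.out.println("progress indicator: " + segments.hashCode()
                    + ", finished: " + segments.cardinality() + "/" + segmentsCount);
        }
    }
}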

View File

@ -1,53 +0,0 @@
/*
* Copyright 2017 Red Hat, Inc. and/or its affiliates
* and other contributors as indicated by the @author tags.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.keycloak.models.sessions.infinispan.initializer;
/**
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
*/
public class OfflinePersistentLoaderContext extends SessionLoader.LoaderContext {
private final int sessionsTotal;
private final int sessionsPerSegment;
public OfflinePersistentLoaderContext(int sessionsTotal, int sessionsPerSegment) {
super(computeSegmentsCount(sessionsTotal, sessionsPerSegment));
this.sessionsTotal = sessionsTotal;
this.sessionsPerSegment = sessionsPerSegment;
}
private static int computeSegmentsCount(int sessionsTotal, int sessionsPerSegment) {
int segmentsCount = sessionsTotal / sessionsPerSegment;
if (sessionsTotal % sessionsPerSegment >= 1) {
segmentsCount = segmentsCount + 1;
}
return segmentsCount;
}
@Override
public String toString() {
return new StringBuilder("OfflinePersistentLoaderContext [ ")
.append(" sessionsTotal: ").append(sessionsTotal)
.append(", sessionsPerSegment: ").append(sessionsPerSegment)
.append(", segmentsCount: ").append(getSegmentsCount())
.append(" ]")
.toString();
}
}
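
The segment count above is a ceiling division: any partially filled last segment still gets its own worker. A small worked example with illustrative numbers:

// Same ceiling-division idea as computeSegmentsCount() above.
public class SegmentCountExample {

    static int computeSegmentsCount(int sessionsTotal, int sessionsPerSegment) {
        int segmentsCount = sessionsTotal / sessionsPerSegment;
        if (sessionsTotal % sessionsPerSegment >= 1) {
            segmentsCount++;
        }
        return segmentsCount;
    }

    public static void main(String[] args) {
        System.out.println(computeSegmentsCount(10_000, 1_000)); // 10
        System.out.println(computeSegmentsCount(10_500, 1_000)); // 11
        System.out.println(computeSegmentsCount(999, 1_000));    // 1
    }
}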

View File

@ -1,45 +0,0 @@
/*
* Copyright 2016 Red Hat, Inc. and/or its affiliates
* and other contributors as indicated by the @author tags.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.keycloak.models.sessions.infinispan.initializer;
import org.keycloak.models.KeycloakSessionFactory;
import org.keycloak.models.utils.KeycloakModelUtils;
import java.util.function.Function;
/**
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
*/
public class SessionInitializerWorker implements Function<KeycloakSessionFactory, SessionLoader.WorkerResult> {
private SessionLoader.LoaderContext loaderCtx;
private SessionLoader.WorkerContext workerCtx;
private SessionLoader<SessionLoader.LoaderContext, SessionLoader.WorkerContext, SessionLoader.WorkerResult> sessionLoader;
public void setWorkerEnvironment(SessionLoader.LoaderContext loaderCtx, SessionLoader.WorkerContext workerCtx, SessionLoader<SessionLoader.LoaderContext, SessionLoader.WorkerContext, SessionLoader.WorkerResult> sessionLoader) {
this.loaderCtx = loaderCtx;
this.workerCtx = workerCtx;
this.sessionLoader = sessionLoader;
}
@Override
public SessionLoader.WorkerResult apply(KeycloakSessionFactory sessionFactory) {
return KeycloakModelUtils.runJobInTransactionWithResult(sessionFactory, (session) -> sessionLoader.loadSessions(session, loaderCtx, workerCtx));
}
}

View File

@ -1,101 +0,0 @@
/*
* Copyright 2016 Red Hat, Inc. and/or its affiliates
* and other contributors as indicated by the @author tags.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.keycloak.models.sessions.infinispan.initializer;
import org.keycloak.models.KeycloakSession;
/**
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
*/
public interface SessionLoader<LOADER_CONTEXT extends SessionLoader.LoaderContext,
WORKER_CONTEXT extends SessionLoader.WorkerContext,
WORKER_RESULT extends SessionLoader.WorkerResult> {
/**
*
* Will be triggered just once on the cluster coordinator node to count the number of segments and compute other context data specific to the whole computation.
* Each segment will then be computed later in one "worker" task.
*
* This method could be expensive to call, so the "computed" loaderContext object is passed among workers/loaders and needs to be serializable
*
* @return
*/
LOADER_CONTEXT computeLoaderContext();
/**
* Compute the worker context for current iteration
*
* @param segment the current segment (page) to compute
* @return
*/
WORKER_CONTEXT computeWorkerContext(int segment);
/**
* Will be called on all cluster nodes to load the specified page.
*
* @param session
* @param loaderContext global loaderContext object, which was already computed before
* @param workerContext for current iteration
* @return
*/
WORKER_RESULT loadSessions(KeycloakSession session, LOADER_CONTEXT loaderContext, WORKER_CONTEXT workerContext);
/**
* Callback triggered on the cluster coordinator once it recognizes that all sessions were successfully loaded
*/
void afterAllSessionsLoaded();
/**
* Object, which contains some context data to be used by SessionLoader implementation. It's computed just once and then passed
* to each {@link SessionLoader}.
*/
class LoaderContext {
private final int segmentsCount;
public LoaderContext(int segmentsCount) {
this.segmentsCount = segmentsCount;
}
public int getSegmentsCount() {
return segmentsCount;
}
}
/**
* Object, which is computed before each worker iteration and contains some data to be used by the corresponding worker iteration.
* For example, info about which segment/page should be loaded by the current worker.
*/
record WorkerContext(int segment) {
}
/**
* Result of single worker iteration
*/
record WorkerResult(boolean success, int segment) {
}
}
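
To illustrate the lifecycle that the Javadoc above describes (compute the loader context once on the coordinator, then a worker context and loadSessions call per segment, then the final callback), here is a minimal, self-contained sketch. The SimpleLoader interface and the in-memory implementation are simplified stand-ins for illustration, not the Keycloak API.

import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;

// Simplified stand-in for the SessionLoader contract, used only to show the call order.
public class SessionLoaderLifecycleExample {

    interface SimpleLoader {
        int computeLoaderContext();            // returns the number of segments
        int computeWorkerContext(int segment); // here just the segment itself
        boolean loadSessions(int segment);     // returns true on success
        void afterAllSessionsLoaded();
    }

    static void drive(SimpleLoader loader) {
        int segments = loader.computeLoaderContext();   // once, on the coordinator
        for (int segment = 0; segment < segments; segment++) {
            int workerCtx = loader.computeWorkerContext(segment);
            if (!loader.loadSessions(workerCtx)) {
                throw new IllegalStateException("Segment " + workerCtx + " failed");
            }
        }
        loader.afterAllSessionsLoaded();                 // once, after every segment succeeded
    }

    public static void main(String[] args) {
        List<Integer> loaded = new CopyOnWriteArrayList<>();
        drive(new SimpleLoader() {
            public int computeLoaderContext() { return 3; }
            public int computeWorkerContext(int segment) { return segment; }
            public boolean loadSessions(int segment) { return loaded.add(segment); }
            public void afterAllSessionsLoaded() { System.out.println("loaded segments: " + loaded); }
        });
    }
}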

View File

@ -1,297 +0,0 @@
/*
* Copyright 2017 Red Hat, Inc. and/or its affiliates
* and other contributors as indicated by the @author tags.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.keycloak.models.sessions.infinispan.remotestore;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.RejectedExecutionException;
import org.infinispan.client.hotrod.event.ClientCacheEntryCreatedEvent;
import org.infinispan.client.hotrod.event.ClientCacheEntryModifiedEvent;
import org.infinispan.client.hotrod.event.ClientCacheEntryRemovedEvent;
import org.infinispan.client.hotrod.event.ClientCacheFailoverEvent;
import org.infinispan.client.hotrod.event.ClientEvent;
import org.jboss.logging.Logger;
import org.keycloak.common.util.MultivaluedHashMap;
import org.keycloak.common.util.Time;
import static org.infinispan.client.hotrod.event.ClientEvent.Type.CLIENT_CACHE_ENTRY_CREATED;
import static org.infinispan.client.hotrod.event.ClientEvent.Type.CLIENT_CACHE_ENTRY_REMOVED;
/**
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
*/
public class ClientListenerExecutorDecorator<K> {
private static final Logger logger = Logger.getLogger(ClientListenerExecutorDecorator.class);
private final Object lock = new Object();
private final ExecutorService decorated;
// Both "eventsInProgress" and "eventsQueue" maps are guarded by the "lock", so doesn't need to be concurrency safe
// Events currently submitted to the ExecutorService
private Map<K, MyClientEvent> eventsInProgress = new HashMap<>();
// Queue of the events waiting to be processed. We don't want events for the same key to be processed concurrently
private MultivaluedHashMap<K, MyClientEventContext> eventsQueue = new MultivaluedHashMap<>();
public ClientListenerExecutorDecorator(ExecutorService decorated) {
this.decorated = decorated;
}
// Use explicit submit methods to ensure that an unsupported type of ClientEvent cannot be submitted
public void submit(ClientCacheFailoverEvent clientCacheFailoverEvent, Runnable r) {
decorated.submit(r);
}
public void submit(ClientCacheEntryCreatedEvent<K> cacheEntryCreatedEvent, Runnable r) {
MyClientEvent event = convertIspnClientEvent(cacheEntryCreatedEvent);
submit(event, r);
}
public void submit(ClientCacheEntryModifiedEvent<K> cacheEntryModifiedEvent, Runnable r) {
MyClientEvent event = convertIspnClientEvent(cacheEntryModifiedEvent);
submit(event, r);
}
public void submit(ClientCacheEntryRemovedEvent<K> cacheEntryRemovedEvent, Runnable r) {
MyClientEvent event = convertIspnClientEvent(cacheEntryRemovedEvent);
submit(event, r);
}
// IMPL
private void submit(MyClientEvent event, Runnable r) {
K key = event.key;
synchronized (lock) {
if (!eventsInProgress.containsKey(key)) {
submitImpl(key, event, r);
} else {
putEventToTheQueue(key, event, r);
}
}
}
// Assumes it's called from within the synchronized block
private void submitImpl(K key, MyClientEvent event, Runnable r) {
logger.debugf("Submitting event to the executor: %s . eventsInProgress size: %d, eventsQueue size: %d", event.toString(), eventsInProgress.size(), eventsQueue.size());
eventsInProgress.put(key, event);
Runnable decoratedRunnable = () -> {
Long start = null;
try {
if (logger.isDebugEnabled()) {
start = Time.currentTimeMillis();
}
r.run();
} finally {
synchronized (lock) {
eventsInProgress.remove(key);
if (logger.isDebugEnabled()) {
long took = Time.currentTimeMillis() - start;
logger.debugf("Finished processing event by the executor: %s, took: %d ms. EventsInProgress size: %d", event.toString(), took, eventsInProgress.size());
}
pollQueue(key);
}
}
};
try {
decorated.submit(decoratedRunnable);
} catch (RejectedExecutionException ree) {
eventsInProgress.remove(key);
// server is shutting down or pool was terminated - don't throw errors
if (ree.getMessage() != null && (ree.getMessage().contains("Terminated") || ree.getMessage().contains("Shutting down"))) {
logger.warnf("Rejected execution of task for the event '%s' because server is shutting down or pool was terminated.", event.toString());
logger.debug(ree);
} else {
// avoid touching the cache when creating a log message to avoid a deadlock in Infinispan 12.1.7.Final
logger.errorf("Rejected execution of task for the event '%s' . Try to increase the pool size. Pool is '%s'", event.toString(), decorated.toString());
throw ree;
}
}
}
// Assumes it's called from within the synchronized block
private void pollQueue(K key) {
if (eventsQueue.containsKey(key)) {
List<MyClientEventContext> events = eventsQueue.get(key);
if (events.size() > 0) {
MyClientEventContext nextEvent = events.remove(0);
// That was the last event in the queue for that key
if (events.size() == 0) {
eventsQueue.remove(key);
}
submitImpl(key, nextEvent.event, nextEvent.r);
} else {
// Shouldn't happen
throw new IllegalStateException("Illegal state. Size was 0 for key " + key);
}
}
}
// Assumes it's called from within the synchronized block
private void putEventToTheQueue(K key, MyClientEvent event, Runnable r) {
logger.debugf("Calling putEventToTheQueue: %s", event.toString());
if (!eventsQueue.containsKey(key)) {
eventsQueue.putSingle(key, new MyClientEventContext(event, r));
} else {
List<MyClientEventContext> existingEvents = eventsQueue.get(key);
MyClientEventContext myNewEvent = new MyClientEventContext(event, r);
// Try to optimize the queue (e.g. in case we have a REMOVE event, we can ignore the previous CREATE or MODIFIED events)
switch (event.type) {
case CLIENT_CACHE_ENTRY_CREATED:
boolean add = true;
for (MyClientEventContext ctx : existingEvents) {
if (ctx.event.type == CLIENT_CACHE_ENTRY_REMOVED) {
// Ignore. TODO: Log me?
add = false;
break;
} else if (ctx.event.type == CLIENT_CACHE_ENTRY_CREATED) {
// Ignore. Already on the list
add = false;
break;
}
}
// Add to the beginning before the MODIFIED events
if (add) {
existingEvents.add(0, myNewEvent);
}
break;
case CLIENT_CACHE_ENTRY_MODIFIED:
boolean addd = true;
for (int i=0 ; i<existingEvents.size() ; i++) {
MyClientEventContext ctx = existingEvents.get(i);
if (ctx.event.type == CLIENT_CACHE_ENTRY_REMOVED) {
// Ignore.
addd = false;
break;
} else if (ctx.event.type == CLIENT_CACHE_ENTRY_CREATED) {
// Shift to the next element. CREATE events go first.
} else {
// Can ignore the previous MODIFY event if we have a newer version
if (ctx.event.version < myNewEvent.event.version) {
existingEvents.remove(i);
} else {
addd = false;
}
}
if (addd) {
// Add to the end
existingEvents.add(myNewEvent);
}
}
break;
case CLIENT_CACHE_ENTRY_REMOVED:
// Can just ignore the other events in the queue in case of REMOVE
eventsQueue.putSingle(key, new MyClientEventContext(event, r));
break;
default:
throw new IllegalStateException("Unsupported event type: " + event.type);
}
}
logger.debugf("Event queued. Current events for the key '%s': %s", key.toString(), eventsQueue.getList(key));
}
public MyClientEvent convertIspnClientEvent(ClientEvent ispnClientEvent) {
if (ispnClientEvent instanceof ClientCacheEntryCreatedEvent) {
ClientCacheEntryCreatedEvent<K> ev = (ClientCacheEntryCreatedEvent<K>) ispnClientEvent;
return new MyClientEvent(ev.getKey(), ev.getVersion(), ev.getType());
} else if (ispnClientEvent instanceof ClientCacheEntryModifiedEvent) {
ClientCacheEntryModifiedEvent<K> ev = (ClientCacheEntryModifiedEvent<K>) ispnClientEvent;
return new MyClientEvent(ev.getKey(), ev.getVersion(), ev.getType());
} else if (ispnClientEvent instanceof ClientCacheEntryRemovedEvent) {
ClientCacheEntryRemovedEvent<K> ev = (ClientCacheEntryRemovedEvent<K>) ispnClientEvent;
return new MyClientEvent(ev.getKey(), -1l, ev.getType());
} else {
throw new IllegalStateException("Unsupported event type: " + ispnClientEvent.getType());
}
}
private class MyClientEventContext {
private final MyClientEvent event;
private final Runnable r;
private MyClientEventContext(MyClientEvent event, Runnable r) {
this.event = event;
this.r = r;
}
@Override
public String toString() {
return event.toString();
}
}
// Using a separate class, as the ISPN ClientEvent type doesn't provide access to the key and version
private class MyClientEvent {
private final K key;
private final long version;
private final ClientEvent.Type type;
private MyClientEvent(K key, long version, ClientEvent.Type type) {
this.key = key;
this.version = version;
this.type = type;
}
@Override
public String toString() {
return String.format("ClientEvent [ type=%s, key=%s, version=%d ]", type, key, version);
}
}
}
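
The decorator above exists so that events for the same cache key are never processed concurrently, while events for different keys still run in parallel. A compact way to get the same per-key ordering with plain JDK primitives is to chain tasks per key, as in the illustrative sketch below; KeyedSerialExecutor is a hypothetical name, and the queue-merging optimizations of the decorator above are deliberately left out.

import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

// Hypothetical, simplified illustration: tasks submitted under the same key run one
// after another, while tasks for different keys may run concurrently on the pool.
public class KeyedSerialExecutor<K> {

    private final ExecutorService delegate;
    // Tail of the task chain per key; each new task is appended after the previous one.
    private final Map<K, CompletableFuture<Void>> tails = new ConcurrentHashMap<>();

    public KeyedSerialExecutor(ExecutorService delegate) {
        this.delegate = delegate;
    }

    public CompletableFuture<Void> submit(K key, Runnable task) {
        // compute() is atomic per key, so concurrent submitters chain in a defined order.
        return tails.compute(key, (k, tail) -> {
            CompletableFuture<Void> previous = (tail == null) ? CompletableFuture.completedFuture(null) : tail;
            return previous.thenRunAsync(task, delegate);
        });
    }

    public static void main(String[] args) {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        KeyedSerialExecutor<String> executor = new KeyedSerialExecutor<>(pool);
        CompletableFuture<Void> lastA = null;
        CompletableFuture<Void> lastB = null;
        for (int i = 0; i < 5; i++) {
            int n = i;
            lastA = executor.submit("session-a", () -> System.out.println("session-a task " + n));
            lastB = executor.submit("session-b", () -> System.out.println("session-b task " + n));
        }
        CompletableFuture.allOf(lastA, lastB).join(); // per-key order is preserved in the output
        pool.shutdown();
    }
}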

View File

@ -1,202 +0,0 @@
/*
* Copyright 2016 Red Hat, Inc. and/or its affiliates
* and other contributors as indicated by the @author tags.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.keycloak.models.sessions.infinispan.remotestore;
import org.infinispan.client.hotrod.exceptions.HotRodClientException;
import org.keycloak.common.util.MultiSiteUtils;
import org.keycloak.common.util.Retry;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import org.infinispan.client.hotrod.Flag;
import org.infinispan.client.hotrod.RemoteCache;
import org.infinispan.client.hotrod.VersionedValue;
import org.jboss.logging.Logger;
import org.keycloak.connections.infinispan.TopologyInfo;
import org.keycloak.models.KeycloakSession;
import org.keycloak.models.RealmModel;
import org.keycloak.models.sessions.infinispan.changes.MergedUpdate;
import org.keycloak.models.sessions.infinispan.changes.SessionEntityWrapper;
import org.keycloak.models.sessions.infinispan.changes.SessionUpdateTask;
import org.keycloak.models.sessions.infinispan.entities.SessionEntity;
import org.keycloak.connections.infinispan.InfinispanUtil;
import static org.keycloak.connections.infinispan.InfinispanConnectionProvider.CLIENT_SESSION_CACHE_NAME;
import static org.keycloak.connections.infinispan.InfinispanConnectionProvider.OFFLINE_CLIENT_SESSION_CACHE_NAME;
import static org.keycloak.connections.infinispan.InfinispanConnectionProvider.OFFLINE_USER_SESSION_CACHE_NAME;
import static org.keycloak.connections.infinispan.InfinispanConnectionProvider.USER_SESSION_CACHE_NAME;
/**
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
*/
public class RemoteCacheInvoker {
public static final Logger logger = Logger.getLogger(RemoteCacheInvoker.class);
private final Map<String, RemoteCache> remoteCaches = new HashMap<>();
public void addRemoteCache(String cacheName, RemoteCache remoteCache) {
remoteCaches.put(cacheName, remoteCache);
}
public Set<String> getRemoteCacheNames() {
return Collections.unmodifiableSet(remoteCaches.keySet());
}
public <K, V extends SessionEntity> void runTask(KeycloakSession kcSession, RealmModel realm, String cacheName, K key, MergedUpdate<V> task, SessionEntityWrapper<V> sessionWrapper) {
RemoteCache remoteCache = remoteCaches.get(cacheName);
if (remoteCache == null) {
return;
}
SessionUpdateTask.CacheOperation operation = task.getOperation();
SessionUpdateTask.CrossDCMessageStatus status = task.getCrossDCMessageStatus(sessionWrapper);
if (status == SessionUpdateTask.CrossDCMessageStatus.NOT_NEEDED) {
if (logger.isTraceEnabled()) {
logger.tracef("Skip writing to remoteCache for entity '%s' of cache '%s' and operation '%s'", key, cacheName, operation);
}
return;
}
long maxIdleTimeMs = getMaxIdleTimeMs(task);
if (logger.isTraceEnabled()) {
logger.tracef("Running task '%s' on remote cache '%s' . Key is '%s'", operation, cacheName, key);
}
TopologyInfo topology = InfinispanUtil.getTopologyInfo(kcSession);
Retry.executeWithBackoff((int iteration) -> {
try {
runOnRemoteCache(topology, remoteCache, maxIdleTimeMs, key, task, sessionWrapper);
} catch (HotRodClientException re) {
if (logger.isDebugEnabled()) {
logger.debugf(re, "Failed running task '%s' on remote cache '%s' . Key: '%s', iteration '%s'. Will try to retry the task",
operation, cacheName, key, iteration);
}
// Rethrow the exception. Retry will take care of handling the exception and eventually retrying the operation.
throw re;
}
}, 10, 10);
}
private static <V extends SessionEntity> long getMaxIdleTimeMs(MergedUpdate<V> task) {
long maxIdleTimeMs = task.getMaxIdleTimeMs();
if (maxIdleTimeMs > 0) {
// Increase the timeout to ensure that the entry won't expire on the remoteCache in case the write of some entities to the remoteCache is postponed (e.g. userSession.lastSessionRefresh)
maxIdleTimeMs += 1800000;
}
return maxIdleTimeMs;
}
private <K, V extends SessionEntity> void runOnRemoteCache(TopologyInfo topology, RemoteCache<K, SessionEntityWrapper<V>> remoteCache, long maxIdleMs, K key, MergedUpdate<V> task, SessionEntityWrapper<V> sessionWrapper) {
SessionUpdateTask.CacheOperation operation = task.getOperation();
switch (operation) {
case REMOVE:
remoteCache.remove(key);
break;
case ADD:
remoteCache.put(key, sessionWrapper.forTransport(),
InfinispanUtil.toHotrodTimeMs(remoteCache, task.getLifespanMs()), TimeUnit.MILLISECONDS,
InfinispanUtil.toHotrodTimeMs(remoteCache, maxIdleMs), TimeUnit.MILLISECONDS);
break;
case ADD_IF_ABSENT:
SessionEntityWrapper<V> existing = remoteCache
.withFlags(Flag.FORCE_RETURN_VALUE)
.putIfAbsent(key, sessionWrapper.forTransport(), InfinispanUtil.toHotrodTimeMs(remoteCache, task.getLifespanMs()), TimeUnit.MILLISECONDS, InfinispanUtil.toHotrodTimeMs(remoteCache, maxIdleMs), TimeUnit.MILLISECONDS);
if (existing != null) {
logger.debugf("Existing entity in remote cache for key: %s . Will update it", key);
replace(topology, remoteCache, task.getLifespanMs(), maxIdleMs, key, task);
}
break;
case REPLACE:
replace(topology, remoteCache, task.getLifespanMs(), maxIdleMs, key, task);
break;
default:
throw new IllegalStateException("Unsupported state " + operation);
}
}
private <K, V extends SessionEntity> void replace(TopologyInfo topology, RemoteCache<K, SessionEntityWrapper<V>> remoteCache, long lifespanMs, long maxIdleMs, K key, SessionUpdateTask<V> task) {
// Adjust based on the hotrod protocol
lifespanMs = InfinispanUtil.toHotrodTimeMs(remoteCache, lifespanMs);
maxIdleMs = InfinispanUtil.toHotrodTimeMs(remoteCache, maxIdleMs);
boolean replaced = false;
int replaceIteration = 0;
while (!replaced && replaceIteration < InfinispanUtil.MAXIMUM_REPLACE_RETRIES) {
replaceIteration++;
VersionedValue<SessionEntityWrapper<V>> versioned = remoteCache.getWithMetadata(key);
if (versioned == null) {
if (MultiSiteUtils.isPersistentSessionsEnabled() &&
(remoteCache.getName().equals(USER_SESSION_CACHE_NAME)
|| remoteCache.getName().equals(CLIENT_SESSION_CACHE_NAME)
|| remoteCache.getName().equals(OFFLINE_USER_SESSION_CACHE_NAME)
|| remoteCache.getName().equals(OFFLINE_CLIENT_SESSION_CACHE_NAME))) {
logger.debugf("No existing entry for %s in the remote cache to remove, might have been evicted. A delete will force an eviction in the other DC.", key);
remoteCache.remove(key);
}
logger.warnf("Not found entity to replace for key '%s'", key);
return;
}
SessionEntityWrapper<V> sessionWrapper = versioned.getValue();
final V session = sessionWrapper.getEntity();
// Run task on the remote session
task.runUpdate(session);
if (logger.isTraceEnabled()) {
logger.tracef("%s: Before replaceWithVersion. Entity to write version %d: %s", logTopologyData(topology, replaceIteration),
versioned.getVersion(), session);
}
replaced = remoteCache.replaceWithVersion(key, SessionEntityWrapper.forTransport(session), versioned.getVersion(), lifespanMs, TimeUnit.MILLISECONDS, maxIdleMs, TimeUnit.MILLISECONDS);
if (!replaced) {
logger.debugf("%s: Failed to replace entity '%s' version %d. Will retry again", logTopologyData(topology, replaceIteration), key, versioned.getVersion());
} else {
if (logger.isTraceEnabled()) {
logger.tracef("%s: Replaced entity version %d in remote cache: %s", logTopologyData(topology, replaceIteration), versioned.getVersion(), session);
}
}
}
if (!replaced) {
logger.warnf("Failed to replace entity '%s' in remote cache '%s'", key, remoteCache.getName());
}
}
private String logTopologyData(TopologyInfo topology, int iteration) {
return topology.toString() + ", replaceIteration: " + iteration;
}
}
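
The REPLACE path above is an optimistic read-modify-write loop bounded by a maximum number of retries. The sketch below shows the same compare-and-swap shape using only a ConcurrentMap; the Versioned record and updateWithRetry helper are illustrative stand-ins for Hot Rod's getWithMetadata()/replaceWithVersion(), not the Infinispan API.

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

// Hypothetical stand-in for a versioned cache entry and an optimistic replace loop.
public class OptimisticReplaceExample {

    record Versioned<V>(V value, long version) {}

    static final int MAX_RETRIES = 10;

    // Re-read, mutate, and conditionally replace until the stored value is unchanged
    // underneath us or the retries run out.
    static <K, V> boolean updateWithRetry(ConcurrentMap<K, Versioned<V>> cache, K key,
                                          java.util.function.UnaryOperator<V> update) {
        for (int i = 0; i < MAX_RETRIES; i++) {
            Versioned<V> current = cache.get(key);
            if (current == null) {
                return false; // nothing to update, caller decides what to do
            }
            Versioned<V> next = new Versioned<>(update.apply(current.value()), current.version() + 1);
            // replace(key, old, new) only succeeds if the stored value is still `current`,
            // playing the role of replaceWithVersion() here.
            if (cache.replace(key, current, next)) {
                return true;
            }
        }
        return false;
    }

    public static void main(String[] args) {
        ConcurrentMap<String, Versioned<String>> cache = new ConcurrentHashMap<>();
        cache.put("session", new Versioned<>("lastRefresh=100", 1L));
        boolean replaced = updateWithRetry(cache, "session", v -> "lastRefresh=200");
        System.out.println("replaced=" + replaced + ", value=" + cache.get("session"));
    }
}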

View File

@ -1,285 +0,0 @@
/*
* Copyright 2016 Red Hat, Inc. and/or its affiliates
* and other contributors as indicated by the @author tags.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.keycloak.models.sessions.infinispan.remotestore;
import java.util.Random;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import org.infinispan.Cache;
import org.infinispan.client.hotrod.RemoteCache;
import org.infinispan.client.hotrod.VersionedValue;
import org.infinispan.client.hotrod.annotation.ClientCacheEntryCreated;
import org.infinispan.client.hotrod.annotation.ClientCacheEntryModified;
import org.infinispan.client.hotrod.annotation.ClientCacheEntryRemoved;
import org.infinispan.client.hotrod.annotation.ClientCacheFailover;
import org.infinispan.client.hotrod.annotation.ClientListener;
import org.infinispan.client.hotrod.event.ClientCacheEntryCreatedEvent;
import org.infinispan.client.hotrod.event.ClientCacheEntryModifiedEvent;
import org.infinispan.client.hotrod.event.ClientCacheEntryRemovedEvent;
import org.infinispan.client.hotrod.event.ClientCacheFailoverEvent;
import org.infinispan.client.hotrod.event.ClientEvent;
import org.infinispan.context.Flag;
import org.jboss.logging.Logger;
import org.keycloak.connections.infinispan.InfinispanUtil;
import org.keycloak.connections.infinispan.TopologyInfo;
import org.keycloak.executors.ExecutorsProvider;
import org.keycloak.models.ClientModel;
import org.keycloak.models.KeycloakSession;
import org.keycloak.models.KeycloakSessionFactory;
import org.keycloak.models.RealmModel;
import org.keycloak.models.sessions.infinispan.SessionFunction;
import org.keycloak.models.sessions.infinispan.changes.SessionEntityWrapper;
import org.keycloak.models.sessions.infinispan.entities.SessionEntity;
import org.keycloak.models.sessions.infinispan.util.SessionTimeouts;
import org.keycloak.models.utils.KeycloakModelUtils;
/**
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
*/
@ClientListener
public class RemoteCacheSessionListener<K, V extends SessionEntity> {
protected static final Logger logger = Logger.getLogger(RemoteCacheSessionListener.class);
private static final int MAXIMUM_REPLACE_RETRIES = 10;
private final Runnable onFailover;
private Cache<K, SessionEntityWrapper<V>> cache;
private RemoteCache<K, SessionEntityWrapper<V>> remoteCache;
private TopologyInfo topologyInfo;
private ClientListenerExecutorDecorator<K> executor;
private SessionFunction<V> lifespanMsLoader;
private SessionFunction<V> maxIdleTimeMsLoader;
private KeycloakSessionFactory sessionFactory;
protected RemoteCacheSessionListener(Runnable onFailover) {
this.onFailover = onFailover;
}
protected void init(KeycloakSession session, Cache<K, SessionEntityWrapper<V>> cache, RemoteCache<K, SessionEntityWrapper<V>> remoteCache,
SessionFunction<V> lifespanMsLoader, SessionFunction<V> maxIdleTimeMsLoader) {
this.cache = cache;
this.remoteCache = remoteCache;
this.topologyInfo = InfinispanUtil.getTopologyInfo(session);
this.lifespanMsLoader = lifespanMsLoader;
this.maxIdleTimeMsLoader = maxIdleTimeMsLoader;
this.sessionFactory = session.getKeycloakSessionFactory();
ExecutorService executor = session.getProvider(ExecutorsProvider.class).getExecutor("client-listener-" + cache.getName());
this.executor = new ClientListenerExecutorDecorator<>(executor);
}
@ClientCacheEntryCreated
public void created(ClientCacheEntryCreatedEvent event) {
K key = (K) event.getKey();
if (shouldUpdateLocalCache(event.getType(), key, event.isCommandRetried())) {
this.executor.submit(event, () -> {
// Doesn't work due to https://issues.jboss.org/browse/ISPN-9323. Need to explicitly retrieve and create it
//cache.get(key);
createRemoteEntityInCache(key);
});
}
}
@ClientCacheFailover
public void cacheFailover(ClientCacheFailoverEvent event) {
if (onFailover != null) {
this.executor.submit(event, onFailover);
}
}
@ClientCacheEntryModified
public void updated(ClientCacheEntryModifiedEvent event) {
K key = (K) event.getKey();
if (shouldUpdateLocalCache(event.getType(), key, event.isCommandRetried())) {
this.executor.submit(event, () -> {
replaceRemoteEntityInCache(key, event.getVersion());
});
}
}
protected void createRemoteEntityInCache(K key) {
VersionedValue<SessionEntityWrapper<V>> remoteSessionVersioned = remoteCache.getWithMetadata(key);
// It may happen under some circumstances that the remoteCache doesn't yet contain the value sent in the event (maybe just theoretically...)
if (remoteSessionVersioned == null || remoteSessionVersioned.getValue() == null) {
logger.debugf("Entity '%s' not present in remoteCache. Ignoring create", key);
return;
}
V remoteSession = remoteSessionVersioned.getValue().getEntity();
SessionEntityWrapper<V> newWrapper = new SessionEntityWrapper<>(remoteSession);
logger.debugf("Read session entity wrapper from the remote cache: %s", remoteSession);
KeycloakModelUtils.runJobInTransaction(sessionFactory, (session -> {
RealmModel realm = session.realms().getRealm(newWrapper.getEntity().getRealmId());
ClientModel client = newWrapper.getClientIfNeeded(realm);
long lifespanMs = lifespanMsLoader.apply(realm, client, newWrapper.getEntity());
long maxIdleTimeMs = maxIdleTimeMsLoader.apply(realm, client, newWrapper.getEntity());
// It is possible the session may be expired by the time this has replicated, double check before inserting
if (maxIdleTimeMs != SessionTimeouts.ENTRY_EXPIRED_FLAG && lifespanMs != SessionTimeouts.ENTRY_EXPIRED_FLAG) {
logger.tracef("Calling putIfAbsent for entity '%s' in the cache '%s' . lifespan: %d ms, maxIdleTime: %d ms", key, remoteCache.getName(), lifespanMs, maxIdleTimeMs);
// Using putIfAbsent. There is a theoretical possibility that the entity was already put into the cache by someone else
cache.getAdvancedCache().withFlags(Flag.SKIP_CACHE_STORE, Flag.SKIP_CACHE_LOAD, Flag.IGNORE_RETURN_VALUES)
.putIfAbsent(key, newWrapper, lifespanMs, TimeUnit.MILLISECONDS, maxIdleTimeMs, TimeUnit.MILLISECONDS);
} else {
logger.tracef("Not calling putIfAbsent for entity '%s' in the cache '%s' as entry is already expired", key, remoteCache.getName());
}
}));
}
protected void replaceRemoteEntityInCache(K key, long eventVersion) {
// TODO can be optimized and remoteSession sent in the event itself?
AtomicBoolean replaced = new AtomicBoolean(false);
int replaceRetries = 0;
int sleepInterval = 25;
do {
replaceRetries++;
SessionEntityWrapper<V> localEntityWrapper = cache.get(key);
VersionedValue<SessionEntityWrapper<V>> remoteSessionVersioned = remoteCache.getWithMetadata(key);
// Probably already removed
if (remoteSessionVersioned == null || remoteSessionVersioned.getValue() == null) {
logger.debugf("Entity '%s' not present in remoteCache. Ignoring replace",
key);
return;
}
if (remoteSessionVersioned.getVersion() < eventVersion) {
try {
logger.debugf("Got replace remote entity event prematurely for entity '%s', will try again. Event version: %d, got: %d",
key, eventVersion, remoteSessionVersioned == null ? -1 : remoteSessionVersioned.getVersion());
Thread.sleep(new Random().nextInt(sleepInterval)); // using exponential backoff
continue;
} catch (InterruptedException ex) {
continue;
} finally {
sleepInterval = sleepInterval << 1;
}
}
SessionEntity remoteSession = remoteSessionVersioned.getValue().getEntity();
logger.debugf("Read session entity from the remote cache: %s . replaceRetries=%d", remoteSession, replaceRetries);
SessionEntityWrapper<V> sessionWrapper = remoteSession.mergeRemoteEntityWithLocalEntity(localEntityWrapper);
KeycloakModelUtils.runJobInTransaction(sessionFactory, (session -> {
RealmModel realm = session.realms().getRealm(sessionWrapper.getEntity().getRealmId());
ClientModel client = sessionWrapper.getClientIfNeeded(realm);
long lifespanMs = lifespanMsLoader.apply(realm, client, sessionWrapper.getEntity());
long maxIdleTimeMs = maxIdleTimeMsLoader.apply(realm, client, sessionWrapper.getEntity());
// We received event from remoteCache, so we won't update it back
replaced.set(cache.getAdvancedCache().withFlags(Flag.SKIP_CACHE_STORE, Flag.SKIP_CACHE_LOAD, Flag.IGNORE_RETURN_VALUES)
.replace(key, localEntityWrapper, sessionWrapper, lifespanMs, TimeUnit.MILLISECONDS, maxIdleTimeMs, TimeUnit.MILLISECONDS));
}));
if (! replaced.get()) {
logger.debugf("Did not succeed in merging sessions, will try again: %s", remoteSession);
}
} while (replaceRetries < MAXIMUM_REPLACE_RETRIES && ! replaced.get());
}
@ClientCacheEntryRemoved
public void removed(ClientCacheEntryRemovedEvent event) {
K key = (K) event.getKey();
if (shouldUpdateLocalCache(event.getType(), key, event.isCommandRetried())) {
this.executor.submit(event, () -> {
// We received event from remoteCache, so we won't update it back
cache.getAdvancedCache().withFlags(Flag.SKIP_CACHE_STORE, Flag.SKIP_CACHE_LOAD, Flag.IGNORE_RETURN_VALUES)
.remove(key);
});
}
}
// For distributed caches, ensure that the local modification is executed only on the owner OR if event.isCommandRetried
protected boolean shouldUpdateLocalCache(ClientEvent.Type type, K key, boolean commandRetried) {
boolean result;
// Case when the cache is stopping or already stopped
if (!cache.getStatus().allowInvocations()) {
return false;
}
if (commandRetried) {
result = true;
} else {
result = topologyInfo.amIOwner(cache, key);
}
logger.debugf("Received event from remote store. Event '%s', key '%s', skip '%b'", type, key, !result);
return result;
}
public static <K, V extends SessionEntity> RemoteCacheSessionListener createListener(KeycloakSession session, Cache<K, SessionEntityWrapper<V>> cache, RemoteCache<K, SessionEntityWrapper<V>> remoteCache,
SessionFunction<V> lifespanMsLoader, SessionFunction<V> maxIdleTimeMsLoader, Runnable onFailover) {
/*boolean isCoordinator = InfinispanUtil.isCoordinator(cache);
// Just cluster coordinator will fetch userSessions from remote cache.
// In case the coordinator fails over during the state fetch, there is a slight risk that not all userSessions will be fetched to the local cluster. Assumed acceptable for now
RemoteCacheSessionListener listener;
if (isCoordinator) {
logger.infof("Will fetch initial state from remote cache for cache '%s'", cache.getName());
listener = new FetchInitialStateCacheListener();
} else {
logger.infof("Won't fetch initial state from remote cache for cache '%s'", cache.getName());
listener = new DontFetchInitialStateCacheListener();
}*/
RemoteCacheSessionListener<K, V> listener = new RemoteCacheSessionListener<>(onFailover);
listener.init(session, cache, remoteCache, lifespanMsLoader, maxIdleTimeMsLoader);
return listener;
}
}
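
The replaceRemoteEntityInCache loop above retries with a randomized, exponentially growing sleep when a modification event arrives before the remote cache exposes the matching version. The standalone sketch below isolates that retry shape; the retry count, base interval, and helper names are illustrative only.

import java.util.concurrent.ThreadLocalRandom;

// Illustration of bounded retry with randomized, exponentially growing sleep intervals.
public class BackoffRetryExample {

    static boolean retryWithBackoff(int maxRetries, java.util.function.BooleanSupplier attempt)
            throws InterruptedException {
        int sleepInterval = 25; // milliseconds
        for (int retries = 0; retries < maxRetries; retries++) {
            if (attempt.getAsBoolean()) {
                return true;
            }
            // Random jitter bounded by the current interval, then double the bound.
            Thread.sleep(ThreadLocalRandom.current().nextInt(sleepInterval));
            sleepInterval = sleepInterval << 1;
        }
        return false;
    }

    public static void main(String[] args) throws InterruptedException {
        int[] calls = {0};
        boolean ok = retryWithBackoff(10, () -> ++calls[0] >= 3); // succeeds on the third attempt
        System.out.println("succeeded=" + ok + " after " + calls[0] + " attempts");
    }
}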

View File

@ -1,185 +0,0 @@
/*
* Copyright 2016 Red Hat, Inc. and/or its affiliates
* and other contributors as indicated by the @author tags.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.keycloak.models.sessions.infinispan.remotestore;
import org.infinispan.Cache;
import org.infinispan.client.hotrod.MetadataValue;
import org.infinispan.client.hotrod.RemoteCache;
import org.infinispan.commons.util.CloseableIterator;
import org.infinispan.context.Flag;
import org.jboss.logging.Logger;
import org.keycloak.common.util.Retry;
import org.keycloak.connections.infinispan.DefaultInfinispanConnectionProviderFactory;
import org.keycloak.connections.infinispan.InfinispanConnectionProvider;
import org.keycloak.models.KeycloakSession;
import org.keycloak.models.sessions.infinispan.initializer.SessionLoader;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;
/**
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
*/
public class RemoteCacheSessionsLoader implements SessionLoader<RemoteCacheSessionsLoaderContext, SessionLoader.WorkerContext, SessionLoader.WorkerResult> {
private static final Logger log = Logger.getLogger(RemoteCacheSessionsLoader.class);
private final String cacheName;
private final int sessionsPerSegment;
public RemoteCacheSessionsLoader(String cacheName, int sessionsPerSegment) {
this.cacheName = cacheName;
this.sessionsPerSegment = sessionsPerSegment;
}
@Override
public RemoteCacheSessionsLoaderContext computeLoaderContext() {
return new RemoteCacheSessionsLoaderContext(sessionsPerSegment);
}
@Override
public WorkerContext computeWorkerContext(int segment) {
return new WorkerContext(segment);
}
@Override
public WorkerResult loadSessions(KeycloakSession session, RemoteCacheSessionsLoaderContext loaderContext, WorkerContext ctx) {
Cache<Object, Object> cache = getCache(session);
Cache<Object, Object> decoratedCache = cache.getAdvancedCache().withFlags(Flag.SKIP_CACHE_LOAD, Flag.SKIP_CACHE_STORE, Flag.IGNORE_RETURN_VALUES);
RemoteCache<?, ?> remoteCache = getRemoteCache(session);
int countLoaded = 0;
try (CloseableIterator<Map.Entry<Object, MetadataValue<Object>>> it = remoteCache.retrieveEntriesWithMetadata(null, loaderContext.getSessionsPerSegment())) {
Map<Object, Object> toInsertExpiring = new HashMap<>(loaderContext.getSessionsPerSegment());
Map<Object, Object> toInsertImmortal = new HashMap<>(loaderContext.getSessionsPerSegment());
int count = 0;
int maxLifespanExpiring = 0;
int maxIdleExpiring = -1;
int maxIdleImmortal = -1;
while (it.hasNext()) {
Map.Entry<Object, MetadataValue<Object>> entry = it.next();
boolean isImmortal = entry.getValue().getLifespan() < 0;
boolean shouldInsert = true;
if (!isImmortal) {
// Calculate the remaining lifetime based on the current wall-clock time, not Keycloak time, as the remote Infinispan isn't on Keycloak's clock.
// The lifetime will be larger than on the remote store for those entries, but all sessions contain a timestamp which will be validated anyway.
// If we didn't trust the clock calculations here, we would instead use the maxLifeSpan as is, which could enlarge the expiry time significantly.
int remainingLifespan = entry.getValue().getLifespan() - (int) ((System.currentTimeMillis() - entry.getValue().getCreated()) / 1000);
maxLifespanExpiring = Math.max(maxLifespanExpiring, remainingLifespan);
if (remainingLifespan <= 0) {
shouldInsert = false;
}
}
if (entry.getValue().getMaxIdle() > 0) {
// The max idle time on the remote store is set to the max lifetime as remote store entries are not touched on read, and therefore would otherwise expire too early.
// Still, this is the only number we have available, so we use it.
if (isImmortal) {
maxIdleImmortal = Math.max(maxIdleImmortal, entry.getValue().getMaxIdle());
} else {
maxIdleExpiring = Math.max(maxIdleExpiring, entry.getValue().getMaxIdle());
}
}
if (shouldInsert) {
(isImmortal ? toInsertImmortal : toInsertExpiring).put(entry.getKey(), entry.getValue().getValue());
++countLoaded;
}
if (++count == loaderContext.getSessionsPerSegment()) {
if (!toInsertExpiring.isEmpty()) {
insertSessions(decoratedCache, toInsertExpiring, maxIdleExpiring, maxLifespanExpiring);
toInsertExpiring.clear();
maxLifespanExpiring = 0;
maxIdleExpiring = -1;
}
if (!toInsertImmortal.isEmpty()) {
insertSessions(decoratedCache, toInsertImmortal, maxIdleImmortal, -1);
toInsertImmortal.clear();
maxIdleImmortal = -1;
}
count = 0;
}
}
// last batch
if (!toInsertExpiring.isEmpty()) {
insertSessions(decoratedCache, toInsertExpiring, maxIdleExpiring, maxLifespanExpiring);
}
if (!toInsertImmortal.isEmpty()) {
insertSessions(decoratedCache, toInsertImmortal, maxIdleImmortal, -1);
}
} catch (RuntimeException e) {
log.warnf(e, "Error loading sessions from remote cache '%s' for segment '%d'", remoteCache.getName(), ctx.segment());
throw e;
}
log.debugf("Successfully finished loading sessions from cache '%s' . Segment: %d, Count of sessions loaded: %d", cache.getName(), ctx.segment(), countLoaded);
return new WorkerResult(true, ctx.segment());
}
private void insertSessions(Cache<Object, Object> cache, Map<Object, Object> entries, int maxIdle, int lifespan) {
log.debugf("Adding %d entries to cache '%s'", entries.size(), cacheName);
// The `putAll` operation might time out when a node becomes unavailable, therefore, retry.
Retry.executeWithBackoff(
(int iteration) -> {
DefaultInfinispanConnectionProviderFactory.runWithReadLockOnCacheManager(() -> {
// With Infinispan 14.0.21/14.0.19, we've seen deadlocks in tests where this future never completed when shutting down the internal Infinispan.
// Therefore, prevent the shutdown of the internal Infinispan during this step.
cache.putAll(entries, lifespan, TimeUnit.SECONDS, maxIdle, TimeUnit.SECONDS);
});
},
(iteration, throwable) -> log.warnf("Unable to put entries into the cache in iteration %s", iteration, throwable),
3,
10);
}
@Override
public void afterAllSessionsLoaded() {
}
protected Cache getCache(KeycloakSession session) {
InfinispanConnectionProvider ispn = session.getProvider(InfinispanConnectionProvider.class);
return ispn.getCache(cacheName);
}
// Get remoteCache, which may be secured
protected RemoteCache getRemoteCache(KeycloakSession session) {
InfinispanConnectionProvider ispn = session.getProvider(InfinispanConnectionProvider.class);
return ispn.getRemoteCache(cacheName);
}
@Override
public String toString() {
return new StringBuilder("RemoteCacheSessionsLoader [ ")
.append("cacheName: ").append(cacheName)
.append(", sessionsPerSegment: ").append(sessionsPerSegment)
.append(" ]")
.toString();
}
}
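
As a side note on the expiration handling in the loader removed above: the remaining-lifespan arithmetic can be illustrated with a small, self-contained sketch. The numbers below are made up for illustration and are not taken from this commit.

```java
import java.util.concurrent.TimeUnit;

// Illustrative sketch of the remaining-lifespan computation used by the removed loader.
// All values are hypothetical; real values come from the remote cache entry metadata.
public class RemainingLifespanExample {

    public static void main(String[] args) {
        long nowMs = System.currentTimeMillis();
        long createdMs = nowMs - TimeUnit.MINUTES.toMillis(10); // entry created 10 minutes ago
        int lifespanSec = 3600;                                 // 1 hour lifespan reported by the remote store

        int remainingLifespan = lifespanSec - (int) ((nowMs - createdMs) / 1000);
        // remainingLifespan is roughly 3000 seconds here. Entries with a value <= 0 were skipped;
        // the rest were batched and written via cache.putAll(entries, lifespan, SECONDS, maxIdle, SECONDS).
        System.out.println("Remaining lifespan in seconds: " + remainingLifespan);
    }
}
```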

View File

@ -1,48 +0,0 @@
/*
* Copyright 2017 Red Hat, Inc. and/or its affiliates
* and other contributors as indicated by the @author tags.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.keycloak.models.sessions.infinispan.remotestore;
import org.keycloak.models.sessions.infinispan.initializer.SessionLoader;
/**
*
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
*/
public class RemoteCacheSessionsLoaderContext extends SessionLoader.LoaderContext {
private final int sessionsPerSegment;
public RemoteCacheSessionsLoaderContext(int sessionsPerSegment) {
super(1);
this.sessionsPerSegment = sessionsPerSegment;
}
public int getSessionsPerSegment() {
return sessionsPerSegment;
}
@Override
public String toString() {
return new StringBuilder("RemoteCacheSessionsLoaderContext [ ")
.append("segmentsCount: ").append(getSegmentsCount())
.append(", sessionsPerSegment: ").append(sessionsPerSegment)
.append(" ]")
.toString();
}
}

View File

@ -1,161 +0,0 @@
/*
* Copyright 2017 Red Hat, Inc. and/or its affiliates
* and other contributors as indicated by the @author tags.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.keycloak.cluster.infinispan;
import java.util.HashSet;
import java.util.Set;
import org.infinispan.Cache;
import org.infinispan.client.hotrod.RemoteCache;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.persistence.remote.configuration.RemoteStoreConfigurationBuilder;
import org.jboss.logging.Logger;
import org.junit.Assert;
import org.junit.Ignore;
import org.junit.Test;
import org.keycloak.common.util.Time;
import org.keycloak.connections.infinispan.InfinispanConnectionProvider;
import org.keycloak.models.KeycloakSession;
import org.keycloak.models.sessions.infinispan.changes.SessionEntityWrapper;
import org.keycloak.models.sessions.infinispan.entities.UserSessionEntity;
import org.keycloak.models.sessions.infinispan.initializer.SessionLoader;
import org.keycloak.models.sessions.infinispan.remotestore.RemoteCacheSessionsLoader;
import org.keycloak.models.sessions.infinispan.remotestore.RemoteCacheSessionsLoaderContext;
import org.keycloak.connections.infinispan.InfinispanUtil;
/**
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
*/
public class RemoteCacheSessionsLoaderTest {
protected static final Logger logger = Logger.getLogger(RemoteCacheSessionsLoaderTest.class);
private static final int COUNT = 10000;
@Test
@Ignore
public void testRemoteCache() throws Exception {
String cacheName = InfinispanConnectionProvider.USER_SESSION_CACHE_NAME;
Cache cache1 = createManager(1, cacheName).getCache(cacheName);
Cache cache2 = cache1.getCacheManager().getCache("local");
RemoteCache remoteCache = InfinispanUtil.getRemoteCache(cache1);
cache1.clear();
cache2.clear();
remoteCache.clear();
try {
for (int i=0 ; i<COUNT ; i++) {
// Create initial item
UserSessionEntity session = new UserSessionEntity("loader-key-" + i);
session.setRealmId("master");
session.setBrokerSessionId("!23123123");
session.setBrokerUserId(null);
session.setUser("admin");
session.setLoginUsername("admin");
session.setIpAddress("123.44.143.178");
session.setStarted(Time.currentTime());
session.setLastSessionRefresh(Time.currentTime());
SessionEntityWrapper<UserSessionEntity> wrappedSession = new SessionEntityWrapper<>(session);
// Create caches, listeners and finally worker threads
remoteCache.put("loader-key-" + i, wrappedSession);
Assert.assertFalse(cache2.containsKey("loader-key-" + i));
if (i % 1000 == 0) {
logger.infof("%d sessions added", i);
}
}
// RemoteCacheSessionsLoader loader = new RemoteCacheSessionsLoader(InfinispanConnectionProvider.USER_SESSION_CACHE_NAME, 64) {
//
// @Override
// protected Cache getCache(KeycloakSession session) {
// return cache2;
// }
//
// @Override
// protected RemoteCache getRemoteCache(KeycloakSession session) {
// return remoteCache;
// }
//
// };
// Just to be able to test serializability
RemoteCacheSessionsLoader loader = new CustomLoader(cacheName, 64, cache2, remoteCache);
RemoteCacheSessionsLoaderContext ctx = loader.computeLoaderContext();
Assert.assertEquals(ctx.getSessionsPerSegment(), 64);
int totalCount = 0;
logger.infof("segmentsCount: %d", ctx.getSegmentsCount());
Set<String> visitedKeys = new HashSet<>();
for (int currentSegment=0 ; currentSegment<ctx.getSegmentsCount() ; currentSegment++) {
logger.infof("Loading segment %d", currentSegment);
loader.loadSessions(null, ctx, new SessionLoader.WorkerContext(currentSegment));
logger.infof("Loaded %d keys for segment %d", cache2.keySet().size(), currentSegment);
totalCount = totalCount + cache2.keySet().size();
visitedKeys.addAll(cache2.keySet());
cache2.clear();
}
Assert.assertEquals(totalCount, COUNT);
Assert.assertEquals(visitedKeys.size(), COUNT);
logger.infof("SUCCESS: Loaded %d sessions", totalCount);
} finally {
// Finish JVM
cache1.getCacheManager().stop();
}
}
private static EmbeddedCacheManager createManager(int threadId, String cacheName) {
return new TestCacheManagerFactory().createManager(threadId, cacheName, RemoteStoreConfigurationBuilder.class);
}
public static class CustomLoader extends RemoteCacheSessionsLoader {
private final transient Cache cache2;
private final transient RemoteCache remoteCache;
public CustomLoader(String cacheName, int sessionsPerSegment, Cache cache2, RemoteCache remoteCache) {
super(cacheName, sessionsPerSegment);
this.cache2 = cache2;
this.remoteCache = remoteCache;
}
@Override
protected Cache getCache(KeycloakSession session) {
return cache2;
}
@Override
protected RemoteCache getRemoteCache(KeycloakSession session) {
return remoteCache;
}
}
}

View File

@ -1,123 +0,0 @@
/*
* Copyright 2016 Red Hat, Inc. and/or its affiliates
* and other contributors as indicated by the @author tags.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.keycloak.models.sessions.infinispan.initializer;
import org.junit.Assert;
import org.junit.Test;
import org.keycloak.models.sessions.infinispan.remotestore.RemoteCacheSessionsLoaderContext;
import org.keycloak.storage.CacheableStorageProviderModel;
import java.text.DateFormat;
import java.util.Calendar;
import java.util.Date;
import java.util.List;
/**
* @author <a href="mailto:mposolda@redhat.com">Marek Posolda</a>
*/
public class InitializerStateTest {
@Test
public void testOfflineLoaderContext() {
OfflinePersistentLoaderContext ctx = new OfflinePersistentLoaderContext(28, 5);
Assert.assertEquals(ctx.getSegmentsCount(), 6);
ctx = new OfflinePersistentLoaderContext(19, 5);
Assert.assertEquals(ctx.getSegmentsCount(), 4);
ctx = new OfflinePersistentLoaderContext(20, 5);
Assert.assertEquals(ctx.getSegmentsCount(), 4);
ctx = new OfflinePersistentLoaderContext(21, 5);
Assert.assertEquals(ctx.getSegmentsCount(), 5);
}
@Test
public void testRemoteLoaderContext() {
assertSegmentsForRemoteLoader(64, 1);
}
private void assertSegmentsForRemoteLoader(int sessionsPerSegment, int expectedSegments) {
RemoteCacheSessionsLoaderContext ctx = new RemoteCacheSessionsLoaderContext(sessionsPerSegment);
Assert.assertEquals(expectedSegments, ctx.getSegmentsCount());
}
@Test
public void testComputationState() {
OfflinePersistentLoaderContext ctx = new OfflinePersistentLoaderContext(28, 5);
Assert.assertEquals(ctx.getSegmentsCount(), 6);
InitializerState state = new InitializerState(ctx.getSegmentsCount());
Assert.assertFalse(state.isFinished());
List<Integer> segments = state.getSegmentsToLoad(0, 3);
assertContains(segments, 3, 0, 1, 2);
state.markSegmentFinished(1);
state.markSegmentFinished(2);
segments = state.getSegmentsToLoad(0, 3);
assertContains(segments, 1, 0);
state.markSegmentFinished(0);
state.markSegmentFinished(3);
segments = state.getSegmentsToLoad(4, 4);
assertContains(segments, 2, 4, 5);
state.markSegmentFinished(4);
state.markSegmentFinished(5);
segments = state.getSegmentsToLoad(4, 4);
Assert.assertTrue(segments.isEmpty());
Assert.assertTrue(state.isFinished());
}
private void assertContains(List<Integer> segments, int expectedLength, int... expected) {
Assert.assertEquals(segments.size(), expectedLength);
for (int i : expected) {
Assert.assertTrue(segments.contains(i));
}
}
@Test
public void testDailyTimeout() {
Date date = new Date(CacheableStorageProviderModel.dailyTimeout(10, 30));
System.out.println(DateFormat.getDateTimeInstance(DateFormat.FULL, DateFormat.FULL).format(date));
date = new Date(CacheableStorageProviderModel.dailyTimeout(17, 45));
System.out.println(DateFormat.getDateTimeInstance(DateFormat.FULL, DateFormat.FULL).format(date));
date = new Date(CacheableStorageProviderModel.weeklyTimeout(Calendar.MONDAY, 13, 45));
System.out.println(DateFormat.getDateTimeInstance(DateFormat.FULL, DateFormat.FULL).format(date));
date = new Date(CacheableStorageProviderModel.weeklyTimeout(Calendar.THURSDAY, 13, 45));
System.out.println(DateFormat.getDateTimeInstance(DateFormat.FULL, DateFormat.FULL).format(date));
System.out.println("----");
Calendar cal = Calendar.getInstance();
cal.add(Calendar.HOUR, 1);
int hour = cal.get(Calendar.HOUR_OF_DAY);
int min = cal.get(Calendar.MINUTE);
date = new Date(cal.getTimeInMillis());
System.out.println(DateFormat.getDateTimeInstance(DateFormat.FULL, DateFormat.FULL).format(date));
date = new Date(CacheableStorageProviderModel.dailyTimeout(hour, min));
System.out.println(DateFormat.getDateTimeInstance(DateFormat.FULL, DateFormat.FULL).format(date));
cal = Calendar.getInstance();
cal.set(Calendar.DAY_OF_WEEK, Calendar.MONDAY);
date = new Date(cal.getTimeInMillis());
System.out.println(DateFormat.getDateTimeInstance(DateFormat.FULL, DateFormat.FULL).format(date));
}
}

View File

@ -195,7 +195,7 @@ public class UserStorageSyncManager {
final ClusterProvider cp = session.getProvider(ClusterProvider.class);
if (cp != null) {
UserStorageProviderClusterEvent event = UserStorageProviderClusterEvent.createEvent(removed, realm.getId(), provider);
cp.notify(USER_STORAGE_TASK_KEY, event, false, ClusterProvider.DCNotify.ALL_DCS);
cp.notify(USER_STORAGE_TASK_KEY, event, false);
}
}

View File

@ -24,7 +24,7 @@ import static org.keycloak.quarkus.runtime.configuration.mappers.PropertyMapper.
final class CachingPropertyMappers {
private static final String REMOTE_HOST_SET = "remote host is set";
private static final String MULTI_SITE_OR_EMBEDDED_REMOTE_FEATURE_SET = "feature '%s', '%s' or '%s' is set".formatted(Profile.Feature.MULTI_SITE.getKey(), Profile.Feature.CLUSTERLESS.getKey(), Profile.Feature.CACHE_EMBEDDED_REMOTE_STORE.getKey());
private static final String MULTI_SITE_OR_EMBEDDED_REMOTE_FEATURE_SET = "feature '%s' or '%s' is set".formatted(Profile.Feature.MULTI_SITE.getKey(), Profile.Feature.CLUSTERLESS.getKey());
private static final String MULTI_SITE_FEATURE_SET = "feature '%s' or '%s' is set".formatted(Profile.Feature.MULTI_SITE.getKey(), Profile.Feature.CLUSTERLESS.getKey());
private static final String CACHE_STACK_SET_TO_ISPN = "'cache' type is set to '" + CachingOptions.Mechanism.ispn.name() + "'";
@ -203,7 +203,7 @@ final class CachingPropertyMappers {
}
private static boolean isRemoteCacheHostEnabled() {
return InfinispanUtils.isRemoteInfinispan() || Profile.isFeatureEnabled(Profile.Feature.CACHE_EMBEDDED_REMOTE_STORE);
return InfinispanUtils.isRemoteInfinispan();
}
private static void validateCachingOptionIsPresent(Option<?> optionSet, Option<?> optionRequired) {

View File

@ -42,14 +42,12 @@ import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.configuration.cache.HashConfiguration;
import org.infinispan.configuration.cache.PersistenceConfigurationBuilder;
import org.infinispan.configuration.global.ShutdownHookBehavior;
import org.infinispan.configuration.parsing.ConfigurationBuilderHolder;
import org.infinispan.configuration.parsing.ParserRegistry;
import org.infinispan.manager.DefaultCacheManager;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.metrics.config.MicrometerMeterRegisterConfigurationBuilder;
import org.infinispan.persistence.remote.configuration.ExhaustedAction;
import org.infinispan.persistence.remote.configuration.RemoteStoreConfigurationBuilder;
import org.infinispan.protostream.descriptors.FileDescriptor;
import org.infinispan.query.remote.client.ProtobufMetadataManagerConstants;
@ -87,6 +85,7 @@ import static org.keycloak.connections.infinispan.InfinispanConnectionProvider.L
import static org.keycloak.connections.infinispan.InfinispanConnectionProvider.LOGIN_FAILURE_CACHE_NAME;
import static org.keycloak.connections.infinispan.InfinispanConnectionProvider.OFFLINE_CLIENT_SESSION_CACHE_NAME;
import static org.keycloak.connections.infinispan.InfinispanConnectionProvider.OFFLINE_USER_SESSION_CACHE_NAME;
import static org.keycloak.connections.infinispan.InfinispanConnectionProvider.USER_AND_CLIENT_SESSION_CACHES;
import static org.keycloak.connections.infinispan.InfinispanConnectionProvider.USER_SESSION_CACHE_NAME;
import static org.keycloak.connections.infinispan.InfinispanConnectionProvider.WORK_CACHE_NAME;
import static org.keycloak.connections.infinispan.InfinispanConnectionProvider.skipSessionsCacheIfRequired;
@ -325,7 +324,6 @@ public class CacheManagerFactory {
if (jGroupsConfigurator.isLocal()) {
throw new RuntimeException("Unable to use clustered cache with local mode.");
}
configureRemoteStores(builder);
}
jGroupsConfigurator.configure(session);
configureCacheMaxCount(builder, CachingOptions.CLUSTERED_MAX_COUNT_CACHES);
@ -385,75 +383,34 @@ public class CacheManagerFactory {
return Integer.getInteger("kc.cache-ispn-start-timeout", 120);
}
private static void configureRemoteStores(ConfigurationBuilderHolder builder) {
// if one of the remote store command line parameters is defined, some others are required; otherwise assume it is configured via XML only
if (Configuration.getOptionalKcValue(CACHE_REMOTE_HOST_PROPERTY).isPresent()) {
String cacheRemoteHost = requiredStringProperty(CACHE_REMOTE_HOST_PROPERTY);
Integer cacheRemotePort = Configuration.getOptionalKcValue(CACHE_REMOTE_PORT_PROPERTY)
.map(Integer::parseInt)
.orElse(ConfigurationProperties.DEFAULT_HOTROD_PORT);
SSLContext sslContext = createSSLContext();
Arrays.stream(CLUSTERED_CACHE_NAMES).forEach(cacheName -> {
PersistenceConfigurationBuilder persistenceCB = builder.getNamedConfigurationBuilders().get(cacheName).persistence();
//if specified via command line -> cannot be defined in the xml file
if (!persistenceCB.stores().isEmpty()) {
throw new RuntimeException(String.format("Remote store for cache '%s' is already configured via CLI parameters. It should not be present in the XML file.", cacheName));
}
var storeBuilder = persistenceCB.addStore(RemoteStoreConfigurationBuilder.class);
storeBuilder
.rawValues(true)
.shared(true)
.segmented(false)
.remoteCacheName(cacheName)
.connectionPool()
.maxActive(16)
.exhaustedAction(ExhaustedAction.CREATE_NEW)
.addServer()
.host(cacheRemoteHost)
.port(cacheRemotePort);
if (isRemoteTLSEnabled()) {
storeBuilder.remoteSecurity()
.ssl()
.enable()
.sslContext(sslContext)
.sniHostName(cacheRemoteHost);
}
if (isRemoteAuthenticationEnabled()) {
storeBuilder.remoteSecurity()
.authentication()
.enable()
.username(requiredStringProperty(CACHE_REMOTE_USERNAME_PROPERTY))
.password(requiredStringProperty(CACHE_REMOTE_PASSWORD_PROPERTY))
.realm("default")
.saslMechanism(SCRAM_SHA_512);
}
});
}
}
/**
*
* RemoteStores were previously used when running Keycloak in the CrossDC environment, and Keycloak code
* contained a lot of performance optimizations to make this work smoothly.
* These optimizations are now removed as the recommended multi-site setup no longer relies on RemoteStores.
* Existing blueprints in the wild that still use RemoteStores may therefore turn into very inefficient setups.
* <p />
* For this reason, we need to be more opinionated on what configurations we allow,
* especially for user and client sessions.
* This method is responsible for checking the Infinispan configuration used and either change the configuration to
* more effective when possible or refuse to start with recommendations for users to change their config.
*
* @param builder Cache configuration builder
*/
private static void checkForRemoteStores(ConfigurationBuilderHolder builder) {
if (Profile.isFeatureEnabled(Profile.Feature.CACHE_EMBEDDED_REMOTE_STORE) && Profile.isFeatureEnabled(Profile.Feature.MULTI_SITE)) {
logger.fatalf("Feature %s is now deprecated.%nFor multi-site (cross-dc) support, enable only %s.",
Profile.Feature.CACHE_EMBEDDED_REMOTE_STORE.getKey(), Profile.Feature.MULTI_SITE.getKey());
throw new RuntimeException("The features " + Profile.Feature.CACHE_EMBEDDED_REMOTE_STORE.getKey() + " and " + Profile.Feature.MULTI_SITE.getKey() + " must not be enabled at the same time.");
}
if (Profile.isFeatureEnabled(Profile.Feature.CACHE_EMBEDDED_REMOTE_STORE) && Profile.isFeatureEnabled(Profile.Feature.CLUSTERLESS)) {
logger.fatalf("Feature %s is now deprecated.%nFor multi-site (cross-dc) support, enable only %s.",
Profile.Feature.CACHE_EMBEDDED_REMOTE_STORE.getKey(), Profile.Feature.CLUSTERLESS.getKey());
throw new RuntimeException("The features " + Profile.Feature.CACHE_EMBEDDED_REMOTE_STORE.getKey() + " and " + Profile.Feature.CLUSTERLESS.getKey() + " must not be enabled at the same time.");
}
if (!Profile.isFeatureEnabled(Profile.Feature.CACHE_EMBEDDED_REMOTE_STORE)) {
if (builder.getNamedConfigurationBuilders().values().stream().anyMatch(CacheManagerFactory::hasRemoteStore)) {
logger.fatalf("Remote stores are not supported for embedded caches as feature %s is not enabled. This feature is disabled by default as it is now deprecated.%nFor keeping user sessions across restarts, use feature %s which is enabled by default.%nFor multi-site (cross-dc) support, enable %s.",
Profile.Feature.CACHE_EMBEDDED_REMOTE_STORE.getKey(), Profile.Feature.PERSISTENT_USER_SESSIONS.getKey(), Profile.Feature.MULTI_SITE.getKey());
throw new RuntimeException("Remote store is not supported as feature " + Profile.Feature.CACHE_EMBEDDED_REMOTE_STORE.getKey() + " is not enabled.");
for (String cacheName : USER_AND_CLIENT_SESSION_CACHES) {
ConfigurationBuilder cacheConfigurationBuilder = builder.getNamedConfigurationBuilders().get(cacheName);
if (cacheConfigurationBuilder != null && hasRemoteStore(cacheConfigurationBuilder)) {
if (Profile.isFeatureEnabled(Profile.Feature.PERSISTENT_USER_SESSIONS)) {
logger.warnf("Feature %s is enabled and remote store detected for cache '%s'. Remote stores are no longer needed when sessions stored in the database. The configuration will be ignored.", Profile.Feature.PERSISTENT_USER_SESSIONS.getKey(), cacheName);
cacheConfigurationBuilder.persistence().stores().removeIf(RemoteStoreConfigurationBuilder.class::isInstance);
} else {
logger.fatalf("Remote stores are not supported for embedded caches storing user and client sessions.%nFor keeping user sessions across restarts, use feature %s which is enabled by default.%nFor multi-site support, enable %s.",
Profile.Feature.PERSISTENT_USER_SESSIONS.getKey(), Profile.Feature.MULTI_SITE.getKey());
throw new RuntimeException("Remote stores for storing user and client sessions are not supported.");
}
}
}
}
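
For illustration, this is roughly the kind of configuration that the check above now flags for the session caches, expressed with the same Infinispan builder API that the removed `configureRemoteStores` used. The host and port below are placeholders, not values from this commit; with the `persistent-user-sessions` feature enabled such a store is stripped with a warning, otherwise startup fails.

```java
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.persistence.remote.configuration.RemoteStoreConfigurationBuilder;

// Minimal sketch of a "sessions" cache configured with a remote store.
// It mirrors the removed configureRemoteStores() above; it is not code from this commit.
public class RemoteStoreSessionsCacheSketch {

    public static ConfigurationBuilder sessionsCacheWithRemoteStore() {
        ConfigurationBuilder builder = new ConfigurationBuilder();
        builder.persistence()
                .addStore(RemoteStoreConfigurationBuilder.class)
                .rawValues(true)
                .shared(true)
                .segmented(false)
                .remoteCacheName("sessions")
                .addServer()
                    .host("infinispan.example.com") // placeholder host
                    .port(11222);                   // default Hot Rod port
        // checkForRemoteStores() above either removes this store (persistent-user-sessions enabled)
        // or refuses to start (feature disabled).
        return builder;
    }
}
```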

View File

@ -133,7 +133,7 @@ public class OptionsDistTest {
@Order(10)
@Launch({"start-dev", "--cache-remote-host=localhost"})
public void testCacheRemoteHostWithoutMultiSite(LaunchResult result) {
assertErrorStreamContains(result, "cache-remote-host available only when feature 'multi-site', 'clusterless' or 'cache-embedded-remote-store' is set");
assertErrorStreamContains(result, "cache-remote-host available only when feature 'multi-site' or 'clusterless' is set");
}
@DryRun

View File

@ -53,8 +53,8 @@ Cache:
The maximum number of entries that can be stored in-memory by the users cache.
--cache-remote-host <hostname>
The hostname of the external Infinispan cluster. Available only when feature
'multi-site', 'clusterless' or 'cache-embedded-remote-store' is set.
Required when feature 'multi-site' or 'clusterless' is set.
'multi-site' or 'clusterless' is set. Required when feature 'multi-site' or
'clusterless' is set.
Config:

View File

@ -81,8 +81,8 @@ Cache:
Available only when metrics are enabled.
--cache-remote-host <hostname>
The hostname of the external Infinispan cluster. Available only when feature
'multi-site', 'clusterless' or 'cache-embedded-remote-store' is set.
Required when feature 'multi-site' or 'clusterless' is set.
'multi-site' or 'clusterless' is set. Required when feature 'multi-site' or
'clusterless' is set.
--cache-remote-password <password>
The password for the authentication to the external Infinispan cluster. It is
optional if connecting to an unsecure external Infinispan cluster. If the

View File

@ -79,8 +79,8 @@ Cache:
The maximum number of entries that can be stored in-memory by the users cache.
--cache-remote-host <hostname>
The hostname of the external Infinispan cluster. Available only when feature
'multi-site', 'clusterless' or 'cache-embedded-remote-store' is set.
Required when feature 'multi-site' or 'clusterless' is set.
'multi-site' or 'clusterless' is set. Required when feature 'multi-site' or
'clusterless' is set.
--cache-stack <stack>
Define the default stack to use for cluster communication and node discovery.
Possible values are: jdbc-ping, kubernetes, jdbc-ping-udp (deprecated), tcp

View File

@ -82,8 +82,8 @@ Cache:
Available only when metrics are enabled.
--cache-remote-host <hostname>
The hostname of the external Infinispan cluster. Available only when feature
'multi-site', 'clusterless' or 'cache-embedded-remote-store' is set.
Required when feature 'multi-site' or 'clusterless' is set.
'multi-site' or 'clusterless' is set. Required when feature 'multi-site' or
'clusterless' is set.
--cache-remote-password <password>
The password for the authentication to the external Infinispan cluster. It is
optional if connecting to an unsecure external Infinispan cluster. If the

View File

@ -79,8 +79,8 @@ Cache:
The maximum number of entries that can be stored in-memory by the users cache.
--cache-remote-host <hostname>
The hostname of the external Infinispan cluster. Available only when feature
'multi-site', 'clusterless' or 'cache-embedded-remote-store' is set.
Required when feature 'multi-site' or 'clusterless' is set.
'multi-site' or 'clusterless' is set. Required when feature 'multi-site' or
'clusterless' is set.
--cache-stack <stack>
Define the default stack to use for cluster communication and node discovery.
Possible values are: jdbc-ping, kubernetes, jdbc-ping-udp (deprecated), tcp

View File

@ -82,8 +82,8 @@ Cache:
Available only when metrics are enabled.
--cache-remote-host <hostname>
The hostname of the external Infinispan cluster. Available only when feature
'multi-site', 'clusterless' or 'cache-embedded-remote-store' is set.
Required when feature 'multi-site' or 'clusterless' is set.
'multi-site' or 'clusterless' is set. Required when feature 'multi-site' or
'clusterless' is set.
--cache-remote-password <password>
The password for the authentication to the external Infinispan cluster. It is
optional if connecting to an unsecure external Infinispan cluster. If the

View File

@ -78,8 +78,8 @@ Cache:
The maximum number of entries that can be stored in-memory by the users cache.
--cache-remote-host <hostname>
The hostname of the external Infinispan cluster. Available only when feature
'multi-site', 'clusterless' or 'cache-embedded-remote-store' is set.
Required when feature 'multi-site' or 'clusterless' is set.
'multi-site' or 'clusterless' is set. Required when feature 'multi-site' or
'clusterless' is set.
--cache-stack <stack>
Define the default stack to use for cluster communication and node discovery.
Possible values are: jdbc-ping, kubernetes, jdbc-ping-udp (deprecated), tcp

View File

@ -81,8 +81,8 @@ Cache:
Available only when metrics are enabled.
--cache-remote-host <hostname>
The hostname of the external Infinispan cluster. Available only when feature
'multi-site', 'clusterless' or 'cache-embedded-remote-store' is set.
Required when feature 'multi-site' or 'clusterless' is set.
'multi-site' or 'clusterless' is set. Required when feature 'multi-site' or
'clusterless' is set.
--cache-remote-password <password>
The password for the authentication to the external Infinispan cluster. It is
optional if connecting to an unsecure external Infinispan cluster. If the

View File

@ -76,8 +76,8 @@ Cache:
The maximum number of entries that can be stored in-memory by the users cache.
--cache-remote-host <hostname>
The hostname of the external Infinispan cluster. Available only when feature
'multi-site', 'clusterless' or 'cache-embedded-remote-store' is set.
Required when feature 'multi-site' or 'clusterless' is set.
'multi-site' or 'clusterless' is set. Required when feature 'multi-site' or
'clusterless' is set.
--cache-stack <stack>
Define the default stack to use for cluster communication and node discovery.
Possible values are: jdbc-ping, kubernetes, jdbc-ping-udp (deprecated), tcp

View File

@ -79,8 +79,8 @@ Cache:
Available only when metrics are enabled.
--cache-remote-host <hostname>
The hostname of the external Infinispan cluster. Available only when feature
'multi-site', 'clusterless' or 'cache-embedded-remote-store' is set.
Required when feature 'multi-site' or 'clusterless' is set.
'multi-site' or 'clusterless' is set. Required when feature 'multi-site' or
'clusterless' is set.
--cache-remote-password <password>
The password for the authentication to the external Infinispan cluster. It is
optional if connecting to an unsecure external Infinispan cluster. If the

View File

@ -78,9 +78,15 @@ public interface ClusterProvider extends Provider {
* @param event
* @param ignoreSender if true, then sender node itself won't receive the notification
* @param dcNotify Specify which DCs to notify. See {@link DCNotify} enum values for more info
* @deprecated use {@link ClusterProvider#notify(String, ClusterEvent, boolean)} instead. {@link DCNotify} is deprecated. For removal in Keycloak 27.
*/
@Deprecated(since = "26.3", forRemoval = true)
void notify(String taskKey, ClusterEvent event, boolean ignoreSender, DCNotify dcNotify);
default void notify(String taskKey, ClusterEvent event, boolean ignoreSender) {
notify(taskKey, event, ignoreSender, DCNotify.ALL_DCS);
}
/**
* An alternative to {@link #notify(String, ClusterEvent, boolean, DCNotify)} that sends multiple events in a single
* network call.
@ -89,11 +95,23 @@ public interface ClusterProvider extends Provider {
* given {@code taskKey}
*
* @see #notify(String, ClusterEvent, boolean, DCNotify)
* @deprecated use {@link ClusterProvider#notify(String, Collection, boolean)} instead. {@link DCNotify} is deprecated. For removal in Keycloak 27.
*/
@Deprecated(since = "26.3", forRemoval = true)
default void notify(String taskKey, Collection<? extends ClusterEvent> events, boolean ignoreSender, DCNotify dcNotify) {
events.forEach(event -> notify(taskKey, event, ignoreSender, dcNotify));
}
default void notify(String taskKey, Collection<? extends ClusterEvent> events, boolean ignoreSender) {
events.forEach(event -> notify(taskKey, event, ignoreSender, DCNotify.ALL_DCS));
}
/**
* This enum is now used only in deprecated methods.
* All methods that use it have Javadoc suggesting an alternative.
* @deprecated For removal in Keycloak 27.
*/
@Deprecated(since = "26.3", forRemoval = true)
enum DCNotify {
/** Send message to all cluster nodes in all DCs **/
ALL_DCS,
@ -102,7 +120,7 @@ public interface ClusterProvider extends Provider {
LOCAL_DC_ONLY,
/** Send message to all cluster nodes in all datacenters, but NOT to this datacenter. Option "ignoreSender" of method {@link #notify} will be ignored as sender is ignored anyway due it is in this datacenter **/
ALL_BUT_LOCAL_DC
ALL_BUT_LOCAL_DC
}
}
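
For SPI callers, the migration away from `DCNotify` is mechanical: drop the last argument and use the new overloads, exactly as the `UserStorageSyncManager` change earlier in this commit does. A minimal sketch, assuming the standard `org.keycloak.cluster` types; the task key and events are placeholders:

```java
import java.util.List;

import org.keycloak.cluster.ClusterEvent;
import org.keycloak.cluster.ClusterProvider;

// Sketch of the call-site migration; "my-task-key" and the events are placeholders.
public class ClusterNotifyMigrationSketch {

    void before(ClusterProvider cluster, ClusterEvent event) {
        // Deprecated since 26.3, for removal in Keycloak 27:
        cluster.notify("my-task-key", event, false, ClusterProvider.DCNotify.ALL_DCS);
    }

    void after(ClusterProvider cluster, ClusterEvent event) {
        // New overload without DCNotify; behaves like ALL_DCS:
        cluster.notify("my-task-key", event, false);
    }

    void afterBulk(ClusterProvider cluster, List<ClusterEvent> events) {
        // The collection-based variant follows the same pattern:
        cluster.notify("my-task-key", events, false);
    }
}
```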

View File

@ -46,7 +46,7 @@ public class SessionTimeoutHelper {
/**
* The maximum time difference, which will be still tolerated when checking userSession idle timeout with periodic cleaner threads.
*
* Just the sessions, with the timeout bigger than this value are considered really time-outed and can be garbage-collected (Considering the cross-dc
* Just the sessions, with the timeout bigger than this value are considered really time-outed and can be garbage-collected (Considering the multi-site
* environment and the fact that some session updates on different DC can be postponed and seen on current DC with some delay).
*
* See {@link #PERIODIC_TASK_INTERVAL_SECONDS} and {@link #IDLE_TIMEOUT_WINDOW_SECONDS}

View File

@ -51,7 +51,7 @@ public interface SingleUseObjectProvider extends Provider {
/**
* This method returns data just if removal was successful. Implementation should guarantee that "remove" is single-use. So if
* 2 threads (even on different cluster nodes or on different cross-dc nodes) calls "remove(123)" concurrently, then just one of them
* 2 threads (even on different cluster nodes or on different multi-site nodes) calls "remove(123)" concurrently, then just one of them
* is allowed to succeed and return data back. It can't happen that both will succeed.
*
* @param key String

View File

@ -18,7 +18,6 @@
package org.keycloak.sessions;
import org.keycloak.models.ClientModel;
import org.keycloak.models.KeycloakSession;
import org.keycloak.models.RealmModel;
import org.keycloak.provider.Provider;
@ -98,8 +97,10 @@ public interface AuthenticationSessionProvider extends Provider {
* @param compoundId {@code AuthenticationSessionCompoundId} The method has no effect if {@code null}.
* @param authNotesFragment {@code Map<String, String>} Map with authNote values.
* Auth note is removed if the corresponding value in the map is {@code null}. Map itself can't be {@code null}.
* @deprecated For removal in Keycloak 27
*/
void updateNonlocalSessionAuthNotes(AuthenticationSessionCompoundId compoundId, Map<String, String> authNotesFragment);
@Deprecated(since = "26.3", forRemoval = true)
default void updateNonlocalSessionAuthNotes(AuthenticationSessionCompoundId compoundId, Map<String, String> authNotesFragment) {}
default void migrate(String modelVersion) {
}

View File

@ -28,16 +28,12 @@ import org.keycloak.models.Constants;
import org.keycloak.models.KeycloakSession;
import org.keycloak.models.RealmModel;
import org.keycloak.models.UserModel;
import org.keycloak.models.UserModel.RequiredAction;
import org.keycloak.services.Urls;
import org.keycloak.services.managers.AuthenticationSessionManager;
import org.keycloak.services.messages.Messages;
import org.keycloak.sessions.AuthenticationSessionCompoundId;
import org.keycloak.sessions.AuthenticationSessionModel;
import java.util.Collections;
import java.util.stream.Stream;
import jakarta.ws.rs.core.Response;
import jakarta.ws.rs.core.UriBuilder;
import jakarta.ws.rs.core.UriInfo;
@ -123,12 +119,6 @@ public class IdpVerifyAccountLinkActionTokenHandler extends AbstractActionTokenH
if (authSession != null) {
authSession.setAuthNote(IdpEmailVerificationAuthenticator.VERIFY_ACCOUNT_IDP_USERNAME, token.getIdentityProviderUsername());
} else {
session.authenticationSessions().updateNonlocalSessionAuthNotes(
compoundId,
Collections.singletonMap(IdpEmailVerificationAuthenticator.VERIFY_ACCOUNT_IDP_USERNAME, token.getIdentityProviderUsername())
);
}
return session.getProvider(LoginFormsProvider.class)

View File

@ -195,14 +195,8 @@ public class TokenManager {
ClientModel client = session.getContext().getClient();
AuthenticatedClientSessionModel clientSession = userSession.getAuthenticatedClientSessionByClient(client.getId());
// Can theoretically happen in cross-dc environment. Try to see if userSession with our client is available in remoteCache
if (clientSession == null) {
userSession = session.sessions().getUserSessionIfClientExists(realm, userSession.getId(), offline, client.getId());
if (userSession != null) {
clientSession = userSession.getAuthenticatedClientSessionByClient(client.getId());
} else {
throw new OAuthErrorException(OAuthErrorException.INVALID_GRANT, "Session doesn't have required client", "Session doesn't have required client");
}
throw new OAuthErrorException(OAuthErrorException.INVALID_GRANT, "Session doesn't have required client", "Session doesn't have required client");
}
if (!AuthenticationManager.isClientSessionValid(realm, client, userSession, clientSession)) {

View File

@ -432,151 +432,6 @@ and argument: `-p 8181`
3) Run loadbalancer (class `SimpleUndertowLoadBalancer`) without arguments and system properties. Loadbalancer runs on port 8180, so you can access Keycloak on `http://localhost:8180/auth`
## Cross-DC tests
Cross-DC tests use 2 data centers, each with one automatically started and one manually controlled backend server,
and 1 frontend loadbalancer server node that sits in front of all servers.
The browser usually communicates directly with the frontend node and the test controls where the HTTP requests
land by adjusting load balancer configuration (e.g. to direct the traffic to only a single DC).
For an example of a test, see [org.keycloak.testsuite.crossdc.ActionTokenCrossDCTest](tests/base/src/test/java/org/keycloak/testsuite/crossdc/ActionTokenCrossDCTest.java).
The cross-DC setup requires setting a profile specifying the cache server to use.
Use `cache-server-infinispan` Maven profile for Infinispan 10 or higher, or `cache-server-legacy-infinispan` profile for Infinispan 9 and lower.
Use `cache-server-datagrid` Maven profile for Datagrid 8 or higher, or `cache-server-legacy-datagrid` profile for Datagrid 7 and lower.
To specify a custom Java platform to run the cache server it is possible to set parameter: `-Dcache.server.java.home=<PATH_TO_JDK>`.
### Cache Authentication
With the WildFly/EAP based auth server option it is possible to enable authentication for the HotRod protocol by enabling the `cache-auth` profile.
It is possible to specify additional parameters:
- `-Dhotrod.sasl.mechanism`: SASL mechanism used by the hotrod protocol. Default value is `DIGEST-MD5`.
- `-Dkeycloak.connectionsInfinispan.hotrodProtocolVersion`: Version of the hotrod protocol.
Example: `-Pauth-server-wildfly,cache-server-infinispan,cache-auth -Dhotrod.sasl.mechanism=SCRAM-SHA-512`
Note: The cache authentication is not implemented for `SAMLAdapterCrossDCTest`.
Note: The `cache-auth` profile currently doesn't work with the legacy Infinispan/Datagrid modules. See: [KEYCLOAK-18336](https://issues.redhat.com/browse/KEYCLOAK-18336).
### Data Grid
Since Data Grid does not distribute an `infinispan-server` zip artifact, the `cache-server-datagrid` profile requires downloading the artifact
and installing it into the local Maven repository. For Red Hat Data Grid 8 and above, the command is the following:
mvn install:install-file \
-DgroupId=com.redhat -DartifactId=datagrid -Dpackaging=zip -Dclassifier=bin -DgeneratePom=true \
-Dversion=${DATAGRID_VERSION} -Dfile=redhat-datagrid-${DATAGRID_VERSION}-server.zip
For Data Grid 7 and older use: `-Dfile=jboss-datagrid-${DATAGRID_VERSION}-server.zip`.
### Run Cross-DC Tests from Maven
Warning: The cross-DC tests don't work with the Quarkus distribution
Note: Profile `auth-servers-crossdc-undertow` currently doesn't work (see [KEYCLOAK-18335](https://issues.redhat.com/browse/KEYCLOAK-18335)).
Use `-Pauth-servers-crossdc-jboss,auth-server-wildfly` instead.
a) Prepare the environment. Build the Infinispan server and, if needed, the Keycloak server on JBoss.
a1) If you want to use an **Undertow**-based Keycloak container, you just need to download and prepare the
Infinispan/JDG test server via the following command:
`mvn -Pcache-server-infinispan,auth-servers-crossdc-undertow -f testsuite/integration-arquillian -DskipTests clean install`
*note: 'cache-server-infinispan' can be replaced by 'cache-server-datagrid'*
a2) If you want to use **JBoss-based** Keycloak backend containers instead of containers on Embedded Undertow,
you need to prepare both the Infinispan/JDG test server and the Keycloak server on WildFly/EAP. Run the following command:
`mvn -Pcache-server-infinispan,auth-servers-crossdc-jboss,auth-server-wildfly -f testsuite/integration-arquillian -DskipTests clean install`
*note: 'cache-server-infinispan' can be replaced by 'cache-server-datagrid'*
*note: 'auth-server-wildfly' can be replaced by 'auth-server-eap'*
By default, JBoss-based containers use a TCP-based H2 database. They can be configured to use a real DB spawned in Docker, e.g. with the following command:
`mvn -Pcache-server-infinispan,auth-servers-crossdc-jboss,auth-server-wildfly,jpa,db-mariadb -f testsuite/integration-arquillian -DskipTests clean install`
b1) For **Undertow** Keycloak backend containers, you can run the tests using the following command (adjust the test specification according to your needs):
`mvn -Pcache-server-infinispan,auth-servers-crossdc-undertow -Dtest=org.keycloak.testsuite.crossdc.**.*Test -pl testsuite/integration-arquillian/tests/base clean install`
*note: 'cache-server-infinispan' can be replaced by 'cache-server-datagrid'*
*note: It can be useful to add an additional system property to enable logging:*
`-Dkeycloak.infinispan.logging.level=debug`
b2) For **JBoss-based** Keycloak backend containers, you can run the tests like this:
`mvn -Pcache-server-infinispan,auth-servers-crossdc-jboss,auth-server-wildfly -Dtest=org.keycloak.testsuite.crossdc.**.*Test -pl testsuite/integration-arquillian/tests/base clean install`
*note: 'cache-server-infinispan' can be replaced by 'cache-server-datagrid'*
*note: 'auth-server-wildfly' can be replaced by 'auth-server-eap'*
**note**:
For **JBoss-based** Keycloak backend containers on a real DB, the previous commands from (a2) and (b2) can be "squashed" into one. E.g.:
`mvn -f testsuite/integration-arquillian -Dtest=org.keycloak.testsuite.crossdc.**.*Test -Pcache-server-infinispan,auth-servers-crossdc-jboss,auth-server-wildfly,jpa,db-mariadb clean install`
### Run Cross-DC Tests from Intellij IDEA
Note: Profile `auth-servers-crossdc-undertow`, which is required in step (3), currently doesn't work (see [KEYCLOAK-18335](https://issues.redhat.com/browse/KEYCLOAK-18335)).
First we will manually download, configure and run the Infinispan servers. Then we can run the tests from the IDE against those servers.
It's more effective during development as there is no need to restart the Infinispan server(s) between test runs.
1) Download infinispan server of corresponding version (See "infinispan.version" property in [root pom.xml](../../pom.xml))
from http://infinispan.org/download/ and go through the steps from the
[Keycloak Cross-DC documentation](http://www.keycloak.org/docs/latest/server_installation/index.html#jdgsetup) for setting up the Infinispan servers.
The difference from the original docs is that you need to have the JDG servers available on localhost with port offsets. So:
* The TCPPING hosts should be like this:
```xml
<property name="initial_hosts">localhost[8610],localhost[9610]"</property>
```
* The port offset when starting node `jdg1` should be, for example, `-Djboss.socket.binding.port-offset=1010`, and when
starting the `jdg2` server, `-Djboss.socket.binding.port-offset=2010`. In both cases, the bind address should be just the
default `localhost` (in other words, the `-b` switch can be omitted).
So assume you have both Infinispan/JDG servers up and running.
2) Setup MySQL database or some other shared database.
3) Ensure that `org.wildfly.arquillian:wildfly-arquillian-container-managed` is on the classpath when running the tests. In IntelliJ, this can be
done by going to `View` -> `Tool Windows` -> `Maven projects` and checking the profiles `cache-server-infinispan` and `auth-servers-crossdc-undertow`.
The tests will use these profiles when executed.
4) Run the LoginCrossDCTest (or any other test) with the properties below. In short, it uses a MySQL database and
connects to the remote store provided by the Infinispan server configured in the previous steps:
`-Dauth.server.crossdc=true -Dauth.server.undertow.crossdc=true -Dcache.server.lifecycle.skip=true -Dkeycloak.connectionsInfinispan.clustered=true -Dkeycloak.connectionsJpa.url.crossdc=jdbc:mysql://localhost/keycloak -Dkeycloak.connectionsJpa.driver.crossdc=com.mysql.jdbc.Driver -Dkeycloak.connectionsJpa.user=keycloak -Dkeycloak.connectionsJpa.password=keycloak -Dkeycloak.connectionsInfinispan.clustered=true -Dkeycloak.connectionsInfinispan.remoteStorePort=12232 -Dkeycloak.connectionsInfinispan.remoteStorePort.2=13232 -Dkeycloak.connectionsInfinispan.sessionsOwners=1 -Dsession.cache.owners=1 -Dkeycloak.infinispan.logging.level=debug -Dresources`
**NOTE**: Tests from the `manual` package (e.g. SessionsPreloadCrossDCTest) need to be executed with managed containers.
So skip steps 1 and 2, add the property `-Dmanual.mode=true`, and change "cache.server.lifecycle.skip" to false (`-Dcache.server.lifecycle.skip=false`) or remove it.
5) If you want to debug or test manually, the servers are running on these ports (note that not all backend servers are running by default and some might also be unused by the loadbalancer):
* *Loadbalancer* -> "http://localhost:8180/auth"
* *auth-server-undertow-cross-dc-0_1* -> "http://localhost:8101/auth"
* *auth-server-undertow-cross-dc-0_2-manual* -> "http://localhost:8102/auth"
* *auth-server-undertow-cross-dc-1_1* -> "http://localhost:8111/auth"
* *auth-server-undertow-cross-dc-1_2-manual* -> "http://localhost:8112/auth"
## Run Docker Authentication test
First, validate that your machine has a valid docker installation and that it is available to the JVM running the test.

View File

@ -62,7 +62,6 @@ import org.keycloak.models.UserCredentialModel;
import org.keycloak.models.UserModel;
import org.keycloak.models.UserProvider;
import org.keycloak.models.UserSessionModel;
import org.keycloak.models.sessions.infinispan.changes.sessions.CrossDCLastSessionRefreshStoreFactory;
import org.keycloak.models.utils.ModelToRepresentation;
import org.keycloak.models.utils.ResetTimeOffsetEvent;
import org.keycloak.protocol.oidc.encode.AccessTokenContext;
@ -114,7 +113,6 @@ import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Date;
@ -744,8 +742,6 @@ public class TestingResourceProvider implements RealmResourceProvider {
@Produces(MediaType.APPLICATION_JSON)
public Response suspendPeriodicTasks() {
suspendTask(ClearExpiredUserSessions.TASK_NAME);
suspendTask(CrossDCLastSessionRefreshStoreFactory.LSR_PERIODIC_TASK_NAME);
suspendTask(CrossDCLastSessionRefreshStoreFactory.LSR_OFFLINE_PERIODIC_TASK_NAME);
return Response.noContent().build();
}

View File

@ -24,18 +24,12 @@ import jakarta.ws.rs.Path;
import jakarta.ws.rs.PathParam;
import jakarta.ws.rs.Produces;
import org.infinispan.Cache;
import org.infinispan.client.hotrod.RemoteCache;
import org.infinispan.stream.CacheCollectors;
import org.keycloak.connections.infinispan.InfinispanConnectionProvider;
import org.keycloak.connections.infinispan.InfinispanUtil;
import org.keycloak.models.KeycloakSession;
import org.keycloak.models.sessions.infinispan.changes.SessionEntityWrapper;
import org.keycloak.models.sessions.infinispan.entities.UserSessionEntity;
import org.keycloak.utils.MediaType;
import java.util.Collections;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.UUID;
import java.util.stream.Collectors;
@ -108,33 +102,4 @@ public class TestCacheResource {
public void processExpiration() {
cache.getAdvancedCache().getExpirationManager().processExpiration();
}
@GET
@Path("/remote-cache-stats")
@Produces(MediaType.APPLICATION_JSON)
public Map<String, String> getRemoteCacheStats() {
var remoteCache = InfinispanUtil.getRemoteCache(cache);
return remoteCache == null ?
Collections.emptyMap() :
remoteCache.serverStatistics().getStatsMap();
}
@GET
@Path("/remote-cache-last-session-refresh/{user-session-id}")
@Produces(MediaType.APPLICATION_JSON)
public int getRemoteCacheLastSessionRefresh(@PathParam("user-session-id") String userSessionId) {
RemoteCache<String, SessionEntityWrapper<UserSessionEntity>> remoteCache = InfinispanUtil.getRemoteCache(cache);
if (remoteCache == null) {
return -1;
} else {
SessionEntityWrapper<UserSessionEntity> userSession = remoteCache.get(userSessionId);
if (userSession == null) {
return -1;
} else {
return userSession.getEntity().getLastSessionRefresh();
}
}
}
}

View File

@ -51,8 +51,6 @@ import org.keycloak.testsuite.arquillian.annotation.UncaughtServerErrorExpected;
import org.keycloak.testsuite.arquillian.annotation.EnableVault;
import org.keycloak.testsuite.client.KeycloakTestingClient;
import org.keycloak.testsuite.util.HttpClientUtils;
import org.keycloak.testsuite.util.MutualTLSUtils;
import org.keycloak.testsuite.util.oauth.HttpClientManager;
import org.keycloak.testsuite.util.oauth.OAuthClient;
import org.keycloak.testsuite.util.SpiProvidersSwitchingUtils;
import org.keycloak.testsuite.util.SqlUtils;
@ -68,7 +66,6 @@ import java.net.URL;
import java.security.Provider;
import java.security.Security;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
@ -128,8 +125,6 @@ public class AuthServerTestEnricher {
public static final String AUTH_SERVER_CLUSTER_PROPERTY = "auth.server.cluster";
public static final boolean AUTH_SERVER_CLUSTER = Boolean.parseBoolean(System.getProperty(AUTH_SERVER_CLUSTER_PROPERTY, "false"));
public static final String AUTH_SERVER_CROSS_DC_PROPERTY = "auth.server.crossdc";
public static final boolean AUTH_SERVER_CROSS_DC = Boolean.parseBoolean(System.getProperty(AUTH_SERVER_CROSS_DC_PROPERTY, "false"));
public static final String AUTH_SERVER_HOME_PROPERTY = "auth.server.home";
@ -206,65 +201,7 @@ public class AuthServerTestEnricher {
suiteContext = new SuiteContext(containers);
if (AUTH_SERVER_CROSS_DC) {
// if cross-dc mode enabled, load-balancer is the frontend of datacenter cluster
containers.stream()
.filter(c -> c.getQualifier().startsWith(AUTH_SERVER_BALANCER + "-cross-dc"))
.forEach(c -> {
String portOffsetString = c.getArquillianContainer().getContainerConfiguration().getContainerProperties().getOrDefault("bindHttpPortOffset", "0");
String dcString = c.getArquillianContainer().getContainerConfiguration().getContainerProperties().getOrDefault("dataCenter", "0");
updateWithAuthServerInfo(c, Integer.valueOf(portOffsetString));
suiteContext.addAuthServerInfo(Integer.valueOf(dcString), c);
});
if (suiteContext.getDcAuthServerInfo().isEmpty()) {
throw new IllegalStateException("Not found frontend container (load balancer): " + AUTH_SERVER_BALANCER);
}
if (suiteContext.getDcAuthServerInfo().stream().anyMatch(Objects::isNull)) {
throw new IllegalStateException("Frontend container (load balancer) misconfiguration");
}
containers.stream()
.filter(c -> c.getQualifier().startsWith("auth-server-" + System.getProperty("node.name") + "-"))
.sorted((a, b) -> a.getQualifier().compareTo(b.getQualifier()))
.forEach(c -> {
String portOffsetString = c.getArquillianContainer().getContainerConfiguration().getContainerProperties().getOrDefault("bindHttpPortOffset", "0");
updateWithAuthServerInfo(c, Integer.valueOf(portOffsetString));
String dcString = c.getArquillianContainer().getContainerConfiguration().getContainerProperties().getOrDefault("dataCenter", "0");
suiteContext.addAuthServerBackendsInfo(Integer.valueOf(dcString), c);
});
containers.stream()
.filter(c -> c.getQualifier().startsWith("cache-server-"))
.sorted((a, b) -> a.getQualifier().compareTo(b.getQualifier()))
.forEach(containerInfo -> {
log.info(String.format("cache container: %s", containerInfo.getQualifier()));
int prefixSize = containerInfo.getQualifier().lastIndexOf("-") + 1;
int dcIndex = Integer.parseInt(containerInfo.getQualifier().substring(prefixSize)) - 1;
suiteContext.addCacheServerInfo(dcIndex, containerInfo);
});
if (suiteContext.getDcAuthServerInfo().isEmpty()) {
throw new RuntimeException(String.format("No auth server container matching '%s' found in arquillian.xml.", AUTH_SERVER_BACKEND));
}
if (suiteContext.getDcAuthServerBackendsInfo().stream().anyMatch(Objects::isNull)) {
throw new IllegalStateException("Frontend container (load balancer) misconfiguration");
}
if (suiteContext.getDcAuthServerBackendsInfo().stream().anyMatch(List::isEmpty)) {
throw new RuntimeException(String.format("Some data center has no auth server container matching '%s' defined in arquillian.xml.", AUTH_SERVER_BACKEND));
}
if (suiteContext.getCacheServersInfo().isEmpty() && !CACHE_SERVER_LIFECYCLE_SKIP) {
throw new IllegalStateException("Cache containers misconfiguration");
}
log.info("Using frontend containers: " + this.suiteContext.getDcAuthServerInfo().stream()
.map(ContainerInfo::getQualifier)
.collect(Collectors.joining(", ")));
} else if (AUTH_SERVER_CLUSTER) {
if (AUTH_SERVER_CLUSTER) {
// if cluster mode enabled, load-balancer is the frontend
ContainerInfo container = containers.stream()
.filter(c -> c.getQualifier().startsWith(AUTH_SERVER_BALANCER))
@ -316,7 +253,6 @@ public class AuthServerTestEnricher {
}
suiteContextProducer.set(suiteContext);
CrossDCTestEnricher.initializeSuiteContext(suiteContext);
log.info("\n\n" + suiteContext);
log.info("\n\n" + SystemInfoHelper.getSystemInfo());

View File

@ -1,432 +0,0 @@
package org.keycloak.testsuite.arquillian;
import java.io.IOException;
import java.io.NotSerializableException;
import java.lang.management.ManagementFactory;
import java.lang.reflect.Field;
import java.lang.reflect.Method;
import java.lang.reflect.Parameter;
import java.net.MalformedURLException;
import java.rmi.UnmarshalException;
import java.util.Arrays;
import java.util.Collections;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import javax.management.Attribute;
import javax.management.AttributeNotFoundException;
import javax.management.InstanceNotFoundException;
import javax.management.IntrospectionException;
import javax.management.MBeanAttributeInfo;
import javax.management.MBeanException;
import javax.management.MBeanInfo;
import javax.management.MBeanServerConnection;
import javax.management.MalformedObjectNameException;
import javax.management.ObjectName;
import javax.management.ReflectionException;
import javax.management.remote.JMXServiceURL;
import org.apache.commons.lang3.reflect.FieldUtils;
import org.jboss.arquillian.container.spi.Container;
import org.jboss.arquillian.container.spi.ContainerRegistry;
import org.jboss.arquillian.core.api.Instance;
import org.jboss.arquillian.core.api.annotation.Inject;
import org.jboss.arquillian.core.spi.Validate;
import org.jboss.arquillian.test.spi.TestEnricher;
import org.jboss.logging.Logger;
import org.keycloak.connections.infinispan.InfinispanConnectionProvider;
import org.keycloak.common.util.Retry;
import org.keycloak.testsuite.arquillian.annotation.JmxInfinispanCacheStatistics;
import org.keycloak.testsuite.arquillian.annotation.JmxInfinispanChannelStatistics;
import org.keycloak.testsuite.arquillian.containers.InfinispanServerDeployableContainer;
import org.keycloak.testsuite.arquillian.jmx.JmxConnectorRegistry;
import org.keycloak.testsuite.arquillian.undertow.KeycloakOnUndertow;
import org.keycloak.testsuite.crossdc.DC;
/**
*
* @author hmlnarik
*/
public class CacheStatisticsControllerEnricher implements TestEnricher {
private static final Logger LOG = Logger.getLogger(CacheStatisticsControllerEnricher.class);
@Inject
private Instance<ContainerRegistry> registry;
@Inject
private Instance<JmxConnectorRegistry> jmxConnectorRegistry;
@Inject
private Instance<SuiteContext> suiteContext;
@Override
public void enrich(Object testCase) {
Validate.notNull(registry.get(), "registry should not be null");
Validate.notNull(jmxConnectorRegistry.get(), "jmxConnectorRegistry should not be null");
Validate.notNull(suiteContext.get(), "suiteContext should not be null");
for (Field field : FieldUtils.getAllFields(testCase.getClass())) {
JmxInfinispanCacheStatistics annotation = field.getAnnotation(JmxInfinispanCacheStatistics.class);
if (annotation == null) {
continue;
}
try {
FieldUtils.writeField(field, testCase, getInfinispanCacheStatistics(annotation), true);
} catch (IOException | IllegalAccessException | MalformedObjectNameException e) {
throw new RuntimeException("Could not set value on field " + field);
}
}
}
private InfinispanStatistics getInfinispanCacheStatistics(JmxInfinispanCacheStatistics annotation) throws MalformedObjectNameException, IOException, MalformedURLException {
List<ObjectName> mbeanNameTemplates = new LinkedList<>();
mbeanNameTemplates.add(new ObjectName(String.format(
"%s:type=%s,name=\"%s(%s)\",manager=\"%s\",component=%s",
annotation.domain().isEmpty() ? getDefaultDomain(annotation.dc().getDcIndex(), annotation.dcNodeIndex()) : InfinispanConnectionProvider.JMX_DOMAIN,
annotation.type(),
annotation.cacheName(),
annotation.cacheMode(),
annotation.cacheManagerName(),
annotation.component()
)));
// For Keycloak on WildFly 20 and higher, the typical objectName for the cache statistics looks similar to: jboss.as:subsystem=infinispan,cache-container=keycloak,cache=actionTokens
if (annotation.dc().getDcIndex() != -1 && annotation.dcNodeIndex() != -1) {
mbeanNameTemplates.add(new ObjectName(String.format(
"jboss.as:subsystem=infinispan,cache-container=keycloak,cache=%s",
annotation.cacheName()
)));
}
InfinispanStatistics value = new InfinispanCacheStatisticsImpl(getJmxServerConnection(annotation), mbeanNameTemplates);
if (annotation.domain().isEmpty()) {
try {
LOG.debug("Going to try reset InfinispanCacheStatistics (2 attempts, 150 ms interval)");
int execute = Retry.execute(() -> value.reset(), 2, 150);
LOG.debug("reset in " + execute + " attempts");
} catch (RuntimeException ex) {
if (annotation.dc() != DC.UNDEFINED && annotation.dcNodeIndex() != -1
&& suiteContext.get().getAuthServerBackendsInfo(annotation.dc().getDcIndex()).get(annotation.dcNodeIndex()).isStarted()) {
LOG.warn("Could not reset statistics for any of the mbean name templates " + mbeanNameTemplates + ". The reason is: \"" + ex.getMessage() + "\"");
}
}
}
return value;
}
private InfinispanStatistics getJGroupsChannelStatistics(JmxInfinispanChannelStatistics annotation) throws MalformedObjectNameException, IOException, MalformedURLException {
ObjectName mbeanName = new ObjectName(String.format(
"%s:%stype=%s,cluster=\"%s\"",
annotation.domain().isEmpty() ? getDefaultDomain(annotation.dc().getDcIndex(), annotation.dcNodeIndex()) : InfinispanConnectionProvider.JMX_DOMAIN,
isLegacyInfinispan() ? "" : "manager=\"default\",",
annotation.type(),
annotation.cluster()
));
InfinispanStatistics value = new InfinispanChannelStatisticsImpl(getJmxServerConnection(annotation), mbeanName);
if (annotation.domain().isEmpty()) {
try {
Retry.execute(() -> value.reset(), 2, 150);
} catch (RuntimeException ex) {
if (annotation.dc() != DC.UNDEFINED && annotation.dcNodeIndex() != -1
&& suiteContext.get().getAuthServerBackendsInfo(annotation.dc().getDcIndex()).get(annotation.dcNodeIndex()).isStarted()) {
LOG.warn("Could not reset statistics for " + mbeanName + ". The reason is: \"" + ex.getMessage() + "\"");
}
}
}
return value;
}
@Override
public Object[] resolve(Method method) {
Object[] values = new Object[method.getParameterCount()];
for (int i = 0; i < method.getParameterCount(); i ++) {
Parameter param = method.getParameters()[i];
JmxInfinispanCacheStatistics annotation = param.getAnnotation(JmxInfinispanCacheStatistics.class);
if (annotation != null) try {
values[i] = getInfinispanCacheStatistics(annotation);
} catch (IOException | MalformedObjectNameException e) {
throw new RuntimeException("Could not set value on field " + param);
}
JmxInfinispanChannelStatistics channelAnnotation = param.getAnnotation(JmxInfinispanChannelStatistics.class);
if (channelAnnotation != null) try {
values[i] = getJGroupsChannelStatistics(channelAnnotation);
} catch (IOException | MalformedObjectNameException e) {
throw new RuntimeException("Could not set value on field " + param);
}
}
return values;
}
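// Resolves the default JMX domain: backend auth-server nodes use the WildFly- or Undertow-specific domain,
// while a standalone cache server uses the Infinispan server domain.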
private String getDefaultDomain(int dcIndex, int dcNodeIndex) {
if (dcIndex != -1 && dcNodeIndex != -1) {
if (Boolean.parseBoolean(System.getProperty("auth.server.jboss.crossdc"))) {
//backend-jboss-server
return "org.wildfly.clustering.infinispan";
}
//backend-undertow-server
return InfinispanConnectionProvider.JMX_DOMAIN + "-" + suiteContext.get().getAuthServerBackendsInfo(dcIndex).get(dcNodeIndex).getQualifier();
}
//cache-server
return isLegacyInfinispan() ? "jboss.datagrid-infinispan" : "org.infinispan";
}
private boolean isLegacyInfinispan() { // infinispan 9 or lower
return Boolean.parseBoolean(System.getProperty("cache.server.legacy", "false"));
}
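// Creates a JMX connection supplier for the node referenced by the annotation: a backend auth-server node is
// reached via its management port (or via the platform MBean server when running on Undertow), while a cache
// server is resolved from the annotation values or from the corresponding system properties.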
private Supplier<MBeanServerConnection> getJmxServerConnection(JmxInfinispanCacheStatistics annotation) throws MalformedURLException {
final String host;
final int port;
if (annotation.dc() != DC.UNDEFINED && annotation.dcNodeIndex() != -1) {
ContainerInfo node = suiteContext.get().getAuthServerBackendsInfo(annotation.dc().getDcIndex()).get(annotation.dcNodeIndex());
Container container = node.getArquillianContainer();
if (container.getDeployableContainer() instanceof KeycloakOnUndertow) {
return () -> ManagementFactory.getPlatformMBeanServer();
}
host = "localhost";
port = container.getContainerConfiguration().getContainerProperties().containsKey("managementPort")
? Integer.valueOf(container.getContainerConfiguration().getContainerProperties().get("managementPort"))
: 9990;
} else {
Container container = suiteContext.get().getCacheServersInfo().get(0).getArquillianContainer();
if (container.getDeployableContainer() instanceof InfinispanServerDeployableContainer) {
// jmx connection to infinispan server
return () -> {
try {
return jmxConnectorRegistry.get().getConnection(
((InfinispanServerDeployableContainer) container.getDeployableContainer()).getJMXServiceURL()
).getMBeanServerConnection();
} catch (IOException ex) {
throw new RuntimeException(ex);
}
};
}
host = annotation.host().isEmpty()
? System.getProperty((annotation.hostProperty().isEmpty()
? "keycloak.connectionsInfinispan.remoteStoreServer"
: annotation.hostProperty()))
: annotation.host();
port = annotation.managementPort() == -1
? Integer.valueOf(System.getProperty((annotation.managementPortProperty().isEmpty()
? "cache.server.management.port"
: annotation.managementPortProperty())))
: annotation.managementPort();
}
JMXServiceURL url = new JMXServiceURL("service:jmx:remote+http://" + host + ":" + port);
return () -> {
try {
return jmxConnectorRegistry.get().getConnection(url).getMBeanServerConnection();
} catch (IOException ex) {
throw new RuntimeException(ex);
}
};
}
private Supplier<MBeanServerConnection> getJmxServerConnection(JmxInfinispanChannelStatistics annotation) throws MalformedURLException {
final String host;
final int port;
if (annotation.dc() != DC.UNDEFINED && annotation.dcNodeIndex() != -1) {
ContainerInfo node = suiteContext.get().getAuthServerBackendsInfo(annotation.dc().getDcIndex()).get(annotation.dcNodeIndex());
Container container = node.getArquillianContainer();
if (container.getDeployableContainer() instanceof KeycloakOnUndertow) {
return () -> ManagementFactory.getPlatformMBeanServer();
}
host = "localhost";
port = container.getContainerConfiguration().getContainerProperties().containsKey("managementPort")
? Integer.valueOf(container.getContainerConfiguration().getContainerProperties().get("managementPort"))
: 9990;
} else {
Container container = suiteContext.get().getCacheServersInfo().get(0).getArquillianContainer();
if (container.getDeployableContainer() instanceof InfinispanServerDeployableContainer) {
// jmx connection to infinispan server
return () -> {
try {
return jmxConnectorRegistry.get().getConnection(
((InfinispanServerDeployableContainer) container.getDeployableContainer()).getJMXServiceURL()
).getMBeanServerConnection();
} catch (IOException ex) {
throw new RuntimeException(ex);
}
};
}
host = annotation.host().isEmpty()
? System.getProperty((annotation.hostProperty().isEmpty()
? "keycloak.connectionsInfinispan.remoteStoreServer"
: annotation.hostProperty()))
: annotation.host();
port = annotation.managementPort() == -1
? Integer.valueOf(System.getProperty((annotation.managementPortProperty().isEmpty()
? "cache.server.management.port"
: annotation.managementPortProperty())))
: annotation.managementPort();
}
JMXServiceURL url = new JMXServiceURL("service:jmx:remote+http://" + host + ":" + port);
return () -> {
try {
return jmxConnectorRegistry.get().getConnection(url).getMBeanServerConnection();
} catch (IOException ex) {
throw new RuntimeException(ex);
}
};
}
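// Base implementation that lazily resolves the target MBean from a list of ObjectName templates and exposes
// its readable attributes as statistics.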
private static abstract class CacheStatisticsImpl implements InfinispanStatistics {
private final Supplier<MBeanServerConnection> mbscCreator;
private final List<ObjectName> mbeanNameTemplates;
private ObjectName mbeanName;
public CacheStatisticsImpl(Supplier<MBeanServerConnection> mbscCreator, ObjectName mbeanNameTemplate) {
this(mbscCreator, Collections.singletonList(mbeanNameTemplate));
}
public CacheStatisticsImpl(Supplier<MBeanServerConnection> mbscCreator, List<ObjectName> mbeanNameTemplates) {
this.mbscCreator = mbscCreator;
this.mbeanNameTemplates = mbeanNameTemplates;
}
protected MBeanServerConnection getConnection() {
return mbscCreator.get();
}
@Override
public boolean exists() {
try {
getMbeanName();
return true;
} catch (IOException | RuntimeException ex) {
return false;
}
}
@Override
public Map<String, Object> getStatistics() {
try {
MBeanInfo mBeanInfo = getConnection().getMBeanInfo(getMbeanName());
String[] statAttrs = Arrays.stream(mBeanInfo.getAttributes())
.filter(MBeanAttributeInfo::isReadable)
.map(MBeanAttributeInfo::getName)
.toArray(String[]::new);
return getConnection().getAttributes(getMbeanName(), statAttrs)
.asList()
.stream()
.collect(Collectors.toMap(Attribute::getName, Attribute::getValue));
} catch (IOException | InstanceNotFoundException | ReflectionException | IntrospectionException ex) {
throw new RuntimeException(ex);
}
}
protected ObjectName getMbeanName() throws IOException, RuntimeException {
if (this.mbeanName == null) {
// Try all the mbeanName templates
for (ObjectName mbeanNameTemplate : this.mbeanNameTemplates) {
Set<ObjectName> queryNames = getConnection().queryNames(mbeanNameTemplate, null);
if (queryNames.isEmpty()) {
LOG.infof("No MBean available for the template %s .", mbeanNameTemplate);
continue;
}
this.mbeanName = queryNames.iterator().next();
return this.mbeanName;
}
throw new RuntimeException("No MBean for any of the templates " + this.mbeanNameTemplates + " found at JMX server");
}
return this.mbeanName;
}
@Override
public Comparable getSingleStatistics(String statisticsName) {
try {
return (Comparable) getConnection().getAttribute(getMbeanName(), statisticsName);
} catch (IOException | InstanceNotFoundException | MBeanException | ReflectionException | AttributeNotFoundException ex) {
throw new RuntimeException(ex);
}
}
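// Polls roughly every 100 ms until the MBean is registered and reports itself as available, or the given timeout elapses.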
@Override
public void waitToBecomeAvailable(int time, TimeUnit unit) {
long timeInMillis = TimeUnit.MILLISECONDS.convert(time, unit);
Retry.execute(() -> {
try {
getMbeanName();
if (! isAvailable()) throw new RuntimeException("Not available");
} catch (IOException | RuntimeException ex) {
throw new RuntimeException("Timed out while waiting for any of the mbean name templates " + this.mbeanNameTemplates + " to become available", ex);
}
}, 1 + (int) timeInMillis / 100, 100);
}
protected abstract boolean isAvailable();
}
private static class InfinispanCacheStatisticsImpl extends CacheStatisticsImpl {
public InfinispanCacheStatisticsImpl(Supplier<MBeanServerConnection> mbscCreator, List<ObjectName> mbeanNames) {
super(mbscCreator, mbeanNames);
}
@Override
public void reset() {
try {
getConnection().invoke(getMbeanName(), "resetStatistics", new Object[] {}, new String[] {});
} catch (IOException | InstanceNotFoundException | MBeanException | ReflectionException ex) {
throw new RuntimeException(ex);
}
}
@Override
protected boolean isAvailable() {
return getSingleStatistics(Constants.STAT_CACHE_HITS) != null;
}
}
private static class InfinispanChannelStatisticsImpl extends CacheStatisticsImpl {
public InfinispanChannelStatisticsImpl(Supplier<MBeanServerConnection> mbscCreator, ObjectName mbeanName) {
super(mbscCreator, mbeanName);
}
@Override
public void reset() {
try {
getConnection().invoke(getMbeanName(), "resetStats", new Object[] {}, new String[] {});
} catch (NotSerializableException | UnmarshalException ex) {
// Ignore a non-serializable return value; the invocation has already done its job
} catch (IOException | InstanceNotFoundException | MBeanException | ReflectionException ex) {
throw new RuntimeException(ex);
}
}
@Override
protected boolean isAvailable() {
return Objects.equals(getSingleStatistics(Constants.STAT_CHANNEL_CONNECTED), Boolean.TRUE);
}
}
}

View File

@ -1,438 +0,0 @@
/*
* Copyright 2018 Red Hat, Inc. and/or its affiliates
* and other contributors as indicated by the @author tags.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.keycloak.testsuite.arquillian;
import org.apache.commons.io.FileUtils;
import org.jboss.arquillian.container.spi.event.StopContainer;
import org.jboss.arquillian.container.spi.event.StopSuiteContainers;
import org.jboss.arquillian.container.test.api.ContainerController;
import org.jboss.arquillian.core.api.Event;
import org.jboss.arquillian.core.api.Instance;
import org.jboss.arquillian.core.api.annotation.Inject;
import org.jboss.arquillian.core.api.annotation.Observes;
import org.jboss.arquillian.core.spi.Validate;
import org.jboss.arquillian.test.spi.event.suite.After;
import org.jboss.arquillian.test.spi.event.suite.AfterSuite;
import org.jboss.arquillian.test.spi.event.suite.Before;
import org.jboss.logging.Logger;
import org.keycloak.admin.client.Keycloak;
import org.keycloak.models.Constants;
import org.keycloak.testsuite.arquillian.annotation.InitialDcState;
import org.keycloak.testsuite.auth.page.AuthRealm;
import org.keycloak.testsuite.client.KeycloakTestingClient;
import org.keycloak.testsuite.crossdc.DC;
import org.keycloak.testsuite.crossdc.ServerSetup;
import org.keycloak.testsuite.utils.tls.TLSUtils;
import java.io.File;
import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Consumer;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Stream;
import static org.hamcrest.Matchers.lessThan;
import static org.hamcrest.MatcherAssert.assertThat;
/**
*
* @author vramik
*/
public class CrossDCTestEnricher {
protected static final Logger log = Logger.getLogger(CrossDCTestEnricher.class);
private static SuiteContext suiteContext;
@Inject
private static Instance<ContainerController> containerController;
@Inject
private Event<StopContainer> stopContainer;
private static final Map<ContainerInfo, Keycloak> backendAdminClients = new HashMap<>();
private static final Map<ContainerInfo, KeycloakTestingClient> backendTestingClients = new HashMap<>();
static void initializeSuiteContext(SuiteContext suiteContext) {
Validate.notNull(suiteContext, "Suite context cannot be null.");
CrossDCTestEnricher.suiteContext = suiteContext;
if (AuthServerTestEnricher.AUTH_SERVER_CROSS_DC && suiteContext.getCacheServersInfo().isEmpty() && !AuthServerTestEnricher.CACHE_SERVER_LIFECYCLE_SKIP) {
throw new IllegalStateException("Cache containers misconfiguration");
}
}
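// Before each test, aligns the running auth-server and cache-server containers with the @InitialDcState
// annotation found on the test method or, if absent there, on the nearest annotated superclass.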
public void beforeTest(@Observes(precedence = -2) Before event) {
if (!suiteContext.isAuthServerCrossDc()) return;
//if annotation is present on method
InitialDcState annotation = event.getTestMethod().getAnnotation(InitialDcState.class);
//annotation not present on method, taking it from class
if (annotation == null) {
Class<?> annotatedClass = getNearestSuperclassWithAnnotation(event.getTestClass().getJavaClass(), InitialDcState.class);
annotation = annotatedClass == null ? null : annotatedClass.getAnnotation(InitialDcState.class);
}
if (annotation == null) {
log.debug("No environment preparation requested, not changing auth/cache server run status.");
return; // Test does not specify its environment, so it's on its own
}
ServerSetup cacheServers = annotation.cacheServers();
ServerSetup authServers = annotation.authServers();
// Stop auth servers that could otherwise hang while connecting to a cache server that is stopped next
switch (authServers) {
case ALL_NODES_IN_EVERY_DC:
break;
case FIRST_NODE_IN_EVERY_DC:
DC.validDcsStream().forEach((DC dc) -> stopAuthServerBackendNode(dc, 1));
break;
case FIRST_NODE_IN_FIRST_DC:
stopAuthServerBackendNode(DC.FIRST, 1);
forAllBackendNodesInDc(DC.SECOND, CrossDCTestEnricher::stopAuthServerBackendNode);
break;
case ALL_NODES_IN_FIRST_DC_FIRST_NODE_IN_SECOND_DC:
stopAuthServerBackendNode(DC.SECOND, 1);
break;
case ALL_NODES_IN_FIRST_DC_NO_NODES_IN_SECOND_DC:
forAllBackendNodesInDc(DC.SECOND, CrossDCTestEnricher::stopAuthServerBackendNode);
break;
}
switch (cacheServers) {
case ALL_NODES_IN_EVERY_DC:
case FIRST_NODE_IN_EVERY_DC: //the same as ALL_NODES_IN_EVERY_DC as there is only one cache server per DC
case ALL_NODES_IN_FIRST_DC_FIRST_NODE_IN_SECOND_DC:
DC.validDcsStream().forEach(CrossDCTestEnricher::startCacheServer);
break;
case FIRST_NODE_IN_FIRST_DC:
case ALL_NODES_IN_FIRST_DC_NO_NODES_IN_SECOND_DC:
startCacheServer(DC.FIRST);
stopCacheServer(DC.SECOND);
break;
}
switch (authServers) {
case ALL_NODES_IN_EVERY_DC:
forAllBackendNodes(CrossDCTestEnricher::startAuthServerBackendNode);
break;
case FIRST_NODE_IN_EVERY_DC:
DC.validDcsStream().forEach((DC dc) -> startAuthServerBackendNode(dc, 0));
break;
case FIRST_NODE_IN_FIRST_DC:
startAuthServerBackendNode(DC.FIRST, 0);
break;
case ALL_NODES_IN_FIRST_DC_FIRST_NODE_IN_SECOND_DC:
forAllBackendNodesInDc(DC.FIRST, CrossDCTestEnricher::startAuthServerBackendNode);
startAuthServerBackendNode(DC.SECOND, 0);
break;
case ALL_NODES_IN_FIRST_DC_NO_NODES_IN_SECOND_DC:
forAllBackendNodesInDc(DC.FIRST, CrossDCTestEnricher::startAuthServerBackendNode);
break;
}
suspendPeriodicTasks();
}
public void afterTest(@Observes After event) {
if (!suiteContext.isAuthServerCrossDc()) return;
restorePeriodicTasks();
}
public void afterSuite(@Observes(precedence = 4) AfterSuite event) {
if (!suiteContext.isAuthServerCrossDc()) return;
// Unfortunately, in AfterSuite the containerController context is already cleaned up, so stopAuthServerBackendNode()
// and stopCacheServer() cannot be used. Moreover, Arquillian by default does not guarantee that cache
// servers are terminated only after the auth servers are, so the termination has to be done in this enricher.
forAllBackendNodesStream()
.map(ContainerInfo::getArquillianContainer)
.map(StopContainer::new)
.forEach(stopContainer::fire);
if (!AuthServerTestEnricher.CACHE_SERVER_LIFECYCLE_SKIP) {
DC.validDcsStream()
.map(CrossDCTestEnricher::getCacheServer)
.map(ContainerInfo::getArquillianContainer)
.map(StopContainer::new)
.forEach(stopContainer::fire);
}
}
public void stopSuiteContainers(@Observes(precedence = 4) StopSuiteContainers event) {
if (!suiteContext.isAuthServerCrossDc()) return;
forAllBackendNodes(CrossDCTestEnricher::stopAuthServerBackendNode);
DC.validDcsStream().forEach(CrossDCTestEnricher::stopCacheServer);
}
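// Admin and testing REST clients are created lazily when a backend node starts and closed again when it stops.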
private static void createRESTClientsForNode(ContainerInfo node) {
if (!backendAdminClients.containsKey(node)) {
backendAdminClients.put(node, createAdminClientFor(node));
}
if (!backendTestingClients.containsKey(node)) {
backendTestingClients.put(node, createTestingClientFor(node));
}
}
private static void removeRESTClientsForNode(ContainerInfo node) {
if (backendAdminClients.containsKey(node)) {
backendAdminClients.get(node).close();
backendAdminClients.remove(node);
}
if (backendTestingClients.containsKey(node)) {
backendTestingClients.get(node).close();
backendTestingClients.remove(node);
}
}
public static Map<ContainerInfo, Keycloak> getBackendAdminClients() {
return Collections.unmodifiableMap(backendAdminClients);
}
public static Map<ContainerInfo, KeycloakTestingClient> getBackendTestingClients() {
return Collections.unmodifiableMap(backendTestingClients);
}
private static Keycloak createAdminClientFor(ContainerInfo node) {
log.info("--DC: Initializing admin client for " + node.getContextRoot() + "/auth");
return Keycloak.getInstance(node.getContextRoot() + "/auth", AuthRealm.MASTER, AuthRealm.ADMIN, AuthRealm.ADMIN, Constants.ADMIN_CLI_CLIENT_ID, TLSUtils.initializeTLS());
}
private static KeycloakTestingClient createTestingClientFor(ContainerInfo node) {
log.info("--DC: Initializing testing client for " + node.getContextRoot() + "/auth");
return KeycloakTestingClient.getInstance(node.getContextRoot() + "/auth");
}
// Disable periodic tasks in cross-DC tests. This is needed to make some scenarios more stable.
private static void suspendPeriodicTasks() {
log.debug("--DC: suspendPeriodicTasks");
backendTestingClients.values().stream().forEach((KeycloakTestingClient testingClient) -> {
testingClient.testing().suspendPeriodicTasks();
});
}
private static void restorePeriodicTasks() {
log.debug("--DC: restorePeriodicTasks");
backendTestingClients.values().stream().forEach((KeycloakTestingClient testingClient) -> {
testingClient.testing().restorePeriodicTasks();
});
}
/**
 * Returns the cache server corresponding to the given DC.
 * @param dc data center
 * @return container info of the cache server in the given DC
 */
private static ContainerInfo getCacheServer(DC dc) {
assertValidDc(dc);
int dcIndex = dc.ordinal();
return suiteContext.getCacheServersInfo().get(dcIndex);
}
private static void assertValidDc(DC dc) throws IllegalStateException {
if (dc == DC.UNDEFINED) {
throw new IllegalStateException("Invalid DC used: " + DC.UNDEFINED);
}
}
/* Code to detect whether the underlying JVM is modular (i.e. JDK 9+), adapted from the WildFly Core code base:
 * https://github.com/wildfly/wildfly-core/blob/main/launcher/src/main/java/org/wildfly/core/launcher/Jvm.java#L59
 * and turned into a function for easier reuse.
 */
public static boolean isModularJvm() {
boolean modularJvm = false;
final String javaSpecVersion = System.getProperty("java.specification.version");
if (javaSpecVersion != null) {
final Matcher matcher = Pattern.compile("^(?:1\\.)?(\\d+)$").matcher(javaSpecVersion);
if (matcher.find()) modularJvm = Integer.parseInt(matcher.group(1)) >= 9;
}
return modularJvm;
}
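// Starts the cache server of the given DC unless cache-server lifecycle handling is skipped. On a modular JVM
// the default modular JVM options are added to the container's JVM arguments for this start only.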
public static void startCacheServer(DC dc) {
if (AuthServerTestEnricher.CACHE_SERVER_LIFECYCLE_SKIP) return;
if (!containerController.get().isStarted(getCacheServer(dc).getQualifier())) {
log.infof("--DC: Starting %s", getCacheServer(dc).getQualifier());
// Original config of the cache server container as a map
Map<String, String> containerConfig = getCacheServer(dc).getProperties();
// Start cache server with default modular JVM options set if JDK is modular (JDK 9+)
final String defaultModularJvmOptions = System.getProperty("default.modular.jvm.options");
final String originalJvmArguments = getCacheServer(dc).getProperties().get("javaVmArguments");
/* When the JVM used to launch the cache server container is modular, add the default
 * modular JVM options to the cache server container's configuration if they are not
 * present there yet.
 *
 * See the definition of the 'default.modular.jvm.options' property for details.
 */
if (isModularJvm() && defaultModularJvmOptions != null) {
if (originalJvmArguments != null && !originalJvmArguments.contains(defaultModularJvmOptions)) {
log.infof("Modular JVM detected. Adding default modular JVM '%s' options to the cache server container's configuration.", defaultModularJvmOptions);
final String lineSeparator = System.getProperty("line.separator");
final String adjustedJvmArguments = originalJvmArguments.replace(lineSeparator, " ") + defaultModularJvmOptions + lineSeparator;
/* Since the cache server container might be started with a non-modular JVM next time,
 * don't store the default modular JVM options permanently in the cache server container's
 * configuration (so they don't have to be removed again later).
 *
 * Instead, retrieve the original cache server container's configuration as a map, add the
 * default modular JVM options there, and start the cache server once with this temporary
 * configuration.
 */
containerConfig.put("javaVmArguments", adjustedJvmArguments);
}
}
/* Finally start the cache server container:
* - Either using the original container config (case of a non-modular JVM),
* - Or using the updated container config (case of a modular JVM)
*/
containerController.get().start(getCacheServer(dc).getQualifier(), containerConfig);
log.infof("--DC: Started %s", getCacheServer(dc).getQualifier());
}
}
public static void stopCacheServer(DC dc) {
if (AuthServerTestEnricher.CACHE_SERVER_LIFECYCLE_SKIP) return;
String qualifier = getCacheServer(dc).getQualifier();
if (containerController.get().isStarted(qualifier)) {
log.infof("--DC: Stopping %s", qualifier);
containerController.get().stop(qualifier);
// Workaround for a possible Arquillian bug: the directory needs to be cleaned up manually
String setupCleanServerBaseDir = getContainerProperty(getCacheServer(dc), "setupCleanServerBaseDir");
String cleanServerBaseDir = getContainerProperty(getCacheServer(dc), "cleanServerBaseDir");
if (Boolean.parseBoolean(setupCleanServerBaseDir)) {
log.debugf("Going to clean directory: %s", cleanServerBaseDir);
File dir = new File(cleanServerBaseDir);
if (dir.exists()) {
try {
dir.renameTo(new File(dir.getParentFile(), dir.getName() + "-backup-" + System.currentTimeMillis()));
File deploymentsDir = new File(dir, "deployments");
FileUtils.forceMkdir(deploymentsDir);
} catch (IOException ioe) {
throw new RuntimeException("Failed to clean directory: " + cleanServerBaseDir, ioe);
}
}
}
log.infof("--DC: Stopped %s", qualifier);
}
}
public static void forAllBackendNodes(Consumer<ContainerInfo> functionOnContainerInfo) {
forAllBackendNodesStream()
.forEach(functionOnContainerInfo);
}
public static Stream<ContainerInfo> forAllBackendNodesStream() {
return suiteContext.getDcAuthServerBackendsInfo().stream()
.flatMap(Collection::stream);
}
public static void forAllBackendNodesInDc(DC dc, Consumer<ContainerInfo> functionOnContainerInfo) {
assertValidDc(dc);
suiteContext.getDcAuthServerBackendsInfo().get(dc.ordinal()).stream()
.forEach(functionOnContainerInfo);
}
public static void stopAuthServerBackendNode(ContainerInfo containerInfo) {
if (containerInfo.isStarted()) {
log.infof("--DC: Stopping backend auth-server node: %s", containerInfo.getQualifier());
removeRESTClientsForNode(containerInfo);
containerController.get().stop(containerInfo.getQualifier());
}
}
public static void startAuthServerBackendNode(ContainerInfo containerInfo) {
if (! containerInfo.isStarted()) {
log.infof("--DC: Starting backend auth-server node: %s", containerInfo.getQualifier());
containerController.get().start(containerInfo.getQualifier());
// Cross-DC setups are not working with Quarkus
//AuthServerTestEnricher.initializeTLS(containerInfo);
createRESTClientsForNode(containerInfo);
}
}
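// Returns the descriptor of the backend auth-server node with the given index in the given DC.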
public static ContainerInfo getBackendNode(DC dc, int nodeIndex) {
assertValidDc(dc);
int dcIndex = dc.ordinal();
assertThat((Integer) dcIndex, lessThan(suiteContext.getDcAuthServerBackendsInfo().size()));
final List<ContainerInfo> dcNodes = suiteContext.getDcAuthServerBackendsInfo().get(dcIndex);
assertThat((Integer) nodeIndex, lessThan(dcNodes.size()));
return dcNodes.get(nodeIndex);
}
/**
 * Starts a manually-controlled backend auth-server node in a cross-DC scenario.
 * @param dc data center of the node
 * @param nodeIndex index of the node within the data center
 * @return Started instance descriptor.
 */
public static ContainerInfo startAuthServerBackendNode(DC dc, int nodeIndex) {
ContainerInfo dcNode = getBackendNode(dc, nodeIndex);
startAuthServerBackendNode(dcNode);
return dcNode;
}
/**
 * Stops a manually-controlled backend auth-server node in a cross-DC scenario.
 * @param dc data center of the node
 * @param nodeIndex index of the node within the data center
 * @return Stopped instance descriptor.
 */
public static ContainerInfo stopAuthServerBackendNode(DC dc, int nodeIndex) {
ContainerInfo dcNode = getBackendNode(dc, nodeIndex);
stopAuthServerBackendNode(dcNode);
return dcNode;
}
private Class getNearestSuperclassWithAnnotation(Class<?> testClass, Class annotationClass) {
return (testClass.isAnnotationPresent(annotationClass)) ? testClass
: (testClass.getSuperclass().equals(Object.class) ? null // stop recursion
: getNearestSuperclassWithAnnotation(testClass.getSuperclass(), annotationClass)); // continue recursion
}
private static String getContainerProperty(ContainerInfo cacheServer, String propertyName) {
return cacheServer.getArquillianContainer().getContainerConfiguration().getContainerProperties().get(propertyName);
}
}

Some files were not shown because too many files have changed in this diff.