Upgrade to Infinispan 16.0.0.Dev05

- Remove query modules
- Remove unused config file
- Update config file versions
- Update jgroups attributes
- Remove ISPN-16595 workaround
- Call HotRodServer#postStart in HotRodServerRule to start caches as well
as the server
- Simplify cluster-ha.xml
- Utilise org.infinispan.commons.util.TimeQuantity in CacheConfiguration

Signed-off-by: Ryan Emerson <remerson@ibm.com>
This commit is contained in:
Ryan Emerson 2025-09-19 16:18:49 +01:00
parent c6b13cb2ec
commit 3158aaa59c
24 changed files with 47 additions and 391 deletions

View File

@ -74,16 +74,6 @@
<groupId>org.infinispan</groupId>
<artifactId>infinispan-cachestore-remote</artifactId>
</dependency>
<!-- required for query/search in the external Infinispan server -->
<dependency>
<groupId>org.infinispan</groupId>
<artifactId>infinispan-remote-query-client</artifactId>
</dependency>
<!-- to be removed after https://issues.redhat.com/browse/ISPN-16220 -->
<dependency>
<groupId>org.infinispan</groupId>
<artifactId>infinispan-query-dsl</artifactId>
</dependency>
<dependency>
<groupId>org.infinispan</groupId>
<artifactId>infinispan-component-annotations</artifactId>

View File

@ -28,9 +28,7 @@ import org.infinispan.distribution.DistributionManager;
import org.infinispan.factories.GlobalComponentRegistry;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.remoting.transport.Address;
import org.infinispan.remoting.transport.LocalModeAddress;
import org.infinispan.remoting.transport.Transport;
import org.infinispan.remoting.transport.jgroups.JGroupsAddress;
import org.infinispan.remoting.transport.jgroups.JGroupsTransport;
import org.jboss.logging.Logger;
import org.jgroups.stack.IpAddress;
@ -116,12 +114,9 @@ public class TopologyInfo {
public boolean amIOwner(Cache<?, ?> cache, Object key) {
Address myAddress = cache.getCacheManager().getAddress();
Address objectOwnerAddress = getOwnerAddress(cache, key);
// NOTE: For scattered caches, this will always return true, which may not be correct. Need to review this if we add support for scattered caches
return Objects.equals(myAddress, objectOwnerAddress);
}
/**
* Get route to be used as the identifier for sticky session. Return null if I am not able to find the appropriate route (or in case of local mode)
* @deprecated Use {@link org.keycloak.sessions.StickySessionEncoderProvider#sessionIdRoute(String)} instead.
@ -140,11 +135,11 @@ public class TopologyInfo {
Address address = getOwnerAddress(cache, key);
// Local mode
if (address == null || (address == LocalModeAddress.INSTANCE)) {
if (address == null || (address == Address.LOCAL)) {
return myNodeName;
}
org.jgroups.Address jgroupsAddress = toJGroupsAddress(address);
org.jgroups.Address jgroupsAddress = Address.toExtendedUUID(address);
String name = NameCache.get(jgroupsAddress);
// If no logical name exists, create one using physical address
@ -169,16 +164,4 @@ public class TopologyInfo {
DistributionManager dist = cache.getAdvancedCache().getDistributionManager();
return dist == null ? cache.getCacheManager().getAddress() : dist.getCacheTopology().getDistribution(key).primary();
}
// See org.wildfly.clustering.server.group.CacheGroup
private static org.jgroups.Address toJGroupsAddress(Address address) {
if ((address == null) || (address == LocalModeAddress.INSTANCE)) return null;
if (address instanceof JGroupsAddress jgroupsAddress) {
return jgroupsAddress.getJGroupsAddress();
}
throw new IllegalArgumentException(address.toString());
}
}

View File

@ -18,222 +18,18 @@
package org.keycloak.jgroups.protocol;
import java.sql.Connection;
import java.sql.SQLException;
import java.util.Collections;
import java.util.List;
import java.util.Objects;
import java.util.stream.Collectors;
import org.keycloak.connections.jpa.JpaConnectionProviderFactory;
import org.jgroups.Address;
import org.jgroups.Event;
import org.jgroups.PhysicalAddress;
import org.jgroups.View;
import org.jgroups.protocols.JDBC_PING2;
import org.jgroups.protocols.PingData;
import org.jgroups.stack.Protocol;
import org.jgroups.util.ExtendedUUID;
import org.jgroups.util.NameCache;
import org.jgroups.util.Responses;
import org.jgroups.util.UUID;
/**
* Enhanced JDBC_PING2 to handle entries transactionally.
* <p>
* Workaround for issue <a href="https://issues.redhat.com/browse/JGRP-2870">JGRP-2870</a>
*/
public class KEYCLOAK_JDBC_PING2 extends JDBC_PING2 {
private JpaConnectionProviderFactory factory;
@Override
protected void handleView(View new_view, View old_view, boolean coord_changed) {
// If we are the coordinator, it is good to learn about new entries that have been added before we delete them.
// If we are not the coordinator, it is good to learn the new entries added by the coordinator.
// This avoids a "JGRP000032: %s: no physical address for %s, dropping message" that leads to split clusters at concurrent startup.
learnExistingAddresses();
// This is an updated logic where we do not call removeAll but instead remove those obsolete entries.
// This avoids the short moment where the table is empty and a new node might not see any other node.
if (is_coord) {
if (remove_old_coords_on_view_change) {
Address old_coord = old_view != null ? old_view.getCreator() : null;
if (old_coord != null)
remove(cluster_name, old_coord);
}
Address[] left = View.diff(old_view, new_view)[1];
if (coord_changed || update_store_on_view_change || left.length > 0) {
writeAll(left);
if (remove_all_data_on_view_change) {
removeAllNotInCurrentView();
}
if (remove_all_data_on_view_change || remove_old_coords_on_view_change) {
startInfoWriter();
}
}
} else if (coord_changed && !remove_all_data_on_view_change) {
// I'm no longer the coordinator, usually due to a merge.
// The new coordinator will update my status to non-coordinator, and remove me fully
// if 'remove_all_data_on_view_change' is enabled and I'm no longer part of the view.
// Maybe this branch could even be removed completely, but for JDBC_PING 'remove_all_data_on_view_change' is always set to true.
PhysicalAddress physical_addr = (PhysicalAddress) down(new Event(Event.GET_PHYSICAL_ADDRESS, local_addr));
PingData coord_data = new PingData(local_addr, true, NameCache.get(local_addr), physical_addr).coord(is_coord);
write(Collections.singletonList(coord_data), cluster_name);
}
}
@Override
protected void removeAll(String clustername) {
// This is unsafe as even if we would fill the table a moment later, a new node might see an empty table and become a coordinator
throw new RuntimeException("Not implemented as it is unsafe");
}
private void removeAllNotInCurrentView() {
try {
List<PingData> list = readFromDB(getClusterName());
for (PingData data : list) {
Address addr = data.getAddress();
if (view != null && !view.containsMember(addr)) {
addDiscoveryResponseToCaches(addr, data.getLogicalName(), data.getPhysicalAddr());
remove(cluster_name, addr);
}
}
} catch (Exception e) {
log.error(String.format("%s: failed reading from the DB", local_addr), e);
}
}
protected void learnExistingAddresses() {
try {
List<PingData> list = readFromDB(getClusterName());
for (PingData data : list) {
Address addr = data.getAddress();
if (local_addr != null && !local_addr.equals(addr)) {
addDiscoveryResponseToCaches(addr, data.getLogicalName(), data.getPhysicalAddr());
}
}
} catch (Exception e) {
log.error(String.format("%s: failed reading from the DB", local_addr), e);
}
}
@Override
public synchronized boolean isInfoWriterRunning() {
// Do not rely on the InfoWriter, instead always write the missing information on find if it is missing. Find is also triggered by MERGE.
return false;
}
@Override
public void findMembers(List<Address> members, boolean initial_discovery, Responses responses) {
if (initial_discovery) {
try {
List<PingData> pingData = readFromDB(cluster_name);
PhysicalAddress physical_addr = (PhysicalAddress) down(new Event(Event.GET_PHYSICAL_ADDRESS, local_addr));
// Sending the discovery here, as parent class will not execute it once there is data in the table
sendDiscoveryResponse(local_addr, physical_addr, NameCache.get(local_addr), null, is_coord);
PingData coord_data = new PingData(local_addr, true, NameCache.get(local_addr), physical_addr).coord(is_coord);
write(Collections.singletonList(coord_data), cluster_name);
while (pingData.stream().noneMatch(PingData::isCoord)) {
// Do a quick check if more nodes have arrived, to have a more complete list of nodes to start with.
List<PingData> newPingData = readFromDB(cluster_name);
if (newPingData.stream().map(PingData::getAddress).collect(Collectors.toSet()).equals(pingData.stream().map(PingData::getAddress).collect(Collectors.toSet()))
|| pingData.stream().anyMatch(PingData::isCoord)) {
break;
}
pingData = newPingData;
}
} catch (Exception e) {
log.error(String.format("%s: failed reading from the DB", local_addr), e);
}
}
super.findMembers(members, initial_discovery, responses);
}
@Override
protected void writeToDB(PingData data, String clustername) throws SQLException {
lock.lock();
try (Connection connection = getConnection()) {
if(call_insert_sp != null && insert_sp != null)
callInsertStoredProcedure(connection, data, clustername);
else {
boolean isAutocommit = connection.getAutoCommit();
try {
if (isAutocommit) {
// Always use a transaction for the delete+insert to make it atomic
// to avoid the short moment where there is no entry in the table.
connection.setAutoCommit(false);
} else {
log.warn("Autocommit is disabled. This indicates a transaction context that might batch statements and can lead to deadlocks.");
}
delete(connection, clustername, data.getAddress());
insert(connection, data, clustername);
if (isAutocommit) {
connection.commit();
}
} catch (SQLException e) {
if (isAutocommit) {
connection.rollback();
}
throw e;
} finally {
if (isAutocommit) {
connection.setAutoCommit(true);
}
}
}
} finally {
lock.unlock();
}
}
/* START: JDBC_PING2 does not handle ExtendedUUID yet, see
https://github.com/belaban/JGroups/pull/901 - until this is backported, we convert all of them.
*/
@Override
public <T extends Protocol> T addr(Address addr) {
addr = toUUID(addr);
return super.addr(addr);
}
@Override
public <T extends Protocol> T setAddress(Address addr) {
addr = toUUID(addr);
return super.setAddress(addr);
}
@Override
protected void delete(Connection conn, String clustername, Address addressToDelete) throws SQLException {
super.delete(conn, clustername, toUUID(addressToDelete));
}
@Override
protected void delete(String clustername, Address addressToDelete) throws SQLException {
super.delete(clustername, toUUID(addressToDelete));
}
@Override
protected void insert(Connection connection, PingData data, String clustername) throws SQLException {
if (data.getAddress() instanceof ExtendedUUID) {
data = new PingData(toUUID(data.getAddress()), data.isServer(), data.getLogicalName(), data.getPhysicalAddr()).coord(data.isCoord());
}
super.insert(connection, data, clustername);
}
private static Address toUUID(Address addr) {
if (addr instanceof ExtendedUUID eUUID) {
addr = new UUID(eUUID.getMostSignificantBits(), eUUID.getLeastSignificantBits());
}
return addr;
}
/* END: JDBC_PING2 does not handle ExtendedUUID yet, see
https://github.com/belaban/JGroups/pull/901 - until this is backported, we convert all of them.
*/
@Override
protected void loadDriver() {
//no-op, using JpaConnectionProviderFactory

View File

@ -118,6 +118,7 @@ import org.infinispan.protostream.types.java.CommonTypes;
schemaPackageName = Marshalling.PROTO_SCHEMA_PACKAGE,
schemaFilePath = "proto/generated",
allowNullFields = true,
orderedMarshallers = true,
// common-types for UUID
dependsOn = CommonTypes.class,

View File

@ -34,7 +34,7 @@ import org.keycloak.sessions.StickySessionEncoderProvider;
import org.keycloak.sessions.StickySessionEncoderProviderFactory;
import org.infinispan.Cache;
import org.infinispan.remoting.transport.jgroups.JGroupsAddress;
import org.infinispan.remoting.transport.Address;
import org.jboss.logging.Logger;
import org.jgroups.util.NameCache;
@ -153,6 +153,6 @@ public class InfinispanStickySessionEncoderProviderFactory implements StickySess
// Return null if the logical name is not available yet.
// The following request may be redirected to the wrong instance, but that's ok.
// In a healthy/stable cluster, the name cache is correctly populated.
return primaryOwner instanceof JGroupsAddress jgrpAddr ? NameCache.get(jgrpAddr.getJGroupsAddress()) : null;
return primaryOwner == null ? null : NameCache.get(Address.toExtendedUUID(primaryOwner));
}
}

View File

@ -32,7 +32,7 @@ import java.util.stream.StreamSupport;
import org.infinispan.client.hotrod.impl.query.RemoteQuery;
import org.infinispan.commons.api.query.Query;
import org.infinispan.query.dsl.QueryResult;
import org.infinispan.commons.api.query.QueryResult;
public final class QueryHelper {
@ -195,7 +195,7 @@ public final class QueryHelper {
return;
}
currentOffset += resultList.size();
if (rsp.count().isExact() && currentOffset >= rsp.count().value()) {
if (rsp.count().exact() && currentOffset >= rsp.count().value()) {
completed = true;
return;
}

View File

@ -33,6 +33,7 @@ import org.keycloak.models.sessions.infinispan.entities.RemoteUserSessionEntity;
import org.keycloak.models.sessions.infinispan.entities.RootAuthenticationSessionEntity;
import org.infinispan.commons.dataconversion.MediaType;
import org.infinispan.commons.util.TimeQuantity;
import org.infinispan.configuration.cache.AbstractStoreConfiguration;
import org.infinispan.configuration.cache.BackupConfiguration;
import org.infinispan.configuration.cache.BackupFailurePolicy;
@ -394,13 +395,17 @@ public final class CacheConfigurator {
}
private static ConfigurationBuilder remoteCacheConfigurationBuilder(String name, Config.Scope config, String[] sites, Class<?> indexedEntity, long expirationWakeupPeriodMillis) {
return remoteCacheConfigurationBuilder(name, config, sites, indexedEntity, TimeQuantity.valueOf(expirationWakeupPeriodMillis));
}
private static ConfigurationBuilder remoteCacheConfigurationBuilder(String name, Config.Scope config, String[] sites, Class<?> indexedEntity, TimeQuantity expirationWakeupPeriod) {
var builder = new ConfigurationBuilder();
builder.clustering().cacheMode(CacheMode.DIST_SYNC);
builder.clustering().hash().numOwners(Math.max(MIN_NUM_OWNERS_REMOTE_CACHE, config.getInt(numOwnerConfigKey(name), MIN_NUM_OWNERS_REMOTE_CACHE)));
builder.clustering().stateTransfer().chunkSize(STATE_TRANSFER_CHUNK_SIZE);
builder.encoding().mediaType(MediaType.APPLICATION_PROTOSTREAM);
builder.statistics().enable();
builder.expiration().enableReaper().wakeUpInterval(expirationWakeupPeriodMillis);
builder.expiration().enableReaper().wakeUpInterval(expirationWakeupPeriod.longValue());
if (indexedEntity != null) {
builder.indexing().enable().addIndexedEntities(Marshalling.protoEntity(indexedEntity));

View File

@ -1,57 +0,0 @@
<config xmlns="urn:org:jgroups"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="urn:org:jgroups http://www.jgroups.org/schema/jgroups-5.2.xsd">
<!-- This file has been adopted from https://github.com/infinispan/infinispan/blob/master/core/src/main/resources/default-configs/default-jgroups-udp.xml -->
<!-- jgroups.udp.address is deprecated and will be removed, see ISPN-11867 -->
<UDP bind_addr="${jgroups.bind.address,jgroups.udp.address:127.0.0.1}"
bind_port="${jgroups.bind.port,jgroups.udp.port:0}"
mcast_addr="${jgroups.udp.mcast_addr,jgroups.mcast_addr:228.6.7.8}"
mcast_port="${jgroups.udp.mcast_port,jgroups.mcast_port:46655}"
tos="0"
ucast_send_buf_size="1m"
mcast_send_buf_size="1m"
ucast_recv_buf_size="20m"
mcast_recv_buf_size="25m"
ip_ttl="${jgroups.ip_ttl:2}"
thread_naming_pattern="pl"
diag.enabled="${jgroups.diag.enabled:false}"
bundler_type="transfer-queue"
bundler.max_size="${jgroups.bundler.max_size:64000}"
thread_pool.min_threads="${jgroups.thread_pool.min_threads:0}"
thread_pool.max_threads="${jgroups.thread_pool.max_threads:200}"
thread_pool.keep_alive_time="60000"
/>
<RED/>
<PING num_discovery_runs="3"/>
<MERGE3 min_interval="10000"
max_interval="30000"
/>
<FD_SOCK2 offset="${jgroups.fd.port-offset:50000}"/>
<FD_ALL3/>
<VERIFY_SUSPECT timeout="1000"/>
<pbcast.NAKACK2 xmit_interval="100"
xmit_table_num_rows="50"
xmit_table_msgs_per_row="1024"
xmit_table_max_compaction_time="30000"
resend_last_seqno="true"
/>
<UNICAST3 xmit_interval="100"
xmit_table_num_rows="50"
xmit_table_msgs_per_row="1024"
xmit_table_max_compaction_time="30000"
/>
<pbcast.STABLE desired_avg_gossip="5000"
max_bytes="1M"
/>
<pbcast.GMS print_local_addr="false"
join_timeout="${jgroups.join_timeout:500}"
/>
<UFC max_credits="${jgroups.max_credits:4m}"
min_threshold="0.40"
/>
<MFC max_credits="${jgroups.max_credits:4m}"
min_threshold="0.40"
/>
<FRAG4 frag_size="${jgroups.frag_size:60000}"/>
</config>

View File

@ -18,8 +18,8 @@
<infinispan
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="urn:infinispan:config:15.0 http://www.infinispan.org/schemas/infinispan-config-15.0.xsd"
xmlns="urn:infinispan:config:15.0">
xsi:schemaLocation="urn:infinispan:config:16.0 https://infinispan.org/schemas/infinispan-config-16.0.xsd"
xmlns="urn:infinispan:config:16.0">
<cache-container name="keycloak">
<transport lock-timeout="60000"/>

View File

@ -91,9 +91,9 @@
<h2.version>2.4.240</h2.version>
<hibernate-orm.plugin.version>6.2.13.Final</hibernate-orm.plugin.version>
<hibernate.c3p0.version>6.2.13.Final</hibernate.c3p0.version>
<infinispan.version>15.0.19.Final</infinispan.version>
<hibernate-validator.version>9.0.1.Final</hibernate-validator.version>
<protostream.version>5.0.14.Final</protostream.version> <!-- For the annotation processor: keep in sync with the version shipped with Infinispan -->
<infinispan.version>16.0.0.Dev05</infinispan.version>
<protostream.version>6.0.0.Dev13</protostream.version> <!-- For the annotation processor: keep in sync with the version shipped with Infinispan -->
<protostream.plugin.version>${protostream.version}</protostream.plugin.version>
<!--JAKARTA-->

View File

@ -672,16 +672,6 @@
<artifactId>infinispan-component-annotations</artifactId>
<scope>provided</scope>
</dependency>
<!-- required for query/search in the external Infinispan server -->
<dependency>
<groupId>org.infinispan</groupId>
<artifactId>infinispan-remote-query-client</artifactId>
</dependency>
<!-- to be removed after https://issues.redhat.com/browse/ISPN-16220 -->
<dependency>
<groupId>org.infinispan</groupId>
<artifactId>infinispan-query-dsl</artifactId>
</dependency>
<dependency>
<groupId>jakarta.xml.bind</groupId>
<artifactId>jakarta.xml.bind-api</artifactId>

View File

@ -18,8 +18,8 @@
<infinispan
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="urn:infinispan:config:15.0 http://www.infinispan.org/schemas/infinispan-config-15.0.xsd"
xmlns="urn:infinispan:config:15.0">
xsi:schemaLocation="urn:infinispan:config:16.0 https://infinispan.org/schemas/infinispan-config-16.0.xsd"
xmlns="urn:infinispan:config:16.0">
<cache-container name="keycloak">
<transport lock-timeout="60000"/>

View File

@ -18,7 +18,7 @@
<infinispan
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="urn:infinispan:config:15.0 http://www.infinispan.org/schemas/infinispan-config-15.0.xsd"
xmlns="urn:infinispan:config:15.0">
xsi:schemaLocation="urn:infinispan:config:16.0 https://infinispan.org/schemas/infinispan-config-16.0.xsd"
xmlns="urn:infinispan:config:16.0">
<cache-container />
</infinispan>

View File

@ -148,9 +148,7 @@ public class ClusterConfigKeepAliveDistTest {
.prettyPrint();
ConfigurationBuilderHolder configHolder = new ParserRegistry().parse(configJson, MediaType.APPLICATION_JSON);
// Workaround for ISPN-16595
String cacheName = CaseFormat.LOWER_CAMEL.to(CaseFormat.LOWER_HYPHEN, cache);
return configHolder.getNamedConfigurationBuilders().get(cacheName).build();
return configHolder.getNamedConfigurationBuilders().get(cache).build();
}
private record CacheOwners(String name, int owners) {

View File

@ -21,9 +21,9 @@
<!--tag::keycloak-ispn-configmap[] -->
<infinispan
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="urn:infinispan:config:15.0 https://www.infinispan.org/schemas/infinispan-config-15.0.xsd
xsi:schemaLocation="urn:infinispan:config:16.0 https://www.infinispan.org/schemas/infinispan-config-16.0.xsd
urn:infinispan:config:store:remote:15.0 https://www.infinispan.org/schemas/infinispan-cachestore-remote-config-15.0.xsd"
xmlns="urn:infinispan:config:15.0">
xmlns="urn:infinispan:config:16.0">
<!--end::keycloak-ispn-configmap[] -->
<!-- the statistics="true" attribute is not part of the original KC config and was added by Keycloak Benchmark -->

View File

@ -18,8 +18,8 @@
<infinispan
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="urn:infinispan:config:15.0 http://www.infinispan.org/schemas/infinispan-config-15.0.xsd"
xmlns="urn:infinispan:config:15.0">
xsi:schemaLocation="urn:infinispan:config:16.0 https://infinispan.org/schemas/infinispan-config-16.0.xsd"
xmlns="urn:infinispan:config:16.0">
<cache-container name="keycloak">
<transport lock-timeout="60000"/>

View File

@ -18,8 +18,8 @@
<infinispan
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="urn:infinispan:config:15.0 http://www.infinispan.org/schemas/infinispan-config-15.0.xsd"
xmlns="urn:infinispan:config:15.0">
xsi:schemaLocation="urn:infinispan:config:16.0 https://infinispan.org/schemas/infinispan-config-16.0.xsd"
xmlns="urn:infinispan:config:16.0">
<cache-container name="keycloak">
<transport lock-timeout="60000"/>

View File

@ -18,8 +18,8 @@
<infinispan
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="urn:infinispan:config:15.0 http://www.infinispan.org/schemas/infinispan-config-15.0.xsd"
xmlns="urn:infinispan:config:15.0">
xsi:schemaLocation="urn:infinispan:config:16.0 https://infinispan.org/schemas/infinispan-config-16.0.xsd"
xmlns="urn:infinispan:config:16.0">
<cache-container name="keycloak">
<transport lock-timeout="60000"/>

View File

@ -18,63 +18,17 @@
<infinispan
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="urn:infinispan:config:11.0 http://www.infinispan.org/schemas/infinispan-config-11.0.xsd"
xmlns="urn:infinispan:config:11.0">
xsi:schemaLocation="urn:infinispan:config:16.0 https://www.infinispan.org/schemas/infinispan-config-16.0.xsd"
xmlns="urn:infinispan:config:16.0">
<!-- Distributed Cache Container Configuration -->
<cache-container name="keycloak">
<transport lock-timeout="60000" node-name="${jboss.node.name}" />
<local-cache name="realms">
<encoding>
<key media-type="application/x-java-object"/>
<value media-type="application/x-java-object"/>
</encoding>
<memory storage="HEAP" max-count="10000"/>
</local-cache>
<local-cache name="users">
<encoding>
<key media-type="application/x-java-object"/>
<value media-type="application/x-java-object"/>
</encoding>
<memory storage="HEAP" max-count="10000"/>
</local-cache>
<distributed-cache name="sessions" owners="${session.cache.owners}"/>
<distributed-cache name="authenticationSessions" owners="${session.cache.owners}"/>
<distributed-cache name="offlineSessions" owners="${offline.session.cache.owners}"/>
<distributed-cache name="clientSessions" owners="${session.cache.owners}"/>
<distributed-cache name="offlineClientSessions" owners="${offline.session.cache.owners}"/>
<distributed-cache name="loginFailures" owners="${login.failure.cache.owners}"/>
<local-cache name="authorization">
<encoding>
<key media-type="application/x-java-object"/>
<value media-type="application/x-java-object"/>
</encoding>
<memory storage="HEAP" max-count="10000"/>
</local-cache>
<replicated-cache name="work"/>
<local-cache name="keys">
<encoding>
<key media-type="application/x-java-object"/>
<value media-type="application/x-java-object"/>
</encoding>
<expiration max-idle="3600000"/>
<memory storage="HEAP" max-count="1000"/>
</local-cache>
<local-cache name="crl" simple-cache="true">
<encoding>
<key media-type="application/x-java-object"/>
<value media-type="application/x-java-object"/>
</encoding>
<expiration lifespan="-1"/>
<memory max-count="1000"/>
</local-cache>
<distributed-cache name="actionTokens" owners="2">
<encoding>
<key media-type="application/x-java-object"/>
<value media-type="application/x-java-object"/>
</encoding>
<expiration max-idle="-1" interval="300000"/>
<memory storage="HEAP" max-count="-1"/>
</distributed-cache>
</cache-container>
</infinispan>

View File

@ -119,11 +119,6 @@
<artifactId>infinispan-component-annotations</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.infinispan</groupId>
<artifactId>infinispan-remote-query-server</artifactId>
</dependency>
<dependency>
<groupId>org.awaitility</groupId>
<artifactId>awaitility</artifactId>

View File

@ -69,10 +69,13 @@ public class HotRodServerRule extends ExternalResource {
HotRodServerConfiguration build = new HotRodServerConfigurationBuilder().build();
hotRodServer = new HotRodServer();
hotRodServer.start(build, hotRodCacheManager);
hotRodServer.postStart();
HotRodServerConfiguration build2 = new HotRodServerConfigurationBuilder().port(11333).build();
hotRodServer2 = new HotRodServer();
hotRodServer2.start(build2, hotRodCacheManager2);
hotRodServer2.postStart();
// Create a Hot Rod client
org.infinispan.client.hotrod.configuration.ConfigurationBuilder remoteBuilder = new org.infinispan.client.hotrod.configuration.ConfigurationBuilder();

View File

@ -18,21 +18,20 @@
<infinispan
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="urn:infinispan:config:15.0 http://www.infinispan.org/schemas/infinispan-config-15.0.xsd
urn:org:jgroups http://www.jgroups.org/schema/jgroups-5.3.xsd"
xmlns="urn:infinispan:config:15.0"
xmlns:ispn="urn:infinispan:config:15.0">
xsi:schemaLocation="urn:infinispan:config:16.0 https://infinispan.org/schemas/infinispan-config-16.0.xsd
urn:org:jgroups http://www.jgroups.org/schema/jgroups-5.4.xsd"
xmlns="urn:infinispan:config:16.0"
xmlns:ispn="urn:infinispan:config:16.0">
<jgroups>
<stack name="test" extends="tcp">
<!-- no network traffic as all messages are handled inside the JVM -->
<SHARED_LOOPBACK xmlns="urn:org:jgroups" ispn:stack.combine="REPLACE" ispn:stack.position="TCP"
thread_pool.use_virtual_threads="true"
use_vthreads="true"
bundler_type="no-bundler"/>
<SHARED_LOOPBACK_PING xmlns="urn:org:jgroups" ispn:stack.combine="REPLACE" ispn:stack.position="MPING"/>
<!-- in JVM cluster, no failure detection, no flow control, no fragmentation. -->
<RED xmlns="urn:org:jgroups" ispn:stack.combine="REMOVE"/>
<FD_SOCK2 xmlns="urn:org:jgroups" ispn:stack.combine="REMOVE"/>
<UFC xmlns="urn:org:jgroups" ispn:stack.combine="REMOVE"/>
<MFC xmlns="urn:org:jgroups" ispn:stack.combine="REMOVE"/>
<FRAG4 xmlns="urn:org:jgroups" ispn:stack.combine="REMOVE"/>

View File

@ -18,10 +18,10 @@
<infinispan
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="urn:infinispan:config:15.0 http://www.infinispan.org/schemas/infinispan-config-15.0.xsd
urn:org:jgroups http://www.jgroups.org/schema/jgroups-5.3.xsd"
xmlns="urn:infinispan:config:15.0"
xmlns:ispn="urn:infinispan:config:15.0">
xsi:schemaLocation="urn:infinispan:config:16.0 https://infinispan.org/schemas/infinispan-config-16.0.xsd
urn:org:jgroups http://www.jgroups.org/schema/jgroups-5.4.xsd"
xmlns="urn:infinispan:config:16.0"
xmlns:ispn="urn:infinispan:config:16.0">
<!-- used by auth-server-cluster-undertow profile -->
<!-- all containers run in the same JVM, we can use the shared loopback and ping.-->
@ -30,12 +30,11 @@
<stack name="test" extends="tcp">
<!-- no network traffic as all messages are handled inside the JVM -->
<SHARED_LOOPBACK xmlns="urn:org:jgroups" ispn:stack.combine="REPLACE" ispn:stack.position="TCP"
thread_pool.use_virtual_threads="true"
use_vthreads="true"
bundler_type="no-bundler"/>
<SHARED_LOOPBACK_PING xmlns="urn:org:jgroups" ispn:stack.combine="REPLACE" ispn:stack.position="MPING"/>
<!-- in JVM cluster, no failure detection, no flow control, no fragmentation. -->
<RED xmlns="urn:org:jgroups" ispn:stack.combine="REMOVE"/>
<FD_SOCK2 xmlns="urn:org:jgroups" ispn:stack.combine="REMOVE"/>
<UFC xmlns="urn:org:jgroups" ispn:stack.combine="REMOVE"/>
<MFC xmlns="urn:org:jgroups" ispn:stack.combine="REMOVE"/>
<FRAG4 xmlns="urn:org:jgroups" ispn:stack.combine="REMOVE"/>

View File

@ -18,8 +18,8 @@
<infinispan
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="urn:infinispan:config:15.0 http://www.infinispan.org/schemas/infinispan-config-15.0.xsd"
xmlns="urn:infinispan:config:15.0">
xsi:schemaLocation="urn:infinispan:config:16.0 https://infinispan.org/schemas/infinispan-config-16.0.xsd"
xmlns="urn:infinispan:config:16.0">
<!-- used undertow deployment -->