diff --git a/docs/documentation/upgrading/topics/changes/changes-26_5_0.adoc b/docs/documentation/upgrading/topics/changes/changes-26_5_0.adoc
index 19e38fd5397..6331cb420db 100644
--- a/docs/documentation/upgrading/topics/changes/changes-26_5_0.adoc
+++ b/docs/documentation/upgrading/topics/changes/changes-26_5_0.adoc
@@ -75,6 +75,13 @@ This prevents problems with client IDs or passwords that contain, for example, a
 
 To revert to the old behavior, change the client authentication to *Client secret sent as HTTP Basic authentication without URL encoding (deprecated)* (`client_secret_basic_unencoded`).
 
+=== Permissions of the database user for PostgreSQL
+
+If you are using PostgreSQL as the database for {project_name}, ensure that the database user has `SELECT` permissions on the following tables so that the upgrade runs efficiently: `pg_class`, `pg_namespace`.
+
+These tables are queried during upgrades of {project_name} to determine an estimated number of rows in a table.
+If {project_name} does not have permissions to access these tables, it will log a warning and proceed with the less efficient `+SELECT COUNT(*) ...+` operation during the upgrade to determine the number of rows in tables affected by schema changes.
+
 === Not recommended to use org.keycloak.credential.UserCredentialManager directly in your extensions
 
 If you have user storage extension and you reference the class `org.keycloak.credential.UserCredentialManager` from your providers, it is recommended to avoid using this class directly as it might be
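The release note above asks for read access to the `pg_class` and `pg_namespace` catalogs. As a minimal sketch of how an administrator could grant that access, assuming a database role named `keycloak` (the role name and the explicit grants are illustrative and not part of this patch; on a default PostgreSQL installation these catalogs are usually readable by every role already):

[source,sql]
----
-- Illustrative only: grant read access to the catalogs used for the row-count estimate.
-- "keycloak" is an assumed role name; adjust it to the role that Keycloak connects with.
GRANT SELECT ON pg_catalog.pg_class TO keycloak;
GRANT SELECT ON pg_catalog.pg_namespace TO keycloak;
----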
diff --git a/docs/documentation/upgrading/topics/migrate_db.adoc b/docs/documentation/upgrading/topics/migrate_db.adoc
index 1b5c342eb61..a92846ffe08 100644
--- a/docs/documentation/upgrading/topics/migrate_db.adoc
+++ b/docs/documentation/upgrading/topics/migrate_db.adoc
@@ -28,6 +28,13 @@ To change the threshold, set the `index-creation-threshold` property, value for
 kc.[sh|bat] start --spi-connections-liquibase--quarkus--index-creation-threshold=300000
 ----
 
+You can disable this feature by setting the property to zero or a negative number:
+
+[source,bash]
+----
+kc.[sh|bat] start --spi-connections-liquibase--quarkus--index-creation-threshold=0
+----
+
 === Manual relational database migration
 
 To enable manual upgrading of the database schema, set the `migration-strategy` property value to "manual" for the
diff --git a/docs/guides/server/db.adoc b/docs/guides/server/db.adoc
index 29c4f377588..1b88f8ee31f 100644
--- a/docs/guides/server/db.adoc
+++ b/docs/guides/server/db.adoc
@@ -267,6 +267,8 @@ create database keycloak with encoding 'UTF8';
 
 == Preparing for PostgreSQL
 
+=== Writer and reader instances
+
 When running PostgreSQL reader and writer instances, {project_name} needs to always connect to the writer instance to do its work.
 When using the original PostgreSQL driver, {project_name} sets the `targetServerType` property of the PostgreSQL JDBC driver to `primary` to ensure that it always connects to a writable primary instance and never connects to a secondary reader instance in failover or switchover scenarios.
 
@@ -277,9 +279,15 @@ You can override this behavior by setting your own value for `targetServerType`
 The `targetServerType` is only applied automatically to the primary datasource, as requirements might be different for additional datasources.
 ====
 
-[TIP]
-.Secure Your PostgreSQL Connection
-====
+=== Permissions of the database user
+
+Ensure that the database user has `SELECT` permissions on the following tables so that the upgrade runs efficiently: `pg_class`, `pg_namespace`.
+
+These tables are queried during upgrades of {project_name} to determine an estimated number of rows in a table.
+If {project_name} does not have permissions to access these tables, it will log a warning and proceed with the less efficient `+SELECT COUNT(*) ...+` operation during the upgrade to determine the number of rows in tables affected by schema changes.
+
+=== Secure your connection
+
 To secure your database connection, configure your PostgreSQL server to use TLS and perform full server certificate verification on the client side.
 
 **Server-side Configuration (Prerequisites):**
@@ -295,7 +303,6 @@ db-url=jdbc:postgresql://...?sslmode=verify-full&sslrootcert=/path/to/certificat
 
 * `sslmode=verify-full`: Forces TLS and verifies the server's identity against the trusted certificate.
 * `sslrootcert=/path/to/certificate`: The path to the server's public certificate file on the client machine.
-====
 
 [[preparing-keycloak-for-amazon-aurora-postgresql]]
 == Preparing for Amazon Aurora PostgreSQL
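The estimate mentioned in the documentation is the statistics-based query that the updated `CustomCreateIndexChange` (below) runs against `pg_class` and `pg_namespace`. As a quick sanity check, an administrator could run essentially the same query as the {project_name} database user; `user_entity` is only an example table name, not something mandated by the patch:

[source,sql]
----
-- Run as the Keycloak database user to verify access to the catalogs.
-- "user_entity" is an example; substitute any table from the Keycloak schema.
SELECT reltuples::bigint AS estimate
FROM pg_class c
JOIN pg_namespace n ON n.oid = c.relnamespace
WHERE n.nspname = current_schema
  AND UPPER(c.relname) = UPPER('user_entity');
----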
diff --git a/model/jpa/src/main/java/org/keycloak/connections/jpa/updater/liquibase/conn/DefaultLiquibaseConnectionProvider.java b/model/jpa/src/main/java/org/keycloak/connections/jpa/updater/liquibase/conn/DefaultLiquibaseConnectionProvider.java
index dc79f6f8f35..bb50c1edc02 100644
--- a/model/jpa/src/main/java/org/keycloak/connections/jpa/updater/liquibase/conn/DefaultLiquibaseConnectionProvider.java
+++ b/model/jpa/src/main/java/org/keycloak/connections/jpa/updater/liquibase/conn/DefaultLiquibaseConnectionProvider.java
@@ -49,7 +49,7 @@ public class DefaultLiquibaseConnectionProvider implements LiquibaseConnectionPr
 
     public static final String INDEX_CREATION_THRESHOLD_PARAM = "keycloak.indexCreationThreshold";
 
-    private int indexCreationThreshold;
+    private long indexCreationThreshold;
     private Class liquibaseDatabaseClazz;
 
     private static final AtomicBoolean INITIALIZATION = new AtomicBoolean(false);
@@ -97,7 +97,7 @@ public class DefaultLiquibaseConnectionProvider implements LiquibaseConnectionPr
     @SuppressWarnings("unchecked")
     @Override
     public void init(Config.Scope config) {
-        indexCreationThreshold = config.getInt("indexCreationThreshold", 300000);
+        indexCreationThreshold = config.getLong("indexCreationThreshold", 300000L);
         logger.debugf("indexCreationThreshold is %d", indexCreationThreshold);
 
         // We need to explicitly handle the default here as Config might not be MicroProfile and hence no actually server config exists
diff --git a/model/jpa/src/main/java/org/keycloak/connections/jpa/updater/liquibase/custom/CustomCreateIndexChange.java b/model/jpa/src/main/java/org/keycloak/connections/jpa/updater/liquibase/custom/CustomCreateIndexChange.java
index 7b37481d4d9..b02f67ede7f 100644
--- a/model/jpa/src/main/java/org/keycloak/connections/jpa/updater/liquibase/custom/CustomCreateIndexChange.java
+++ b/model/jpa/src/main/java/org/keycloak/connections/jpa/updater/liquibase/custom/CustomCreateIndexChange.java
@@ -31,10 +31,12 @@ import liquibase.change.DatabaseChange;
 import liquibase.change.core.CreateIndexChange;
 import liquibase.database.AbstractJdbcDatabase;
 import liquibase.database.Database;
+import liquibase.database.core.PostgresDatabase;
 import liquibase.exception.DatabaseException;
 import liquibase.exception.UnexpectedLiquibaseException;
 import liquibase.exception.ValidationErrors;
 import liquibase.exception.Warnings;
+import liquibase.executor.Executor;
 import liquibase.executor.ExecutorService;
 import liquibase.executor.LoggingExecutor;
 import liquibase.snapshot.InvalidExampleException;
@@ -42,7 +44,7 @@ import liquibase.snapshot.SnapshotGeneratorFactory;
 import liquibase.sqlgenerator.SqlGeneratorFactory;
 import liquibase.statement.SqlStatement;
 import liquibase.statement.core.CreateIndexStatement;
-import liquibase.statement.core.RawSqlStatement;
+import liquibase.statement.core.RawParameterizedSqlStatement;
 import liquibase.structure.core.Schema;
 import liquibase.structure.core.Table;
 import org.jboss.logging.Logger;
@@ -54,19 +56,21 @@ import org.jboss.logging.Logger;
         + 1, appliesTo = "index")
 public class CustomCreateIndexChange extends CreateIndexChange {
 
     private static final Logger logger = Logger.getLogger(CustomCreateIndexChange.class);
-    private int indexCreationThreshold;
+    private long indexCreationThreshold;
+
+    private Long entriesInTable = null;
+    private boolean logged;
 
     @Override
     public SqlStatement[] generateStatements(Database database) {
         // This check is for manual migration
-        if (Scope.getCurrentScope().getSingleton(ExecutorService.class).getExecutor(LiquibaseConstants.JDBC_EXECUTOR, database) instanceof LoggingExecutor)
+        if (getExecutor(database) instanceof LoggingExecutor)
             return super.generateStatements(database);
 
         Object indexCreationThreshold = ((AbstractJdbcDatabase) database)
                 .get(DefaultLiquibaseConnectionProvider.INDEX_CREATION_THRESHOLD_PARAM);
-        if (indexCreationThreshold instanceof Integer) {
-            this.indexCreationThreshold = (Integer) indexCreationThreshold;
+        if (indexCreationThreshold instanceof Long) {
+            this.indexCreationThreshold = (Long) indexCreationThreshold;
             if (this.indexCreationThreshold <= 0) return super.generateStatements(database);
         } else {
@@ -75,15 +79,18 @@ public class CustomCreateIndexChange extends CreateIndexChange {
         try {
             // To check that the table already exists or not on which the index will be created.
             if (getTableName() == null || !SnapshotGeneratorFactory.getInstance()
-                    .has(new Table().setName(getTableName()).setSchema(new Schema(getCatalogName(), getSchemaName())), database))
+                    .has(new Table().setName(getTableName()).setSchema(new Schema(getCatalogName(), getSchemaName())), database)) {
                 return super.generateStatements(database);
+            }
 
-            int result = Scope.getCurrentScope().getSingleton(ExecutorService.class).getExecutor(LiquibaseConstants.JDBC_EXECUTOR, database)
-                    .queryForInt(new RawSqlStatement("SELECT COUNT(*) FROM " + getTableNameForSqlSelects(database, getTableName())));
+            Long entriesInTable = computeEntriesInTable(database);
 
-            if (result > this.indexCreationThreshold) {
+            if (entriesInTable > this.indexCreationThreshold) {
                 String loggingString = createLoggingString(database);
-                logger.warnv("Following index should be created: {0}", loggingString);
+                if (!logged) {
+                    logger.warnv("Following index should be created: {0}", loggingString);
+                    logged = true;
+                }
                 getChangeSet().setComments(loggingString);
                 return new SqlStatement[] {};
             }
t", getTableNameForSqlSelects(database, getTableName())), this.indexCreationThreshold + 1)); + return entriesInTable; + } + + entriesInTable = getExecutor(database) + .queryForLong(new RawParameterizedSqlStatement(String.format("SELECT COUNT(*) FROM %s", getTableNameForSqlSelects(database, getTableName())))); + return entriesInTable; + } + + private static Executor getExecutor(Database database) { + return Scope.getCurrentScope().getSingleton(ExecutorService.class).getExecutor(LiquibaseConstants.JDBC_EXECUTOR, database); + } + private String getTableNameForSqlSelects(Database database, String tableName) { String correctedSchemaName = database.escapeObjectName(database.getDefaultSchemaName(), Schema.class); return LiquibaseJpaUpdaterProvider.getTable(tableName, correctedSchemaName); @@ -102,8 +141,7 @@ public class CustomCreateIndexChange extends CreateIndexChange { private String createLoggingString(Database database) throws DatabaseException { StringWriter writer = new StringWriter(); - LoggingExecutor loggingExecutor = new LoggingExecutor(Scope.getCurrentScope().getSingleton(ExecutorService.class) - .getExecutor(LiquibaseConstants.JDBC_EXECUTOR, database), writer, database); + LoggingExecutor loggingExecutor = new LoggingExecutor(getExecutor(database), writer, database); SqlStatement sqlStatement = new CreateIndexStatement(getIndexName(), getCatalogName(), getSchemaName(), getTableName(), this.isUnique(), getAssociatedWith(), getColumns().toArray(new AddColumnConfig[0])) .setTablespace(getTablespace()).setClustered(getClustered());