clazz, final String key);
+ // JDBC call context functions
- boolean isPluginInUse(final Class<? extends ConnectionPlugin> pluginClazz);
+ /**
+ * Retrieves details about the most recent {@link PluginService#connect} or
+ * {@link PluginService#forceConnect} calls. Specifically indicates whether the
+ * returned connection was obtained from a connection pool or newly created.
+ *
+ * Note: The {@link ConnectionPlugin} must process or store this information during
+ * the current JDBC call, as these details will be reset before the next JDBC call
+ * is processed, or another {@link PluginService#connect} or {@link PluginService#forceConnect}
+ * is made.
+ *
+ */
+ @Nullable Boolean isPooledConnection();
}
diff --git a/wrapper/src/main/java/software/amazon/jdbc/PluginServiceImpl.java b/wrapper/src/main/java/software/amazon/jdbc/PluginServiceImpl.java
index b6a5c318c..75e9ceff2 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/PluginServiceImpl.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/PluginServiceImpl.java
@@ -47,12 +47,15 @@
import software.amazon.jdbc.exceptions.ExceptionManager;
import software.amazon.jdbc.hostavailability.HostAvailability;
import software.amazon.jdbc.hostavailability.HostAvailabilityStrategyFactory;
+import software.amazon.jdbc.hostlistprovider.HostListProvider;
+import software.amazon.jdbc.hostlistprovider.HostListProviderService;
import software.amazon.jdbc.hostlistprovider.StaticHostListProvider;
import software.amazon.jdbc.profile.ConfigurationProfile;
import software.amazon.jdbc.states.SessionStateService;
import software.amazon.jdbc.states.SessionStateServiceImpl;
import software.amazon.jdbc.targetdriverdialect.TargetDriverDialect;
import software.amazon.jdbc.util.FullServicesContainer;
+import software.amazon.jdbc.util.LogUtils;
import software.amazon.jdbc.util.Messages;
import software.amazon.jdbc.util.Utils;
import software.amazon.jdbc.util.storage.CacheMap;
@@ -92,6 +95,9 @@ public class PluginServiceImpl implements PluginService, CanReleaseResources,
protected final ReentrantLock connectionSwitchLock = new ReentrantLock();
+ // JDBC call context members
+ protected Boolean pooledConnection = null;
+
public PluginServiceImpl(
@NonNull final FullServicesContainer servicesContainer,
@NonNull final Properties props,
@@ -189,7 +195,7 @@ public HostSpec getCurrentHostSpec() {
Messages.get("PluginServiceImpl.currentHostNotAllowed",
new Object[] {
currentHostSpec == null ? "" : currentHostSpec.getHostAndPort(),
- Utils.logTopology(allowedHosts, "")})
+ LogUtils.logTopology(allowedHosts, "")})
);
}
@@ -219,12 +225,6 @@ public String getOriginalUrl() {
return this.originalUrl;
}
- @Override
- @Deprecated
- public void setAllowedAndBlockedHosts(AllowedAndBlockedHosts allowedAndBlockedHosts) {
- this.servicesContainer.getStorageService().set(this.initialConnectionHostSpec.getHost(), allowedAndBlockedHosts);
- }
-
@Override
public boolean acceptsStrategy(HostRole role, String strategy) throws SQLException {
return this.pluginManager.acceptsStrategy(role, strategy);
@@ -245,17 +245,12 @@ public HostRole getHostRole(Connection conn) throws SQLException {
return this.hostListProvider.getHostRole(conn);
}
- @Override
- @Deprecated
- public ConnectionProvider getConnectionProvider() {
- return this.pluginManager.defaultConnProvider;
- }
-
@Override
public ConnectionProvider getDefaultConnectionProvider() {
return this.connectionProviderManager.getDefaultProvider();
}
+ @Deprecated
public boolean isPooledConnectionProvider(HostSpec host, Properties props) {
final ConnectionProvider connectionProvider =
this.connectionProviderManager.getConnectionProvider(this.driverProtocol, host, props);
@@ -664,12 +659,6 @@ public void releaseResources() {
}
}
- @Override
- @Deprecated
- public boolean isNetworkException(Throwable throwable) {
- return this.isNetworkException(throwable, this.targetDriverDialect);
- }
-
@Override
public boolean isNetworkException(final Throwable throwable, @Nullable TargetDriverDialect targetDriverDialect) {
if (this.exceptionHandler != null) {
@@ -686,12 +675,6 @@ public boolean isNetworkException(final String sqlState) {
return this.exceptionManager.isNetworkException(this.dialect, sqlState);
}
- @Override
- @Deprecated
- public boolean isLoginException(Throwable throwable) {
- return this.isLoginException(throwable, this.targetDriverDialect);
- }
-
@Override
public boolean isLoginException(final Throwable throwable, @Nullable TargetDriverDialect targetDriverDialect) {
if (this.exceptionHandler != null) {
@@ -708,6 +691,22 @@ public boolean isLoginException(final String sqlState) {
return this.exceptionManager.isLoginException(this.dialect, sqlState);
}
+ @Override
+ public boolean isReadOnlyConnectionException(@Nullable String sqlState, @Nullable Integer errorCode) {
+ if (this.exceptionHandler != null) {
+ return this.exceptionHandler.isReadOnlyConnectionException(sqlState, errorCode);
+ }
+ return this.exceptionManager.isReadOnlyConnectionException(this.dialect, sqlState, errorCode);
+ }
+
+ @Override
+ public boolean isReadOnlyConnectionException(Throwable throwable, @Nullable TargetDriverDialect targetDriverDialect) {
+ if (this.exceptionHandler != null) {
+ return this.exceptionHandler.isReadOnlyConnectionException(throwable, targetDriverDialect);
+ }
+ return this.exceptionManager.isReadOnlyConnectionException(this.dialect, throwable, targetDriverDialect);
+ }
+
@Override
public Dialect getDialect() {
return this.dialect;
@@ -728,7 +727,7 @@ public void updateDialect(final @NonNull Connection connection) throws SQLExcept
return;
}
- final HostListProviderSupplier supplier = this.dialect.getHostListProvider();
+ final HostListProviderSupplier supplier = this.dialect.getHostListProviderSupplier();
this.setHostListProvider(supplier.getProvider(this.props, this.originalUrl, this.servicesContainer));
this.refreshHostList(connection);
}
@@ -806,57 +805,27 @@ public static void clearCache() {
hostAvailabilityExpiringCache.clear();
}
- @Deprecated // Use StorageService#set instead.
- public <T> void setStatus(final Class<T> clazz, final @Nullable T status, final boolean clusterBound) {
- String clusterId = null;
- if (clusterBound) {
- try {
- clusterId = this.hostListProvider.getClusterId();
- } catch (Exception ex) {
- // do nothing
- }
- }
- this.setStatus(clazz, status, clusterId);
- }
-
- @Deprecated // Use StorageService#set instead.
- public <T> void setStatus(final Class<T> clazz, final @Nullable T status, final String key) {
- final String cacheKey = this.getStatusCacheKey(clazz, key);
- if (status == null) {
- statusesExpiringCache.remove(cacheKey);
- } else {
- statusesExpiringCache.put(cacheKey, status, DEFAULT_STATUS_CACHE_EXPIRE_NANO);
- }
- }
-
- @Deprecated // Use StorageService#get instead.
- public <T> T getStatus(final @NonNull Class<T> clazz, final boolean clusterBound) {
- String clusterId = null;
- if (clusterBound) {
- try {
- clusterId = this.hostListProvider.getClusterId();
- } catch (Exception ex) {
- // do nothing
- }
+ public boolean isPluginInUse(final Class<? extends ConnectionPlugin> pluginClazz) {
+ try {
+ return this.pluginManager.isWrapperFor(pluginClazz);
+ } catch (SQLException e) {
+ return false;
}
- return this.getStatus(clazz, clusterId);
}
- @Deprecated // Use StorageService#get instead.
- public <T> T getStatus(final @NonNull Class<T> clazz, String key) {
- return clazz.cast(statusesExpiringCache.get(this.getStatusCacheKey(clazz, key)));
+ @Override
+ public Boolean isPooledConnection() {
+ return this.pooledConnection;
}
- protected String getStatusCacheKey(final Class<?> clazz, final String key) {
- return String.format("%s::%s", key == null ? "" : key.trim().toLowerCase(), clazz.getName());
+ @Override
+ public void setIsPooledConnection(Boolean isPooledConnection) {
+ this.pooledConnection = isPooledConnection;
}
- public boolean isPluginInUse(final Class<? extends ConnectionPlugin> pluginClazz) {
- try {
- return this.pluginManager.isWrapperFor(pluginClazz);
- } catch (SQLException e) {
- return false;
- }
+ @Override
+ public void resetCallContext() {
+ this.pooledConnection = null;
}
@Override
diff --git a/wrapper/src/main/java/software/amazon/jdbc/dialect/AuroraMysqlDialect.java b/wrapper/src/main/java/software/amazon/jdbc/dialect/AuroraMysqlDialect.java
index e64352899..8ae253cbd 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/dialect/AuroraMysqlDialect.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/dialect/AuroraMysqlDialect.java
@@ -17,69 +17,39 @@
package software.amazon.jdbc.dialect;
import java.sql.Connection;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
import java.util.Collections;
import java.util.List;
import software.amazon.jdbc.PluginService;
-import software.amazon.jdbc.hostlistprovider.AuroraHostListProvider;
+import software.amazon.jdbc.hostlistprovider.AuroraTopologyUtils;
+import software.amazon.jdbc.hostlistprovider.RdsHostListProvider;
+import software.amazon.jdbc.hostlistprovider.TopologyUtils;
import software.amazon.jdbc.hostlistprovider.monitoring.MonitoringRdsHostListProvider;
import software.amazon.jdbc.plugin.failover2.FailoverConnectionPlugin;
-public class AuroraMysqlDialect extends MysqlDialect implements BlueGreenDialect {
+public class AuroraMysqlDialect extends MysqlDialect implements TopologyDialect, BlueGreenDialect {
- private static final String TOPOLOGY_QUERY =
+ protected static final String AURORA_VERSION_EXISTS_QUERY = "SHOW VARIABLES LIKE 'aurora_version'";
+ protected static final String TOPOLOGY_QUERY =
"SELECT SERVER_ID, CASE WHEN SESSION_ID = 'MASTER_SESSION_ID' THEN TRUE ELSE FALSE END, "
+ "CPU, REPLICA_LAG_IN_MILLISECONDS, LAST_UPDATE_TIMESTAMP "
+ "FROM information_schema.replica_host_status "
- // filter out nodes that haven't been updated in the last 5 minutes
+ // filter out instances that have not been updated in the last 5 minutes
+ "WHERE time_to_sec(timediff(now(), LAST_UPDATE_TIMESTAMP)) <= 300 OR SESSION_ID = 'MASTER_SESSION_ID' ";
- private static final String IS_WRITER_QUERY =
+ protected static final String INSTANCE_ID_QUERY = "SELECT @@aurora_server_id, @@aurora_server_id";
+ protected static final String WRITER_ID_QUERY =
"SELECT SERVER_ID FROM information_schema.replica_host_status "
- + "WHERE SESSION_ID = 'MASTER_SESSION_ID' AND SERVER_ID = @@aurora_server_id";
+ + "WHERE SESSION_ID = 'MASTER_SESSION_ID' AND SERVER_ID = @@aurora_server_id";
+ protected static final String IS_READER_QUERY = "SELECT @@innodb_read_only";
- private static final String NODE_ID_QUERY = "SELECT @@aurora_server_id";
- private static final String IS_READER_QUERY = "SELECT @@innodb_read_only";
-
- private static final String BG_STATUS_QUERY =
- "SELECT * FROM mysql.rds_topology";
-
- private static final String TOPOLOGY_TABLE_EXIST_QUERY =
+ protected static final String BG_TOPOLOGY_EXISTS_QUERY =
"SELECT 1 AS tmp FROM information_schema.tables WHERE"
+ " table_schema = 'mysql' AND table_name = 'rds_topology'";
+ protected static final String BG_STATUS_QUERY = "SELECT * FROM mysql.rds_topology";
@Override
public boolean isDialect(final Connection connection) {
- Statement stmt = null;
- ResultSet rs = null;
- try {
- stmt = connection.createStatement();
- rs = stmt.executeQuery("SHOW VARIABLES LIKE 'aurora_version'");
- if (rs.next()) {
- // If variable with such name is presented then it means it's an Aurora cluster
- return true;
- }
- } catch (final SQLException ex) {
- // ignore
- } finally {
- if (stmt != null) {
- try {
- stmt.close();
- } catch (SQLException ex) {
- // ignore
- }
- }
- if (rs != null) {
- try {
- rs.close();
- } catch (SQLException ex) {
- // ignore
- }
- }
- }
- return false;
+ return dialectUtils.checkExistenceQueries(connection, AURORA_VERSION_EXISTS_QUERY);
}
@Override
@@ -88,45 +58,44 @@ public boolean isDialect(final Connection connection) {
}
@Override
- public HostListProviderSupplier getHostListProvider() {
+ public HostListProviderSupplier getHostListProviderSupplier() {
return (properties, initialUrl, servicesContainer) -> {
final PluginService pluginService = servicesContainer.getPluginService();
+ final TopologyUtils topologyUtils = new AuroraTopologyUtils(this, pluginService.getHostSpecBuilder());
if (pluginService.isPluginInUse(FailoverConnectionPlugin.class)) {
- return new MonitoringRdsHostListProvider(
- properties,
- initialUrl,
- servicesContainer,
- TOPOLOGY_QUERY,
- NODE_ID_QUERY,
- IS_READER_QUERY,
- IS_WRITER_QUERY);
+ return new MonitoringRdsHostListProvider(topologyUtils, properties, initialUrl, servicesContainer);
}
- return new AuroraHostListProvider(
- properties,
- initialUrl,
- servicesContainer,
- TOPOLOGY_QUERY,
- NODE_ID_QUERY,
- IS_READER_QUERY);
+ return new RdsHostListProvider(topologyUtils, properties, initialUrl, servicesContainer);
};
}
@Override
- public String getBlueGreenStatusQuery() {
- return BG_STATUS_QUERY;
+ public String getTopologyQuery() {
+ return TOPOLOGY_QUERY;
+ }
+
+ @Override
+ public String getInstanceIdQuery() {
+ return INSTANCE_ID_QUERY;
+ }
+
+ @Override
+ public String getWriterIdQuery() {
+ return WRITER_ID_QUERY;
+ }
+
+ @Override
+ public String getIsReaderQuery() {
+ return IS_READER_QUERY;
}
@Override
public boolean isBlueGreenStatusAvailable(final Connection connection) {
- try {
- try (Statement statement = connection.createStatement();
- ResultSet rs = statement.executeQuery(TOPOLOGY_TABLE_EXIST_QUERY)) {
- return rs.next();
- }
- } catch (SQLException ex) {
- return false;
- }
+ return dialectUtils.checkExistenceQueries(connection, BG_TOPOLOGY_EXISTS_QUERY);
}
+ @Override
+ public String getBlueGreenStatusQuery() {
+ return BG_STATUS_QUERY;
+ }
}
-
diff --git a/wrapper/src/main/java/software/amazon/jdbc/dialect/AuroraPgDialect.java b/wrapper/src/main/java/software/amazon/jdbc/dialect/AuroraPgDialect.java
index c81d85f70..e43f30cb8 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/dialect/AuroraPgDialect.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/dialect/AuroraPgDialect.java
@@ -20,53 +20,53 @@
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
+import java.util.Arrays;
+import java.util.List;
import java.util.logging.Logger;
import software.amazon.jdbc.PluginService;
-import software.amazon.jdbc.hostlistprovider.AuroraHostListProvider;
+import software.amazon.jdbc.hostlistprovider.AuroraTopologyUtils;
+import software.amazon.jdbc.hostlistprovider.RdsHostListProvider;
+import software.amazon.jdbc.hostlistprovider.TopologyUtils;
import software.amazon.jdbc.hostlistprovider.monitoring.MonitoringRdsHostListProvider;
import software.amazon.jdbc.plugin.failover2.FailoverConnectionPlugin;
import software.amazon.jdbc.util.DriverInfo;
+import software.amazon.jdbc.util.Messages;
-/**
- * Suitable for the following AWS PG configurations.
- * - Regional Cluster
- */
-public class AuroraPgDialect extends PgDialect implements AuroraLimitlessDialect, BlueGreenDialect {
- private static final Logger LOGGER = Logger.getLogger(AuroraPgDialect.class.getName());
+public class AuroraPgDialect extends PgDialect implements TopologyDialect, AuroraLimitlessDialect, BlueGreenDialect {
- private static final String extensionsSql =
+ protected static final String AURORA_UTILS_EXIST_QUERY =
"SELECT (setting LIKE '%aurora_stat_utils%') AS aurora_stat_utils "
+ "FROM pg_catalog.pg_settings "
+ "WHERE name OPERATOR(pg_catalog.=) 'rds.extensions'";
-
- private static final String topologySql = "SELECT 1 FROM pg_catalog.aurora_replica_status() LIMIT 1";
-
- private static final String TOPOLOGY_QUERY =
+ protected static final String TOPOLOGY_EXISTS_QUERY = "SELECT 1 FROM pg_catalog.aurora_replica_status() LIMIT 1";
+ protected static final String TOPOLOGY_QUERY =
"SELECT SERVER_ID, CASE WHEN SESSION_ID OPERATOR(pg_catalog.=) 'MASTER_SESSION_ID' THEN TRUE ELSE FALSE END, "
+ "CPU, COALESCE(REPLICA_LAG_IN_MSEC, 0), LAST_UPDATE_TIMESTAMP "
+ "FROM pg_catalog.aurora_replica_status() "
- // filter out nodes that haven't been updated in the last 5 minutes
+ // filter out instances that haven't been updated in the last 5 minutes
+ "WHERE EXTRACT("
+ "EPOCH FROM(pg_catalog.NOW() OPERATOR(pg_catalog.-) LAST_UPDATE_TIMESTAMP)) OPERATOR(pg_catalog.<=) 300 "
+ "OR SESSION_ID OPERATOR(pg_catalog.=) 'MASTER_SESSION_ID' "
+ "OR LAST_UPDATE_TIMESTAMP IS NULL";
- private static final String IS_WRITER_QUERY =
+ protected static final String INSTANCE_ID_QUERY =
+ "SELECT pg_catalog.aurora_db_instance_identifier(), pg_catalog.aurora_db_instance_identifier()";
+ protected static final String WRITER_ID_QUERY =
"SELECT SERVER_ID FROM pg_catalog.aurora_replica_status() "
+ "WHERE SESSION_ID OPERATOR(pg_catalog.=) 'MASTER_SESSION_ID' "
+ "AND SERVER_ID OPERATOR(pg_catalog.=) pg_catalog.aurora_db_instance_identifier()";
+ protected static final String IS_READER_QUERY = "SELECT pg_catalog.pg_is_in_recovery()";
- private static final String NODE_ID_QUERY = "SELECT pg_catalog.aurora_db_instance_identifier()";
- private static final String IS_READER_QUERY = "SELECT pg_catalog.pg_is_in_recovery()";
protected static final String LIMITLESS_ROUTER_ENDPOINT_QUERY =
"select router_endpoint, load from pg_catalog.aurora_limitless_router_endpoints()";
- private static final String BG_STATUS_QUERY =
+ protected static final String BG_TOPOLOGY_EXISTS_QUERY =
+ "SELECT 'pg_catalog.get_blue_green_fast_switchover_metadata'::regproc";
+ protected static final String BG_STATUS_QUERY =
"SELECT * FROM "
- + "pg_catalog.get_blue_green_fast_switchover_metadata('aws_jdbc_driver-" + DriverInfo.DRIVER_VERSION + "')";
+ + "pg_catalog.get_blue_green_fast_switchover_metadata('aws_jdbc_driver-" + DriverInfo.DRIVER_VERSION + "')";
- private static final String TOPOLOGY_TABLE_EXIST_QUERY =
- "SELECT 'pg_catalog.get_blue_green_fast_switchover_metadata'::regproc";
+ private static final Logger LOGGER = Logger.getLogger(AuroraPgDialect.class.getName());
@Override
public boolean isDialect(final Connection connection) {
@@ -74,112 +74,80 @@ public boolean isDialect(final Connection connection) {
return false;
}
- Statement stmt = null;
- ResultSet rs = null;
boolean hasExtensions = false;
- boolean hasTopology = false;
- try {
- stmt = connection.createStatement();
- rs = stmt.executeQuery(extensionsSql);
- if (rs.next()) {
- final boolean auroraUtils = rs.getBoolean("aurora_stat_utils");
- LOGGER.finest(() -> String.format("auroraUtils: %b", auroraUtils));
- if (auroraUtils) {
- hasExtensions = true;
- }
- }
- } catch (SQLException ex) {
- // ignore
- } finally {
- if (stmt != null) {
- try {
- stmt.close();
- } catch (SQLException ex) {
- // ignore
- }
+ try (Statement stmt = connection.createStatement();
+ ResultSet rs = stmt.executeQuery(AURORA_UTILS_EXIST_QUERY)) {
+ if (!rs.next()) {
+ return false;
}
- if (rs != null) {
- try {
- rs.close();
- } catch (SQLException ex) {
- // ignore
- }
+
+ final boolean auroraUtils = rs.getBoolean("aurora_stat_utils");
+ LOGGER.finest(Messages.get("AuroraPgDialect.auroraUtils", new Object[] {auroraUtils}));
+ if (auroraUtils) {
+ hasExtensions = true;
}
+ } catch (SQLException ex) {
+ return false;
}
+
if (!hasExtensions) {
return false;
}
- try {
- stmt = connection.createStatement();
- rs = stmt.executeQuery(topologySql);
- if (rs.next()) {
- LOGGER.finest(() -> "hasTopology: true");
- hasTopology = true;
- }
- } catch (final SQLException ex) {
- // ignore
- } finally {
- if (stmt != null) {
- try {
- stmt.close();
- } catch (SQLException ex) {
- // ignore
- }
- }
- if (rs != null) {
- try {
- rs.close();
- } catch (SQLException ex) {
- // ignore
- }
- }
- }
- return hasExtensions && hasTopology;
+
+ return dialectUtils.checkExistenceQueries(connection, TOPOLOGY_EXISTS_QUERY);
}
@Override
- public HostListProviderSupplier getHostListProvider() {
+ public List<String> getDialectUpdateCandidates() {
+ return Arrays.asList(DialectCodes.GLOBAL_AURORA_PG,
+ DialectCodes.RDS_MULTI_AZ_PG_CLUSTER,
+ DialectCodes.RDS_PG);
+ }
+
+ @Override
+ public HostListProviderSupplier getHostListProviderSupplier() {
return (properties, initialUrl, servicesContainer) -> {
final PluginService pluginService = servicesContainer.getPluginService();
+ final TopologyUtils topologyUtils = new AuroraTopologyUtils(this, pluginService.getHostSpecBuilder());
if (pluginService.isPluginInUse(FailoverConnectionPlugin.class)) {
- return new MonitoringRdsHostListProvider(
- properties,
- initialUrl,
- servicesContainer,
- TOPOLOGY_QUERY,
- NODE_ID_QUERY,
- IS_READER_QUERY,
- IS_WRITER_QUERY);
+ return new MonitoringRdsHostListProvider(topologyUtils, properties, initialUrl, servicesContainer);
}
- return new AuroraHostListProvider(
- properties,
- initialUrl,
- servicesContainer,
- TOPOLOGY_QUERY,
- NODE_ID_QUERY,
- IS_READER_QUERY);
+ return new RdsHostListProvider(topologyUtils, properties, initialUrl, servicesContainer);
};
}
+ @Override
+ public String getTopologyQuery() {
+ return TOPOLOGY_QUERY;
+ }
+
+ @Override
+ public String getInstanceIdQuery() {
+ return INSTANCE_ID_QUERY;
+ }
+
+ @Override
+ public String getWriterIdQuery() {
+ return WRITER_ID_QUERY;
+ }
+
+ @Override
+ public String getIsReaderQuery() {
+ return IS_READER_QUERY;
+ }
+
@Override
public String getLimitlessRouterEndpointQuery() {
return LIMITLESS_ROUTER_ENDPOINT_QUERY;
}
@Override
- public String getBlueGreenStatusQuery() {
- return BG_STATUS_QUERY;
+ public boolean isBlueGreenStatusAvailable(final Connection connection) {
+ return dialectUtils.checkExistenceQueries(connection, BG_TOPOLOGY_EXISTS_QUERY);
}
@Override
- public boolean isBlueGreenStatusAvailable(final Connection connection) {
- try {
- try (Statement statement = connection.createStatement();
- ResultSet rs = statement.executeQuery(TOPOLOGY_TABLE_EXIST_QUERY)) {
- return rs.next();
- }
- } catch (SQLException ex) {
- return false;
- }
+ public String getBlueGreenStatusQuery() {
+ return BG_STATUS_QUERY;
}
}
diff --git a/wrapper/src/main/java/software/amazon/jdbc/dialect/BlueGreenDialect.java b/wrapper/src/main/java/software/amazon/jdbc/dialect/BlueGreenDialect.java
index ce1b678d3..a5e34f150 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/dialect/BlueGreenDialect.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/dialect/BlueGreenDialect.java
@@ -19,7 +19,7 @@
import java.sql.Connection;
public interface BlueGreenDialect {
- String getBlueGreenStatusQuery();
-
boolean isBlueGreenStatusAvailable(final Connection connection);
+
+ String getBlueGreenStatusQuery();
}
diff --git a/wrapper/src/main/java/software/amazon/jdbc/dialect/Dialect.java b/wrapper/src/main/java/software/amazon/jdbc/dialect/Dialect.java
index 367db7d25..5f09aae0b 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/dialect/Dialect.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/dialect/Dialect.java
@@ -26,22 +26,23 @@
import software.amazon.jdbc.plugin.failover.FailoverRestriction;
public interface Dialect {
- int getDefaultPort();
- ExceptionHandler getExceptionHandler();
+ boolean isDialect(Connection connection);
- String getHostAliasQuery();
+ int getDefaultPort();
- String getServerVersionQuery();
- List</* dialect code */ String> getDialectUpdateCandidates();
- boolean isDialect(Connection connection);
+ ExceptionHandler getExceptionHandler();
+ List</* dialect code */ String> getDialectUpdateCandidates();
+ HostListProviderSupplier getHostListProviderSupplier();
- HostListProviderSupplier getHostListProvider();
+ String getHostAliasQuery();
void prepareConnectProperties(
final @NonNull Properties connectProperties, final @NonNull String protocol, final @NonNull HostSpec hostSpec);
EnumSet<FailoverRestriction> getFailoverRestrictions();
+
+ String getServerVersionQuery();
}
diff --git a/wrapper/src/main/java/software/amazon/jdbc/dialect/DialectCodes.java b/wrapper/src/main/java/software/amazon/jdbc/dialect/DialectCodes.java
index 74c48a67c..47c0de3c5 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/dialect/DialectCodes.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/dialect/DialectCodes.java
@@ -17,12 +17,14 @@
package software.amazon.jdbc.dialect;
public class DialectCodes {
+ public static final String GLOBAL_AURORA_MYSQL = "global-aurora-mysql";
public static final String AURORA_MYSQL = "aurora-mysql";
public static final String RDS_MYSQL = "rds-mysql";
public static final String MYSQL = "mysql";
// https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/multi-az-db-clusters-concepts.html
public static final String RDS_MULTI_AZ_MYSQL_CLUSTER = "rds-multi-az-mysql-cluster";
+ public static final String GLOBAL_AURORA_PG = "global-aurora-pg";
public static final String AURORA_PG = "aurora-pg";
public static final String RDS_PG = "rds-pg";
// https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/multi-az-db-clusters-concepts.html
diff --git a/wrapper/src/main/java/software/amazon/jdbc/dialect/DialectManager.java b/wrapper/src/main/java/software/amazon/jdbc/dialect/DialectManager.java
index d29a1b3dd..ed7f4e71e 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/dialect/DialectManager.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/dialect/DialectManager.java
@@ -57,10 +57,12 @@ public class DialectManager implements DialectProvider {
put(DialectCodes.PG, new PgDialect());
put(DialectCodes.MARIADB, new MariaDbDialect());
put(DialectCodes.RDS_MYSQL, new RdsMysqlDialect());
- put(DialectCodes.RDS_MULTI_AZ_MYSQL_CLUSTER, new RdsMultiAzDbClusterMysqlDialect());
+ put(DialectCodes.RDS_MULTI_AZ_MYSQL_CLUSTER, new MultiAzClusterMysqlDialect());
put(DialectCodes.RDS_PG, new RdsPgDialect());
- put(DialectCodes.RDS_MULTI_AZ_PG_CLUSTER, new RdsMultiAzDbClusterPgDialect());
+ put(DialectCodes.RDS_MULTI_AZ_PG_CLUSTER, new MultiAzClusterPgDialect());
+ put(DialectCodes.GLOBAL_AURORA_MYSQL, new GlobalAuroraMysqlDialect());
put(DialectCodes.AURORA_MYSQL, new AuroraMysqlDialect());
+ put(DialectCodes.GLOBAL_AURORA_PG, new GlobalAuroraPgDialect());
put(DialectCodes.AURORA_PG, new AuroraPgDialect());
put(DialectCodes.UNKNOWN, new UnknownDialect());
}
@@ -73,7 +75,7 @@ public class DialectManager implements DialectProvider {
*/
protected static final long ENDPOINT_CACHE_EXPIRATION = TimeUnit.HOURS.toNanos(24);
- // Map of host name, or url, by dialect code.
+ // Keys are host names or URLs, values are dialect codes.
protected static final CacheMap<String, String> knownEndpointDialects = new CacheMap<>();
private final RdsUtils rdsHelper = new RdsUtils();
@@ -92,28 +94,6 @@ public DialectManager(PluginService pluginService) {
this.pluginService = pluginService;
}
- /**
- * Sets a custom dialect handler.
- *
- * @param dialect A custom dialect to use.
- *
- * @deprecated Use software.amazon.jdbc.Driver instead
- */
- @Deprecated
- public static void setCustomDialect(final @NonNull Dialect dialect) {
- Driver.setCustomDialect(dialect);
- }
-
- /**
- * Resets a custom dialect handler.
- *
- * @deprecated Use software.amazon.jdbc.Driver instead
- */
- @Deprecated
- public static void resetCustomDialect() {
- Driver.resetCustomDialect();
- }
-
public static void resetEndpointCache() {
knownEndpointDialects.clear();
}
@@ -149,8 +129,7 @@ public Dialect getDialect(
this.logCurrentDialect();
return userDialect;
} else {
- throw new SQLException(
- Messages.get("DialectManager.unknownDialectCode", new Object[] {dialectCode}));
+ throw new SQLException(Messages.get("DialectManager.unknownDialectCode", new Object[] {dialectCode}));
}
}
@@ -160,13 +139,19 @@ public Dialect getDialect(
String host = url;
final List<HostSpec> hosts = this.connectionUrlParser.getHostsFromConnectionUrl(
- url, true, pluginService::getHostSpecBuilder);
+ url, true, pluginService::getHostSpecBuilder);
if (!Utils.isNullOrEmpty(hosts)) {
host = hosts.get(0).getHost();
}
if (driverProtocol.contains("mysql")) {
RdsUrlType type = this.rdsHelper.identifyRdsType(host);
+ if (type == RdsUrlType.RDS_GLOBAL_WRITER_CLUSTER) {
+ this.canUpdate = false;
+ this.dialectCode = DialectCodes.GLOBAL_AURORA_MYSQL;
+ this.dialect = knownDialectsByCode.get(DialectCodes.GLOBAL_AURORA_MYSQL);
+ return this.dialect;
+ }
if (type.isRdsCluster()) {
this.canUpdate = true;
this.dialectCode = DialectCodes.AURORA_MYSQL;
@@ -195,6 +180,12 @@ public Dialect getDialect(
this.dialect = knownDialectsByCode.get(DialectCodes.AURORA_PG);
return this.dialect;
}
+ if (RdsUrlType.RDS_GLOBAL_WRITER_CLUSTER.equals(type)) {
+ this.canUpdate = false;
+ this.dialectCode = DialectCodes.GLOBAL_AURORA_PG;
+ this.dialect = knownDialectsByCode.get(DialectCodes.GLOBAL_AURORA_PG);
+ return this.dialect;
+ }
if (type.isRdsCluster()) {
this.canUpdate = true;
this.dialectCode = DialectCodes.AURORA_PG;
@@ -246,9 +237,10 @@ public Dialect getDialect(
for (String dialectCandidateCode : dialectCandidates) {
Dialect dialectCandidate = knownDialectsByCode.get(dialectCandidateCode);
if (dialectCandidate == null) {
- throw new SQLException(
- Messages.get("DialectManager.unknownDialectCode", new Object[] {dialectCandidateCode}));
+ throw new SQLException(Messages.get(
+ "DialectManager.unknownDialectCode", new Object[] {dialectCandidateCode}));
}
+
boolean isDialect = dialectCandidate.isDialect(connection);
if (isDialect) {
this.canUpdate = false;
@@ -278,9 +270,8 @@ public Dialect getDialect(
}
private void logCurrentDialect() {
- LOGGER.finest(() -> String.format("Current dialect: %s, %s, canUpdate: %b",
- this.dialectCode,
- this.dialect == null ? "" : this.dialect,
- this.canUpdate));
+ LOGGER.finest(Messages.get(
+ "DialectManager.currentDialect",
+ new Object[] {this.dialectCode, this.dialect == null ? "" : this.dialect, this.canUpdate}));
}
}
diff --git a/wrapper/src/main/java/software/amazon/jdbc/dialect/DialectUtils.java b/wrapper/src/main/java/software/amazon/jdbc/dialect/DialectUtils.java
new file mode 100644
index 000000000..a09480cd7
--- /dev/null
+++ b/wrapper/src/main/java/software/amazon/jdbc/dialect/DialectUtils.java
@@ -0,0 +1,46 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package software.amazon.jdbc.dialect;
+
+import java.sql.Connection;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+
+public class DialectUtils {
+ /**
+ * Given a series of existence queries, returns true if they all execute successfully and contain at least one record.
+ * Otherwise, returns false.
+ *
+ * @param conn the connection to use for executing the queries.
+ * @param existenceQueries the queries to check for existing records.
+ * @return true if all queries execute successfully and return at least one record, false otherwise.
+ */
+ public boolean checkExistenceQueries(Connection conn, String... existenceQueries) {
+ for (String existenceQuery : existenceQueries) {
+ try (Statement stmt = conn.createStatement(); ResultSet rs = stmt.executeQuery(existenceQuery)) {
+ if (!rs.next()) {
+ return false;
+ }
+ } catch (SQLException e) {
+ return false;
+ }
+ }
+
+ return true;
+ }
+}
diff --git a/wrapper/src/main/java/software/amazon/jdbc/dialect/GlobalAuroraMysqlDialect.java b/wrapper/src/main/java/software/amazon/jdbc/dialect/GlobalAuroraMysqlDialect.java
new file mode 100644
index 000000000..334757af9
--- /dev/null
+++ b/wrapper/src/main/java/software/amazon/jdbc/dialect/GlobalAuroraMysqlDialect.java
@@ -0,0 +1,97 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package software.amazon.jdbc.dialect;
+
+import java.sql.Connection;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.Collections;
+import java.util.List;
+import software.amazon.jdbc.PluginService;
+import software.amazon.jdbc.hostlistprovider.GlobalAuroraHostListProvider;
+import software.amazon.jdbc.hostlistprovider.GlobalAuroraTopologyUtils;
+import software.amazon.jdbc.hostlistprovider.monitoring.MonitoringGlobalAuroraHostListProvider;
+import software.amazon.jdbc.plugin.failover2.FailoverConnectionPlugin;
+
+public class GlobalAuroraMysqlDialect extends AuroraMysqlDialect implements GlobalAuroraTopologyDialect {
+
+ protected static final String GLOBAL_STATUS_TABLE_EXISTS_QUERY =
+ "SELECT 1 AS tmp FROM information_schema.tables WHERE"
+ + " upper(table_schema) = 'INFORMATION_SCHEMA' AND upper(table_name) = 'AURORA_GLOBAL_DB_STATUS'";
+ protected static final String GLOBAL_INSTANCE_STATUS_EXISTS_QUERY =
+ "SELECT 1 AS tmp FROM information_schema.tables WHERE"
+ + " upper(table_schema) = 'INFORMATION_SCHEMA' AND upper(table_name) = 'AURORA_GLOBAL_DB_INSTANCE_STATUS'";
+
+ protected static final String GLOBAL_TOPOLOGY_QUERY =
+ "SELECT SERVER_ID, CASE WHEN SESSION_ID = 'MASTER_SESSION_ID' THEN TRUE ELSE FALSE END, "
+ + "VISIBILITY_LAG_IN_MSEC, AWS_REGION "
+ + "FROM information_schema.aurora_global_db_instance_status ";
+
+ protected static final String REGION_COUNT_QUERY = "SELECT count(1) FROM information_schema.aurora_global_db_status";
+ protected static final String REGION_BY_INSTANCE_ID_QUERY =
+ "SELECT AWS_REGION FROM information_schema.aurora_global_db_instance_status WHERE SERVER_ID = ?";
+
+
+ @Override
+ public boolean isDialect(final Connection connection) {
+ if (!dialectUtils.checkExistenceQueries(
+ connection, GLOBAL_STATUS_TABLE_EXISTS_QUERY, GLOBAL_INSTANCE_STATUS_EXISTS_QUERY)) {
+ return false;
+ }
+
+ try (Statement stmt = connection.createStatement();
+ ResultSet rs = stmt.executeQuery(REGION_COUNT_QUERY)) {
+ if (!rs.next()) {
+ return false;
+ }
+
+ int awsRegionCount = rs.getInt(1);
+ return awsRegionCount > 1;
+ } catch (final SQLException ex) {
+ return false;
+ }
+ }
+
+ @Override
+ public List<String> getDialectUpdateCandidates() {
+ return Collections.emptyList();
+ }
+
+ @Override
+ public HostListProviderSupplier getHostListProviderSupplier() {
+ return (properties, initialUrl, servicesContainer) -> {
+ final PluginService pluginService = servicesContainer.getPluginService();
+ final GlobalAuroraTopologyUtils topologyUtils =
+ new GlobalAuroraTopologyUtils(this, pluginService.getHostSpecBuilder());
+ if (pluginService.isPluginInUse(FailoverConnectionPlugin.class)) {
+ return new MonitoringGlobalAuroraHostListProvider(topologyUtils, properties, initialUrl, servicesContainer);
+ }
+ return new GlobalAuroraHostListProvider(topologyUtils, properties, initialUrl, servicesContainer);
+ };
+ }
+
+ @Override
+ public String getTopologyQuery() {
+ return GLOBAL_TOPOLOGY_QUERY;
+ }
+
+ @Override
+ public String getRegionByInstanceIdQuery() {
+ return REGION_BY_INSTANCE_ID_QUERY;
+ }
+}
diff --git a/wrapper/src/main/java/software/amazon/jdbc/dialect/GlobalAuroraPgDialect.java b/wrapper/src/main/java/software/amazon/jdbc/dialect/GlobalAuroraPgDialect.java
new file mode 100644
index 000000000..7c060c800
--- /dev/null
+++ b/wrapper/src/main/java/software/amazon/jdbc/dialect/GlobalAuroraPgDialect.java
@@ -0,0 +1,112 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package software.amazon.jdbc.dialect;
+
+import java.sql.Connection;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.Collections;
+import java.util.List;
+import java.util.logging.Logger;
+import software.amazon.jdbc.PluginService;
+import software.amazon.jdbc.hostlistprovider.GlobalAuroraHostListProvider;
+import software.amazon.jdbc.hostlistprovider.GlobalAuroraTopologyUtils;
+import software.amazon.jdbc.hostlistprovider.monitoring.MonitoringGlobalAuroraHostListProvider;
+import software.amazon.jdbc.plugin.failover2.FailoverConnectionPlugin;
+import software.amazon.jdbc.util.Messages;
+
+public class GlobalAuroraPgDialect extends AuroraPgDialect implements GlobalAuroraTopologyDialect {
+
+ protected static final String GLOBAL_STATUS_FUNC_EXISTS_QUERY = "select 'aurora_global_db_status'::regproc";
+ protected static final String GLOBAL_INSTANCE_STATUS_FUNC_EXISTS_QUERY =
+ "select 'aurora_global_db_instance_status'::regproc";
+
+ protected static final String GLOBAL_TOPOLOGY_QUERY =
+ "SELECT SERVER_ID, CASE WHEN SESSION_ID = 'MASTER_SESSION_ID' THEN TRUE ELSE FALSE END, "
+ + "VISIBILITY_LAG_IN_MSEC, AWS_REGION "
+ + "FROM aurora_global_db_instance_status()";
+
+ protected static final String REGION_COUNT_QUERY = "SELECT count(1) FROM aurora_global_db_status()";
+ protected static final String REGION_BY_INSTANCE_ID_QUERY =
+ "SELECT AWS_REGION FROM aurora_global_db_instance_status() WHERE SERVER_ID = ?";
+
+ private static final Logger LOGGER = Logger.getLogger(GlobalAuroraPgDialect.class.getName());
+
+ @Override
+ public boolean isDialect(final Connection connection) {
+ try {
+ try (Statement stmt = connection.createStatement();
+ ResultSet rs = stmt.executeQuery(AURORA_UTILS_EXIST_QUERY)) {
+ if (!rs.next()) {
+ return false;
+ }
+
+ final boolean auroraUtils = rs.getBoolean("aurora_stat_utils");
+ LOGGER.finest(Messages.get("AuroraPgDialect.auroraUtils", new Object[] {auroraUtils}));
+ if (!auroraUtils) {
+ return false;
+ }
+ }
+
+ if (!dialectUtils.checkExistenceQueries(
+ connection, GLOBAL_STATUS_FUNC_EXISTS_QUERY, GLOBAL_INSTANCE_STATUS_FUNC_EXISTS_QUERY)) {
+ return false;
+ }
+
+ try (Statement stmt = connection.createStatement();
+ ResultSet rs = stmt.executeQuery(REGION_COUNT_QUERY)) {
+ if (!rs.next()) {
+ return false;
+ }
+
+ int awsRegionCount = rs.getInt(1);
+ return awsRegionCount > 1;
+ }
+ } catch (final SQLException ex) {
+ return false;
+ }
+ }
+
+ @Override
+ public List<String> getDialectUpdateCandidates() {
+ return Collections.emptyList();
+ }
+
+ @Override
+ public HostListProviderSupplier getHostListProviderSupplier() {
+ return (properties, initialUrl, servicesContainer) -> {
+ final PluginService pluginService = servicesContainer.getPluginService();
+ final GlobalAuroraTopologyUtils topologyUtils =
+ new GlobalAuroraTopologyUtils(this, pluginService.getHostSpecBuilder());
+ if (pluginService.isPluginInUse(FailoverConnectionPlugin.class)) {
+ return new MonitoringGlobalAuroraHostListProvider(topologyUtils, properties, initialUrl, servicesContainer);
+ }
+ return new GlobalAuroraHostListProvider(topologyUtils, properties, initialUrl, servicesContainer);
+ };
+ }
+
+ @Override
+ public String getTopologyQuery() {
+ return GLOBAL_TOPOLOGY_QUERY;
+ }
+
+ @Override
+ public String getRegionByInstanceIdQuery() {
+ return REGION_BY_INSTANCE_ID_QUERY;
+ }
+}
diff --git a/wrapper/src/main/java/software/amazon/jdbc/dialect/GlobalAuroraTopologyDialect.java b/wrapper/src/main/java/software/amazon/jdbc/dialect/GlobalAuroraTopologyDialect.java
new file mode 100644
index 000000000..11db48dff
--- /dev/null
+++ b/wrapper/src/main/java/software/amazon/jdbc/dialect/GlobalAuroraTopologyDialect.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package software.amazon.jdbc.dialect;
+
+public interface GlobalAuroraTopologyDialect extends TopologyDialect {
+ String getRegionByInstanceIdQuery();
+}
diff --git a/wrapper/src/main/java/software/amazon/jdbc/dialect/HostListProviderSupplier.java b/wrapper/src/main/java/software/amazon/jdbc/dialect/HostListProviderSupplier.java
index 0dfe44dc5..bee378f9f 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/dialect/HostListProviderSupplier.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/dialect/HostListProviderSupplier.java
@@ -18,7 +18,7 @@
import java.util.Properties;
import org.checkerframework.checker.nullness.qual.NonNull;
-import software.amazon.jdbc.HostListProvider;
+import software.amazon.jdbc.hostlistprovider.HostListProvider;
import software.amazon.jdbc.util.FullServicesContainer;
@FunctionalInterface
diff --git a/wrapper/src/main/java/software/amazon/jdbc/dialect/MariaDbDialect.java b/wrapper/src/main/java/software/amazon/jdbc/dialect/MariaDbDialect.java
index 3b368a8a1..58453f6fa 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/dialect/MariaDbDialect.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/dialect/MariaDbDialect.java
@@ -32,45 +32,23 @@
import software.amazon.jdbc.plugin.failover.FailoverRestriction;
public class MariaDbDialect implements Dialect {
+
+ protected static final String VERSION_QUERY = "SELECT VERSION()";
+ protected static final String HOST_ALIAS_QUERY = "SELECT CONCAT(@@hostname, ':', @@port)";
+
+ private static MariaDBExceptionHandler mariaDBExceptionHandler;
+ private static final EnumSet<FailoverRestriction> NO_FAILOVER_RESTRICTIONS =
+ EnumSet.noneOf(FailoverRestriction.class);
private static final List<String> dialectUpdateCandidates = Arrays.asList(
DialectCodes.AURORA_MYSQL,
DialectCodes.RDS_MULTI_AZ_MYSQL_CLUSTER,
DialectCodes.RDS_MYSQL,
DialectCodes.MYSQL);
- private static MariaDBExceptionHandler mariaDBExceptionHandler;
-
- private static final EnumSet<FailoverRestriction> NO_RESTRICTIONS = EnumSet.noneOf(FailoverRestriction.class);
-
- @Override
- public int getDefaultPort() {
- return 3306;
- }
-
- @Override
- public ExceptionHandler getExceptionHandler() {
- if (mariaDBExceptionHandler == null) {
- mariaDBExceptionHandler = new MariaDBExceptionHandler();
- }
- return mariaDBExceptionHandler;
- }
-
- @Override
- public String getHostAliasQuery() {
- return "SELECT CONCAT(@@hostname, ':', @@port)";
- }
-
- @Override
- public String getServerVersionQuery() {
- return "SELECT VERSION()";
- }
@Override
public boolean isDialect(final Connection connection) {
- Statement stmt = null;
- ResultSet rs = null;
- try {
- stmt = connection.createStatement();
- rs = stmt.executeQuery(this.getServerVersionQuery());
+ try (Statement stmt = connection.createStatement();
+ ResultSet rs = stmt.executeQuery(VERSION_QUERY)) {
while (rs.next()) {
final String columnValue = rs.getString(1);
if (columnValue != null && columnValue.toLowerCase().contains("mariadb")) {
@@ -78,32 +56,31 @@ public boolean isDialect(final Connection connection) {
}
}
} catch (final SQLException ex) {
- // ignore
- } finally {
- if (stmt != null) {
- try {
- stmt.close();
- } catch (SQLException ex) {
- // ignore
- }
- }
- if (rs != null) {
- try {
- rs.close();
- } catch (SQLException ex) {
- // ignore
- }
- }
+ return false;
}
+
return false;
}
+ @Override
+ public int getDefaultPort() {
+ return 3306;
+ }
+
@Override
public List<String> getDialectUpdateCandidates() {
return dialectUpdateCandidates;
}
- public HostListProviderSupplier getHostListProvider() {
+ @Override
+ public ExceptionHandler getExceptionHandler() {
+ if (mariaDBExceptionHandler == null) {
+ mariaDBExceptionHandler = new MariaDBExceptionHandler();
+ }
+ return mariaDBExceptionHandler;
+ }
+
+ public HostListProviderSupplier getHostListProviderSupplier() {
return (properties, initialUrl, servicesContainer) ->
new ConnectionStringHostListProvider(properties, initialUrl, servicesContainer.getHostListProviderService());
}
@@ -116,6 +93,16 @@ public void prepareConnectProperties(
@Override
public EnumSet<FailoverRestriction> getFailoverRestrictions() {
- return NO_RESTRICTIONS;
+ return NO_FAILOVER_RESTRICTIONS;
+ }
+
+ @Override
+ public String getServerVersionQuery() {
+ return VERSION_QUERY;
+ }
+
+ @Override
+ public String getHostAliasQuery() {
+ return HOST_ALIAS_QUERY;
}
}
diff --git a/wrapper/src/main/java/software/amazon/jdbc/dialect/MultiAzClusterDialect.java b/wrapper/src/main/java/software/amazon/jdbc/dialect/MultiAzClusterDialect.java
new file mode 100644
index 000000000..4dc0a584d
--- /dev/null
+++ b/wrapper/src/main/java/software/amazon/jdbc/dialect/MultiAzClusterDialect.java
@@ -0,0 +1,23 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package software.amazon.jdbc.dialect;
+
+public interface MultiAzClusterDialect extends TopologyDialect {
+ String getWriterIdQuery();
+
+ String getWriterIdColumnName();
+}
diff --git a/wrapper/src/main/java/software/amazon/jdbc/dialect/RdsMultiAzDbClusterMysqlDialect.java b/wrapper/src/main/java/software/amazon/jdbc/dialect/MultiAzClusterMysqlDialect.java
similarity index 50%
rename from wrapper/src/main/java/software/amazon/jdbc/dialect/RdsMultiAzDbClusterMysqlDialect.java
rename to wrapper/src/main/java/software/amazon/jdbc/dialect/MultiAzClusterMysqlDialect.java
index 930cf1631..9fe7d3b03 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/dialect/RdsMultiAzDbClusterMysqlDialect.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/dialect/MultiAzClusterMysqlDialect.java
@@ -26,65 +26,56 @@
import org.checkerframework.checker.nullness.qual.NonNull;
import software.amazon.jdbc.HostSpec;
import software.amazon.jdbc.PluginService;
-import software.amazon.jdbc.hostlistprovider.RdsMultiAzDbClusterListProvider;
-import software.amazon.jdbc.hostlistprovider.monitoring.MonitoringRdsMultiAzHostListProvider;
+import software.amazon.jdbc.hostlistprovider.MultiAzTopologyUtils;
+import software.amazon.jdbc.hostlistprovider.RdsHostListProvider;
+import software.amazon.jdbc.hostlistprovider.TopologyUtils;
+import software.amazon.jdbc.hostlistprovider.monitoring.MonitoringRdsHostListProvider;
import software.amazon.jdbc.plugin.failover.FailoverRestriction;
import software.amazon.jdbc.plugin.failover2.FailoverConnectionPlugin;
import software.amazon.jdbc.util.DriverInfo;
import software.amazon.jdbc.util.RdsUtils;
import software.amazon.jdbc.util.StringUtils;
-public class RdsMultiAzDbClusterMysqlDialect extends MysqlDialect {
+public class MultiAzClusterMysqlDialect extends MysqlDialect implements MultiAzClusterDialect {
- private static final String TOPOLOGY_QUERY = "SELECT id, endpoint, port FROM mysql.rds_topology";
-
- private static final String TOPOLOGY_TABLE_EXIST_QUERY =
+ protected static final String REPORT_HOST_EXISTS_QUERY = "SHOW VARIABLES LIKE 'report_host'";
+ protected static final String TOPOLOGY_TABLE_EXISTS_QUERY =
"SELECT 1 AS tmp FROM information_schema.tables WHERE"
- + " table_schema = 'mysql' AND table_name = 'rds_topology'";
-
- // For reader nodes, the query returns a writer node ID. For a writer node, the query returns no data.
- private static final String FETCH_WRITER_NODE_QUERY = "SHOW REPLICA STATUS";
-
- private static final String FETCH_WRITER_NODE_QUERY_COLUMN_NAME = "Source_Server_Id";
-
- private static final String NODE_ID_QUERY = "SELECT @@server_id";
- private static final String IS_READER_QUERY = "SELECT @@read_only";
-
- private static final EnumSet<FailoverRestriction> RDS_MULTI_AZ_RESTRICTIONS =
+ + " table_schema = 'mysql' AND table_name = 'rds_topology'";
+ protected static final String TOPOLOGY_QUERY = "SELECT id, endpoint, port FROM mysql.rds_topology";
+
+ // This query returns both instanceId and instanceName.
+ // For example: "1845128080", "test-multiaz-instance-1"
+ protected static final String INSTANCE_ID_QUERY = "SELECT id, SUBSTRING_INDEX(endpoint, '.', 1)"
+ + " FROM mysql.rds_topology"
+ + " WHERE id = @@server_id";
+ // For reader instances, this query returns a writer instance ID. For a writer instance, this query returns no data.
+ protected static final String WRITER_ID_QUERY = "SHOW REPLICA STATUS";
+ protected static final String WRITER_ID_QUERY_COLUMN_NAME = "Source_Server_Id";
+ protected static final String IS_READER_QUERY = "SELECT @@read_only";
+
+ private static final EnumSet<FailoverRestriction> FAILOVER_RESTRICTIONS =
EnumSet.of(FailoverRestriction.DISABLE_TASK_A, FailoverRestriction.ENABLE_WRITER_IN_TASK_B);
protected final RdsUtils rdsUtils = new RdsUtils();
@Override
public boolean isDialect(final Connection connection) {
- try {
- try (Statement stmt = connection.createStatement();
- ResultSet rs = stmt.executeQuery(TOPOLOGY_TABLE_EXIST_QUERY)) {
- if (!rs.next()) {
- return false;
- }
- }
-
- try (Statement stmt = connection.createStatement();
- ResultSet rs = stmt.executeQuery(TOPOLOGY_QUERY)) {
- if (!rs.next()) {
- return false;
- }
- }
+ if (!dialectUtils.checkExistenceQueries(connection, TOPOLOGY_TABLE_EXISTS_QUERY, TOPOLOGY_QUERY)) {
+ return false;
+ }
- try (Statement stmt = connection.createStatement();
- ResultSet rs = stmt.executeQuery("SHOW VARIABLES LIKE 'report_host'")) {
- if (!rs.next()) {
- return false;
- }
- final String reportHost = rs.getString(2); // get variable value; expected value is IP address
- return !StringUtils.isNullOrEmpty(reportHost);
+ try (Statement stmt = connection.createStatement();
+ ResultSet rs = stmt.executeQuery(REPORT_HOST_EXISTS_QUERY)) {
+ if (!rs.next()) {
+ return false;
}
+ final String reportHost = rs.getString(2); // Expected value is an IP address
+ return !StringUtils.isNullOrEmpty(reportHost);
} catch (final SQLException ex) {
- // ignore
+ return false;
}
- return false;
}
@Override
@@ -93,31 +84,14 @@ public boolean isDialect(final Connection connection) {
}
@Override
- public HostListProviderSupplier getHostListProvider() {
+ public HostListProviderSupplier getHostListProviderSupplier() {
return (properties, initialUrl, servicesContainer) -> {
final PluginService pluginService = servicesContainer.getPluginService();
+ final TopologyUtils topologyUtils = new MultiAzTopologyUtils(this, pluginService.getHostSpecBuilder());
if (pluginService.isPluginInUse(FailoverConnectionPlugin.class)) {
- return new MonitoringRdsMultiAzHostListProvider(
- properties,
- initialUrl,
- servicesContainer,
- TOPOLOGY_QUERY,
- NODE_ID_QUERY,
- IS_READER_QUERY,
- FETCH_WRITER_NODE_QUERY,
- FETCH_WRITER_NODE_QUERY_COLUMN_NAME);
-
- } else {
- return new RdsMultiAzDbClusterListProvider(
- properties,
- initialUrl,
- servicesContainer,
- TOPOLOGY_QUERY,
- NODE_ID_QUERY,
- IS_READER_QUERY,
- FETCH_WRITER_NODE_QUERY,
- FETCH_WRITER_NODE_QUERY_COLUMN_NAME);
+ return new MonitoringRdsHostListProvider(topologyUtils, properties, initialUrl, servicesContainer);
}
+ return new RdsHostListProvider(topologyUtils, properties, initialUrl, servicesContainer);
};
}
@@ -134,6 +108,31 @@ public void prepareConnectProperties(
@Override
public EnumSet<FailoverRestriction> getFailoverRestrictions() {
- return RDS_MULTI_AZ_RESTRICTIONS;
+ return FAILOVER_RESTRICTIONS;
+ }
+
+ @Override
+ public String getTopologyQuery() {
+ return TOPOLOGY_QUERY;
+ }
+
+ @Override
+ public String getInstanceIdQuery() {
+ return INSTANCE_ID_QUERY;
+ }
+
+ @Override
+ public String getIsReaderQuery() {
+ return IS_READER_QUERY;
+ }
+
+ @Override
+ public String getWriterIdQuery() {
+ return WRITER_ID_QUERY;
+ }
+
+ @Override
+ public String getWriterIdColumnName() {
+ return WRITER_ID_QUERY_COLUMN_NAME;
}
}
diff --git a/wrapper/src/main/java/software/amazon/jdbc/dialect/RdsMultiAzDbClusterPgDialect.java b/wrapper/src/main/java/software/amazon/jdbc/dialect/MultiAzClusterPgDialect.java
similarity index 55%
rename from wrapper/src/main/java/software/amazon/jdbc/dialect/RdsMultiAzDbClusterPgDialect.java
rename to wrapper/src/main/java/software/amazon/jdbc/dialect/MultiAzClusterPgDialect.java
index eb3796adb..ba3600710 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/dialect/RdsMultiAzDbClusterPgDialect.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/dialect/MultiAzClusterPgDialect.java
@@ -21,56 +21,48 @@
import java.sql.SQLException;
import java.sql.Statement;
import java.util.List;
-import java.util.logging.Logger;
import software.amazon.jdbc.PluginService;
import software.amazon.jdbc.exceptions.ExceptionHandler;
import software.amazon.jdbc.exceptions.MultiAzDbClusterPgExceptionHandler;
-import software.amazon.jdbc.hostlistprovider.RdsMultiAzDbClusterListProvider;
-import software.amazon.jdbc.hostlistprovider.monitoring.MonitoringRdsMultiAzHostListProvider;
+import software.amazon.jdbc.hostlistprovider.MultiAzTopologyUtils;
+import software.amazon.jdbc.hostlistprovider.RdsHostListProvider;
+import software.amazon.jdbc.hostlistprovider.TopologyUtils;
+import software.amazon.jdbc.hostlistprovider.monitoring.MonitoringRdsHostListProvider;
import software.amazon.jdbc.plugin.failover2.FailoverConnectionPlugin;
import software.amazon.jdbc.util.DriverInfo;
-public class RdsMultiAzDbClusterPgDialect extends PgDialect {
+public class MultiAzClusterPgDialect extends PgDialect implements MultiAzClusterDialect {
- private static final Logger LOGGER = Logger.getLogger(RdsMultiAzDbClusterPgDialect.class.getName());
-
- private static MultiAzDbClusterPgExceptionHandler exceptionHandler;
-
- private static final String TOPOLOGY_QUERY =
+ protected static final String IS_RDS_CLUSTER_QUERY =
+ "SELECT multi_az_db_cluster_source_dbi_resource_id FROM rds_tools.multi_az_db_cluster_source_dbi_resource_id()";
+ protected static final String TOPOLOGY_QUERY =
"SELECT id, endpoint, port FROM rds_tools.show_topology('aws_jdbc_driver-" + DriverInfo.DRIVER_VERSION + "')";
- // For reader nodes, the query should return a writer node ID. For a writer node, the query should return no data.
- private static final String FETCH_WRITER_NODE_QUERY =
+ // This query returns both instanceId and instanceName.
+ // For example: "db-WQFQKBTL2LQUPIEFIFBGENS4ZQ", "test-multiaz-instance-1"
+ protected static final String INSTANCE_ID_QUERY =
+ "SELECT id, SUBSTRING(endpoint FROM 0 FOR POSITION('.' IN endpoint))"
+ + " FROM rds_tools.show_topology()"
+ + " WHERE id OPERATOR(pg_catalog.=) rds_tools.dbi_resource_id()";
+ // For reader instances, this query should return a writer instance ID.
+ // For a writer instance, this query should return no data.
+ protected static final String WRITER_ID_QUERY =
"SELECT multi_az_db_cluster_source_dbi_resource_id FROM rds_tools.multi_az_db_cluster_source_dbi_resource_id()"
+ " WHERE multi_az_db_cluster_source_dbi_resource_id OPERATOR(pg_catalog.!=)"
+ " (SELECT dbi_resource_id FROM rds_tools.dbi_resource_id())";
+ protected static final String WRITER_ID_QUERY_COLUMN_NAME = "multi_az_db_cluster_source_dbi_resource_id";
+ protected static final String IS_READER_QUERY = "SELECT pg_catalog.pg_is_in_recovery()";
- private static final String IS_RDS_CLUSTER_QUERY =
- "SELECT multi_az_db_cluster_source_dbi_resource_id FROM rds_tools.multi_az_db_cluster_source_dbi_resource_id()";
-
- private static final String FETCH_WRITER_NODE_QUERY_COLUMN_NAME = "multi_az_db_cluster_source_dbi_resource_id";
-
- private static final String NODE_ID_QUERY = "SELECT dbi_resource_id FROM rds_tools.dbi_resource_id()";
-
- private static final String IS_READER_QUERY = "SELECT pg_catalog.pg_is_in_recovery()";
-
- @Override
- public ExceptionHandler getExceptionHandler() {
- if (exceptionHandler == null) {
- exceptionHandler = new MultiAzDbClusterPgExceptionHandler();
- }
- return exceptionHandler;
- }
+ private static MultiAzDbClusterPgExceptionHandler exceptionHandler;
@Override
public boolean isDialect(final Connection connection) {
try (Statement stmt = connection.createStatement();
- ResultSet rs = stmt.executeQuery(IS_RDS_CLUSTER_QUERY)) {
+ ResultSet rs = stmt.executeQuery(IS_RDS_CLUSTER_QUERY)) {
return rs.next() && rs.getString(1) != null;
} catch (final SQLException ex) {
- // ignore
+ return false;
}
- return false;
}
@Override
@@ -79,32 +71,48 @@ public boolean isDialect(final Connection connection) {
}
@Override
- public HostListProviderSupplier getHostListProvider() {
+ public ExceptionHandler getExceptionHandler() {
+ if (exceptionHandler == null) {
+ exceptionHandler = new MultiAzDbClusterPgExceptionHandler();
+ }
+ return exceptionHandler;
+ }
+
+ @Override
+ public HostListProviderSupplier getHostListProviderSupplier() {
return (properties, initialUrl, servicesContainer) -> {
final PluginService pluginService = servicesContainer.getPluginService();
+ final TopologyUtils topologyUtils = new MultiAzTopologyUtils(this, pluginService.getHostSpecBuilder());
if (pluginService.isPluginInUse(FailoverConnectionPlugin.class)) {
- return new MonitoringRdsMultiAzHostListProvider(
- properties,
- initialUrl,
- servicesContainer,
- TOPOLOGY_QUERY,
- NODE_ID_QUERY,
- IS_READER_QUERY,
- FETCH_WRITER_NODE_QUERY,
- FETCH_WRITER_NODE_QUERY_COLUMN_NAME);
-
- } else {
-
- return new RdsMultiAzDbClusterListProvider(
- properties,
- initialUrl,
- servicesContainer,
- TOPOLOGY_QUERY,
- NODE_ID_QUERY,
- IS_READER_QUERY,
- FETCH_WRITER_NODE_QUERY,
- FETCH_WRITER_NODE_QUERY_COLUMN_NAME);
+ return new MonitoringRdsHostListProvider(topologyUtils, properties, initialUrl, servicesContainer);
}
+
+ return new RdsHostListProvider(topologyUtils, properties, initialUrl, servicesContainer);
};
}
+
+ @Override
+ public String getTopologyQuery() {
+ return TOPOLOGY_QUERY;
+ }
+
+ @Override
+ public String getInstanceIdQuery() {
+ return INSTANCE_ID_QUERY;
+ }
+
+ @Override
+ public String getIsReaderQuery() {
+ return IS_READER_QUERY;
+ }
+
+ @Override
+ public String getWriterIdQuery() {
+ return WRITER_ID_QUERY;
+ }
+
+ @Override
+ public String getWriterIdColumnName() {
+ return WRITER_ID_QUERY_COLUMN_NAME;
+ }
}
diff --git a/wrapper/src/main/java/software/amazon/jdbc/dialect/MysqlDialect.java b/wrapper/src/main/java/software/amazon/jdbc/dialect/MysqlDialect.java
index de9f181d3..e21bd06d5 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/dialect/MysqlDialect.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/dialect/MysqlDialect.java
@@ -33,45 +33,24 @@
public class MysqlDialect implements Dialect {
+ protected static final String VERSION_QUERY = "SHOW VARIABLES LIKE 'version_comment'";
+ protected static final String HOST_ALIAS_QUERY = "SELECT CONCAT(@@hostname, ':', @@port)";
+ private static MySQLExceptionHandler mySQLExceptionHandler;
+ private static final EnumSet<FailoverRestriction> NO_FAILOVER_RESTRICTIONS =
+ EnumSet.noneOf(FailoverRestriction.class);
private static final List<String> dialectUpdateCandidates = Arrays.asList(
- DialectCodes.RDS_MULTI_AZ_MYSQL_CLUSTER,
+ DialectCodes.GLOBAL_AURORA_MYSQL,
DialectCodes.AURORA_MYSQL,
+ DialectCodes.RDS_MULTI_AZ_MYSQL_CLUSTER,
DialectCodes.RDS_MYSQL
);
- private static MySQLExceptionHandler mySQLExceptionHandler;
-
- private static final EnumSet<FailoverRestriction> NO_RESTRICTIONS = EnumSet.noneOf(FailoverRestriction.class);
-
- @Override
- public int getDefaultPort() {
- return 3306;
- }
-
- @Override
- public ExceptionHandler getExceptionHandler() {
- if (mySQLExceptionHandler == null) {
- mySQLExceptionHandler = new MySQLExceptionHandler();
- }
- return mySQLExceptionHandler;
- }
- @Override
- public String getHostAliasQuery() {
- return "SELECT CONCAT(@@hostname, ':', @@port)";
- }
-
- @Override
- public String getServerVersionQuery() {
- return "SHOW VARIABLES LIKE 'version_comment'";
- }
+ protected final DialectUtils dialectUtils = new DialectUtils();
@Override
public boolean isDialect(final Connection connection) {
- Statement stmt = null;
- ResultSet rs = null;
- try {
- stmt = connection.createStatement();
- rs = stmt.executeQuery(this.getServerVersionQuery());
+ try (Statement stmt = connection.createStatement();
+ ResultSet rs = stmt.executeQuery(VERSION_QUERY)) {
while (rs.next()) {
final String columnValue = rs.getString(2);
if (columnValue != null && columnValue.toLowerCase().contains("mysql")) {
@@ -79,32 +58,31 @@ public boolean isDialect(final Connection connection) {
}
}
} catch (final SQLException ex) {
- // ignore
- } finally {
- if (stmt != null) {
- try {
- stmt.close();
- } catch (SQLException ex) {
- // ignore
- }
- }
- if (rs != null) {
- try {
- rs.close();
- } catch (SQLException ex) {
- // ignore
- }
- }
+ return false;
}
+
return false;
}
+ @Override
+ public int getDefaultPort() {
+ return 3306;
+ }
+
@Override
public List<String> getDialectUpdateCandidates() {
return dialectUpdateCandidates;
}
- public HostListProviderSupplier getHostListProvider() {
+ @Override
+ public ExceptionHandler getExceptionHandler() {
+ if (mySQLExceptionHandler == null) {
+ mySQLExceptionHandler = new MySQLExceptionHandler();
+ }
+ return mySQLExceptionHandler;
+ }
+
+ public HostListProviderSupplier getHostListProviderSupplier() {
return (properties, initialUrl, servicesContainer) ->
new ConnectionStringHostListProvider(properties, initialUrl, servicesContainer.getHostListProviderService());
}
@@ -117,6 +95,16 @@ public void prepareConnectProperties(
@Override
public EnumSet<FailoverRestriction> getFailoverRestrictions() {
- return NO_RESTRICTIONS;
+ return NO_FAILOVER_RESTRICTIONS;
+ }
+
+ @Override
+ public String getServerVersionQuery() {
+ return VERSION_QUERY;
+ }
+
+ @Override
+ public String getHostAliasQuery() {
+ return HOST_ALIAS_QUERY;
}
}
diff --git a/wrapper/src/main/java/software/amazon/jdbc/dialect/PgDialect.java b/wrapper/src/main/java/software/amazon/jdbc/dialect/PgDialect.java
index 075cf242d..abf33ee56 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/dialect/PgDialect.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/dialect/PgDialect.java
@@ -36,20 +36,37 @@
*/
public class PgDialect implements Dialect {
+ protected static final String PG_PROC_EXISTS_QUERY = "SELECT 1 FROM pg_catalog.pg_proc LIMIT 1";
+ protected static final String VERSION_QUERY = "SELECT 'version', pg_catalog.VERSION()";
+ protected static final String HOST_ALIAS_QUERY =
+ "SELECT pg_catalog.CONCAT(pg_catalog.inet_server_addr(), ':', pg_catalog.inet_server_port())";
+
+ private static PgExceptionHandler pgExceptionHandler;
+ private static final EnumSet<FailoverRestriction> NO_FAILOVER_RESTRICTIONS =
+ EnumSet.noneOf(FailoverRestriction.class);
private static final List dialectUpdateCandidates = Arrays.asList(
+ DialectCodes.GLOBAL_AURORA_PG,
DialectCodes.AURORA_PG,
DialectCodes.RDS_MULTI_AZ_PG_CLUSTER,
DialectCodes.RDS_PG);
- private static PgExceptionHandler pgExceptionHandler;
+ protected final DialectUtils dialectUtils = new DialectUtils();
- private static final EnumSet NO_RESTRICTIONS = EnumSet.noneOf(FailoverRestriction.class);
+ @Override
+ public boolean isDialect(final Connection connection) {
+ return dialectUtils.checkExistenceQueries(connection, PG_PROC_EXISTS_QUERY);
+ }
@Override
public int getDefaultPort() {
return 5432;
}
+ @Override
+ public List getDialectUpdateCandidates() {
+ return dialectUpdateCandidates;
+ }
+
@Override
public ExceptionHandler getExceptionHandler() {
if (pgExceptionHandler == null) {
@@ -59,53 +76,7 @@ public ExceptionHandler getExceptionHandler() {
}
@Override
- public String getHostAliasQuery() {
- return "SELECT pg_catalog.CONCAT(pg_catalog.inet_server_addr(), ':', pg_catalog.inet_server_port())";
- }
-
- @Override
- public String getServerVersionQuery() {
- return "SELECT 'version', pg_catalog.VERSION()";
- }
-
- @Override
- public boolean isDialect(final Connection connection) {
- Statement stmt = null;
- ResultSet rs = null;
- try {
- stmt = connection.createStatement();
- rs = stmt.executeQuery("SELECT 1 FROM pg_catalog.pg_proc LIMIT 1");
- if (rs.next()) {
- return true;
- }
- } catch (final SQLException ex) {
- // ignore
- } finally {
- if (stmt != null) {
- try {
- stmt.close();
- } catch (SQLException ex) {
- // ignore
- }
- }
- if (rs != null) {
- try {
- rs.close();
- } catch (SQLException ex) {
- // ignore
- }
- }
- }
- return false;
- }
-
- @Override
- public List getDialectUpdateCandidates() {
- return dialectUpdateCandidates;
- }
-
- @Override
- public HostListProviderSupplier getHostListProvider() {
+ public HostListProviderSupplier getHostListProviderSupplier() {
return (properties, initialUrl, servicesContainer) ->
new ConnectionStringHostListProvider(properties, initialUrl, servicesContainer.getHostListProviderService());
}
@@ -118,6 +89,16 @@ public void prepareConnectProperties(
@Override
public EnumSet getFailoverRestrictions() {
- return NO_RESTRICTIONS;
+ return NO_FAILOVER_RESTRICTIONS;
+ }
+
+ @Override
+ public String getServerVersionQuery() {
+ return VERSION_QUERY;
+ }
+
+ @Override
+ public String getHostAliasQuery() {
+ return HOST_ALIAS_QUERY;
}
}
diff --git a/wrapper/src/main/java/software/amazon/jdbc/dialect/RdsMysqlDialect.java b/wrapper/src/main/java/software/amazon/jdbc/dialect/RdsMysqlDialect.java
index 22e010ea7..1e173c772 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/dialect/RdsMysqlDialect.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/dialect/RdsMysqlDialect.java
@@ -26,15 +26,16 @@
public class RdsMysqlDialect extends MysqlDialect implements BlueGreenDialect {
- private static final String BG_STATUS_QUERY =
- "SELECT * FROM mysql.rds_topology";
-
- private static final String TOPOLOGY_TABLE_EXIST_QUERY =
+ protected static final String REPORT_HOST_EXISTS_QUERY = "SHOW VARIABLES LIKE 'report_host'";
+ protected static final String TOPOLOGY_TABLE_EXISTS_QUERY =
"SELECT 1 AS tmp FROM information_schema.tables WHERE"
+ " table_schema = 'mysql' AND table_name = 'rds_topology'";
+ protected static final String BG_STATUS_QUERY = "SELECT * FROM mysql.rds_topology";
+
private static final List dialectUpdateCandidates = Arrays.asList(
DialectCodes.AURORA_MYSQL,
+ DialectCodes.GLOBAL_AURORA_MYSQL,
DialectCodes.RDS_MULTI_AZ_MYSQL_CLUSTER);
@Override
@@ -53,50 +54,34 @@ public boolean isDialect(final Connection connection) {
// | Variable_name | value |
// |-----------------|---------------------|
// | version_comment | Source distribution |
- // If super.idDialect returns true there is no need to check for RdsMysqlDialect.
+ // If super.isDialect returns true there is no need to check for RdsMysqlDialect.
return false;
}
- Statement stmt = null;
- ResultSet rs = null;
-
- try {
- stmt = connection.createStatement();
- rs = stmt.executeQuery(this.getServerVersionQuery());
- if (!rs.next()) {
- return false;
- }
- final String columnValue = rs.getString(2);
- if (!"Source distribution".equalsIgnoreCase(columnValue)) {
- return false;
- }
- rs.close();
- rs = stmt.executeQuery("SHOW VARIABLES LIKE 'report_host'");
- if (!rs.next()) {
- return false;
- }
- final String reportHost = rs.getString(2); // get variable value; expected empty value
- return StringUtils.isNullOrEmpty(reportHost);
+ try (Statement stmt = connection.createStatement()) {
+ try (ResultSet rs = stmt.executeQuery(VERSION_QUERY)) {
+ if (!rs.next()) {
+ return false;
+ }
- } catch (final SQLException ex) {
- // ignore
- } finally {
- if (stmt != null) {
- try {
- stmt.close();
- } catch (SQLException ex) {
- // ignore
+ final String columnValue = rs.getString(2);
+ if (!"Source distribution".equalsIgnoreCase(columnValue)) {
+ return false;
}
}
- if (rs != null) {
- try {
- rs.close();
- } catch (SQLException ex) {
- // ignore
+
+ try (ResultSet rs = stmt.executeQuery(REPORT_HOST_EXISTS_QUERY)) {
+ if (!rs.next()) {
+ return false;
}
+
+ final String reportHost = rs.getString(2); // An empty value is expected
+ return StringUtils.isNullOrEmpty(reportHost);
}
+
+ } catch (final SQLException ex) {
+ return false;
}
- return false;
}
@Override
@@ -105,19 +90,12 @@ public List getDialectUpdateCandidates() {
}
@Override
- public String getBlueGreenStatusQuery() {
- return BG_STATUS_QUERY;
+ public boolean isBlueGreenStatusAvailable(final Connection connection) {
+ return dialectUtils.checkExistenceQueries(connection, TOPOLOGY_TABLE_EXISTS_QUERY);
}
@Override
- public boolean isBlueGreenStatusAvailable(final Connection connection) {
- try {
- try (Statement statement = connection.createStatement();
- ResultSet rs = statement.executeQuery(TOPOLOGY_TABLE_EXIST_QUERY)) {
- return rs.next();
- }
- } catch (SQLException ex) {
- return false;
- }
+ public String getBlueGreenStatusQuery() {
+ return BG_STATUS_QUERY;
}
}
diff --git a/wrapper/src/main/java/software/amazon/jdbc/dialect/RdsPgDialect.java b/wrapper/src/main/java/software/amazon/jdbc/dialect/RdsPgDialect.java
index d59b9f2eb..62a52d019 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/dialect/RdsPgDialect.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/dialect/RdsPgDialect.java
@@ -24,6 +24,7 @@
import java.util.List;
import java.util.logging.Logger;
import software.amazon.jdbc.util.DriverInfo;
+import software.amazon.jdbc.util.Messages;
/**
* Suitable for the following AWS PG configurations.
@@ -33,60 +34,42 @@
*/
public class RdsPgDialect extends PgDialect implements BlueGreenDialect {
- private static final Logger LOGGER = Logger.getLogger(RdsPgDialect.class.getName());
-
- private static final List dialectUpdateCandidates = Arrays.asList(
- DialectCodes.RDS_MULTI_AZ_PG_CLUSTER,
- DialectCodes.AURORA_PG);
-
- private static final String extensionsSql = "SELECT (setting LIKE '%rds_tools%') AS rds_tools, "
+ protected static final String EXTENSIONS_EXIST_SQL = "SELECT (setting LIKE '%rds_tools%') AS rds_tools, "
+ "(setting LIKE '%aurora_stat_utils%') AS aurora_stat_utils "
+ "FROM pg_catalog.pg_settings "
+ "WHERE name OPERATOR(pg_catalog.=) 'rds.extensions'";
+ protected static final String TOPOLOGY_TABLE_EXISTS_QUERY =
+ "SELECT 'rds_tools.show_topology'::regproc";
- private static final String BG_STATUS_QUERY =
+ protected static final String BG_STATUS_QUERY =
"SELECT * FROM rds_tools.show_topology('aws_jdbc_driver-" + DriverInfo.DRIVER_VERSION + "')";
- private static final String TOPOLOGY_TABLE_EXIST_QUERY =
- "SELECT 'rds_tools.show_topology'::regproc";
+ private static final Logger LOGGER = Logger.getLogger(RdsPgDialect.class.getName());
+ private static final List dialectUpdateCandidates = Arrays.asList(
+ DialectCodes.RDS_MULTI_AZ_PG_CLUSTER,
+ DialectCodes.GLOBAL_AURORA_PG,
+ DialectCodes.AURORA_PG);
@Override
public boolean isDialect(final Connection connection) {
if (!super.isDialect(connection)) {
return false;
}
- Statement stmt = null;
- ResultSet rs = null;
- try {
- stmt = connection.createStatement();
- rs = stmt.executeQuery(extensionsSql);
+ try (Statement stmt = connection.createStatement();
+ ResultSet rs = stmt.executeQuery(EXTENSIONS_EXIST_SQL)) {
while (rs.next()) {
final boolean rdsTools = rs.getBoolean("rds_tools");
final boolean auroraUtils = rs.getBoolean("aurora_stat_utils");
- LOGGER.finest(() -> String.format("rdsTools: %b, auroraUtils: %b", rdsTools, auroraUtils));
+ LOGGER.finest(Messages.get("RdsPgDialect.rdsToolsAuroraUtils", new Object[] {rdsTools, auroraUtils}));
if (rdsTools && !auroraUtils) {
return true;
}
}
} catch (final SQLException ex) {
- // ignore
- } finally {
- if (stmt != null) {
- try {
- stmt.close();
- } catch (SQLException ex) {
- // ignore
- }
- }
- if (rs != null) {
- try {
- rs.close();
- } catch (SQLException ex) {
- // ignore
- }
- }
+ return false;
}
+
return false;
}
@@ -96,19 +79,12 @@ public List getDialectUpdateCandidates() {
}
@Override
- public String getBlueGreenStatusQuery() {
- return BG_STATUS_QUERY;
+ public boolean isBlueGreenStatusAvailable(final Connection connection) {
+ return dialectUtils.checkExistenceQueries(connection, TOPOLOGY_TABLE_EXISTS_QUERY);
}
@Override
- public boolean isBlueGreenStatusAvailable(final Connection connection) {
- try {
- try (Statement statement = connection.createStatement();
- ResultSet rs = statement.executeQuery(TOPOLOGY_TABLE_EXIST_QUERY)) {
- return rs.next();
- }
- } catch (SQLException ex) {
- return false;
- }
+ public String getBlueGreenStatusQuery() {
+ return BG_STATUS_QUERY;
}
}
diff --git a/wrapper/src/main/java/software/amazon/jdbc/dialect/TopologyDialect.java b/wrapper/src/main/java/software/amazon/jdbc/dialect/TopologyDialect.java
new file mode 100644
index 000000000..e7aa0f4d2
--- /dev/null
+++ b/wrapper/src/main/java/software/amazon/jdbc/dialect/TopologyDialect.java
@@ -0,0 +1,27 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package software.amazon.jdbc.dialect;
+
+public interface TopologyDialect extends Dialect {
+ String getTopologyQuery();
+
+ String getInstanceIdQuery();
+
+ String getWriterIdQuery();
+
+ String getIsReaderQuery();
+}
diff --git a/wrapper/src/main/java/software/amazon/jdbc/dialect/UnknownDialect.java b/wrapper/src/main/java/software/amazon/jdbc/dialect/UnknownDialect.java
index 65b9eb544..067261242 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/dialect/UnknownDialect.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/dialect/UnknownDialect.java
@@ -31,6 +31,8 @@
public class UnknownDialect implements Dialect {
private static final List dialectUpdateCandidates = Arrays.asList(
+ DialectCodes.GLOBAL_AURORA_PG,
+ DialectCodes.GLOBAL_AURORA_MYSQL,
DialectCodes.AURORA_PG,
DialectCodes.AURORA_MYSQL,
DialectCodes.RDS_MULTI_AZ_PG_CLUSTER,
@@ -80,7 +82,7 @@ public List getDialectUpdateCandidates() {
}
@Override
- public HostListProviderSupplier getHostListProvider() {
+ public HostListProviderSupplier getHostListProviderSupplier() {
return (properties, initialUrl, servicesContainer) ->
new ConnectionStringHostListProvider(properties, initialUrl, servicesContainer.getHostListProviderService());
}
diff --git a/wrapper/src/main/java/software/amazon/jdbc/exceptions/AbstractPgExceptionHandler.java b/wrapper/src/main/java/software/amazon/jdbc/exceptions/AbstractPgExceptionHandler.java
index 22e9cf1be..9b8337f5c 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/exceptions/AbstractPgExceptionHandler.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/exceptions/AbstractPgExceptionHandler.java
@@ -23,16 +23,13 @@
import software.amazon.jdbc.util.StringUtils;
public abstract class AbstractPgExceptionHandler implements ExceptionHandler {
+
+ protected static final String READ_ONLY_CONNECTION_SQLSTATE = "25006";
+
public abstract List getNetworkErrors();
public abstract List getAccessErrors();
- @Override
- @Deprecated
- public boolean isNetworkException(Throwable throwable) {
- return this.isNetworkException(throwable, null);
- }
-
@Override
public boolean isNetworkException(final Throwable throwable, @Nullable TargetDriverDialect targetDriverDialect) {
Throwable exception = throwable;
@@ -68,12 +65,6 @@ public boolean isNetworkException(final String sqlState) {
return false;
}
- @Override
- @Deprecated
- public boolean isLoginException(final Throwable throwable) {
- return this.isLoginException(throwable, null);
- }
-
@Override
public boolean isLoginException(final Throwable throwable, @Nullable TargetDriverDialect targetDriverDialect) {
Throwable exception = throwable;
@@ -107,4 +98,36 @@ public boolean isLoginException(final String sqlState) {
}
return getAccessErrors().contains(sqlState);
}
+
+ @Override
+ public boolean isReadOnlyConnectionException(
+ final @Nullable String sqlState, final @Nullable Integer errorCode) {
+ return READ_ONLY_CONNECTION_SQLSTATE.equals(sqlState);
+ }
+
+ @Override
+ public boolean isReadOnlyConnectionException(
+ final Throwable throwable, @Nullable TargetDriverDialect targetDriverDialect) {
+
+ Throwable exception = throwable;
+
+ while (exception != null) {
+ String sqlState = null;
+ Integer errorCode = null;
+ if (exception instanceof SQLException) {
+ sqlState = ((SQLException) exception).getSQLState();
+ errorCode = ((SQLException) exception).getErrorCode();
+ } else if (targetDriverDialect != null) {
+ sqlState = targetDriverDialect.getSQLState(exception);
+ }
+
+ if (isReadOnlyConnectionException(sqlState, errorCode)) {
+ return true;
+ }
+
+ exception = exception.getCause();
+ }
+
+ return false;
+ }
}
diff --git a/wrapper/src/main/java/software/amazon/jdbc/exceptions/ExceptionHandler.java b/wrapper/src/main/java/software/amazon/jdbc/exceptions/ExceptionHandler.java
index ab886ece1..16b79f2c2 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/exceptions/ExceptionHandler.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/exceptions/ExceptionHandler.java
@@ -21,33 +21,15 @@
public interface ExceptionHandler {
- /**
- * The method determines whether provided throwable is about any network issues.
- *
- * @param throwable A throwable object to check.
- * @return true, if a provided throwable object is network-related.
- *
- * @deprecated Use similar method below that accepts throwable and target driver dialect.
- */
- @Deprecated
- boolean isNetworkException(Throwable throwable);
-
boolean isNetworkException(Throwable throwable, @Nullable TargetDriverDialect targetDriverDialect);
boolean isNetworkException(String sqlState);
boolean isLoginException(String sqlState);
- /**
- * The method determines whether provided throwable is about any login or authentication issues.
- *
- * @param throwable A throwable object to check.
- * @return true, if a provided throwable object is related to authentication.
+ boolean isLoginException(Throwable throwable, @Nullable TargetDriverDialect targetDriverDialect);
- * @deprecated Use similar method below that accepts throwable and target driver dialect.
- */
- @Deprecated
- boolean isLoginException(Throwable throwable);
+ boolean isReadOnlyConnectionException(final @Nullable String sqlState, final @Nullable Integer errorCode);
- boolean isLoginException(Throwable throwable, @Nullable TargetDriverDialect targetDriverDialect);
+ boolean isReadOnlyConnectionException(Throwable throwable, @Nullable TargetDriverDialect targetDriverDialect);
}
diff --git a/wrapper/src/main/java/software/amazon/jdbc/exceptions/ExceptionManager.java b/wrapper/src/main/java/software/amazon/jdbc/exceptions/ExceptionManager.java
index b6f6f5a64..3f7c45c07 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/exceptions/ExceptionManager.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/exceptions/ExceptionManager.java
@@ -16,34 +16,13 @@
package software.amazon.jdbc.exceptions;
+import org.checkerframework.checker.nullness.qual.Nullable;
import software.amazon.jdbc.Driver;
import software.amazon.jdbc.dialect.Dialect;
import software.amazon.jdbc.targetdriverdialect.TargetDriverDialect;
public class ExceptionManager {
- /**
- * Sets a custom exception handler.
- *
- * @param exceptionHandler A custom exception handler to use.
- *
- * @deprecated Use software.amazon.jdbc.Driver instead
- */
- @Deprecated
- public static void setCustomHandler(final ExceptionHandler exceptionHandler) {
- Driver.setCustomExceptionHandler(exceptionHandler);
- }
-
- /**
- * Resets a custom exception handler.
- *
- * @deprecated Use software.amazon.jdbc.Driver instead
- */
- @Deprecated
- public static void resetCustomHandler() {
- Driver.resetCustomExceptionHandler();
- }
-
public boolean isLoginException(
final Dialect dialect, final Throwable throwable, final TargetDriverDialect targetDriverDialect) {
final ExceptionHandler handler = getHandler(dialect);
@@ -66,6 +45,18 @@ public boolean isNetworkException(final Dialect dialect, final String sqlState)
return handler.isNetworkException(sqlState);
}
+ public boolean isReadOnlyConnectionException(
+ final Dialect dialect, final Throwable throwable, final TargetDriverDialect targetDriverDialect) {
+ final ExceptionHandler handler = getHandler(dialect);
+ return handler.isReadOnlyConnectionException(throwable, targetDriverDialect);
+ }
+
+ public boolean isReadOnlyConnectionException(
+ final Dialect dialect, final @Nullable String sqlState, final @Nullable Integer errorCode) {
+ final ExceptionHandler handler = getHandler(dialect);
+ return handler.isReadOnlyConnectionException(sqlState, errorCode);
+ }
+
private ExceptionHandler getHandler(final Dialect dialect) {
final ExceptionHandler customHandler = Driver.getCustomExceptionHandler();
return customHandler != null ? customHandler : dialect.getExceptionHandler();
diff --git a/wrapper/src/main/java/software/amazon/jdbc/exceptions/GenericExceptionHandler.java b/wrapper/src/main/java/software/amazon/jdbc/exceptions/GenericExceptionHandler.java
index cc34175db..a49dea5c4 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/exceptions/GenericExceptionHandler.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/exceptions/GenericExceptionHandler.java
@@ -38,12 +38,6 @@ public class GenericExceptionHandler implements ExceptionHandler {
"08"
);
- @Override
- @Deprecated
- public boolean isNetworkException(Throwable throwable) {
- return this.isNetworkException(throwable, null);
- }
-
@Override
public boolean isNetworkException(final Throwable throwable, @Nullable TargetDriverDialect targetDriverDialect) {
Throwable exception = throwable;
@@ -79,12 +73,6 @@ public boolean isNetworkException(final String sqlState) {
return false;
}
- @Override
- @Deprecated
- public boolean isLoginException(Throwable throwable) {
- return this.isLoginException(throwable, null);
- }
-
@Override
public boolean isLoginException(final Throwable throwable, TargetDriverDialect targetDriverDialect) {
Throwable exception = throwable;
@@ -115,4 +103,15 @@ public boolean isLoginException(final Throwable throwable, TargetDriverDialect t
public boolean isLoginException(final String sqlState) {
return ACCESS_ERRORS.contains(sqlState);
}
+
+ @Override
+ public boolean isReadOnlyConnectionException(@Nullable String sqlState, @Nullable Integer errorCode) {
+ return false;
+ }
+
+ @Override
+ public boolean isReadOnlyConnectionException(
+ Throwable throwable, @Nullable TargetDriverDialect targetDriverDialect) {
+ return false;
+ }
}
diff --git a/wrapper/src/main/java/software/amazon/jdbc/exceptions/MySQLExceptionHandler.java b/wrapper/src/main/java/software/amazon/jdbc/exceptions/MySQLExceptionHandler.java
index a504f5074..feb98fe38 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/exceptions/MySQLExceptionHandler.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/exceptions/MySQLExceptionHandler.java
@@ -17,6 +17,9 @@
package software.amazon.jdbc.exceptions;
import java.sql.SQLException;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Set;
import org.checkerframework.checker.nullness.qual.Nullable;
import software.amazon.jdbc.targetdriverdialect.TargetDriverDialect;
import software.amazon.jdbc.util.StringUtils;
@@ -27,11 +30,10 @@ public class MySQLExceptionHandler implements ExceptionHandler {
public static final String SET_NETWORK_TIMEOUT_ON_CLOSED_CONNECTION =
"setNetworkTimeout cannot be called on a closed connection";
- @Override
- @Deprecated
- public boolean isNetworkException(Throwable throwable) {
- return this.isNetworkException(throwable, null);
- }
+ private static final Set SQLSTATE_READ_ONLY_CONNECTION = new HashSet<>(Arrays.asList(
+ 1290, // The MySQL server is running with the --read-only option, so it cannot execute this statement
+ 1836 // Running in read-only mode
+ ));
@Override
public boolean isNetworkException(final Throwable throwable, @Nullable TargetDriverDialect targetDriverDialect) {
@@ -74,12 +76,6 @@ public boolean isNetworkException(final String sqlState) {
return sqlState.startsWith("08");
}
- @Override
- @Deprecated
- public boolean isLoginException(Throwable throwable) {
- return this.isLoginException(throwable, null);
- }
-
@Override
public boolean isLoginException(final Throwable throwable, @Nullable TargetDriverDialect targetDriverDialect) {
Throwable exception = throwable;
@@ -115,6 +111,39 @@ public boolean isLoginException(final String sqlState) {
return SQLSTATE_ACCESS_ERROR.equals(sqlState);
}
+ @Override
+ public boolean isReadOnlyConnectionException(
+ final @Nullable String sqlState, final @Nullable Integer errorCode) {
+ // HY000 - generic SQL state; use error code for more specific information
+ return "HY000".equals(sqlState) && errorCode != null && (SQLSTATE_READ_ONLY_CONNECTION.contains(errorCode));
+ }
+
+ @Override
+ public boolean isReadOnlyConnectionException(
+ final Throwable throwable, @Nullable TargetDriverDialect targetDriverDialect) {
+
+ Throwable exception = throwable;
+
+ while (exception != null) {
+ String sqlState = null;
+ Integer errorCode = null;
+ if (exception instanceof SQLException) {
+ sqlState = ((SQLException) exception).getSQLState();
+ errorCode = ((SQLException) exception).getErrorCode();
+ } else if (targetDriverDialect != null) {
+ sqlState = targetDriverDialect.getSQLState(exception);
+ }
+
+ if (isReadOnlyConnectionException(sqlState, errorCode)) {
+ return true;
+ }
+
+ exception = exception.getCause();
+ }
+
+ return false;
+ }
+
private boolean isHikariMariaDbNetworkException(final SQLException sqlException) {
return sqlException.getSQLState().equals(SQLSTATE_SYNTAX_ERROR_OR_ACCESS_VIOLATION)
&& sqlException.getMessage().contains(SET_NETWORK_TIMEOUT_ON_CLOSED_CONNECTION);
diff --git a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/AuroraHostListProvider.java b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/AuroraHostListProvider.java
deleted file mode 100644
index fc53f9e1d..000000000
--- a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/AuroraHostListProvider.java
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package software.amazon.jdbc.hostlistprovider;
-
-
-import java.util.Properties;
-import java.util.logging.Logger;
-import software.amazon.jdbc.util.FullServicesContainer;
-
-
-public class AuroraHostListProvider extends RdsHostListProvider {
-
- static final Logger LOGGER = Logger.getLogger(AuroraHostListProvider.class.getName());
-
- public AuroraHostListProvider(
- final Properties properties,
- final String originalUrl,
- final FullServicesContainer servicesContainer,
- final String topologyQuery,
- final String nodeIdQuery,
- final String isReaderQuery) {
- super(properties,
- originalUrl,
- servicesContainer,
- topologyQuery,
- nodeIdQuery,
- isReaderQuery);
- }
-}
diff --git a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/AuroraTopologyUtils.java b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/AuroraTopologyUtils.java
new file mode 100644
index 000000000..f62415ff9
--- /dev/null
+++ b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/AuroraTopologyUtils.java
@@ -0,0 +1,95 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package software.amazon.jdbc.hostlistprovider;
+
+import java.sql.Connection;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.sql.Timestamp;
+import java.time.Instant;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.logging.Logger;
+import org.checkerframework.checker.nullness.qual.Nullable;
+import software.amazon.jdbc.HostSpec;
+import software.amazon.jdbc.HostSpecBuilder;
+import software.amazon.jdbc.dialect.TopologyDialect;
+import software.amazon.jdbc.util.Messages;
+import software.amazon.jdbc.util.StringUtils;
+
+public class AuroraTopologyUtils extends TopologyUtils {
+ private static final Logger LOGGER = Logger.getLogger(AuroraTopologyUtils.class.getName());
+
+ public AuroraTopologyUtils(TopologyDialect dialect, HostSpecBuilder hostSpecBuilder) {
+ super(dialect, hostSpecBuilder);
+ }
+
+ @Override
+ protected @Nullable List getHosts(
+ Connection conn, ResultSet rs, HostSpec initialHostSpec, HostSpec instanceTemplate) throws SQLException {
+ // Data in the result set is ordered by last update time, so the latest records are last.
+ // We add hosts to a map to ensure newer records are not overwritten by older ones.
+ Map hostsMap = new HashMap<>();
+ while (rs.next()) {
+ try {
+ HostSpec host = createHost(rs, initialHostSpec, instanceTemplate);
+ hostsMap.put(host.getHost(), host);
+ } catch (Exception e) {
+ LOGGER.finest(Messages.get("TopologyUtils.errorProcessingQueryResults", new Object[] {e.getMessage()}));
+ return null;
+ }
+ }
+
+ return new ArrayList<>(hostsMap.values());
+ }
+
+ @Override
+ public boolean isWriterInstance(final Connection connection) throws SQLException {
+ try (final Statement stmt = connection.createStatement()) {
+ try (final ResultSet rs = stmt.executeQuery(this.dialect.getWriterIdQuery())) {
+ if (rs.next()) {
+ return !StringUtils.isNullOrEmpty(rs.getString(1));
+ }
+ }
+ }
+
+ return false;
+ }
+
+ protected HostSpec createHost(ResultSet rs, HostSpec initialHostSpec, HostSpec instanceTemplate) throws SQLException {
+ // The topology query returns at least 4 columns: instance ID, 1/0 (writer/reader),
+ // CPU utilization, and instance lag in time; an optional 5th column holds the last update timestamp.
+ String hostName = rs.getString(1);
+ final boolean isWriter = rs.getBoolean(2);
+ final double cpuUtilization = rs.getDouble(3);
+ final double instanceLag = rs.getDouble(4);
+ Timestamp lastUpdateTime;
+ try {
+ lastUpdateTime = rs.getTimestamp(5);
+ } catch (Exception e) {
+ lastUpdateTime = Timestamp.from(Instant.now());
+ }
+
+ // Calculate weight based on instance lag in time and CPU utilization.
+ final long weight = Math.round(instanceLag) * 100L + Math.round(cpuUtilization);
+
+ return createHost(hostName, hostName, isWriter, weight, lastUpdateTime, initialHostSpec, instanceTemplate);
+ }
+}
diff --git a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/ConnectionStringHostListProvider.java b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/ConnectionStringHostListProvider.java
index 80f55bdad..426ea3963 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/ConnectionStringHostListProvider.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/ConnectionStringHostListProvider.java
@@ -25,7 +25,6 @@
import java.util.logging.Logger;
import org.checkerframework.checker.nullness.qual.NonNull;
import software.amazon.jdbc.AwsWrapperProperty;
-import software.amazon.jdbc.HostListProviderService;
import software.amazon.jdbc.HostRole;
import software.amazon.jdbc.HostSpec;
import software.amazon.jdbc.util.ConnectionUrlParser;
@@ -36,7 +35,6 @@ public class ConnectionStringHostListProvider implements StaticHostListProvider
private static final Logger LOGGER = Logger.getLogger(ConnectionStringHostListProvider.class.getName());
final List hostList = new ArrayList<>();
- Properties properties;
private boolean isInitialized = false;
private final boolean isSingleWriterConnectionString;
private final ConnectionUrlParser connectionUrlParser;
@@ -75,11 +73,12 @@ private void init() throws SQLException {
}
this.hostList.addAll(
this.connectionUrlParser.getHostsFromConnectionUrl(this.initialUrl, this.isSingleWriterConnectionString,
- () -> this.hostListProviderService.getHostSpecBuilder()));
+ this.hostListProviderService::getHostSpecBuilder));
if (this.hostList.isEmpty()) {
throw new SQLException(Messages.get("ConnectionStringHostListProvider.parsedListEmpty",
new Object[] {this.initialUrl}));
}
+
this.hostListProviderService.setInitialConnectionHostSpec(this.hostList.get(0));
this.isInitialized = true;
}
diff --git a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/DynamicHostListProvider.java b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/DynamicHostListProvider.java
index 451c047f3..09d321c41 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/DynamicHostListProvider.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/DynamicHostListProvider.java
@@ -16,9 +16,7 @@
package software.amazon.jdbc.hostlistprovider;
-import software.amazon.jdbc.HostListProvider;
-
-// A marker interface for providers that can fetch a host list, and it changes depending on database status
-// A good example of such provider would be DB cluster provider (Aurora DB clusters, patroni DB clusters, etc.)
-// where cluster topology (nodes, their roles, their statuses) changes over time.
+// A marker interface for providers that can fetch a host list reflecting the current database topology.
+// Examples include providers for Aurora or Multi-AZ clusters, where the cluster topology, status, and instance roles
+// change over time.
public interface DynamicHostListProvider extends HostListProvider { }
diff --git a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/GlobalAuroraHostListProvider.java b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/GlobalAuroraHostListProvider.java
new file mode 100644
index 000000000..682ebb31f
--- /dev/null
+++ b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/GlobalAuroraHostListProvider.java
@@ -0,0 +1,73 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package software.amazon.jdbc.hostlistprovider;
+
+import java.sql.Connection;
+import java.sql.SQLException;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.logging.Logger;
+import software.amazon.jdbc.AwsWrapperProperty;
+import software.amazon.jdbc.HostSpec;
+import software.amazon.jdbc.PropertyDefinition;
+import software.amazon.jdbc.util.FullServicesContainer;
+import software.amazon.jdbc.util.RdsUtils;
+
+public class GlobalAuroraHostListProvider extends RdsHostListProvider {
+
+ public static final AwsWrapperProperty GLOBAL_CLUSTER_INSTANCE_HOST_PATTERNS =
+ new AwsWrapperProperty(
+ "globalClusterInstanceHostPatterns",
+ null,
+ "Comma-separated list of the cluster instance DNS patterns that will be used to "
+            + "build complete instance endpoints. "
+ + "A \"?\" character in these patterns should be used as a placeholder for cluster instance names. "
+ + "This parameter is required for Global Aurora Databases. "
+ + "Each region in the Global Aurora Database should be specified in the list.");
+
+ protected final RdsUtils rdsUtils = new RdsUtils();
+ protected final GlobalAuroraTopologyUtils topologyUtils;
+
+ protected Map instanceTemplatesByRegion;
+
+ static {
+ PropertyDefinition.registerPluginProperties(GlobalAuroraHostListProvider.class);
+ }
+
+ public GlobalAuroraHostListProvider(
+ GlobalAuroraTopologyUtils topologyUtils, Properties properties, String originalUrl,
+ FullServicesContainer servicesContainer) {
+ super(topologyUtils, properties, originalUrl, servicesContainer);
+ this.topologyUtils = topologyUtils;
+ }
+
+ @Override
+ protected void initSettings() throws SQLException {
+ super.initSettings();
+
+ String instanceTemplates = GLOBAL_CLUSTER_INSTANCE_HOST_PATTERNS.getString(properties);
+ this.instanceTemplatesByRegion =
+ this.topologyUtils.parseInstanceTemplates(instanceTemplates, this::validateHostPatternSetting);
+ }
+
+ @Override
+ protected List queryForTopology(final Connection conn) throws SQLException {
+ init();
+ return this.topologyUtils.queryForTopology(conn, this.initialHostSpec, this.instanceTemplatesByRegion);
+ }
+}
diff --git a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/GlobalAuroraTopologyUtils.java b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/GlobalAuroraTopologyUtils.java
new file mode 100644
index 000000000..4a5d5eeea
--- /dev/null
+++ b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/GlobalAuroraTopologyUtils.java
@@ -0,0 +1,152 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package software.amazon.jdbc.hostlistprovider;
+
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.SQLSyntaxErrorException;
+import java.sql.Statement;
+import java.sql.Timestamp;
+import java.time.Instant;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.function.Consumer;
+import java.util.logging.Logger;
+import java.util.stream.Collectors;
+import org.checkerframework.checker.nullness.qual.Nullable;
+import software.amazon.jdbc.HostSpec;
+import software.amazon.jdbc.HostSpecBuilder;
+import software.amazon.jdbc.dialect.GlobalAuroraTopologyDialect;
+import software.amazon.jdbc.util.ConnectionUrlParser;
+import software.amazon.jdbc.util.LogUtils;
+import software.amazon.jdbc.util.Messages;
+import software.amazon.jdbc.util.Pair;
+import software.amazon.jdbc.util.StringUtils;
+
+public class GlobalAuroraTopologyUtils extends AuroraTopologyUtils {
+ private static final Logger LOGGER = Logger.getLogger(GlobalAuroraTopologyUtils.class.getName());
+
+ protected final GlobalAuroraTopologyDialect dialect;
+
+ public GlobalAuroraTopologyUtils(GlobalAuroraTopologyDialect dialect, HostSpecBuilder hostSpecBuilder) {
+ super(dialect, hostSpecBuilder);
+ this.dialect = dialect;
+ }
+
+ public @Nullable List queryForTopology(
+ Connection conn, HostSpec initialHostSpec, Map instanceTemplatesByRegion)
+ throws SQLException {
+ int originalNetworkTimeout = setNetworkTimeout(conn);
+ try (final Statement stmt = conn.createStatement();
+ final ResultSet rs = stmt.executeQuery(this.dialect.getTopologyQuery())) {
+ if (rs.getMetaData().getColumnCount() == 0) {
+ // We expect at least 4 columns. Note that the server may return 0 columns if failover has occurred.
+ LOGGER.finest(Messages.get("TopologyUtils.unexpectedTopologyQueryColumnCount"));
+ return null;
+ }
+
+ return this.verifyWriter(this.getHosts(rs, initialHostSpec, instanceTemplatesByRegion));
+ } catch (final SQLSyntaxErrorException e) {
+ throw new SQLException(Messages.get("TopologyUtils.invalidQuery"), e);
+ } finally {
+ if (originalNetworkTimeout == 0 && !conn.isClosed()) {
+ conn.setNetworkTimeout(networkTimeoutExecutor, originalNetworkTimeout);
+ }
+ }
+ }
+
+ protected @Nullable List getHosts(
+ ResultSet rs, HostSpec initialHostSpec, Map instanceTemplatesByRegion) throws SQLException {
+ // Data in the result set is ordered by last update time, so the latest records are last.
+ // We add hosts to a map to ensure newer records are not overwritten by older ones.
+ Map hostsMap = new HashMap<>();
+ while (rs.next()) {
+ try {
+ HostSpec host = createHost(rs, initialHostSpec, instanceTemplatesByRegion);
+ hostsMap.put(host.getHost(), host);
+ } catch (Exception e) {
+ LOGGER.finest(Messages.get("TopologyUtils.errorProcessingQueryResults", new Object[] {e.getMessage()}));
+ return null;
+ }
+ }
+
+ return new ArrayList<>(hostsMap.values());
+ }
+
+ protected HostSpec createHost(
+ ResultSet rs, HostSpec initialHostSpec, Map instanceTemplatesByRegion)
+ throws SQLException {
+ // According to the topology query the result set should contain 4 columns:
+ // instance ID, 1/0 (writer/reader), node lag in time (msec), AWS region.
+ String hostName = rs.getString(1);
+ final boolean isWriter = rs.getBoolean(2);
+ final float nodeLag = rs.getFloat(3);
+ final String awsRegion = rs.getString(4);
+
+    // Calculate weight based on node lag in time.
+ final long weight = Math.round(nodeLag) * 100L;
+
+ final HostSpec instanceTemplate = instanceTemplatesByRegion.get(awsRegion);
+ if (instanceTemplate == null) {
+ throw new SQLException(Messages.get(
+ "GlobalAuroraTopologyMonitor.cannotFindRegionTemplate", new Object[] {awsRegion}));
+ }
+
+ return createHost(
+ hostName, hostName, isWriter, weight, Timestamp.from(Instant.now()), initialHostSpec, instanceTemplate);
+ }
+
+ public @Nullable String getRegion(String instanceId, Connection conn) throws SQLException {
+ try (final PreparedStatement stmt = conn.prepareStatement(this.dialect.getRegionByInstanceIdQuery())) {
+ stmt.setString(1, instanceId);
+ try (final ResultSet rs = stmt.executeQuery()) {
+ if (rs.next()) {
+ String awsRegion = rs.getString(1);
+ return StringUtils.isNullOrEmpty(awsRegion) ? null : awsRegion;
+ }
+ }
+ }
+
+ return null;
+ }
+
+ public Map parseInstanceTemplates(String instanceTemplatesString, Consumer hostValidator)
+ throws SQLException {
+ if (StringUtils.isNullOrEmpty(instanceTemplatesString)) {
+ throw new SQLException(Messages.get("GlobalAuroraTopologyUtils.globalClusterInstanceHostPatternsRequired"));
+ }
+
+ Map instanceTemplates = Arrays.stream(instanceTemplatesString.split(","))
+ .map(x -> ConnectionUrlParser.parseHostPortPairWithRegionPrefix(x.trim(), () -> hostSpecBuilder))
+ .collect(Collectors.toMap(
+ Pair::getValue1,
+ v -> {
+ hostValidator.accept(v.getValue2().getHost());
+ return v.getValue2();
+ }));
+ LOGGER.finest(Messages.get(
+ "GlobalAuroraTopologyUtils.detectedGdbPatterns",
+ new Object[] {LogUtils.toLogString(instanceTemplates)}));
+
+ return instanceTemplates;
+ }
+}
diff --git a/wrapper/src/main/java/software/amazon/jdbc/HostListProvider.java b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/HostListProvider.java
similarity index 89%
rename from wrapper/src/main/java/software/amazon/jdbc/HostListProvider.java
rename to wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/HostListProvider.java
index 0aa93714a..206a35415 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/HostListProvider.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/HostListProvider.java
@@ -14,11 +14,14 @@
* limitations under the License.
*/
-package software.amazon.jdbc;
+package software.amazon.jdbc.hostlistprovider;
import java.sql.Connection;
import java.sql.SQLException;
import java.util.List;
+import org.checkerframework.checker.nullness.qual.Nullable;
+import software.amazon.jdbc.HostRole;
+import software.amazon.jdbc.HostSpec;
public interface HostListProvider {
@@ -40,6 +43,7 @@ public interface HostListProvider {
*/
HostRole getHostRole(Connection connection) throws SQLException;
+ @Nullable
HostSpec identifyConnection(Connection connection) throws SQLException;
String getClusterId() throws UnsupportedOperationException, SQLException;
diff --git a/wrapper/src/main/java/software/amazon/jdbc/HostListProviderService.java b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/HostListProviderService.java
similarity index 89%
rename from wrapper/src/main/java/software/amazon/jdbc/HostListProviderService.java
rename to wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/HostListProviderService.java
index b2f6b5353..0413cb423 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/HostListProviderService.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/HostListProviderService.java
@@ -14,9 +14,11 @@
* limitations under the License.
*/
-package software.amazon.jdbc;
+package software.amazon.jdbc.hostlistprovider;
import java.sql.Connection;
+import software.amazon.jdbc.HostSpec;
+import software.amazon.jdbc.HostSpecBuilder;
import software.amazon.jdbc.dialect.Dialect;
public interface HostListProviderService {
diff --git a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/MultiAzTopologyUtils.java b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/MultiAzTopologyUtils.java
new file mode 100644
index 000000000..1e1045002
--- /dev/null
+++ b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/MultiAzTopologyUtils.java
@@ -0,0 +1,117 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package software.amazon.jdbc.hostlistprovider;
+
+import java.sql.Connection;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.sql.Timestamp;
+import java.time.Instant;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.logging.Logger;
+import org.checkerframework.checker.nullness.qual.Nullable;
+import software.amazon.jdbc.HostSpec;
+import software.amazon.jdbc.HostSpecBuilder;
+import software.amazon.jdbc.dialect.MultiAzClusterDialect;
+import software.amazon.jdbc.util.Messages;
+import software.amazon.jdbc.util.StringUtils;
+
+public class MultiAzTopologyUtils extends TopologyUtils {
+ private static final Logger LOGGER = Logger.getLogger(MultiAzTopologyUtils.class.getName());
+
+ protected final MultiAzClusterDialect dialect;
+
+ public MultiAzTopologyUtils(MultiAzClusterDialect dialect, HostSpecBuilder hostSpecBuilder) {
+ super(dialect, hostSpecBuilder);
+ this.dialect = dialect;
+ }
+
+ @Override
+ protected @Nullable List getHosts(
+ Connection conn, ResultSet rs, HostSpec initialHostSpec, HostSpec instanceTemplate)
+ throws SQLException {
+ String writerId = this.getWriterId(conn);
+
+ // Data in the result set is ordered by last update time, so the latest records are last.
+ // We add hosts to a map to ensure newer records are not overwritten by older ones.
+ Map hostsMap = new HashMap<>();
+ while (rs.next()) {
+ try {
+ HostSpec host = createHost(rs, initialHostSpec, instanceTemplate, writerId);
+ hostsMap.put(host.getHost(), host);
+ } catch (Exception e) {
+ LOGGER.finest(Messages.get("TopologyUtils.errorProcessingQueryResults", new Object[]{e.getMessage()}));
+ return null;
+ }
+ }
+
+ return new ArrayList<>(hostsMap.values());
+ }
+
+ @Override
+ public boolean isWriterInstance(final Connection connection) throws SQLException {
+ try (final Statement stmt = connection.createStatement()) {
+
+ try (final ResultSet rs = stmt.executeQuery(this.dialect.getWriterIdQuery())) {
+ // When connected to a writer, the result is empty, otherwise it contains a single row.
+ return !rs.next();
+ }
+ }
+ }
+
+ protected @Nullable String getWriterId(Connection connection) throws SQLException {
+ try (final Statement stmt = connection.createStatement()) {
+ try (final ResultSet rs = stmt.executeQuery(this.dialect.getWriterIdQuery())) {
+ if (rs.next()) {
+ String writerId = rs.getString(this.dialect.getWriterIdColumnName());
+ if (!StringUtils.isNullOrEmpty(writerId)) {
+ return writerId;
+ }
+ }
+ }
+
+ // The writer ID is only returned when connected to a reader, so if the query does not return a value, it
+ // means we are connected to a writer
+ try (final ResultSet rs = stmt.executeQuery(this.dialect.getInstanceIdQuery())) {
+ if (rs.next()) {
+ return rs.getString(1);
+ }
+ }
+ }
+
+ return null;
+ }
+
+ protected HostSpec createHost(
+ final ResultSet rs,
+ final HostSpec initialHostSpec,
+ final HostSpec instanceTemplate,
+ final @Nullable String writerId) throws SQLException {
+
+ String endpoint = rs.getString("endpoint"); // "instance-name.XYZ.us-west-2.rds.amazonaws.com"
+ String instanceName = endpoint.substring(0, endpoint.indexOf(".")); // "instance-name"
+ String hostId = rs.getString("id"); // "1034958454"
+ final boolean isWriter = hostId.equals(writerId);
+
+ return createHost(
+ hostId, instanceName, isWriter, 0, Timestamp.from(Instant.now()), initialHostSpec, instanceTemplate);
+ }
+}
diff --git a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/RdsHostListProvider.java b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/RdsHostListProvider.java
index 738eebcc3..4bd523e39 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/RdsHostListProvider.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/RdsHostListProvider.java
@@ -17,45 +17,29 @@
package software.amazon.jdbc.hostlistprovider;
import java.sql.Connection;
-import java.sql.ResultSet;
import java.sql.SQLException;
-import java.sql.SQLSyntaxErrorException;
-import java.sql.Statement;
-import java.sql.Timestamp;
-import java.time.Instant;
import java.util.ArrayList;
import java.util.Collections;
-import java.util.Comparator;
-import java.util.HashMap;
import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
import java.util.Objects;
import java.util.Properties;
-import java.util.UUID;
-import java.util.concurrent.Executor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantLock;
import java.util.logging.Logger;
-import java.util.stream.Collectors;
-import org.checkerframework.checker.nullness.qual.NonNull;
import org.checkerframework.checker.nullness.qual.Nullable;
import software.amazon.jdbc.AwsWrapperProperty;
-import software.amazon.jdbc.HostListProviderService;
import software.amazon.jdbc.HostRole;
import software.amazon.jdbc.HostSpec;
import software.amazon.jdbc.HostSpecBuilder;
import software.amazon.jdbc.PropertyDefinition;
-import software.amazon.jdbc.hostavailability.HostAvailability;
import software.amazon.jdbc.util.ConnectionUrlParser;
import software.amazon.jdbc.util.FullServicesContainer;
+import software.amazon.jdbc.util.LogUtils;
import software.amazon.jdbc.util.Messages;
+import software.amazon.jdbc.util.Pair;
import software.amazon.jdbc.util.RdsUrlType;
import software.amazon.jdbc.util.RdsUtils;
-import software.amazon.jdbc.util.StringUtils;
-import software.amazon.jdbc.util.SynchronousExecutor;
import software.amazon.jdbc.util.Utils;
-import software.amazon.jdbc.util.storage.CacheMap;
public class RdsHostListProvider implements DynamicHostListProvider {
@@ -70,10 +54,10 @@ public class RdsHostListProvider implements DynamicHostListProvider {
+ "after which it will be updated during the next interaction with the connection.");
public static final AwsWrapperProperty CLUSTER_ID = new AwsWrapperProperty(
- "clusterId", "",
+ "clusterId", "1",
"A unique identifier for the cluster. "
+ "Connections with the same cluster id share a cluster topology cache. "
- + "If unspecified, a cluster id is automatically created for AWS RDS clusters.");
+          + "If unspecified, the cluster id defaults to '1'.");
public static final AwsWrapperProperty CLUSTER_INSTANCE_HOST_PATTERN =
new AwsWrapperProperty(
@@ -84,20 +68,17 @@ public class RdsHostListProvider implements DynamicHostListProvider {
+ "This pattern is required to be specified for IP address or custom domain connections to AWS RDS "
+ "clusters. Otherwise, if unspecified, the pattern will be automatically created for AWS RDS clusters.");
- protected static final Executor networkTimeoutExecutor = new SynchronousExecutor();
protected static final RdsUtils rdsHelper = new RdsUtils();
protected static final ConnectionUrlParser connectionUrlParser = new ConnectionUrlParser();
protected static final int defaultTopologyQueryTimeoutMs = 5000;
- protected static final long suggestedClusterIdRefreshRateNano = TimeUnit.MINUTES.toNanos(10);
- protected static final CacheMap suggestedPrimaryClusterIdCache = new CacheMap<>();
- protected static final CacheMap primaryClusterIdCache = new CacheMap<>();
+ protected final ReentrantLock lock = new ReentrantLock();
+ protected final Properties properties;
+ protected final String originalUrl;
protected final FullServicesContainer servicesContainer;
protected final HostListProviderService hostListProviderService;
- protected final String originalUrl;
- protected final String topologyQuery;
- protected final String nodeIdQuery;
- protected final String isReaderQuery;
+ protected final TopologyUtils topologyUtils;
+
protected RdsUrlType rdsUrlType;
protected long refreshRateNano = CLUSTER_TOPOLOGY_REFRESH_RATE_MS.defaultValue != null
? TimeUnit.MILLISECONDS.toNanos(Long.parseLong(CLUSTER_TOPOLOGY_REFRESH_RATE_MS.defaultValue))
@@ -105,37 +86,25 @@ public class RdsHostListProvider implements DynamicHostListProvider {
protected List hostList = new ArrayList<>();
protected List initialHostList = new ArrayList<>();
protected HostSpec initialHostSpec;
-
- protected final ReentrantLock lock = new ReentrantLock();
protected String clusterId;
- protected HostSpec clusterInstanceTemplate;
-
- // A primary clusterId is a clusterId that is based off of a cluster endpoint URL
- // (rather than a GUID or a value provided by the user).
- protected boolean isPrimaryClusterId;
+ protected HostSpec instanceTemplate;
protected volatile boolean isInitialized = false;
- protected Properties properties;
-
static {
PropertyDefinition.registerPluginProperties(RdsHostListProvider.class);
}
public RdsHostListProvider(
+ final TopologyUtils topologyUtils,
final Properties properties,
final String originalUrl,
- final FullServicesContainer servicesContainer,
- final String topologyQuery,
- final String nodeIdQuery,
- final String isReaderQuery) {
+ final FullServicesContainer servicesContainer) {
+ this.topologyUtils = topologyUtils;
this.properties = properties;
this.originalUrl = originalUrl;
this.servicesContainer = servicesContainer;
this.hostListProviderService = servicesContainer.getHostListProviderService();
- this.topologyQuery = topologyQuery;
- this.nodeIdQuery = nodeIdQuery;
- this.isReaderQuery = isReaderQuery;
}
protected void init() throws SQLException {
@@ -148,81 +117,54 @@ protected void init() throws SQLException {
if (this.isInitialized) {
return;
}
-
- // initial topology is based on connection string
- this.initialHostList =
- connectionUrlParser.getHostsFromConnectionUrl(this.originalUrl, false,
- this.hostListProviderService::getHostSpecBuilder);
- if (this.initialHostList == null || this.initialHostList.isEmpty()) {
- throw new SQLException(Messages.get("RdsHostListProvider.parsedListEmpty",
- new Object[] {this.originalUrl}));
- }
- this.initialHostSpec = this.initialHostList.get(0);
- this.hostListProviderService.setInitialConnectionHostSpec(this.initialHostSpec);
-
- this.clusterId = UUID.randomUUID().toString();
- this.isPrimaryClusterId = false;
- this.refreshRateNano =
- TimeUnit.MILLISECONDS.toNanos(CLUSTER_TOPOLOGY_REFRESH_RATE_MS.getInteger(properties));
-
- HostSpecBuilder hostSpecBuilder = this.hostListProviderService.getHostSpecBuilder();
- String clusterInstancePattern = CLUSTER_INSTANCE_HOST_PATTERN.getString(this.properties);
- if (clusterInstancePattern != null) {
- this.clusterInstanceTemplate =
- ConnectionUrlParser.parseHostPortPair(clusterInstancePattern, () -> hostSpecBuilder);
- } else {
- this.clusterInstanceTemplate =
- hostSpecBuilder
- .host(rdsHelper.getRdsInstanceHostPattern(this.initialHostSpec.getHost()))
- .hostId(this.initialHostSpec.getHostId())
- .port(this.initialHostSpec.getPort())
- .build();
- }
-
- validateHostPatternSetting(this.clusterInstanceTemplate.getHost());
-
- this.rdsUrlType = rdsHelper.identifyRdsType(this.initialHostSpec.getHost());
-
- final String clusterIdSetting = CLUSTER_ID.getString(this.properties);
- if (!StringUtils.isNullOrEmpty(clusterIdSetting)) {
- this.clusterId = clusterIdSetting;
- } else if (rdsUrlType == RdsUrlType.RDS_PROXY) {
- // Each proxy is associated with a single cluster, so it's safe to use RDS Proxy Url as cluster
- // identification
- this.clusterId = this.initialHostSpec.getUrl();
- } else if (rdsUrlType.isRds()) {
- final ClusterSuggestedResult clusterSuggestedResult =
- getSuggestedClusterId(this.initialHostSpec.getHostAndPort());
- if (clusterSuggestedResult != null && !StringUtils.isNullOrEmpty(clusterSuggestedResult.clusterId)) {
- this.clusterId = clusterSuggestedResult.clusterId;
- this.isPrimaryClusterId = clusterSuggestedResult.isPrimaryClusterId;
- } else {
- final String clusterRdsHostUrl =
- rdsHelper.getRdsClusterHostUrl(this.initialHostSpec.getHost());
- if (!StringUtils.isNullOrEmpty(clusterRdsHostUrl)) {
- this.clusterId = this.clusterInstanceTemplate.isPortSpecified()
- ? String.format("%s:%s", clusterRdsHostUrl, this.clusterInstanceTemplate.getPort())
- : clusterRdsHostUrl;
- this.isPrimaryClusterId = true;
- primaryClusterIdCache.put(this.clusterId, true, suggestedClusterIdRefreshRateNano);
- }
- }
- }
-
+ this.initSettings();
this.isInitialized = true;
} finally {
lock.unlock();
}
}
+ protected void initSettings() throws SQLException {
+ // The initial topology is based on the connection string.
+ this.initialHostList =
+ connectionUrlParser.getHostsFromConnectionUrl(this.originalUrl, false,
+ this.hostListProviderService::getHostSpecBuilder);
+ if (this.initialHostList == null || this.initialHostList.isEmpty()) {
+ throw new SQLException(Messages.get("RdsHostListProvider.parsedListEmpty", new Object[] {this.originalUrl}));
+ }
+ this.initialHostSpec = this.initialHostList.get(0);
+ this.hostListProviderService.setInitialConnectionHostSpec(this.initialHostSpec);
+
+ this.clusterId = CLUSTER_ID.getString(this.properties);
+ this.refreshRateNano =
+ TimeUnit.MILLISECONDS.toNanos(CLUSTER_TOPOLOGY_REFRESH_RATE_MS.getInteger(properties));
+
+ HostSpecBuilder hostSpecBuilder = this.hostListProviderService.getHostSpecBuilder();
+ String clusterInstancePattern = CLUSTER_INSTANCE_HOST_PATTERN.getString(this.properties);
+ if (clusterInstancePattern != null) {
+ this.instanceTemplate =
+ ConnectionUrlParser.parseHostPortPair(clusterInstancePattern, () -> hostSpecBuilder);
+ } else {
+ this.instanceTemplate =
+ hostSpecBuilder
+ .host(rdsHelper.getRdsInstanceHostPattern(this.initialHostSpec.getHost()))
+ .hostId(this.initialHostSpec.getHostId())
+ .port(this.initialHostSpec.getPort())
+ .build();
+ }
+
+ validateHostPatternSetting(this.instanceTemplate.getHost());
+ this.rdsUrlType = rdsHelper.identifyRdsType(this.initialHostSpec.getHost());
+ }
+
/**
* Get cluster topology. It may require an extra call to database to fetch the latest topology. A
* cached copy of topology is returned if it's not yet outdated (controlled by {@link
* #refreshRateNano}).
*
- * @param conn A connection to database to fetch the latest topology, if needed.
+ * @param conn A connection to database to fetch the latest topology, if needed.
* @param forceUpdate If true, it forces a service to ignore cached copy of topology and to fetch
- * a fresh one.
+ * a fresh one.
* @return a list of hosts that describes cluster topology. A writer is always at position 0.
* Returns an empty list if isn't available or is invalid (doesn't contain a writer).
* @throws SQLException if errors occurred while retrieving the topology.
@@ -230,43 +172,18 @@ protected void init() throws SQLException {
protected FetchTopologyResult getTopology(final Connection conn, final boolean forceUpdate) throws SQLException {
init();
- final String suggestedPrimaryClusterId = suggestedPrimaryClusterIdCache.get(this.clusterId);
-
- // Change clusterId by accepting a suggested one
- if (!StringUtils.isNullOrEmpty(suggestedPrimaryClusterId)
- && !this.clusterId.equals(suggestedPrimaryClusterId)) {
-
- final String oldClusterId = this.clusterId;
- this.clusterId = suggestedPrimaryClusterId;
- this.isPrimaryClusterId = true;
- this.clusterIdChanged(oldClusterId);
- }
-
final List storedHosts = this.getStoredTopology();
-
- // This clusterId is a primary one and is about to create a new entry in the cache.
- // When a primary entry is created it needs to be suggested for other (non-primary) entries.
- // Remember a flag to do suggestion after cache is updated.
- final boolean needToSuggest = storedHosts == null && this.isPrimaryClusterId;
-
if (storedHosts == null || forceUpdate) {
-
- // need to re-fetch topology
-
+ // We need to re-fetch topology.
if (conn == null) {
- // can't fetch the latest topology since no connection
- // return original hosts parsed from connection string
+ // We cannot fetch the latest topology since we do not have access to a connection, so we return the original
+ // hosts parsed from the connection string.
return new FetchTopologyResult(false, this.initialHostList);
}
- // fetch topology from the DB
- final List hosts = queryForTopology(conn);
-
+ final List hosts = this.queryForTopology(conn);
if (!Utils.isNullOrEmpty(hosts)) {
this.servicesContainer.getStorageService().set(this.clusterId, new Topology(hosts));
- if (needToSuggest) {
- this.suggestPrimaryCluster(hosts);
- }
return new FetchTopologyResult(false, hosts);
}
}
@@ -274,78 +191,11 @@ protected FetchTopologyResult getTopology(final Connection conn, final boolean f
if (storedHosts == null) {
return new FetchTopologyResult(false, this.initialHostList);
} else {
- // use cached data
+ // Return the cached data.
return new FetchTopologyResult(true, storedHosts);
}
}
- protected void clusterIdChanged(final String oldClusterId) throws SQLException {
- // do nothing
- }
-
- protected ClusterSuggestedResult getSuggestedClusterId(final String url) {
- Map entries = this.servicesContainer.getStorageService().getEntries(Topology.class);
- if (entries == null) {
- return null;
- }
-
- for (final Entry entry : entries.entrySet()) {
- final String key = entry.getKey(); // clusterId
- final List hosts = entry.getValue().getHosts();
- final boolean isPrimaryCluster = primaryClusterIdCache.get(key, false,
- suggestedClusterIdRefreshRateNano);
- if (key.equals(url)) {
- return new ClusterSuggestedResult(url, isPrimaryCluster);
- }
- if (hosts == null) {
- continue;
- }
- for (final HostSpec host : hosts) {
- if (host.getHostAndPort().equals(url)) {
- LOGGER.finest(() -> Messages.get("RdsHostListProvider.suggestedClusterId",
- new Object[] {key, url}));
- return new ClusterSuggestedResult(key, isPrimaryCluster);
- }
- }
- }
- return null;
- }
-
- protected void suggestPrimaryCluster(final @NonNull List primaryClusterHosts) {
- if (Utils.isNullOrEmpty(primaryClusterHosts)) {
- return;
- }
-
- Map entries = this.servicesContainer.getStorageService().getEntries(Topology.class);
- if (entries == null) {
- return;
- }
-
- for (final Entry entry : entries.entrySet()) {
- final String clusterId = entry.getKey();
- final List clusterHosts = entry.getValue().getHosts();
- final boolean isPrimaryCluster = primaryClusterIdCache.get(clusterId, false,
- suggestedClusterIdRefreshRateNano);
- final String suggestedPrimaryClusterId = suggestedPrimaryClusterIdCache.get(clusterId);
- if (isPrimaryCluster
- || !StringUtils.isNullOrEmpty(suggestedPrimaryClusterId)
- || Utils.isNullOrEmpty(clusterHosts)) {
- continue;
- }
-
- // The entry is non-primary
- for (final HostSpec host : clusterHosts) {
- if (Utils.containsHostAndPort(primaryClusterHosts, host.getHostAndPort())) {
- // Instance on this cluster matches with one of the instance on primary cluster
- // Suggest the primary clusterId to this entry
- suggestedPrimaryClusterIdCache.put(clusterId, this.clusterId,
- suggestedClusterIdRefreshRateNano);
- break;
- }
- }
- }
- }
-
/**
* Obtain a cluster topology from database.
*
@@ -354,142 +204,8 @@ protected void suggestPrimaryCluster(final @NonNull List primaryCluste
* @throws SQLException if errors occurred while retrieving the topology.
*/
protected List queryForTopology(final Connection conn) throws SQLException {
- int networkTimeout = -1;
- try {
- networkTimeout = conn.getNetworkTimeout();
- // The topology query is not monitored by the EFM plugin, so it needs a socket timeout
- if (networkTimeout == 0) {
- conn.setNetworkTimeout(networkTimeoutExecutor, defaultTopologyQueryTimeoutMs);
- }
- } catch (SQLException e) {
- LOGGER.warning(() -> Messages.get("RdsHostListProvider.errorGettingNetworkTimeout",
- new Object[] {e.getMessage()}));
- }
-
- try (final Statement stmt = conn.createStatement();
- final ResultSet resultSet = stmt.executeQuery(this.topologyQuery)) {
- return processQueryResults(resultSet);
- } catch (final SQLSyntaxErrorException e) {
- throw new SQLException(Messages.get("RdsHostListProvider.invalidQuery"), e);
- } finally {
- if (networkTimeout == 0 && !conn.isClosed()) {
- conn.setNetworkTimeout(networkTimeoutExecutor, networkTimeout);
- }
- }
- }
-
- /**
- * Form a list of hosts from the results of the topology query.
- *
- * @param resultSet The results of the topology query
- * @return a list of {@link HostSpec} objects representing
- * the topology that was returned by the
- * topology query. The list will be empty if the topology query returned an invalid topology
- * (no writer instance).
- */
- private List processQueryResults(final ResultSet resultSet) throws SQLException {
-
- final HashMap hostMap = new HashMap<>();
-
- // Data is result set is ordered by last updated time so the latest records go last.
- // When adding hosts to a map, the newer records replace the older ones.
- while (resultSet.next()) {
- final HostSpec host = createHost(resultSet);
- hostMap.put(host.getHost(), host);
- }
-
- final List hosts = new ArrayList<>();
- final List writers = new ArrayList<>();
-
- for (final HostSpec host : hostMap.values()) {
- if (host.getRole() != HostRole.WRITER) {
- hosts.add(host);
- } else {
- writers.add(host);
- }
- }
-
- int writerCount = writers.size();
-
- if (writerCount == 0) {
- LOGGER.severe(
- () -> Messages.get(
- "RdsHostListProvider.invalidTopology"));
- hosts.clear();
- } else if (writerCount == 1) {
- hosts.add(writers.get(0));
- } else {
- // Take the latest updated writer node as the current writer. All others will be ignored.
- List sortedWriters = writers.stream()
- .sorted(Comparator.comparing(HostSpec::getLastUpdateTime, Comparator.nullsLast(Comparator.reverseOrder())))
- .collect(Collectors.toList());
- hosts.add(sortedWriters.get(0));
- }
-
- return hosts;
- }
-
- /**
- * Creates an instance of HostSpec which captures details about a connectable host.
- *
- * @param resultSet the result set from querying the topology
- * @return a {@link HostSpec} instance for a specific instance from the cluster
- * @throws SQLException If unable to retrieve the hostName from the result set
- */
- protected HostSpec createHost(final ResultSet resultSet) throws SQLException {
- // According to the topology query the result set
- // should contain 4 columns: node ID, 1/0 (writer/reader), CPU utilization, node lag in time.
- String hostName = resultSet.getString(1);
- final boolean isWriter = resultSet.getBoolean(2);
- final double cpuUtilization = resultSet.getDouble(3);
- final double nodeLag = resultSet.getDouble(4);
- Timestamp lastUpdateTime;
- try {
- lastUpdateTime = resultSet.getTimestamp(5);
- } catch (Exception e) {
- lastUpdateTime = Timestamp.from(Instant.now());
- }
-
- // Calculate weight based on node lag in time and CPU utilization.
- final long weight = Math.round(nodeLag) * 100L + Math.round(cpuUtilization);
-
- return createHost(hostName, isWriter, weight, lastUpdateTime);
- }
-
- protected HostSpec createHost(
- String host,
- final boolean isWriter,
- final long weight,
- final Timestamp lastUpdateTime) {
-
- host = host == null ? "?" : host;
- final String endpoint = getHostEndpoint(host);
- final int port = this.clusterInstanceTemplate.isPortSpecified()
- ? this.clusterInstanceTemplate.getPort()
- : this.initialHostSpec.getPort();
-
- final HostSpec hostSpec = this.hostListProviderService.getHostSpecBuilder()
- .host(endpoint)
- .port(port)
- .role(isWriter ? HostRole.WRITER : HostRole.READER)
- .availability(HostAvailability.AVAILABLE)
- .weight(weight)
- .lastUpdateTime(lastUpdateTime)
- .build();
- hostSpec.addAlias(host);
- hostSpec.setHostId(host);
- return hostSpec;
- }
-
- /**
- * Build a host dns endpoint based on host/node name.
- *
- * @param nodeName A host name.
- * @return Host dns endpoint
- */
- protected String getHostEndpoint(final String nodeName) {
- final String host = this.clusterInstanceTemplate.getHost();
- return host.replace("?", nodeName);
+ init();
+ return this.topologyUtils.queryForTopology(conn, this.initialHostSpec, this.instanceTemplate);
}
/**
@@ -503,14 +219,6 @@ protected String getHostEndpoint(final String nodeName) {
return topology == null ? null : topology.getHosts();
}
- /**
- * Clear topology cache for all clusters.
- */
- public static void clearAll() {
- primaryClusterIdCache.clear();
- suggestedPrimaryClusterIdCache.clear();
- }
-
/**
* Clear topology cache for the current cluster.
*/
@@ -531,7 +239,7 @@ public List refresh(final Connection connection) throws SQLException {
: this.hostListProviderService.getCurrentConnection();
final FetchTopologyResult results = getTopology(currentConnection, false);
- LOGGER.finest(() -> Utils.logTopology(results.hosts, results.isCachedData ? "[From cache] Topology:" : null));
+ LOGGER.finest(() -> LogUtils.logTopology(results.hosts, results.isCachedData ? "[From cache] Topology:" : null));
this.hostList = results.hosts;
return Collections.unmodifiableList(hostList);
@@ -550,7 +258,7 @@ public List forceRefresh(final Connection connection) throws SQLExcept
: this.hostListProviderService.getCurrentConnection();
final FetchTopologyResult results = getTopology(currentConnection, true);
- LOGGER.finest(() -> Utils.logTopology(results.hosts));
+ LOGGER.finest(() -> LogUtils.logTopology(results.hosts));
this.hostList = results.hosts;
return Collections.unmodifiableList(this.hostList);
}
@@ -560,30 +268,22 @@ public RdsUrlType getRdsUrlType() throws SQLException {
return this.rdsUrlType;
}
- private void validateHostPatternSetting(final String hostPattern) {
+ protected void validateHostPatternSetting(final String hostPattern) {
if (!rdsHelper.isDnsPatternValid(hostPattern)) {
- // "Invalid value for the 'clusterInstanceHostPattern' configuration setting - the host
- // pattern must contain a '?'
- // character as a placeholder for the DB instance identifiers of the instances in the cluster"
final String message = Messages.get("RdsHostListProvider.invalidPattern");
LOGGER.severe(message);
throw new RuntimeException(message);
}
final RdsUrlType rdsUrlType = rdsHelper.identifyRdsType(hostPattern);
- if (rdsUrlType == RdsUrlType.RDS_PROXY) {
- // "An RDS Proxy url can't be used as the 'clusterInstanceHostPattern' configuration setting."
- final String message =
- Messages.get("RdsHostListProvider.clusterInstanceHostPatternNotSupportedForRDSProxy");
+ if (rdsUrlType == RdsUrlType.RDS_PROXY || rdsUrlType == RdsUrlType.RDS_PROXY_ENDPOINT) {
+ final String message = Messages.get("RdsHostListProvider.clusterInstanceHostPatternNotSupportedForRDSProxy");
LOGGER.severe(message);
throw new RuntimeException(message);
}
if (rdsUrlType == RdsUrlType.RDS_CUSTOM_CLUSTER) {
- // "An RDS Custom Cluster endpoint can't be used as the 'clusterInstanceHostPattern'
- // configuration setting."
- final String message =
- Messages.get("RdsHostListProvider.clusterInstanceHostPatternNotSupportedForRdsCustom");
+ final String message = Messages.get("RdsHostListProvider.clusterInstanceHostPatternNotSupportedForRdsCustom");
LOGGER.severe(message);
throw new RuntimeException(message);
}
@@ -602,64 +302,54 @@ public FetchTopologyResult(final boolean isCachedData, final List host
@Override
public HostRole getHostRole(Connection conn) throws SQLException {
- try (final Statement stmt = conn.createStatement();
- final ResultSet rs = stmt.executeQuery(this.isReaderQuery)) {
- if (rs.next()) {
- boolean isReader = rs.getBoolean(1);
- return isReader ? HostRole.READER : HostRole.WRITER;
- }
- } catch (SQLException e) {
- throw new SQLException(Messages.get("RdsHostListProvider.errorGettingHostRole"), e);
- }
-
- throw new SQLException(Messages.get("RdsHostListProvider.errorGettingHostRole"));
+ init();
+ return this.topologyUtils.getHostRole(conn);
}
@Override
- public HostSpec identifyConnection(Connection connection) throws SQLException {
- try (final Statement stmt = connection.createStatement();
- final ResultSet resultSet = stmt.executeQuery(this.nodeIdQuery)) {
- if (resultSet.next()) {
- final String instanceName = resultSet.getString(1);
+ public @Nullable HostSpec identifyConnection(Connection connection) throws SQLException {
+ init();
+ try {
+ Pair instanceIds = this.topologyUtils.getInstanceId(connection);
+ if (instanceIds == null) {
+ throw new SQLException(Messages.get("RdsHostListProvider.errorIdentifyConnection"));
+ }
- List topology = this.refresh(connection);
+ List topology = this.refresh(connection);
+ boolean isForcedRefresh = false;
+ if (topology == null) {
+ topology = this.forceRefresh(connection);
+ isForcedRefresh = true;
+ }
- boolean isForcedRefresh = false;
- if (topology == null) {
- topology = this.forceRefresh(connection);
- isForcedRefresh = true;
- }
+ if (topology == null) {
+ return null;
+ }
+ String instanceName = instanceIds.getValue2();
+ HostSpec foundHost = topology
+ .stream()
+ .filter(host -> Objects.equals(instanceName, host.getHostId()))
+ .findAny()
+ .orElse(null);
+
+ if (foundHost == null && !isForcedRefresh) {
+ topology = this.forceRefresh(connection);
if (topology == null) {
return null;
}
- HostSpec foundHost = topology
+ foundHost = topology
.stream()
.filter(host -> Objects.equals(instanceName, host.getHostId()))
.findAny()
.orElse(null);
-
- if (foundHost == null && !isForcedRefresh) {
- topology = this.forceRefresh(connection);
- if (topology == null) {
- return null;
- }
-
- foundHost = topology
- .stream()
- .filter(host -> Objects.equals(instanceName, host.getHostId()))
- .findAny()
- .orElse(null);
- }
-
- return foundHost;
}
+
+ return foundHost;
} catch (final SQLException e) {
throw new SQLException(Messages.get("RdsHostListProvider.errorIdentifyConnection"), e);
}
-
- throw new SQLException(Messages.get("RdsHostListProvider.errorIdentifyConnection"));
}
@Override
@@ -667,15 +357,4 @@ public String getClusterId() throws UnsupportedOperationException, SQLException
init();
return this.clusterId;
}
-
- public static class ClusterSuggestedResult {
-
- public String clusterId;
- public boolean isPrimaryClusterId;
-
- public ClusterSuggestedResult(final String clusterId, final boolean isPrimaryClusterId) {
- this.clusterId = clusterId;
- this.isPrimaryClusterId = isPrimaryClusterId;
- }
- }
}
diff --git a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/RdsMultiAzDbClusterListProvider.java b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/RdsMultiAzDbClusterListProvider.java
deleted file mode 100644
index a63323176..000000000
--- a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/RdsMultiAzDbClusterListProvider.java
+++ /dev/null
@@ -1,208 +0,0 @@
-/*
- * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package software.amazon.jdbc.hostlistprovider;
-
-import java.sql.Connection;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.SQLSyntaxErrorException;
-import java.sql.Statement;
-import java.sql.Timestamp;
-import java.time.Instant;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Properties;
-import java.util.logging.Logger;
-import software.amazon.jdbc.HostRole;
-import software.amazon.jdbc.HostSpec;
-import software.amazon.jdbc.hostavailability.HostAvailability;
-import software.amazon.jdbc.util.FullServicesContainer;
-import software.amazon.jdbc.util.Messages;
-
-public class RdsMultiAzDbClusterListProvider extends RdsHostListProvider {
- private final String fetchWriterNodeQuery;
- private final String fetchWriterNodeQueryHeader;
- static final Logger LOGGER = Logger.getLogger(RdsMultiAzDbClusterListProvider.class.getName());
-
- public RdsMultiAzDbClusterListProvider(
- final Properties properties,
- final String originalUrl,
- final FullServicesContainer servicesContainer,
- final String topologyQuery,
- final String nodeIdQuery,
- final String isReaderQuery,
- final String fetchWriterNodeQuery,
- final String fetchWriterNodeQueryHeader
- ) {
- super(properties,
- originalUrl,
- servicesContainer,
- topologyQuery,
- nodeIdQuery,
- isReaderQuery);
- this.fetchWriterNodeQuery = fetchWriterNodeQuery;
- this.fetchWriterNodeQueryHeader = fetchWriterNodeQueryHeader;
- }
-
- /**
- * Obtain a cluster topology from database.
- *
- * @param conn A connection to database to fetch the latest topology.
- * @return a list of {@link HostSpec} objects representing the topology
- * @throws SQLException if errors occurred while retrieving the topology.
- */
- protected List queryForTopology(final Connection conn) throws SQLException {
- int networkTimeout = -1;
- try {
- networkTimeout = conn.getNetworkTimeout();
- // The topology query is not monitored by the EFM plugin, so it needs a socket timeout
- if (networkTimeout == 0) {
- conn.setNetworkTimeout(networkTimeoutExecutor, defaultTopologyQueryTimeoutMs);
- }
- } catch (SQLException e) {
- LOGGER.warning(() -> Messages.get("RdsHostListProvider.errorGettingNetworkTimeout",
- new Object[] {e.getMessage()}));
- }
-
- try {
- final Statement stmt = conn.createStatement();
- String writerNodeId = processWriterNodeId(stmt.executeQuery(this.fetchWriterNodeQuery));
- if (writerNodeId == null) {
- final ResultSet nodeIdResultSet = stmt.executeQuery(this.nodeIdQuery);
- while (nodeIdResultSet.next()) {
- writerNodeId = nodeIdResultSet.getString(1);
- }
- }
- final ResultSet topologyResultSet = stmt.executeQuery(this.topologyQuery);
- return processTopologyQueryResults(topologyResultSet, writerNodeId);
- } catch (final SQLSyntaxErrorException e) {
- throw new SQLException(Messages.get("RdsHostListProvider.invalidQuery"), e);
- } finally {
- if (networkTimeout == 0 && !conn.isClosed()) {
- conn.setNetworkTimeout(networkTimeoutExecutor, networkTimeout);
- }
- }
- }
-
- /**
- * Get writer node ID.
- *
- * @param fetchWriterNodeResultSet A ResultSet of writer node query
- * @return String The ID of a writer node
- * @throws SQLException if errors occurred while retrieving the topology
- */
- private String processWriterNodeId(final ResultSet fetchWriterNodeResultSet) throws SQLException {
- String writerNodeId = null;
- if (fetchWriterNodeResultSet.next()) {
- writerNodeId = fetchWriterNodeResultSet.getString(fetchWriterNodeQueryHeader);
- }
- return writerNodeId;
- }
-
- /**
- * Form a list of hosts from the results of the topology query.
- *
- * @param topologyResultSet The results of the topology query
- * @param writerNodeId The writer node ID
- * @return a list of {@link HostSpec} objects representing
- * the topology that was returned by the
- * topology query. The list will be empty if the topology query returned an invalid topology
- * (no writer instance).
- */
- private List processTopologyQueryResults(
- final ResultSet topologyResultSet,
- final String writerNodeId) throws SQLException {
-
- final HashMap hostMap = new HashMap<>();
-
- // Data is result set is ordered by last updated time so the latest records go last.
- // When adding hosts to a map, the newer records replace the older ones.
- while (topologyResultSet.next()) {
- final HostSpec host = createHost(topologyResultSet, writerNodeId);
- hostMap.put(host.getHost(), host);
- }
-
- final List hosts = new ArrayList<>();
- final List writers = new ArrayList<>();
-
- for (final HostSpec host : hostMap.values()) {
- if (host.getRole() != HostRole.WRITER) {
- hosts.add(host);
- } else {
- writers.add(host);
- }
- }
-
- int writerCount = writers.size();
-
- if (writerCount == 0) {
- LOGGER.severe(() -> Messages.get("RdsHostListProvider.invalidTopology"));
- hosts.clear();
- } else {
- hosts.add(writers.get(0));
- }
-
- return hosts;
- }
-
- /**
- * Creates an instance of HostSpec which captures details about a connectable host.
- *
- * @param resultSet the result set from querying the topology
- * @return a {@link HostSpec} instance for a specific instance from the cluster
- * @throws SQLException If unable to retrieve the hostName from the result set
- */
- private HostSpec createHost(final ResultSet resultSet, final String writerNodeId) throws SQLException {
-
- String hostName = resultSet.getString("endpoint"); // "instance-name.XYZ.us-west-2.rds.amazonaws.com"
- String instanceName = hostName.substring(0, hostName.indexOf(".")); // "instance-name"
-
- // "instance-name.XYZ.us-west-2.rds.amazonaws.com" based on cluster instance template
- final String endpoint = getHostEndpoint(instanceName);
-
- String hostId = resultSet.getString("id");
- int queryPort = resultSet.getInt("port");
- final int port = this.clusterInstanceTemplate.isPortSpecified()
- ? this.clusterInstanceTemplate.getPort()
- : queryPort;
- final boolean isWriter = hostId.equals(writerNodeId);
-
- final HostSpec hostSpec = this.hostListProviderService.getHostSpecBuilder()
- .host(endpoint)
- .hostId(hostId)
- .port(port)
- .role(isWriter ? HostRole.WRITER : HostRole.READER)
- .availability(HostAvailability.AVAILABLE)
- .weight(0)
- .lastUpdateTime(Timestamp.from(Instant.now()))
- .build();
- hostSpec.addAlias(hostName);
- return hostSpec;
- }
-
- /**
- * Build a host dns endpoint based on host/node name.
- *
- * @param nodeName A host name.
- * @return Host dns endpoint
- */
- protected String getHostEndpoint(final String nodeName) {
- final String host = this.clusterInstanceTemplate.getHost();
- return host.replace("?", nodeName);
- }
-}
diff --git a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/StaticHostListProvider.java b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/StaticHostListProvider.java
index b37eb4cc3..13e646a03 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/StaticHostListProvider.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/StaticHostListProvider.java
@@ -16,8 +16,6 @@
package software.amazon.jdbc.hostlistprovider;
-import software.amazon.jdbc.HostListProvider;
-
-// A marker interface for providers that fetch node lists, and it never changes since after.
-// An example of such provider is a provider that use connection string as a source.
+// A marker interface for providers whose fetched host lists do not change over time.
+// An example is a provider that uses a connection string to determine the host list.
public interface StaticHostListProvider extends HostListProvider {}
diff --git a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/TopologyUtils.java b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/TopologyUtils.java
new file mode 100644
index 000000000..5a6a2ef68
--- /dev/null
+++ b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/TopologyUtils.java
@@ -0,0 +1,235 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+package software.amazon.jdbc.hostlistprovider;
+
+import java.sql.Connection;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.SQLSyntaxErrorException;
+import java.sql.Statement;
+import java.sql.Timestamp;
+import java.util.ArrayList;
+import java.util.Comparator;
+import java.util.List;
+import java.util.concurrent.Executor;
+import java.util.logging.Logger;
+import java.util.stream.Collectors;
+import org.checkerframework.checker.nullness.qual.Nullable;
+import software.amazon.jdbc.HostRole;
+import software.amazon.jdbc.HostSpec;
+import software.amazon.jdbc.HostSpecBuilder;
+import software.amazon.jdbc.dialect.TopologyDialect;
+import software.amazon.jdbc.hostavailability.HostAvailability;
+import software.amazon.jdbc.util.Messages;
+import software.amazon.jdbc.util.Pair;
+import software.amazon.jdbc.util.SynchronousExecutor;
+
+/**
+ * An abstract class defining utility methods that can be used to retrieve and process a variety of database topology
+ * information. This class can be extended to define logic specific to various database engine deployments
+ * (e.g. Aurora, Multi-AZ, Global Aurora etc.).
+ */
+public abstract class TopologyUtils {
+ private static final Logger LOGGER = Logger.getLogger(TopologyUtils.class.getName());
+ protected static final int DEFAULT_QUERY_TIMEOUT_MS = 1000;
+
+ protected final Executor networkTimeoutExecutor = new SynchronousExecutor();
+ protected final TopologyDialect dialect;
+ protected final HostSpecBuilder hostSpecBuilder;
+
+ public TopologyUtils(
+ TopologyDialect dialect,
+ HostSpecBuilder hostSpecBuilder) {
+ this.dialect = dialect;
+ this.hostSpecBuilder = hostSpecBuilder;
+ }
+
+ /**
+ * Query the database for information for each instance in the database topology.
+ *
+ * @param conn the connection to use to query the database.
+ * @param initialHostSpec the {@link HostSpec} that was used to initially connect.
+ * @param instanceTemplate the template {@link HostSpec} to use when constructing new {@link HostSpec} objects from
+ * the data returned by the topology query.
+ * @return a list of {@link HostSpec} objects representing the results of the topology query.
+ * @throws SQLException if an error occurs when executing the topology query or processing the results.
+ */
+ public @Nullable List queryForTopology(Connection conn, HostSpec initialHostSpec, HostSpec instanceTemplate)
+ throws SQLException {
+ int originalNetworkTimeout = setNetworkTimeout(conn);
+ try (final Statement stmt = conn.createStatement();
+ final ResultSet rs = stmt.executeQuery(this.dialect.getTopologyQuery())) {
+ if (rs.getMetaData().getColumnCount() == 0) {
+ // We expect at least 4 columns. Note that the server may return 0 columns if failover has occurred.
+ LOGGER.finest(Messages.get("TopologyUtils.unexpectedTopologyQueryColumnCount"));
+ return null;
+ }
+
+ return this.verifyWriter(this.getHosts(conn, rs, initialHostSpec, instanceTemplate));
+ } catch (final SQLSyntaxErrorException e) {
+ throw new SQLException(Messages.get("TopologyUtils.invalidQuery"), e);
+ } finally {
+ if (originalNetworkTimeout == 0 && !conn.isClosed()) {
+ conn.setNetworkTimeout(networkTimeoutExecutor, originalNetworkTimeout);
+ }
+ }
+ }
+
+ protected int setNetworkTimeout(Connection conn) {
+ int networkTimeout = -1;
+ try {
+ networkTimeout = conn.getNetworkTimeout();
+ // The topology query is not monitored by the EFM plugin, so it needs a socket timeout.
+ if (networkTimeout == 0) {
+ conn.setNetworkTimeout(this.networkTimeoutExecutor, DEFAULT_QUERY_TIMEOUT_MS);
+ }
+ } catch (SQLException e) {
+ LOGGER.warning(() -> Messages.get("TopologyUtils.errorGettingNetworkTimeout", new Object[] {e.getMessage()}));
+ }
+ return networkTimeout;
+ }
+
+ protected abstract @Nullable List getHosts(
+ Connection conn, ResultSet rs, HostSpec initialHostSpec, HostSpec instanceTemplate) throws SQLException;
+
+ protected @Nullable List verifyWriter(@Nullable List allHosts) {
+ if (allHosts == null) {
+ return null;
+ }
+
+ List hosts = new ArrayList<>();
+ List writers = new ArrayList<>();
+ for (HostSpec host : allHosts) {
+ if (HostRole.WRITER == host.getRole()) {
+ writers.add(host);
+ } else {
+ hosts.add(host);
+ }
+ }
+
+ int writerCount = writers.size();
+ if (writerCount == 0) {
+ return null;
+ } else if (writerCount == 1) {
+ hosts.add(writers.get(0));
+ } else {
+ // Assume the most recently updated writer instance is the current writer. Other potential writers will be ignored.
+ List sortedWriters = writers.stream()
+ .sorted(Comparator.comparing(HostSpec::getLastUpdateTime, Comparator.nullsLast(Comparator.reverseOrder())))
+ .collect(Collectors.toList());
+ hosts.add(sortedWriters.get(0));
+ }
+
+ return hosts;
+ }
+
+ /**
+ * Creates a {@link HostSpec} from the given topology information.
+ *
+ * @param instanceId the database instance identifier, e.g. "mydb-instance-1".
+ * @param isWriter true if this is a writer instance, false for reader.
+ * @param weight the instance weight for load balancing.
+ * @param lastUpdateTime the timestamp of the last update to this instance's information.
+ * @param initialHostSpec the original host specification used for connecting.
+ * @param instanceTemplate the template used to construct the new {@link HostSpec}.
+ * @return a {@link HostSpec} representing the given information.
+ */
+ public HostSpec createHost(
+ String instanceId,
+ String instanceName,
+ final boolean isWriter,
+ final long weight,
+ final Timestamp lastUpdateTime,
+ final HostSpec initialHostSpec,
+ final HostSpec instanceTemplate) {
+ instanceName = instanceName == null ? "?" : instanceName;
+ final String endpoint = instanceTemplate.getHost().replace("?", instanceName);
+ final int port = instanceTemplate.isPortSpecified()
+ ? instanceTemplate.getPort()
+ : initialHostSpec.getPort();
+
+ final HostSpec hostSpec = this.hostSpecBuilder
+ .hostId(instanceId)
+ .host(endpoint)
+ .port(port)
+ .role(isWriter ? HostRole.WRITER : HostRole.READER)
+ .availability(HostAvailability.AVAILABLE)
+ .weight(weight)
+ .lastUpdateTime(lastUpdateTime)
+ .build();
+ hostSpec.addAlias(instanceName);
+ hostSpec.setHostId(instanceName);
+ return hostSpec;
+ }
+
+ /**
+ * Identifies instances across different database types using instanceId and instanceName values.
+ *
+ * Database types handle these identifiers differently:
+ * - Aurora: Uses the instance name as both instanceId and instanceName
+ * Example: "test-instance-1" for both values
+ * - RDS Cluster: Uses distinct values for instanceId and instanceName
+ * Example:
+ * instanceId: "db-WQFQKBTL2LQUPIEFIFBGENS4ZQ"
+ * instanceName: "test-multiaz-instance-1"
+ */
+ public @Nullable Pair getInstanceId(final Connection connection) {
+ try {
+ try (final Statement stmt = connection.createStatement();
+ final ResultSet rs = stmt.executeQuery(this.dialect.getInstanceIdQuery())) {
+ if (rs.next()) {
+ return Pair.create(rs.getString(1), rs.getString(2));
+ }
+ }
+ } catch (SQLException ex) {
+ return null;
+ }
+
+ return null;
+ }
+
+ /**
+ * Evaluate whether the given connection is to a writer instance.
+ *
+ * @param connection the connection to evaluate.
+ * @return true if the connection is to a writer instance, false otherwise.
+ * @throws SQLException if an exception occurs when querying the database or processing the database response.
+ */
+ public abstract boolean isWriterInstance(Connection connection) throws SQLException;
+
+ /**
+ * Evaluate the database role of the given connection, either {@link HostRole#WRITER} or {@link HostRole#READER}.
+ *
+ * @param conn the connection to evaluate.
+ * @return the database role of the given connection.
+ * @throws SQLException if an exception occurs when querying the database or processing the database response.
+ */
+ public HostRole getHostRole(Connection conn) throws SQLException {
+ try (final Statement stmt = conn.createStatement();
+ final ResultSet rs = stmt.executeQuery(this.dialect.getIsReaderQuery())) {
+ if (rs.next()) {
+ boolean isReader = rs.getBoolean(1);
+ return isReader ? HostRole.READER : HostRole.WRITER;
+ }
+ } catch (SQLException e) {
+ throw new SQLException(Messages.get("TopologyUtils.errorGettingHostRole"), e);
+ }
+
+ throw new SQLException(Messages.get("TopologyUtils.errorGettingHostRole"));
+ }
+}
diff --git a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/ClusterTopologyMonitor.java b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/ClusterTopologyMonitor.java
index 482323f92..51b456939 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/ClusterTopologyMonitor.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/ClusterTopologyMonitor.java
@@ -29,8 +29,6 @@ public interface ClusterTopologyMonitor extends Monitor, EventSubscriber {
boolean canDispose();
- void setClusterId(final String clusterId);
-
List forceRefresh(final boolean writerImportant, final long timeoutMs)
throws SQLException, TimeoutException;
diff --git a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/ClusterTopologyMonitorImpl.java b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/ClusterTopologyMonitorImpl.java
index 5800c993b..845ef1c5f 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/ClusterTopologyMonitorImpl.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/ClusterTopologyMonitorImpl.java
@@ -17,12 +17,8 @@
package software.amazon.jdbc.hostlistprovider.monitoring;
import java.sql.Connection;
-import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.SQLSyntaxErrorException;
-import java.sql.Statement;
-import java.sql.Timestamp;
-import java.time.Instant;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
@@ -40,21 +36,21 @@
import java.util.concurrent.locks.ReentrantLock;
import java.util.logging.Level;
import java.util.logging.Logger;
-import java.util.stream.Collectors;
import org.checkerframework.checker.nullness.qual.NonNull;
import org.checkerframework.checker.nullness.qual.Nullable;
import software.amazon.jdbc.HostRole;
import software.amazon.jdbc.HostSpec;
import software.amazon.jdbc.PropertyDefinition;
-import software.amazon.jdbc.hostavailability.HostAvailability;
import software.amazon.jdbc.hostlistprovider.Topology;
+import software.amazon.jdbc.hostlistprovider.TopologyUtils;
import software.amazon.jdbc.util.ExecutorFactory;
import software.amazon.jdbc.util.FullServicesContainer;
+import software.amazon.jdbc.util.LogUtils;
import software.amazon.jdbc.util.Messages;
+import software.amazon.jdbc.util.Pair;
import software.amazon.jdbc.util.PropertyUtils;
import software.amazon.jdbc.util.RdsUtils;
import software.amazon.jdbc.util.ServiceUtility;
-import software.amazon.jdbc.util.StringUtils;
import software.amazon.jdbc.util.SynchronousExecutor;
import software.amazon.jdbc.util.Utils;
import software.amazon.jdbc.util.events.Event;
@@ -69,38 +65,22 @@ public class ClusterTopologyMonitorImpl extends AbstractMonitor implements Clust
protected static final Executor networkTimeoutExecutor = new SynchronousExecutor();
protected static final RdsUtils rdsHelper = new RdsUtils();
protected static final long monitorTerminationTimeoutSec = 30;
-
- protected static final int defaultTopologyQueryTimeoutMs = 1000;
protected static final int closeConnectionNetworkTimeoutMs = 500;
-
protected static final int defaultConnectionTimeoutMs = 5000;
protected static final int defaultSocketTimeoutMs = 5000;
- // Keep monitoring topology with a high rate for 30s after failover.
+ // Keep monitoring topology at a high rate for 30s after failover.
protected static final long highRefreshPeriodAfterPanicNano = TimeUnit.SECONDS.toNanos(30);
protected static final long ignoreTopologyRequestNano = TimeUnit.SECONDS.toNanos(10);
- protected final long refreshRateNano;
- protected final long highRefreshRateNano;
- protected final FullServicesContainer servicesContainer;
- protected final Properties properties;
- protected final Properties monitoringProperties;
- protected final HostSpec initialHostSpec;
- protected final String topologyQuery;
- protected final String nodeIdQuery;
- protected final String writerTopologyQuery;
- protected final HostSpec clusterInstanceTemplate;
-
- protected String clusterId;
protected final AtomicReference writerHostSpec = new AtomicReference<>(null);
protected final AtomicReference monitoringConnection = new AtomicReference<>(null);
- protected boolean isVerifiedWriterConnection = false;
- protected long highRefreshRateEndTimeNano = 0;
+
protected final Object topologyUpdated = new Object();
protected final AtomicBoolean requestToUpdateTopology = new AtomicBoolean(false);
protected final AtomicLong ignoreNewTopologyRequestsEndTimeNano = new AtomicLong(-1);
protected final ConcurrentHashMap submittedNodes = new ConcurrentHashMap<>();
- protected ExecutorService nodeExecutorService = null;
+
protected final ReentrantLock nodeExecutorLock = new ReentrantLock();
protected final AtomicBoolean nodeThreadsStop = new AtomicBoolean(false);
protected final AtomicReference nodeThreadsWriterConnection = new AtomicReference<>(null);
@@ -108,29 +88,39 @@ public class ClusterTopologyMonitorImpl extends AbstractMonitor implements Clust
protected final AtomicReference nodeThreadsReaderConnection = new AtomicReference<>(null);
protected final AtomicReference> nodeThreadsLatestTopology = new AtomicReference<>(null);
+ protected final long refreshRateNano;
+ protected final long highRefreshRateNano;
+ protected final TopologyUtils topologyUtils;
+ protected final FullServicesContainer servicesContainer;
+ protected final Properties properties;
+ protected final Properties monitoringProperties;
+ protected final HostSpec initialHostSpec;
+ protected final HostSpec instanceTemplate;
+
+ protected ExecutorService nodeExecutorService = null;
+ protected boolean isVerifiedWriterConnection = false;
+ protected long highRefreshRateEndTimeNano = 0;
+ protected String clusterId;
+
public ClusterTopologyMonitorImpl(
final FullServicesContainer servicesContainer,
+ final TopologyUtils topologyUtils,
final String clusterId,
final HostSpec initialHostSpec,
final Properties properties,
- final HostSpec clusterInstanceTemplate,
+ final HostSpec instanceTemplate,
final long refreshRateNano,
- final long highRefreshRateNano,
- final String topologyQuery,
- final String writerTopologyQuery,
- final String nodeIdQuery) {
+ final long highRefreshRateNano) {
super(monitorTerminationTimeoutSec);
- this.clusterId = clusterId;
this.servicesContainer = servicesContainer;
+ this.topologyUtils = topologyUtils;
+ this.clusterId = clusterId;
this.initialHostSpec = initialHostSpec;
- this.clusterInstanceTemplate = clusterInstanceTemplate;
+ this.instanceTemplate = instanceTemplate;
this.properties = properties;
this.refreshRateNano = refreshRateNano;
this.highRefreshRateNano = highRefreshRateNano;
- this.topologyQuery = topologyQuery;
- this.writerTopologyQuery = writerTopologyQuery;
- this.nodeIdQuery = nodeIdQuery;
this.monitoringProperties = PropertyUtils.copyProperties(properties);
this.properties.stringPropertyNames().stream()
@@ -159,11 +149,6 @@ public boolean canDispose() {
return true;
}
- @Override
- public void setClusterId(String clusterId) {
- this.clusterId = clusterId;
- }
-
@Override
public List forceRefresh(final boolean shouldVerifyWriter, final long timeoutMs)
throws SQLException, TimeoutException {
@@ -171,10 +156,11 @@ public List forceRefresh(final boolean shouldVerifyWriter, final long
if (this.ignoreNewTopologyRequestsEndTimeNano.get() > 0
&& System.nanoTime() < this.ignoreNewTopologyRequestsEndTimeNano.get()) {
- // Previous failover has just completed. We can use results of it without triggering a new topology update.
+ // A previous failover event has completed recently.
+ // We can use the results of it without triggering a new topology update.
List currentHosts = getStoredHosts();
LOGGER.finest(() ->
- Utils.logTopology(currentHosts, Messages.get("ClusterTopologyMonitorImpl.ignoringTopologyRequest")));
+ LogUtils.logTopology(currentHosts, Messages.get("ClusterTopologyMonitorImpl.ignoringTopologyRequest")));
if (currentHosts != null) {
return currentHosts;
}
@@ -191,13 +177,12 @@ public List forceRefresh(final boolean shouldVerifyWriter, final long
@Override
public List forceRefresh(@Nullable Connection connection, final long timeoutMs)
throws SQLException, TimeoutException {
-
if (this.isVerifiedWriterConnection) {
- // Push monitoring thread to refresh topology with a verified connection
+ // Get the monitoring thread to refresh the topology using a verified connection.
return this.waitTillTopologyGetsUpdated(timeoutMs);
}
- // Otherwise use provided unverified connection to update topology
+ // Otherwise, use the provided unverified connection to update the topology.
return this.fetchTopologyAndUpdateCache(connection);
}
@@ -208,12 +193,13 @@ protected List waitTillTopologyGetsUpdated(final long timeoutMs) throw
synchronized (this.requestToUpdateTopology) {
this.requestToUpdateTopology.set(true);
- // Notify monitoring thread (that might be sleeping) that topology should be refreshed immediately.
+ // Notify the monitoring thread, which may be sleeping, that topology should be refreshed immediately.
this.requestToUpdateTopology.notifyAll();
}
if (timeoutMs == 0) {
- LOGGER.finest(() -> Utils.logTopology(currentHosts, Messages.get("ClusterTopologyMonitorImpl.timeoutSetToZero")));
+ LOGGER.finest(() ->
+ LogUtils.logTopology(currentHosts, Messages.get("ClusterTopologyMonitorImpl.timeoutSetToZero")));
return currentHosts;
}
@@ -236,9 +222,8 @@ protected List waitTillTopologyGetsUpdated(final long timeoutMs) throw
}
if (System.nanoTime() >= end) {
- throw new TimeoutException(Messages.get(
- "ClusterTopologyMonitorImpl.topologyNotUpdated",
- new Object[]{timeoutMs}));
+ throw new TimeoutException(
+ Messages.get("ClusterTopologyMonitorImpl.topologyNotUpdated", new Object[] {timeoutMs}));
}
return latestHosts;
@@ -254,7 +239,7 @@ public void stop() {
this.nodeThreadsStop.set(true);
this.shutdownNodeExecutorService();
- // It breaks a waiting/sleeping cycles in monitoring thread
+ // This code interrupts the waiting/sleeping cycle in the monitoring thread.
synchronized (this.requestToUpdateTopology) {
this.requestToUpdateTopology.set(true);
this.requestToUpdateTopology.notifyAll();
@@ -277,7 +262,7 @@ public void monitor() throws Exception {
try {
LOGGER.finest(() -> Messages.get(
"ClusterTopologyMonitorImpl.startMonitoringThread",
- new Object[]{this.clusterId, this.initialHostSpec.getHost()}));
+ new Object[] {this.clusterId, this.initialHostSpec.getHost()}));
this.servicesContainer.getEventPublisher().subscribe(
this, Collections.singleton(MonitorResetEvent.class));
@@ -290,7 +275,7 @@ public void monitor() throws Exception {
if (this.submittedNodes.isEmpty()) {
LOGGER.finest(() -> Messages.get("ClusterTopologyMonitorImpl.startingNodeMonitoringThreads"));
- // start node threads
+ // Start node monitors.
this.nodeThreadsStop.set(false);
this.nodeThreadsWriterConnection.set(null);
this.nodeThreadsReaderConnection.set(null);
@@ -299,7 +284,7 @@ public void monitor() throws Exception {
List hosts = getStoredHosts();
if (hosts == null) {
- // need any connection to get topology
+ // Use any available connection to get the topology.
hosts = this.openAnyConnectionAndUpdateTopology();
}
@@ -330,19 +315,17 @@ public void monitor() throws Exception {
throw exceptionList.get(0);
}
}
- // It's not possible to call shutdown() on this.nodeExecutorService since more node may be added later.
+ // We do not call nodeExecutorService.shutdown() here since more node monitors may be submitted later.
}
- // otherwise let's try it again the next round
-
+ // We will try again in the next iteration.
} else {
- // node threads are running
- // check if writer is already detected
+ // The node monitors are running, so we check if the writer has been detected.
final Connection writerConnection = this.nodeThreadsWriterConnection.get();
final HostSpec writerConnectionHostSpec = this.nodeThreadsWriterHostSpec.get();
if (writerConnection != null && writerConnectionHostSpec != null) {
LOGGER.finest(() -> Messages.get(
"ClusterTopologyMonitorImpl.writerPickedUpFromNodeMonitors",
- new Object[]{writerConnectionHostSpec}));
+ new Object[] {writerConnectionHostSpec}));
this.closeConnection(this.monitoringConnection);
this.monitoringConnection.set(writerConnection);
@@ -364,7 +347,7 @@ public void monitor() throws Exception {
continue;
} else {
- // update node threads with new nodes in the topology
+ // Update node monitors with the new instances in the topology.
List hosts = this.nodeThreadsLatestTopology.get();
if (hosts != null && !this.nodeThreadsStop.get()) {
for (HostSpec hostSpec : hosts) {
@@ -388,7 +371,7 @@ public void monitor() throws Exception {
throw exceptionList.get(0);
}
}
- // It's not possible to call shutdown() on this.nodeExecutorService since more node may be added later.
+ // We do not call nodeExecutorService.shutdown() here since more node monitors may be submitted later.
}
}
}
@@ -396,8 +379,7 @@ public void monitor() throws Exception {
this.delay(true);
} else {
- // regular mode (not panic mode)
-
+ // We are in regular mode (not panic mode).
if (!this.submittedNodes.isEmpty()) {
this.shutdownNodeExecutorService();
this.submittedNodes.clear();
@@ -405,8 +387,7 @@ public void monitor() throws Exception {
final List hosts = this.fetchTopologyAndUpdateCache(this.monitoringConnection.get());
if (hosts == null) {
- // can't get topology
- // let's switch to panic mode
+ // Attempt to fetch topology failed, so we switch to panic mode.
this.closeConnection(this.monitoringConnection);
this.isVerifiedWriterConnection = false;
continue;
@@ -416,9 +397,9 @@ public void monitor() throws Exception {
this.highRefreshRateEndTimeNano = 0;
}
- // Do not log topology while in high refresh rate. It's noisy!
+ // We avoid logging the topology while using the high refresh rate because it is too noisy.
if (this.highRefreshRateEndTimeNano == 0) {
- LOGGER.finest(() -> Utils.logTopology(getStoredHosts()));
+ LOGGER.finest(() -> LogUtils.logTopology(getStoredHosts()));
}
this.delay(false);
@@ -433,14 +414,14 @@ public void monitor() throws Exception {
} catch (final InterruptedException intEx) {
Thread.currentThread().interrupt();
} catch (final Exception ex) {
- // this should not be reached; log and exit thread
+ // This should not be reached.
if (LOGGER.isLoggable(Level.FINEST)) {
- // We want to print full trace stack of the exception.
+ // We want to print the full trace stack of the exception.
LOGGER.log(
Level.FINEST,
Messages.get(
"ClusterTopologyMonitorImpl.exceptionDuringMonitoringStop",
- new Object[]{this.initialHostSpec.getHost()}),
+ new Object[] {this.initialHostSpec.getHost()}),
ex);
}
@@ -458,7 +439,7 @@ public void monitor() throws Exception {
LOGGER.finest(() -> Messages.get(
"ClusterTopologyMonitorImpl.stopMonitoringThread",
- new Object[]{this.initialHostSpec.getHost()}));
+ new Object[] {this.initialHostSpec.getHost()}));
}
}
@@ -522,7 +503,7 @@ protected void shutdownNodeExecutorService() {
this.nodeExecutorService.shutdownNow();
}
} catch (InterruptedException e) {
- // do nothing
+ // Do nothing.
}
this.nodeExecutorService = null;
@@ -559,21 +540,20 @@ protected List openAnyConnectionAndUpdateTopology() {
Connection conn;
- // open a new connection
+ // Open a new connection.
try {
conn = this.servicesContainer.getPluginService().forceConnect(this.initialHostSpec, this.monitoringProperties);
} catch (SQLException ex) {
- // can't connect
return null;
}
if (this.monitoringConnection.compareAndSet(null, conn)) {
LOGGER.finest(() -> Messages.get(
"ClusterTopologyMonitorImpl.openedMonitoringConnection",
- new Object[]{this.initialHostSpec.getHost()}));
+ new Object[] {this.initialHostSpec.getHost()}));
try {
- if (!StringUtils.isNullOrEmpty(this.getWriterNodeId(this.monitoringConnection.get()))) {
+ if (this.topologyUtils.isWriterInstance(this.monitoringConnection.get())) {
this.isVerifiedWriterConnection = true;
writerVerifiedByThisThread = true;
@@ -581,24 +561,27 @@ protected List openAnyConnectionAndUpdateTopology() {
this.writerHostSpec.set(this.initialHostSpec);
LOGGER.finest(() -> Messages.get(
"ClusterTopologyMonitorImpl.writerMonitoringConnection",
- new Object[]{this.writerHostSpec.get().getHost()}));
+ new Object[] {this.writerHostSpec.get().getHost()}));
} else {
- final String nodeId = this.getNodeId(this.monitoringConnection.get());
- if (!StringUtils.isNullOrEmpty(nodeId)) {
- this.writerHostSpec.set(this.createHost(nodeId, true, 0, null));
+ final Pair pair = this.topologyUtils.getInstanceId(this.monitoringConnection.get());
+ if (pair != null) {
+ HostSpec instanceTemplate = this.getInstanceTemplate(pair.getValue2(), this.monitoringConnection.get());
+ HostSpec writerHost = this.topologyUtils.createHost(
+ pair.getValue1(), pair.getValue2(), true, 0, null, this.initialHostSpec, instanceTemplate);
+ this.writerHostSpec.set(writerHost);
LOGGER.finest(() -> Messages.get(
- "ClusterTopologyMonitorImpl.writerMonitoringConnection",
- new Object[]{this.writerHostSpec.get().getHost()}));
+ "ClusterTopologyMonitorImpl.writerMonitoringConnection",
+ new Object[] {this.writerHostSpec.get().getHost()}));
}
}
}
} catch (SQLException ex) {
- // do nothing
+ // Do nothing.
}
} else {
- // monitoring connection has already been set by other thread
- // close new connection as we don't need it
+ // The monitoring connection has already been set by another thread. We close the new connection since it
+ // is not needed anymore.
this.closeConnection(conn);
}
}
@@ -614,8 +597,7 @@ protected List openAnyConnectionAndUpdateTopology() {
}
if (hosts == null) {
- // can't get topology; it might be something's wrong with a connection
- // close connection
+ // Attempt to fetch topology failed. There might be something wrong with the connection, so we close it here.
this.closeConnection(this.monitoringConnection);
this.isVerifiedWriterConnection = false;
}
@@ -623,18 +605,8 @@ protected List openAnyConnectionAndUpdateTopology() {
return hosts;
}
- protected String getNodeId(final Connection connection) {
- try {
- try (final Statement stmt = connection.createStatement();
- final ResultSet resultSet = stmt.executeQuery(this.nodeIdQuery)) {
- if (resultSet.next()) {
- return resultSet.getString(1);
- }
- }
- } catch (SQLException ex) {
- // do nothing
- }
- return null;
+ protected HostSpec getInstanceTemplate(String nodeId, Connection connection) throws SQLException {
+ return this.instanceTemplate;
}
protected void closeConnection(final @Nullable Connection connection) {
@@ -643,12 +615,12 @@ protected void closeConnection(final @Nullable Connection connection) {
try {
connection.setNetworkTimeout(networkTimeoutExecutor, closeConnectionNetworkTimeoutMs);
} catch (SQLException ex) {
- // do nothing
+ // Do nothing.
}
connection.close();
}
} catch (final SQLException ex) {
- // ignore
+ // Do nothing.
}
}
@@ -657,7 +629,7 @@ protected void closeConnection(final @NonNull AtomicReference connec
this.closeConnection(connection);
}
- // Sleep that can be easily interrupted
+ // Sleep method that can be easily interrupted.
protected void delay(boolean useHighRefreshRate) throws InterruptedException {
if (this.highRefreshRateEndTimeNano > 0 && System.nanoTime() < this.highRefreshRateEndTimeNano) {
useHighRefreshRate = true;
@@ -687,12 +659,16 @@ protected void delay(boolean useHighRefreshRate) throws InterruptedException {
}
return hosts;
} catch (SQLException ex) {
- // do nothing
- LOGGER.finest(() -> Messages.get("ClusterTopologyMonitorImpl.errorFetchingTopology", new Object[]{ex}));
+ LOGGER.finest(() -> Messages.get("ClusterTopologyMonitorImpl.errorFetchingTopology", new Object[] {ex}));
}
+
return null;
}
+ protected List queryForTopology(Connection connection) throws SQLException {
+ return this.topologyUtils.queryForTopology(connection, this.initialHostSpec, this.instanceTemplate);
+ }
+
protected void updateTopologyCache(final @NonNull List hosts) {
synchronized (this.requestToUpdateTopology) {
this.servicesContainer.getStorageService().set(this.clusterId, new Topology(hosts));
@@ -717,158 +693,6 @@ protected void clearTopologyCache() {
}
}
- // Returns a writer node ID if connected to a writer node. Returns null otherwise.
- protected String getWriterNodeId(final Connection connection) throws SQLException {
- try (final Statement stmt = connection.createStatement()) {
- try (final ResultSet resultSet = stmt.executeQuery(this.writerTopologyQuery)) {
- if (resultSet.next()) {
- return resultSet.getString(1);
- }
- }
- }
- return null;
- }
-
- protected @Nullable List queryForTopology(final Connection conn) throws SQLException {
- int networkTimeout = -1;
- try {
- networkTimeout = conn.getNetworkTimeout();
- // The topology query is not monitored by the EFM plugin, so it needs a socket timeout
- if (networkTimeout == 0) {
- conn.setNetworkTimeout(networkTimeoutExecutor, defaultTopologyQueryTimeoutMs);
- }
- } catch (SQLException e) {
- LOGGER.warning(() -> Messages.get("ClusterTopologyMonitorImpl.errorGettingNetworkTimeout",
- new Object[] {e.getMessage()}));
- }
-
- final String suggestedWriterNodeId = this.getSuggestedWriterNodeId(conn);
- try (final Statement stmt = conn.createStatement();
- final ResultSet resultSet = stmt.executeQuery(this.topologyQuery)) {
- return this.processQueryResults(resultSet, suggestedWriterNodeId);
- } catch (final SQLSyntaxErrorException e) {
- throw new SQLException(Messages.get("ClusterTopologyMonitorImpl.invalidQuery"), e);
- } finally {
- if (networkTimeout == 0 && !conn.isClosed()) {
- conn.setNetworkTimeout(networkTimeoutExecutor, networkTimeout);
- }
- }
- }
-
- protected String getSuggestedWriterNodeId(final Connection connection) throws SQLException {
- // Aurora topology query can detect a writer for itself so it doesn't need any suggested writer node ID.
- return null; // intentionally null
- }
-
- protected @Nullable List processQueryResults(
- final ResultSet resultSet,
- final String suggestedWriterNodeId) throws SQLException {
-
- final HashMap hostMap = new HashMap<>();
-
- if (resultSet.getMetaData().getColumnCount() == 0) {
- // We expect at least 4 columns. Note that the server may return 0 columns if failover has occurred.
- LOGGER.finest(() -> Messages.get("ClusterTopologyMonitorImpl.unexpectedTopologyQueryColumnCount"));
- return null;
- }
-
- // Data is result set is ordered by last updated time so the latest records go last.
- // When adding hosts to a map, the newer records replace the older ones.
- while (resultSet.next()) {
- try {
- final HostSpec host = createHost(resultSet, suggestedWriterNodeId);
- hostMap.put(host.getHost(), host);
- } catch (Exception e) {
- LOGGER.finest(() ->
- Messages.get("ClusterTopologyMonitorImpl.errorProcessingQueryResults", new Object[]{e.getMessage()}));
- return null;
- }
- }
-
- final List hosts = new ArrayList<>();
- final List writers = new ArrayList<>();
-
- for (final HostSpec host : hostMap.values()) {
- if (host.getRole() != HostRole.WRITER) {
- hosts.add(host);
- } else {
- writers.add(host);
- }
- }
-
- int writerCount = writers.size();
-
- if (writerCount == 0) {
- LOGGER.warning(() -> Messages.get("ClusterTopologyMonitorImpl.invalidTopology"));
- hosts.clear();
- } else if (writerCount == 1) {
- hosts.add(writers.get(0));
- } else {
- // Take the latest updated writer node as the current writer. All others will be ignored.
- List sortedWriters = writers.stream()
- .sorted(Comparator.comparing(HostSpec::getLastUpdateTime, Comparator.nullsLast(Comparator.reverseOrder())))
- .collect(Collectors.toList());
- hosts.add(sortedWriters.get(0));
- }
-
- return hosts;
- }
-
- protected HostSpec createHost(
- final ResultSet resultSet,
- final String suggestedWriterNodeId) throws SQLException {
-
- // suggestedWriterNodeId is not used for Aurora clusters. Topology query can detect a writer for itself.
-
- // According to the topology query the result set
- // should contain 4 columns: node ID, 1/0 (writer/reader), CPU utilization, node lag in time.
- String hostName = resultSet.getString(1);
- final boolean isWriter = resultSet.getBoolean(2);
- final float cpuUtilization = resultSet.getFloat(3);
- final float nodeLag = resultSet.getFloat(4);
- Timestamp lastUpdateTime;
- try {
- lastUpdateTime = resultSet.getTimestamp(5);
- } catch (Exception e) {
- lastUpdateTime = Timestamp.from(Instant.now());
- }
-
- // Calculate weight based on node lag in time and CPU utilization.
- final long weight = Math.round(nodeLag) * 100L + Math.round(cpuUtilization);
-
- return createHost(hostName, isWriter, weight, lastUpdateTime);
- }
-
- protected HostSpec createHost(
- String nodeName,
- final boolean isWriter,
- final long weight,
- final Timestamp lastUpdateTime) {
-
- nodeName = nodeName == null ? "?" : nodeName;
- final String endpoint = getHostEndpoint(nodeName);
- final int port = this.clusterInstanceTemplate.isPortSpecified()
- ? this.clusterInstanceTemplate.getPort()
- : this.initialHostSpec.getPort();
-
- final HostSpec hostSpec = this.servicesContainer.getHostListProviderService().getHostSpecBuilder()
- .host(endpoint)
- .port(port)
- .role(isWriter ? HostRole.WRITER : HostRole.READER)
- .availability(HostAvailability.AVAILABLE)
- .weight(weight)
- .lastUpdateTime(lastUpdateTime)
- .build();
- hostSpec.addAlias(nodeName);
- hostSpec.setHostId(nodeName);
- return hostSpec;
- }
-
- protected String getHostEndpoint(final String nodeName) {
- final String host = this.clusterInstanceTemplate.getHost();
- return host.replace("?", nodeName);
- }
-
private static class NodeMonitoringWorker implements Runnable {
private static final Logger LOGGER = Logger.getLogger(NodeMonitoringWorker.class.getName());
@@ -913,26 +737,24 @@ public void run() {
}
if (connection != null) {
-
- String writerId = null;
+ boolean isWriter = false;
try {
- writerId = this.monitor.getWriterNodeId(connection);
-
+ isWriter = this.monitor.topologyUtils.isWriterInstance(connection);
} catch (SQLSyntaxErrorException ex) {
- LOGGER.severe(() -> Messages.get("NodeMonitoringThread.invalidWriterQuery",
+ LOGGER.severe(() -> Messages.get(
+ "NodeMonitoringThread.invalidWriterQuery",
new Object[] {ex.getMessage()}));
throw new RuntimeException(ex);
-
} catch (SQLException ex) {
this.monitor.closeConnection(connection);
connection = null;
}
- if (!StringUtils.isNullOrEmpty(writerId)) {
+ if (isWriter) {
try {
if (this.servicesContainer.getPluginService().getHostRole(connection) != HostRole.WRITER) {
// The first connection after failover may be stale.
- writerId = null;
+ isWriter = false;
}
} catch (SQLException e) {
// Invalid connection, retry.
@@ -940,40 +762,39 @@ public void run() {
}
}
- if (!StringUtils.isNullOrEmpty(writerId)) {
- // this prevents closing connection in finally block
+ if (isWriter) {
+ // This prevents us from closing the connection in the finally block.
if (!this.monitor.nodeThreadsWriterConnection.compareAndSet(null, connection)) {
- // writer connection is already setup
+ // The writer connection is already set up, probably by another node monitor.
this.monitor.closeConnection(connection);
-
} else {
- // writer connection is successfully set to writerConnection
- LOGGER.fine(Messages.get("NodeMonitoringThread.detectedWriter", new Object[]{writerId}));
+ // Successfully updated the node monitor writer connection.
+ LOGGER.fine(() ->
+ Messages.get("NodeMonitoringThread.detectedWriter", new Object[] {hostSpec.getUrl()}));
// When nodeThreadsWriterConnection and nodeThreadsWriterHostSpec are both set, the topology monitor may
// set ignoreNewTopologyRequestsEndTimeNano, in which case other threads will use the cached topology
// for the ignore duration, so we need to update the topology before setting nodeThreadsWriterHostSpec.
this.monitor.fetchTopologyAndUpdateCache(connection);
this.monitor.nodeThreadsWriterHostSpec.set(hostSpec);
this.monitor.nodeThreadsStop.set(true);
- LOGGER.fine(Utils.logTopology(this.monitor.getStoredHosts()));
+ LOGGER.fine(() -> LogUtils.logTopology(this.monitor.getStoredHosts()));
}
- // Setting the connection to null here prevents the final block
- // from closing nodeThreadsWriterConnection.
+ // We set the connection to null to prevent the finally block from closing nodeThreadsWriterConnection.
connection = null;
return;
-
} else if (connection != null) {
- // this connection is a reader connection
+ // This connection is a reader connection.
if (this.monitor.nodeThreadsWriterConnection.get() == null) {
- // while writer connection isn't yet established this reader connection may update topology
+ // We can use this reader connection to update the topology while we wait for the writer connection to
+ // be established.
if (updateTopology) {
- this.readerThreadFetchTopology(connection, writerHostSpec);
+ this.readerThreadFetchTopology(connection, this.writerHostSpec);
} else if (this.monitor.nodeThreadsReaderConnection.get() == null) {
if (this.monitor.nodeThreadsReaderConnection.compareAndSet(null, connection)) {
- // let's use this connection to update topology
+ // Use this connection to update the topology.
updateTopology = true;
- this.readerThreadFetchTopology(connection, writerHostSpec);
+ this.readerThreadFetchTopology(connection, this.writerHostSpec);
}
}
}
@@ -989,7 +810,8 @@ public void run() {
} finally {
this.monitor.closeConnection(connection);
final long end = System.nanoTime();
- LOGGER.finest(() -> Messages.get("NodeMonitoringThread.threadCompleted",
+ LOGGER.finest(() -> Messages.get(
+ "NodeMonitoringThread.threadCompleted",
new Object[] {TimeUnit.NANOSECONDS.toMillis(end - start)}));
}
}
@@ -1001,7 +823,8 @@ private void readerThreadFetchTopology(final Connection connection, final @Nulla
List hosts;
try {
- hosts = this.monitor.queryForTopology(connection);
+ hosts = this.monitor.topologyUtils.queryForTopology(
+ connection, this.monitor.initialHostSpec, this.monitor.instanceTemplate);
if (hosts == null) {
return;
}
@@ -1009,12 +832,12 @@ private void readerThreadFetchTopology(final Connection connection, final @Nulla
return;
}
- // share this topology so the main monitoring thread be able to adjust node monitoring threads
+ // Share this topology so that the main monitoring thread can adjust the node monitoring threads.
this.monitor.nodeThreadsLatestTopology.set(hosts);
if (this.writerChanged) {
this.monitor.updateTopologyCache(hosts);
- LOGGER.finest(Utils.logTopology(hosts));
+ LOGGER.finest(() -> LogUtils.logTopology(hosts));
return;
}
@@ -1025,16 +848,14 @@ private void readerThreadFetchTopology(final Connection connection, final @Nulla
if (latestWriterHostSpec != null
&& writerHostSpec != null
&& !latestWriterHostSpec.getHostAndPort().equals(writerHostSpec.getHostAndPort())) {
-
- // writer node has changed
this.writerChanged = true;
-
- LOGGER.fine(() -> Messages.get("NodeMonitoringThread.writerNodeChanged",
+ LOGGER.fine(() -> Messages.get(
+ "NodeMonitoringThread.writerNodeChanged",
new Object[] {writerHostSpec.getHost(), latestWriterHostSpec.getHost()}));
- // we can update topology cache and notify all waiting threads
+ // Update the topology cache and notify all waiting threads.
this.monitor.updateTopologyCache(hosts);
- LOGGER.fine(Utils.logTopology(hosts));
+ LOGGER.fine(() -> LogUtils.logTopology(hosts));
}
}
}
diff --git a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/GlobalAuroraTopologyMonitor.java b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/GlobalAuroraTopologyMonitor.java
new file mode 100644
index 000000000..c280035d3
--- /dev/null
+++ b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/GlobalAuroraTopologyMonitor.java
@@ -0,0 +1,78 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package software.amazon.jdbc.hostlistprovider.monitoring;
+
+import java.sql.Connection;
+import java.sql.SQLException;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import software.amazon.jdbc.HostSpec;
+import software.amazon.jdbc.hostlistprovider.GlobalAuroraTopologyUtils;
+import software.amazon.jdbc.util.FullServicesContainer;
+import software.amazon.jdbc.util.Messages;
+import software.amazon.jdbc.util.StringUtils;
+
+
+public class GlobalAuroraTopologyMonitor extends ClusterTopologyMonitorImpl {
+ protected final Map instanceTemplatesByRegion;
+ protected final GlobalAuroraTopologyUtils topologyUtils;
+
+ public GlobalAuroraTopologyMonitor(
+ final FullServicesContainer servicesContainer,
+ final GlobalAuroraTopologyUtils topologyUtils,
+ final String clusterId,
+ final HostSpec initialHostSpec,
+ final Properties properties,
+ final HostSpec instanceTemplate,
+ final long refreshRateNano,
+ final long highRefreshRateNano,
+ final Map instanceTemplatesByRegion) {
+ super(servicesContainer,
+ topologyUtils,
+ clusterId,
+ initialHostSpec,
+ properties,
+ instanceTemplate,
+ refreshRateNano,
+ highRefreshRateNano);
+
+ this.instanceTemplatesByRegion = instanceTemplatesByRegion;
+ this.topologyUtils = topologyUtils;
+ }
+
+ @Override
+ protected HostSpec getInstanceTemplate(String instanceId, Connection connection) throws SQLException {
+ String region = this.topologyUtils.getRegion(instanceId, connection);
+ if (!StringUtils.isNullOrEmpty(region)) {
+ final HostSpec instanceTemplate = this.instanceTemplatesByRegion.get(region);
+ if (instanceTemplate == null) {
+ throw new SQLException(
+ Messages.get("GlobalAuroraTopologyMonitor.cannotFindRegionTemplate", new Object[] {region}));
+ }
+
+ return instanceTemplate;
+ }
+
+ return this.instanceTemplate;
+ }
+
+ @Override
+ protected List queryForTopology(Connection connection) throws SQLException {
+ return this.topologyUtils.queryForTopology(connection, this.initialHostSpec, this.instanceTemplatesByRegion);
+ }
+}
diff --git a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/MonitoringGlobalAuroraHostListProvider.java b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/MonitoringGlobalAuroraHostListProvider.java
new file mode 100644
index 000000000..b258c223d
--- /dev/null
+++ b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/MonitoringGlobalAuroraHostListProvider.java
@@ -0,0 +1,91 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package software.amazon.jdbc.hostlistprovider.monitoring;
+
+import java.sql.Connection;
+import java.sql.SQLException;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.logging.Logger;
+import software.amazon.jdbc.HostSpec;
+import software.amazon.jdbc.PropertyDefinition;
+import software.amazon.jdbc.hostlistprovider.GlobalAuroraHostListProvider;
+import software.amazon.jdbc.hostlistprovider.GlobalAuroraTopologyUtils;
+import software.amazon.jdbc.util.FullServicesContainer;
+import software.amazon.jdbc.util.LogUtils;
+import software.amazon.jdbc.util.Messages;
+import software.amazon.jdbc.util.RdsUtils;
+import software.amazon.jdbc.util.StringUtils;
+
+public class MonitoringGlobalAuroraHostListProvider extends MonitoringRdsHostListProvider {
+
+ static final Logger LOGGER = Logger.getLogger(MonitoringGlobalAuroraHostListProvider.class.getName());
+
+ protected Map<String, HostSpec> instanceTemplatesByRegion = new HashMap<>();
+
+ protected final RdsUtils rdsUtils = new RdsUtils();
+ protected final GlobalAuroraTopologyUtils topologyUtils;
+
+ static {
+ // Intentionally register property definition using the GlobalAuroraHostListProvider class.
+ PropertyDefinition.registerPluginProperties(GlobalAuroraHostListProvider.class);
+ }
+
+ public MonitoringGlobalAuroraHostListProvider(
+ GlobalAuroraTopologyUtils topologyUtils,
+ Properties properties,
+ String originalUrl,
+ FullServicesContainer servicesContainer) {
+ super(topologyUtils, properties, originalUrl, servicesContainer);
+ this.topologyUtils = topologyUtils;
+ }
+
+ @Override
+ protected void initSettings() throws SQLException {
+ super.initSettings();
+
+ String instanceTemplates = GlobalAuroraHostListProvider.GLOBAL_CLUSTER_INSTANCE_HOST_PATTERNS.getString(properties);
+ this.instanceTemplatesByRegion =
+ this.topologyUtils.parseInstanceTemplates(instanceTemplates, this::validateHostPatternSetting);
+ }
+
+ protected ClusterTopologyMonitor initMonitor() throws SQLException {
+ return this.servicesContainer.getMonitorService().runIfAbsent(
+ ClusterTopologyMonitorImpl.class,
+ this.clusterId,
+ this.servicesContainer,
+ this.properties,
+ (servicesContainer) ->
+ new GlobalAuroraTopologyMonitor(
+ servicesContainer,
+ this.topologyUtils,
+ this.clusterId,
+ this.initialHostSpec,
+ this.properties,
+ this.instanceTemplate,
+ this.refreshRateNano,
+ this.highRefreshRateNano,
+ this.instanceTemplatesByRegion));
+ }
+
+ @Override
+ protected List<HostSpec> queryForTopology(Connection connection) throws SQLException {
+ return this.topologyUtils.queryForTopology(connection, this.initialHostSpec, this.instanceTemplatesByRegion);
+ }
+}
diff --git a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/MonitoringRdsHostListProvider.java b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/MonitoringRdsHostListProvider.java
index 3a6bc50e6..c3fa52442 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/MonitoringRdsHostListProvider.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/MonitoringRdsHostListProvider.java
@@ -22,7 +22,6 @@
import java.util.Properties;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
-import java.util.logging.Logger;
import software.amazon.jdbc.AwsWrapperProperty;
import software.amazon.jdbc.BlockingHostListProvider;
import software.amazon.jdbc.HostSpec;
@@ -30,15 +29,11 @@
import software.amazon.jdbc.PropertyDefinition;
import software.amazon.jdbc.cleanup.CanReleaseResources;
import software.amazon.jdbc.hostlistprovider.RdsHostListProvider;
-import software.amazon.jdbc.hostlistprovider.Topology;
+import software.amazon.jdbc.hostlistprovider.TopologyUtils;
import software.amazon.jdbc.util.FullServicesContainer;
-import software.amazon.jdbc.util.monitoring.MonitorService;
-import software.amazon.jdbc.util.storage.StorageService;
-public class MonitoringRdsHostListProvider extends RdsHostListProvider
- implements BlockingHostListProvider, CanReleaseResources {
-
- private static final Logger LOGGER = Logger.getLogger(MonitoringRdsHostListProvider.class.getName());
+public class MonitoringRdsHostListProvider
+ extends RdsHostListProvider implements BlockingHostListProvider, CanReleaseResources {
public static final AwsWrapperProperty CLUSTER_TOPOLOGY_HIGH_REFRESH_RATE_MS =
new AwsWrapperProperty(
@@ -53,33 +48,19 @@ public class MonitoringRdsHostListProvider extends RdsHostListProvider
protected final FullServicesContainer servicesContainer;
protected final PluginService pluginService;
protected final long highRefreshRateNano;
- protected final String writerTopologyQuery;
public MonitoringRdsHostListProvider(
+ final TopologyUtils topologyUtils,
final Properties properties,
final String originalUrl,
- final FullServicesContainer servicesContainer,
- final String topologyQuery,
- final String nodeIdQuery,
- final String isReaderQuery,
- final String writerTopologyQuery) {
- super(properties, originalUrl, servicesContainer, topologyQuery, nodeIdQuery, isReaderQuery);
+ final FullServicesContainer servicesContainer) {
+ super(topologyUtils, properties, originalUrl, servicesContainer);
this.servicesContainer = servicesContainer;
this.pluginService = servicesContainer.getPluginService();
- this.writerTopologyQuery = writerTopologyQuery;
this.highRefreshRateNano = TimeUnit.MILLISECONDS.toNanos(
CLUSTER_TOPOLOGY_HIGH_REFRESH_RATE_MS.getLong(this.properties));
}
- public static void clearCache() {
- clearAll();
- }
-
- @Override
- protected void init() throws SQLException {
- super.init();
- }
-
protected ClusterTopologyMonitor initMonitor() throws SQLException {
return this.servicesContainer.getMonitorService().runIfAbsent(
ClusterTopologyMonitorImpl.class,
@@ -88,15 +69,13 @@ protected ClusterTopologyMonitor initMonitor() throws SQLException {
this.properties,
(servicesContainer) -> new ClusterTopologyMonitorImpl(
this.servicesContainer,
+ this.topologyUtils,
this.clusterId,
this.initialHostSpec,
this.properties,
- this.clusterInstanceTemplate,
+ this.instanceTemplate,
this.refreshRateNano,
- this.highRefreshRateNano,
- this.topologyQuery,
- this.writerTopologyQuery,
- this.nodeIdQuery));
+ this.highRefreshRateNano));
}
@Override
@@ -110,31 +89,6 @@ protected List queryForTopology(final Connection conn) throws SQLExcep
}
}
- @Override
- protected void clusterIdChanged(final String oldClusterId) throws SQLException {
- MonitorService monitorService = this.servicesContainer.getMonitorService();
- final ClusterTopologyMonitorImpl existingMonitor =
- monitorService.get(ClusterTopologyMonitorImpl.class, oldClusterId);
- if (existingMonitor != null) {
- this.servicesContainer.getMonitorService().runIfAbsent(
- ClusterTopologyMonitorImpl.class,
- this.clusterId,
- this.servicesContainer,
- this.properties,
- (servicesContainer) -> existingMonitor);
- assert monitorService.get(ClusterTopologyMonitorImpl.class, this.clusterId) == existingMonitor;
- existingMonitor.setClusterId(this.clusterId);
- monitorService.remove(ClusterTopologyMonitorImpl.class, oldClusterId);
- }
-
- final StorageService storageService = this.servicesContainer.getStorageService();
- final Topology existingTopology = storageService.get(Topology.class, oldClusterId);
- final List<HostSpec> existingHosts = existingTopology == null ? null : existingTopology.getHosts();
- if (existingHosts != null) {
- storageService.set(this.clusterId, new Topology(existingHosts));
- }
- }
-
@Override
public List forceRefresh(final boolean shouldVerifyWriter, final long timeoutMs)
throws SQLException, TimeoutException {
@@ -150,6 +104,6 @@ public List forceRefresh(final boolean shouldVerifyWriter, final long
@Override
public void releaseResources() {
- // do nothing
+ // Do nothing.
}
}
diff --git a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/MonitoringRdsMultiAzHostListProvider.java b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/MonitoringRdsMultiAzHostListProvider.java
deleted file mode 100644
index c11da2be9..000000000
--- a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/MonitoringRdsMultiAzHostListProvider.java
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package software.amazon.jdbc.hostlistprovider.monitoring;
-
-import java.sql.SQLException;
-import java.util.Properties;
-import java.util.logging.Logger;
-import software.amazon.jdbc.util.FullServicesContainer;
-
-public class MonitoringRdsMultiAzHostListProvider extends MonitoringRdsHostListProvider {
-
- private static final Logger LOGGER = Logger.getLogger(MonitoringRdsMultiAzHostListProvider.class.getName());
-
- protected final String fetchWriterNodeQuery;
- protected final String fetchWriterNodeColumnName;
-
- public MonitoringRdsMultiAzHostListProvider(
- final Properties properties,
- final String originalUrl,
- final FullServicesContainer servicesContainer,
- final String topologyQuery,
- final String nodeIdQuery,
- final String isReaderQuery,
- final String fetchWriterNodeQuery,
- final String fetchWriterNodeColumnName) {
- super(
- properties,
- originalUrl,
- servicesContainer,
- topologyQuery,
- nodeIdQuery,
- isReaderQuery,
- "");
- this.fetchWriterNodeQuery = fetchWriterNodeQuery;
- this.fetchWriterNodeColumnName = fetchWriterNodeColumnName;
- }
-
- @Override
- protected ClusterTopologyMonitor initMonitor() throws SQLException {
- return this.servicesContainer.getMonitorService().runIfAbsent(MultiAzClusterTopologyMonitorImpl.class,
- this.clusterId,
- this.servicesContainer,
- this.properties,
- (servicesContainer) -> new MultiAzClusterTopologyMonitorImpl(
- servicesContainer,
- this.clusterId,
- this.initialHostSpec,
- this.properties,
- this.hostListProviderService,
- this.clusterInstanceTemplate,
- this.refreshRateNano,
- this.highRefreshRateNano,
- this.topologyQuery,
- this.writerTopologyQuery,
- this.nodeIdQuery,
- this.fetchWriterNodeQuery,
- this.fetchWriterNodeColumnName));
- }
-
-}
diff --git a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/MultiAzClusterTopologyMonitorImpl.java b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/MultiAzClusterTopologyMonitorImpl.java
deleted file mode 100644
index 36bab8f90..000000000
--- a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/MultiAzClusterTopologyMonitorImpl.java
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package software.amazon.jdbc.hostlistprovider.monitoring;
-
-import java.sql.Connection;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.sql.Timestamp;
-import java.time.Instant;
-import java.util.Properties;
-import java.util.logging.Logger;
-import software.amazon.jdbc.HostListProviderService;
-import software.amazon.jdbc.HostSpec;
-import software.amazon.jdbc.util.FullServicesContainer;
-import software.amazon.jdbc.util.StringUtils;
-
-public class MultiAzClusterTopologyMonitorImpl extends ClusterTopologyMonitorImpl {
-
- private static final Logger LOGGER = Logger.getLogger(MultiAzClusterTopologyMonitorImpl.class.getName());
-
- protected final String fetchWriterNodeQuery;
- protected final String fetchWriterNodeColumnName;
-
- public MultiAzClusterTopologyMonitorImpl(
- final FullServicesContainer servicesContainer,
- final String clusterId,
- final HostSpec initialHostSpec,
- final Properties properties,
- final HostListProviderService hostListProviderService,
- final HostSpec clusterInstanceTemplate,
- final long refreshRateNano,
- final long highRefreshRateNano,
- final String topologyQuery,
- final String writerTopologyQuery,
- final String nodeIdQuery,
- final String fetchWriterNodeQuery,
- final String fetchWriterNodeColumnName) {
- super(
- servicesContainer,
- clusterId,
- initialHostSpec,
- properties,
- clusterInstanceTemplate,
- refreshRateNano,
- highRefreshRateNano,
- topologyQuery,
- writerTopologyQuery,
- nodeIdQuery);
- this.fetchWriterNodeQuery = fetchWriterNodeQuery;
- this.fetchWriterNodeColumnName = fetchWriterNodeColumnName;
- }
-
- // Returns a writer node ID if connected to a writer node. Returns null otherwise.
- @Override
- protected String getWriterNodeId(final Connection connection) throws SQLException {
- try (final Statement stmt = connection.createStatement()) {
- try (final ResultSet resultSet = stmt.executeQuery(this.fetchWriterNodeQuery)) {
- if (resultSet.next()) {
- String nodeId = resultSet.getString(this.fetchWriterNodeColumnName);
- if (!StringUtils.isNullOrEmpty(nodeId)) {
- // Replica status exists and shows a writer node ID.
- // That means that this node (this connection) is a reader
- return null;
- }
- }
- }
- // Replica status doesn't exist. That means that this node is a writer.
- try (final ResultSet resultSet = stmt.executeQuery(this.nodeIdQuery)) {
- if (resultSet.next()) {
- return resultSet.getString(1);
- }
- }
- }
- return null;
- }
-
- @Override
- protected String getSuggestedWriterNodeId(final Connection connection) throws SQLException {
- try (final Statement stmt = connection.createStatement()) {
- try (final ResultSet resultSet = stmt.executeQuery(this.fetchWriterNodeQuery)) {
- if (resultSet.next()) {
- String nodeId = resultSet.getString(this.fetchWriterNodeColumnName);
- if (!StringUtils.isNullOrEmpty(nodeId)) {
- // Replica status exists and shows a writer node ID.
- // That means that this node (this connection) is a reader.
- // But we now what replication source is and that is a writer node.
- return nodeId;
- }
- }
- }
- // Replica status doesn't exist. That means that this node is a writer.
- try (final ResultSet resultSet = stmt.executeQuery(this.nodeIdQuery)) {
- if (resultSet.next()) {
- return resultSet.getString(1);
- }
- }
- }
- return null;
- }
-
- @Override
- protected HostSpec createHost(
- final ResultSet resultSet,
- final String suggestedWriterNodeId) throws SQLException {
-
- String endpoint = resultSet.getString("endpoint"); // "instance-name.XYZ.us-west-2.rds.amazonaws.com"
- String instanceName = endpoint.substring(0, endpoint.indexOf(".")); // "instance-name"
- String hostId = resultSet.getString("id"); // "1034958454"
- final boolean isWriter = hostId.equals(suggestedWriterNodeId);
-
- return createHost(instanceName, isWriter, 0, Timestamp.from(Instant.now()));
- }
-}
diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/AbstractConnectionPlugin.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/AbstractConnectionPlugin.java
index 035e4ecf9..9cfb8de24 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/plugin/AbstractConnectionPlugin.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/AbstractConnectionPlugin.java
@@ -24,12 +24,12 @@
import java.util.Properties;
import java.util.Set;
import software.amazon.jdbc.ConnectionPlugin;
-import software.amazon.jdbc.HostListProviderService;
import software.amazon.jdbc.HostRole;
import software.amazon.jdbc.HostSpec;
import software.amazon.jdbc.JdbcCallable;
import software.amazon.jdbc.NodeChangeOptions;
import software.amazon.jdbc.OldConnectionSuggestedAction;
+import software.amazon.jdbc.hostlistprovider.HostListProviderService;
public abstract class AbstractConnectionPlugin implements ConnectionPlugin {
diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/AuroraInitialConnectionStrategyPlugin.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/AuroraInitialConnectionStrategyPlugin.java
index aef063a63..d467e8b22 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/plugin/AuroraInitialConnectionStrategyPlugin.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/AuroraInitialConnectionStrategyPlugin.java
@@ -19,24 +19,26 @@
import java.sql.Connection;
import java.sql.SQLException;
import java.util.Collections;
-import java.util.HashMap;
import java.util.HashSet;
-import java.util.Map;
+import java.util.List;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.logging.Logger;
+import java.util.stream.Collectors;
+import org.jetbrains.annotations.Nullable;
import software.amazon.jdbc.AwsWrapperProperty;
-import software.amazon.jdbc.HostListProviderService;
import software.amazon.jdbc.HostRole;
import software.amazon.jdbc.HostSpec;
import software.amazon.jdbc.JdbcCallable;
import software.amazon.jdbc.PluginService;
import software.amazon.jdbc.PropertyDefinition;
import software.amazon.jdbc.hostavailability.HostAvailability;
+import software.amazon.jdbc.hostlistprovider.HostListProviderService;
import software.amazon.jdbc.util.Messages;
import software.amazon.jdbc.util.RdsUrlType;
import software.amazon.jdbc.util.RdsUtils;
+import software.amazon.jdbc.util.StringUtils;
import software.amazon.jdbc.util.Utils;
import software.amazon.jdbc.util.WrapperUtils;
@@ -76,31 +78,11 @@ public class AuroraInitialConnectionStrategyPlugin extends AbstractConnectionPlu
null,
"Force to verify an opened connection to be either a writer or a reader.");
- private enum VerifyOpenedConnectionType {
- WRITER,
- READER;
-
- private static final Map<String, VerifyOpenedConnectionType> nameToValue =
- new HashMap<String, VerifyOpenedConnectionType>() {
- {
- put("writer", WRITER);
- put("reader", READER);
- }
- };
-
- public static VerifyOpenedConnectionType fromValue(String value) {
- if (value == null) {
- return null;
- }
- return nameToValue.get(value.toLowerCase());
- }
- }
-
private final PluginService pluginService;
private HostListProviderService hostListProviderService;
private final RdsUtils rdsUtils = new RdsUtils();
- private VerifyOpenedConnectionType verifyOpenedConnectionType = null;
+ private final HostRole verifyOpenedConnectionType;
static {
PropertyDefinition.registerPluginProperties(AuroraInitialConnectionStrategyPlugin.class);
@@ -109,7 +91,7 @@ public static VerifyOpenedConnectionType fromValue(String value) {
public AuroraInitialConnectionStrategyPlugin(final PluginService pluginService, final Properties properties) {
this.pluginService = pluginService;
this.verifyOpenedConnectionType =
- VerifyOpenedConnectionType.fromValue(VERIFY_OPENED_CONNECTION_TYPE.getString(properties));
+ HostRole.verifyConnectionTypeFromValue(VERIFY_OPENED_CONNECTION_TYPE.getString(properties));
}
@Override
@@ -126,9 +108,6 @@ public void initHostProvider(
final JdbcCallable<Void> initHostProviderFunc) throws SQLException {
this.hostListProviderService = hostListProviderService;
- if (hostListProviderService.isStaticHostListProvider()) {
- throw new SQLException(Messages.get("AuroraInitialConnectionStrategyPlugin.requireDynamicProvider"));
- }
initHostProviderFunc.call();
}
@@ -143,8 +122,14 @@ public Connection connect(
final RdsUrlType type = this.rdsUtils.identifyRdsType(hostSpec.getHost());
+ if (!type.isRdsCluster()) {
+ // It's not a cluster endpoint. Continue with a normal workflow.
+ return connectFunc.call();
+ }
+
if (type == RdsUrlType.RDS_WRITER_CLUSTER
- || isInitialConnection && this.verifyOpenedConnectionType == VerifyOpenedConnectionType.WRITER) {
+ || type == RdsUrlType.RDS_GLOBAL_WRITER_CLUSTER
+ || isInitialConnection && this.verifyOpenedConnectionType == HostRole.WRITER) {
Connection writerCandidateConn = this.getVerifiedWriterConnection(props, isInitialConnection, connectFunc);
if (writerCandidateConn == null) {
// Can't get writer connection. Continue with a normal workflow.
@@ -154,8 +139,9 @@ public Connection connect(
}
if (type == RdsUrlType.RDS_READER_CLUSTER
- || isInitialConnection && this.verifyOpenedConnectionType == VerifyOpenedConnectionType.READER) {
- Connection readerCandidateConn = this.getVerifiedReaderConnection(props, isInitialConnection, connectFunc);
+ || isInitialConnection && this.verifyOpenedConnectionType == HostRole.READER) {
+ Connection readerCandidateConn =
+ this.getVerifiedReaderConnection(type, hostSpec, props, isInitialConnection, connectFunc);
if (readerCandidateConn == null) {
// Can't get a reader connection. Continue with a normal workflow.
LOGGER.finest("Continue with normal workflow.");
@@ -190,7 +176,9 @@ private Connection getVerifiedWriterConnection(
try {
writerCandidate = Utils.getWriter(this.pluginService.getAllHosts());
- if (writerCandidate == null || this.rdsUtils.isRdsClusterDns(writerCandidate.getHost())) {
+ if (writerCandidate == null
+ || this.rdsUtils.isRdsClusterDns(writerCandidate.getHost())
+ || this.rdsUtils.isGlobalDbWriterClusterDns(writerCandidate.getHost())) {
// Writer is not found. It seems that topology is outdated.
writerCandidateConn = connectFunc.call();
@@ -247,6 +235,8 @@ private Connection getVerifiedWriterConnection(
}
private Connection getVerifiedReaderConnection(
+ final RdsUrlType rdsUrlType,
+ final HostSpec hostSpec,
final Properties props,
final boolean isInitialConnection,
final JdbcCallable<Connection> connectFunc)
@@ -259,6 +249,9 @@ private Connection getVerifiedReaderConnection(
Connection readerCandidateConn;
HostSpec readerCandidate;
+ final String awsRegion = rdsUrlType == RdsUrlType.RDS_READER_CLUSTER
+ ? this.rdsUtils.getRdsRegion(hostSpec.getHost())
+ : null;
while (this.getTime() < endTimeNano) {
@@ -266,7 +259,7 @@ private Connection getVerifiedReaderConnection(
readerCandidate = null;
try {
- readerCandidate = this.getReader(props);
+ readerCandidate = this.getReader(props, awsRegion);
if (readerCandidate == null || this.rdsUtils.isRdsClusterDns(readerCandidate.getHost())) {
@@ -364,14 +357,20 @@ private void delay(final long delayMs) {
}
}
- private HostSpec getReader(final Properties props) throws SQLException {
+ private HostSpec getReader(final Properties props, final @Nullable String awsRegion) throws SQLException {
final String strategy = READER_HOST_SELECTOR_STRATEGY.getString(props);
if (this.pluginService.acceptsStrategy(HostRole.READER, strategy)) {
try {
- return this.pluginService.getHostSpecByStrategy(HostRole.READER, strategy);
- } catch (UnsupportedOperationException ex) {
- throw ex;
+ if (!StringUtils.isNullOrEmpty(awsRegion)) {
+ final List<HostSpec> hostsInRegion = this.pluginService.getHosts()
+ .stream()
+ .filter(x -> awsRegion.equalsIgnoreCase(this.rdsUtils.getRdsRegion(x.getHost())))
+ .collect(Collectors.toList());
+ return this.pluginService.getHostSpecByStrategy(hostsInRegion, HostRole.READER, strategy);
+ } else {
+ return this.pluginService.getHostSpecByStrategy(HostRole.READER, strategy);
+ }
} catch (SQLException ex) {
// host isn't found
return null;
diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/DefaultConnectionPlugin.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/DefaultConnectionPlugin.java
index 66277275b..f6f0f391d 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/plugin/DefaultConnectionPlugin.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/DefaultConnectionPlugin.java
@@ -31,10 +31,10 @@
import java.util.stream.Collectors;
import org.checkerframework.checker.nullness.qual.NonNull;
import org.checkerframework.checker.nullness.qual.Nullable;
+import software.amazon.jdbc.ConnectionInfo;
import software.amazon.jdbc.ConnectionPlugin;
import software.amazon.jdbc.ConnectionProvider;
import software.amazon.jdbc.ConnectionProviderManager;
-import software.amazon.jdbc.HostListProviderService;
import software.amazon.jdbc.HostRole;
import software.amazon.jdbc.HostSpec;
import software.amazon.jdbc.JdbcCallable;
@@ -43,6 +43,7 @@
import software.amazon.jdbc.PluginManagerService;
import software.amazon.jdbc.PluginService;
import software.amazon.jdbc.hostavailability.HostAvailability;
+import software.amazon.jdbc.hostlistprovider.HostListProviderService;
import software.amazon.jdbc.util.Messages;
import software.amazon.jdbc.util.SqlMethodAnalyzer;
import software.amazon.jdbc.util.WrapperUtils;
@@ -121,6 +122,10 @@ public T execute(
TelemetryContext telemetryContext = telemetryFactory.openTelemetryContext(
this.pluginService.getTargetName(), TelemetryTraceLevel.NESTED);
+ // Check previous autocommit value before calling jdbcMethodFunc.
+ final boolean doesSwitchAutoCommitFalseTrue = sqlMethodAnalyzer.doesSwitchAutoCommitFalseTrue(
+ this.pluginService.getCurrentConnection(), methodName, jdbcMethodArgs);
+
T result;
try {
result = jdbcMethodFunc.call();
@@ -143,8 +148,7 @@ public T execute(
} else if (
sqlMethodAnalyzer.doesCloseTransaction(currentConn, methodName, jdbcMethodArgs)
// According to the JDBC spec, transactions are committed if autocommit is switched from false to true.
- || sqlMethodAnalyzer.doesSwitchAutoCommitFalseTrue(currentConn, methodName,
- jdbcMethodArgs)) {
+ || doesSwitchAutoCommitFalseTrue) {
this.pluginManagerService.setInTransaction(false);
}
@@ -190,9 +194,9 @@ private Connection connectInternal(
TelemetryContext telemetryContext = telemetryFactory.openTelemetryContext(
connProvider.getTargetName(), TelemetryTraceLevel.NESTED);
- Connection conn;
+ ConnectionInfo connectionInfo;
try {
- conn = connProvider.connect(
+ connectionInfo = connProvider.connect(
driverProtocol,
this.pluginService.getDialect(),
this.pluginService.getTargetDriverDialect(),
@@ -204,14 +208,17 @@ private Connection connectInternal(
}
}
- this.connProviderManager.initConnection(conn, driverProtocol, hostSpec, props);
+ this.pluginManagerService.setIsPooledConnection(connectionInfo.isPooled());
+ this.connProviderManager.initConnection(connectionInfo.getConnection(), driverProtocol, hostSpec, props);
- this.pluginService.setAvailability(hostSpec.asAliases(), HostAvailability.AVAILABLE);
+ if (connectionInfo.getConnection() != null) {
+ this.pluginService.setAvailability(hostSpec.asAliases(), HostAvailability.AVAILABLE);
+ }
if (isInitialConnection) {
- this.pluginService.updateDialect(conn);
+ this.pluginService.updateDialect(connectionInfo.getConnection());
}
- return conn;
+ return connectionInfo.getConnection();
}
@Override
diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/BlueGreenInterimStatus.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/BlueGreenInterimStatus.java
index 63a2cef2a..4a49d57a6 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/BlueGreenInterimStatus.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/BlueGreenInterimStatus.java
@@ -23,8 +23,8 @@
import java.util.Set;
import java.util.stream.Collectors;
import software.amazon.jdbc.HostSpec;
+import software.amazon.jdbc.util.LogUtils;
import software.amazon.jdbc.util.StringUtils;
-import software.amazon.jdbc.util.Utils;
public class BlueGreenInterimStatus {
public BlueGreenPhase blueGreenPhase;
@@ -74,8 +74,8 @@ public String toString() {
.map(x -> String.format("%s -> %s", x.getKey(), x.getValue()))
.collect(Collectors.joining("\n "));
String allHostNamesStr = String.join("\n ", this.hostNames);
- String startTopologyStr = Utils.logTopology(this.startTopology);
- String currentTopologyStr = Utils.logTopology(this.currentTopology);
+ String startTopologyStr = LogUtils.logTopology(this.startTopology);
+ String currentTopologyStr = LogUtils.logTopology(this.currentTopology);
return String.format("%s [\n"
+ " phase %s, \n"
+ " version '%s', \n"
diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/BlueGreenStatusMonitor.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/BlueGreenStatusMonitor.java
index 33fb2fd4d..19cd1ef6d 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/BlueGreenStatusMonitor.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/BlueGreenStatusMonitor.java
@@ -43,12 +43,12 @@
import java.util.stream.Collectors;
import org.checkerframework.checker.nullness.qual.NonNull;
import org.checkerframework.checker.nullness.qual.Nullable;
-import software.amazon.jdbc.HostListProvider;
import software.amazon.jdbc.HostSpec;
import software.amazon.jdbc.HostSpecBuilder;
import software.amazon.jdbc.PluginService;
import software.amazon.jdbc.dialect.BlueGreenDialect;
import software.amazon.jdbc.hostavailability.SimpleHostAvailabilityStrategy;
+import software.amazon.jdbc.hostlistprovider.HostListProvider;
import software.amazon.jdbc.hostlistprovider.RdsHostListProvider;
import software.amazon.jdbc.plugin.iam.IamAuthConnectionPlugin;
import software.amazon.jdbc.util.ConnectionUrlParser;
@@ -548,16 +548,15 @@ protected void openConnection() {
this.openConnectionFuture = openConnectionExecutorService.submit(() -> {
- HostSpec connectionHostSpecCopy = this.connectionHostSpec.get();
- String connectedIpAddressCopy = this.connectedIpAddress.get();
-
- if (connectionHostSpecCopy == null) {
+ if (this.connectionHostSpec.get() == null) {
this.connectionHostSpec.set(this.initialHostSpec);
- connectionHostSpecCopy = this.initialHostSpec;
this.connectedIpAddress.set(null);
- connectedIpAddressCopy = null;
this.connectionHostSpecCorrect.set(false);
}
+
+ HostSpec connectionHostSpecCopy = this.connectionHostSpec.get();
+ String connectedIpAddressCopy = this.connectedIpAddress.get();
+
try {
if (this.useIpAddress.get() && connectedIpAddressCopy != null) {
@@ -628,7 +627,7 @@ protected void initHostListProvider() {
if (connectionHostSpecCopy != null) {
String hostListProviderUrl = String.format("%s%s/", protocol, connectionHostSpecCopy.getHostAndPort());
this.hostListProvider = this.pluginService.getDialect()
- .getHostListProvider()
+ .getHostListProviderSupplier()
.getProvider(
hostListProperties,
hostListProviderUrl,
diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/BlueGreenStatusProvider.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/BlueGreenStatusProvider.java
index 1cb3eb6ed..76405657c 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/BlueGreenStatusProvider.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/BlueGreenStatusProvider.java
@@ -967,11 +967,7 @@ protected void registerIamHost(String connectHost, String iamHost) {
boolean differentNodeNames = connectHost != null && !connectHost.equals(iamHost);
if (differentNodeNames) {
- boolean alreadyChangedName = this.iamHostSuccessfulConnects
- .computeIfAbsent(connectHost, (key) -> ConcurrentHashMap.newKeySet())
- .contains(iamHost);
-
- if (!alreadyChangedName) {
+ if (!isAlreadySuccessfullyConnected(connectHost, iamHost)) {
this.greenNodeChangeNameTimes.computeIfAbsent(connectHost, (key) -> Instant.now());
LOGGER.finest(() -> Messages.get("bgd.greenNodeChangedName", new Object[] {connectHost, iamHost}));
}
diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/customendpoint/CustomEndpointPlugin.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/customendpoint/CustomEndpointPlugin.java
index adfb408e9..49a9dbc9a 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/plugin/customendpoint/CustomEndpointPlugin.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/customendpoint/CustomEndpointPlugin.java
@@ -19,6 +19,7 @@
import java.sql.Connection;
import java.sql.SQLException;
import java.util.Collections;
+import java.util.EnumSet;
import java.util.HashSet;
import java.util.Properties;
import java.util.Set;
@@ -53,8 +54,8 @@ public class CustomEndpointPlugin extends AbstractConnectionPlugin {
private static final Logger LOGGER = Logger.getLogger(CustomEndpointPlugin.class.getName());
protected static final String TELEMETRY_WAIT_FOR_INFO_COUNTER = "customEndpoint.waitForInfo.counter";
protected static final RegionUtils regionUtils = new RegionUtils();
- protected static final Set monitorErrorResponses =
- new HashSet<>(Collections.singletonList(MonitorErrorResponse.RECREATE));
+ protected static final EnumSet monitorErrorResponses =
+ EnumSet.of(MonitorErrorResponse.RECREATE);
public static final AwsWrapperProperty CUSTOM_ENDPOINT_INFO_REFRESH_RATE_MS = new AwsWrapperProperty(
"customEndpointInfoRefreshRateMs", "30000",
diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/failover/ClusterAwareReaderFailoverHandler.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/failover/ClusterAwareReaderFailoverHandler.java
index 6b8661e02..1b5078d26 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/plugin/failover/ClusterAwareReaderFailoverHandler.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/failover/ClusterAwareReaderFailoverHandler.java
@@ -317,7 +317,7 @@ private ReaderFailoverResult getConnectionFromHostGroup(final List hos
}
}
- return new ReaderFailoverResult(null, null, false);
+ return FAILED_READER_FAILOVER_RESULT;
} finally {
executor.shutdownNow();
}
@@ -364,7 +364,7 @@ private ReaderFailoverResult getResultFromNextTaskBatch(
return result;
}
}
- return new ReaderFailoverResult(null, null, false);
+ return FAILED_READER_FAILOVER_RESULT;
}
private ReaderFailoverResult getNextResult(final CompletionService service)
diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/failover/ClusterAwareWriterFailoverHandler.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/failover/ClusterAwareWriterFailoverHandler.java
index 8504842c6..a6eafd1d3 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/plugin/failover/ClusterAwareWriterFailoverHandler.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/failover/ClusterAwareWriterFailoverHandler.java
@@ -37,6 +37,7 @@
import software.amazon.jdbc.hostavailability.HostAvailability;
import software.amazon.jdbc.util.ExecutorFactory;
import software.amazon.jdbc.util.FullServicesContainer;
+import software.amazon.jdbc.util.LogUtils;
import software.amazon.jdbc.util.Messages;
import software.amazon.jdbc.util.PropertyUtils;
import software.amazon.jdbc.util.ServiceUtility;
@@ -465,7 +466,7 @@ private boolean refreshTopologyAndConnectToNewWriter() throws InterruptedExcepti
if (allowOldWriter || !isSame(writerCandidate, this.originalWriterHost)) {
// new writer is available, and it's different from the previous writer
- LOGGER.finest(() -> Utils.logTopology(this.currentTopology, "[TaskB] Topology:"));
+ LOGGER.finest(() -> LogUtils.logTopology(this.currentTopology, "[TaskB] Topology:"));
if (connectToWriter(writerCandidate)) {
return true;
}
diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/failover/FailoverConnectionPlugin.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/failover/FailoverConnectionPlugin.java
index ef2f95550..7928912fe 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/plugin/failover/FailoverConnectionPlugin.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/failover/FailoverConnectionPlugin.java
@@ -33,7 +33,6 @@
import java.util.logging.Logger;
import org.checkerframework.checker.nullness.qual.Nullable;
import software.amazon.jdbc.AwsWrapperProperty;
-import software.amazon.jdbc.HostListProviderService;
import software.amazon.jdbc.HostRole;
import software.amazon.jdbc.HostSpec;
import software.amazon.jdbc.JdbcCallable;
@@ -43,10 +42,12 @@
import software.amazon.jdbc.PluginService;
import software.amazon.jdbc.PropertyDefinition;
import software.amazon.jdbc.hostavailability.HostAvailability;
+import software.amazon.jdbc.hostlistprovider.HostListProviderService;
import software.amazon.jdbc.plugin.AbstractConnectionPlugin;
import software.amazon.jdbc.plugin.staledns.AuroraStaleDnsHelper;
import software.amazon.jdbc.targetdriverdialect.TargetDriverDialect;
import software.amazon.jdbc.util.FullServicesContainer;
+import software.amazon.jdbc.util.LogUtils;
import software.amazon.jdbc.util.Messages;
import software.amazon.jdbc.util.RdsUrlType;
import software.amazon.jdbc.util.RdsUtils;
@@ -351,30 +352,34 @@ public void notifyNodeListChanged(final Map>
return;
}
- if (LOGGER.isLoggable(Level.FINEST)) {
- final StringBuilder sb = new StringBuilder("Changes:");
- for (final Map.Entry> change : changes.entrySet()) {
- if (sb.length() > 0) {
- sb.append("\n");
+ try {
+ if (LOGGER.isLoggable(Level.FINEST)) {
+ final StringBuilder sb = new StringBuilder("Changes:");
+ for (final Map.Entry> change : changes.entrySet()) {
+ if (sb.length() > 0) {
+ sb.append("\n");
+ }
+ sb.append(String.format("\tHost '%s': %s", change.getKey(), change.getValue()));
}
- sb.append(String.format("\tHost '%s': %s", change.getKey(), change.getValue()));
+ LOGGER.finest(sb.toString());
}
- LOGGER.finest(sb.toString());
- }
- final HostSpec currentHost = this.pluginService.getCurrentHostSpec();
- final String url = currentHost.getUrl();
- if (isNodeStillValid(url, changes)) {
- return;
- }
-
- for (final String alias : currentHost.getAliases()) {
- if (isNodeStillValid(alias + "/", changes)) {
+ final HostSpec currentHost = this.pluginService.getCurrentHostSpec();
+ final String url = currentHost.getUrl();
+ if (isNodeStillValid(url, changes)) {
return;
}
- }
- LOGGER.fine(() -> Messages.get("Failover.invalidNode", new Object[]{currentHost}));
+ for (final String alias : currentHost.getAliases()) {
+ if (isNodeStillValid(alias + "/", changes)) {
+ return;
+ }
+ }
+
+ LOGGER.fine(() -> Messages.get("Failover.invalidNode", new Object[]{currentHost}));
+ } finally {
+ this.staleDnsHelper.notifyNodeListChanged(changes);
+ }
}
private boolean isNodeStillValid(final String node, final Map> changes) {
@@ -388,6 +393,7 @@ private boolean isNodeStillValid(final String node, final Map(remainingReaders),
Messages.get("Failover.errorSelectingReaderHost", new Object[]{ex.getMessage()})));
break;
@@ -437,7 +443,7 @@ protected ReaderFailoverResult getReaderFailoverConnection(long failoverEndTimeN
if (readerCandidate == null) {
LOGGER.finest(
- Utils.logTopology(new ArrayList<>(remainingReaders), Messages.get("Failover.readerCandidateNull")));
+ LogUtils.logTopology(new ArrayList<>(remainingReaders), Messages.get("Failover.readerCandidateNull")));
break;
}
@@ -558,7 +564,7 @@ protected void failoverWriter() throws SQLException {
if (this.failoverWriterFailedCounter != null) {
this.failoverWriterFailedCounter.inc();
}
- String message = Utils.logTopology(updatedHosts, Messages.get("Failover.noWriterHost"));
+ String message = LogUtils.logTopology(updatedHosts, Messages.get("Failover.noWriterHost"));
LOGGER.severe(message);
throw new FailoverFailedSQLException(message);
}
@@ -568,7 +574,7 @@ protected void failoverWriter() throws SQLException {
if (this.failoverWriterFailedCounter != null) {
this.failoverWriterFailedCounter.inc();
}
- String topologyString = Utils.logTopology(allowedHosts, "");
+ String topologyString = LogUtils.logTopology(allowedHosts, "");
LOGGER.severe(Messages.get("Failover.newWriterNotAllowed",
new Object[] {writerCandidate.getUrl(), topologyString}));
throw new FailoverFailedSQLException(
@@ -696,7 +702,14 @@ protected boolean shouldExceptionTriggerConnectionSwitch(final Throwable t) {
return false;
}
- return this.pluginService.isNetworkException(t, this.pluginService.getTargetDriverDialect());
+ if (this.pluginService.isNetworkException(t, this.pluginService.getTargetDriverDialect())) {
+ return true;
+ }
+
+  // For STRICT_WRITER failover mode, when a connection exception indicates that the connection is in read-only mode,
+  // initiate a failover by returning true.
+ return this.failoverMode == FailoverMode.STRICT_WRITER
+ && this.pluginService.isReadOnlyConnectionException(t, this.pluginService.getTargetDriverDialect());
}
/**
@@ -792,4 +805,9 @@ public Connection connect(
return conn;
}
+
+ @Override
+ public void notifyNodeListChanged(final Map> changes) {
+ this.staleDnsHelper.notifyNodeListChanged(changes);
+ }
}
diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/iam/IamAuthConnectionPlugin.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/iam/IamAuthConnectionPlugin.java
index 5541ac917..f8a2be272 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/plugin/iam/IamAuthConnectionPlugin.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/iam/IamAuthConnectionPlugin.java
@@ -72,6 +72,10 @@ public class IamAuthConnectionPlugin extends AbstractConnectionPlugin {
"iamExpiration", String.valueOf(DEFAULT_TOKEN_EXPIRATION_SEC),
"IAM token cache expiration in seconds");
+ public static final AwsWrapperProperty IAM_TOKEN_PROPERTY_NAME = new AwsWrapperProperty(
+ "iamAccessTokenPropertyName", PropertyDefinition.PASSWORD.name,
+ "Overrides default IAM access token property name");
+
protected static final RegionUtils regionUtils = new RegionUtils();
protected final PluginService pluginService;
protected final RdsUtils rdsUtils = new RdsUtils();
@@ -121,6 +125,10 @@ private Connection connectInternal(String driverProtocol, HostSpec hostSpec, Pro
throw new SQLException(PropertyDefinition.USER.name + " is null or empty.");
}
+ if (StringUtils.isNullOrEmpty(IAM_TOKEN_PROPERTY_NAME.getString(props))) {
+ throw new SQLException(IAM_TOKEN_PROPERTY_NAME.name + " is null or empty.");
+ }
+
String host = IamAuthUtils.getIamHost(IAM_HOST.getString(props), hostSpec);
int port = IamAuthUtils.getIamPort(
@@ -149,7 +157,7 @@ private Connection connectInternal(String driverProtocol, HostSpec hostSpec, Pro
() -> Messages.get(
"AuthenticationToken.useCachedToken",
new Object[] {tokenInfo.getToken()}));
- PropertyDefinition.PASSWORD.set(props, tokenInfo.getToken());
+ props.setProperty(IAM_TOKEN_PROPERTY_NAME.getString(props), tokenInfo.getToken());
} else {
final Instant tokenExpiry = Instant.now().plus(tokenExpirationSec, ChronoUnit.SECONDS);
if (this.fetchTokenCounter != null) {
@@ -167,7 +175,8 @@ private Connection connectInternal(String driverProtocol, HostSpec hostSpec, Pro
() -> Messages.get(
"AuthenticationToken.generatedNewToken",
new Object[] {token}));
- PropertyDefinition.PASSWORD.set(props, token);
+
+ props.setProperty(IAM_TOKEN_PROPERTY_NAME.getString(props), token);
IamAuthCacheHolder.tokenCache.put(
cacheKey,
new TokenInfo(token, tokenExpiry));
@@ -206,7 +215,7 @@ private Connection connectInternal(String driverProtocol, HostSpec hostSpec, Pro
() -> Messages.get(
"AuthenticationToken.generatedNewToken",
new Object[] {token}));
- PropertyDefinition.PASSWORD.set(props, token);
+ props.setProperty(IAM_TOKEN_PROPERTY_NAME.getString(props), token);
IamAuthCacheHolder.tokenCache.put(
cacheKey,
new TokenInfo(token, tokenExpiry));
diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/limitless/LimitlessRouterMonitor.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/limitless/LimitlessRouterMonitor.java
index f4075a285..9264e1603 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/plugin/limitless/LimitlessRouterMonitor.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/limitless/LimitlessRouterMonitor.java
@@ -26,9 +26,9 @@
import org.checkerframework.checker.nullness.qual.NonNull;
import software.amazon.jdbc.HostSpec;
import software.amazon.jdbc.util.FullServicesContainer;
+import software.amazon.jdbc.util.LogUtils;
import software.amazon.jdbc.util.Messages;
import software.amazon.jdbc.util.PropertyUtils;
-import software.amazon.jdbc.util.Utils;
import software.amazon.jdbc.util.monitoring.AbstractMonitor;
import software.amazon.jdbc.util.storage.StorageService;
import software.amazon.jdbc.util.telemetry.TelemetryContext;
@@ -116,7 +116,7 @@ public void monitor() {
List newLimitlessRouters =
queryHelper.queryForLimitlessRouters(this.monitoringConn, this.hostSpec.getPort());
this.storageService.set(this.limitlessRouterCacheKey, new LimitlessRouters(newLimitlessRouters));
- LOGGER.finest(Utils.logTopology(newLimitlessRouters, "[limitlessRouterMonitor] Topology:"));
+ LOGGER.finest(LogUtils.logTopology(newLimitlessRouters, "[limitlessRouterMonitor] Topology:"));
TimeUnit.MILLISECONDS.sleep(this.intervalMs); // do not include this in the telemetry
} catch (final Exception ex) {
if (telemetryContext != null) {
diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/limitless/LimitlessRouterServiceImpl.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/limitless/LimitlessRouterServiceImpl.java
index ae4e7b026..5bfecd9dc 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/plugin/limitless/LimitlessRouterServiceImpl.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/limitless/LimitlessRouterServiceImpl.java
@@ -17,12 +17,10 @@
package software.amazon.jdbc.plugin.limitless;
import java.sql.SQLException;
-import java.util.Collections;
-import java.util.HashSet;
+import java.util.EnumSet;
import java.util.List;
import java.util.Map;
import java.util.Properties;
-import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantLock;
@@ -51,8 +49,8 @@ public class LimitlessRouterServiceImpl implements LimitlessRouterService {
"600000", // 10min
"Interval in milliseconds for an Limitless router monitor to be considered inactive and to be disposed.");
protected static final Map forceGetLimitlessRoutersLockMap = new ConcurrentHashMap<>();
- protected static final Set monitorErrorResponses =
- new HashSet<>(Collections.singletonList(MonitorErrorResponse.RECREATE));
+ protected static final EnumSet monitorErrorResponses =
+ EnumSet.of(MonitorErrorResponse.RECREATE);
protected final FullServicesContainer servicesContainer;
protected final PluginService pluginService;
protected final LimitlessQueryHelper queryHelper;
diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/readwritesplitting/ReadWriteSplittingPlugin.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/readwritesplitting/ReadWriteSplittingPlugin.java
index 2d25675e2..899f6d97a 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/plugin/readwritesplitting/ReadWriteSplittingPlugin.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/readwritesplitting/ReadWriteSplittingPlugin.java
@@ -24,11 +24,11 @@
import java.util.List;
import java.util.Properties;
import java.util.Set;
+import java.util.concurrent.TimeUnit;
import java.util.logging.Level;
import java.util.logging.Logger;
import org.checkerframework.checker.nullness.qual.NonNull;
import software.amazon.jdbc.AwsWrapperProperty;
-import software.amazon.jdbc.HostListProviderService;
import software.amazon.jdbc.HostRole;
import software.amazon.jdbc.HostSpec;
import software.amazon.jdbc.JdbcCallable;
@@ -38,8 +38,11 @@
import software.amazon.jdbc.PluginService;
import software.amazon.jdbc.PropertyDefinition;
import software.amazon.jdbc.cleanup.CanReleaseResources;
+import software.amazon.jdbc.hostlistprovider.HostListProviderService;
import software.amazon.jdbc.plugin.AbstractConnectionPlugin;
import software.amazon.jdbc.plugin.failover.FailoverSQLException;
+import software.amazon.jdbc.util.CacheItem;
+import software.amazon.jdbc.util.LogUtils;
import software.amazon.jdbc.util.Messages;
import software.amazon.jdbc.util.SqlState;
import software.amazon.jdbc.util.Utils;
@@ -81,10 +84,10 @@ public class ReadWriteSplittingPlugin extends AbstractConnectionPlugin
private volatile boolean inReadWriteSplit = false;
private HostListProviderService hostListProviderService;
private Connection writerConnection;
- private Connection readerConnection;
private HostSpec readerHostSpec;
private boolean isReaderConnFromInternalPool;
private boolean isWriterConnFromInternalPool;
+ private CacheItem readerConnection;
public static final AwsWrapperProperty READER_HOST_SELECTOR_STRATEGY =
new AwsWrapperProperty(
@@ -92,6 +95,13 @@ public class ReadWriteSplittingPlugin extends AbstractConnectionPlugin
"random",
"The strategy that should be used to select a new reader host.");
+ public static final AwsWrapperProperty CACHED_READER_KEEP_ALIVE_TIMEOUT =
+ new AwsWrapperProperty(
+ "cachedReaderKeepAliveTimeoutMs",
+ "0",
+ "The time in milliseconds to keep a reader connection alive in the cache. "
+ + "Default value 0 means the Wrapper will keep reusing the same cached reader connection.");
+
static {
PropertyDefinition.registerPluginProperties(ReadWriteSplittingPlugin.class);
}
@@ -114,7 +124,7 @@ public class ReadWriteSplittingPlugin extends AbstractConnectionPlugin
this(pluginService, properties);
this.hostListProviderService = hostListProviderService;
this.writerConnection = writerConnection;
- this.readerConnection = readerConnection;
+ this.readerConnection = new CacheItem<>(readerConnection, CACHED_READER_KEEP_ALIVE_TIMEOUT.getLong(properties));
}
@Override
@@ -147,7 +157,7 @@ public Connection connect(
if (!pluginService.acceptsStrategy(hostSpec.getRole(), this.readerSelectorStrategy)) {
throw new UnsupportedOperationException(
Messages.get("ReadWriteSplittingPlugin.unsupportedHostSpecSelectorStrategy",
- new Object[] { this.readerSelectorStrategy }));
+ new Object[] {this.readerSelectorStrategy}));
}
final Connection currentConnection = connectFunc.call();
@@ -209,8 +219,8 @@ public T execute(
if (this.writerConnection != null && !this.writerConnection.isClosed()) {
this.writerConnection.clearWarnings();
}
- if (this.readerConnection != null && !this.readerConnection.isClosed()) {
- this.readerConnection.clearWarnings();
+ if (this.readerConnection != null && isConnectionUsable(this.readerConnection.get())) {
+ this.readerConnection.get().clearWarnings();
}
} catch (final SQLException e) {
throw WrapperUtils.wrapExceptionIfNeeded(exceptionClass, e);
@@ -266,9 +276,9 @@ private boolean isReader(final @NonNull HostSpec hostSpec) {
return HostRole.READER.equals(hostSpec.getRole());
}
- private void getNewWriterConnection(final HostSpec writerHostSpec) throws SQLException {
+ private void initializeWriterConnection(final HostSpec writerHostSpec) throws SQLException {
final Connection conn = this.pluginService.connect(writerHostSpec, this.properties, this);
- this.isWriterConnFromInternalPool = this.pluginService.isPooledConnectionProvider(writerHostSpec, this.properties);
+ this.isWriterConnFromInternalPool = Boolean.TRUE.equals(this.pluginService.isPooledConnection());
setWriterConnection(conn, writerHostSpec);
switchCurrentConnectionTo(this.writerConnection, writerHostSpec);
}
@@ -280,17 +290,18 @@ private void setWriterConnection(final Connection writerConnection,
() -> Messages.get(
"ReadWriteSplittingPlugin.setWriterConnection",
new Object[] {
- writerHostSpec.getUrl()}));
+ writerHostSpec.getHostAndPort()}));
}
private void setReaderConnection(final Connection conn, final HostSpec host) {
- this.readerConnection = conn;
+ closeReaderConnectionIfIdle(this.readerConnection);
+ this.readerConnection = new CacheItem<>(conn, this.getKeepAliveTimeout(host));
this.readerHostSpec = host;
LOGGER.finest(
() -> Messages.get(
"ReadWriteSplittingPlugin.setReaderConnection",
new Object[] {
- host.getUrl()}));
+ host.getHostAndPort()}));
}
void switchConnectionIfRequired(final boolean readOnly) throws SQLException {
@@ -321,7 +332,7 @@ void switchConnectionIfRequired(final boolean readOnly) throws SQLException {
} catch (final SQLException e) {
if (!isConnectionUsable(currentConnection)) {
logAndThrowException(
- Messages.get("ReadWriteSplittingPlugin.errorSwitchingToReader", new Object[] { e.getMessage() }),
+ Messages.get("ReadWriteSplittingPlugin.errorSwitchingToReader", new Object[] {e.getMessage()}),
SqlState.CONNECTION_UNABLE_TO_CONNECT,
e);
return;
@@ -332,7 +343,7 @@ void switchConnectionIfRequired(final boolean readOnly) throws SQLException {
"ReadWriteSplittingPlugin.fallbackToWriter",
new Object[] {
e.getMessage(),
- this.pluginService.getCurrentHostSpec().getUrl()}));
+ this.pluginService.getCurrentHostSpec().getHostAndPort()}));
}
}
} else {
@@ -383,17 +394,17 @@ private void switchToWriterConnection(
final HostSpec writerHost = getWriter(hosts);
this.inReadWriteSplit = true;
if (!isConnectionUsable(this.writerConnection)) {
- getNewWriterConnection(writerHost);
+ initializeWriterConnection(writerHost);
} else {
switchCurrentConnectionTo(this.writerConnection, writerHost);
}
if (this.isReaderConnFromInternalPool) {
- this.closeConnectionIfIdle(this.readerConnection);
+ this.closeReaderConnectionIfIdle(this.readerConnection);
}
LOGGER.finer(() -> Messages.get("ReadWriteSplittingPlugin.switchedFromReaderToWriter",
- new Object[] {writerHost.getUrl()}));
+ new Object[] {writerHost.getHostAndPort()}));
}
private void switchCurrentConnectionTo(
@@ -409,7 +420,7 @@ private void switchCurrentConnectionTo(
LOGGER.finest(() -> Messages.get(
"ReadWriteSplittingPlugin.settingCurrentConnection",
new Object[] {
- newConnectionHost.getUrl()}));
+ newConnectionHost.getHostAndPort()}));
}
private void switchToReaderConnection(final List hosts)
@@ -425,37 +436,35 @@ private void switchToReaderConnection(final List hosts)
LOGGER.finest(
Messages.get(
"ReadWriteSplittingPlugin.previousReaderNotAllowed",
- new Object[] {this.readerHostSpec, Utils.logTopology(hosts, "")}));
- closeConnectionIfIdle(this.readerConnection);
+ new Object[] {this.readerHostSpec, LogUtils.logTopology(hosts, "")}));
+ closeReaderConnectionIfIdle(this.readerConnection);
}
this.inReadWriteSplit = true;
- if (!isConnectionUsable(this.readerConnection)) {
+ if (this.readerConnection == null || !isConnectionUsable(this.readerConnection.get())) {
initializeReaderConnection(hosts);
} else {
try {
- switchCurrentConnectionTo(this.readerConnection, this.readerHostSpec);
+ switchCurrentConnectionTo(this.readerConnection.get(), this.readerHostSpec);
LOGGER.finer(() -> Messages.get("ReadWriteSplittingPlugin.switchedFromWriterToReader",
- new Object[] {this.readerHostSpec.getUrl()}));
+ new Object[] {this.readerHostSpec.getHostAndPort()}));
} catch (SQLException e) {
if (e.getMessage() != null) {
LOGGER.warning(
() -> Messages.get("ReadWriteSplittingPlugin.errorSwitchingToCachedReaderWithCause",
- new Object[] {this.readerHostSpec.getUrl(), e.getMessage()}));
+ new Object[] {this.readerHostSpec.getHostAndPort(), e.getMessage()}));
} else {
LOGGER.warning(() -> Messages.get("ReadWriteSplittingPlugin.errorSwitchingToCachedReader",
- new Object[] {this.readerHostSpec.getUrl()}));
+ new Object[] {this.readerHostSpec.getHostAndPort()}));
}
- this.readerConnection.close();
- this.readerConnection = null;
- this.readerHostSpec = null;
+ closeReaderConnectionIfIdle(this.readerConnection);
initializeReaderConnection(hosts);
}
}
if (this.isWriterConnFromInternalPool) {
- this.closeConnectionIfIdle(this.writerConnection);
+ this.closeWriterConnectionIfIdle(this.writerConnection);
}
}
@@ -463,14 +472,14 @@ private void initializeReaderConnection(final @NonNull List hosts) thr
if (hosts.size() == 1) {
final HostSpec writerHost = getWriter(hosts);
if (!isConnectionUsable(this.writerConnection)) {
- getNewWriterConnection(writerHost);
+ initializeWriterConnection(writerHost);
}
LOGGER.warning(() -> Messages.get("ReadWriteSplittingPlugin.noReadersFound",
- new Object[] {writerHost.getUrl()}));
+ new Object[] {writerHost.getHostAndPort()}));
} else {
- getNewReaderConnection();
+ openNewReaderConnection();
LOGGER.finer(() -> Messages.get("ReadWriteSplittingPlugin.switchedFromWriterToReader",
- new Object[] {this.readerHostSpec.getUrl()}));
+ new Object[] {this.readerHostSpec.getHostAndPort()}));
}
}
@@ -483,7 +492,7 @@ private HostSpec getWriter(final @NonNull List hosts) throws SQLExcept
return writerHost;
}
- private void getNewReaderConnection() throws SQLException {
+ private void openNewReaderConnection() throws SQLException {
Connection conn = null;
HostSpec readerHost = null;
@@ -492,7 +501,7 @@ private void getNewReaderConnection() throws SQLException {
HostSpec hostSpec = this.pluginService.getHostSpecByStrategy(HostRole.READER, this.readerSelectorStrategy);
try {
conn = this.pluginService.connect(hostSpec, this.properties, this);
- this.isReaderConnFromInternalPool = this.pluginService.isPooledConnectionProvider(hostSpec, this.properties);
+ this.isReaderConnFromInternalPool = Boolean.TRUE.equals(this.pluginService.isPooledConnection());
readerHost = hostSpec;
break;
} catch (final SQLException e) {
@@ -501,7 +510,7 @@ private void getNewReaderConnection() throws SQLException {
Messages.get(
"ReadWriteSplittingPlugin.failedToConnectToReader",
new Object[]{
- hostSpec.getUrl()}),
+ hostSpec.getHostAndPort()}),
e);
}
}
@@ -516,15 +525,24 @@ private void getNewReaderConnection() throws SQLException {
final HostSpec finalReaderHost = readerHost;
LOGGER.finest(
() -> Messages.get("ReadWriteSplittingPlugin.successfullyConnectedToReader",
- new Object[] {finalReaderHost.getUrl()}));
+ new Object[] {finalReaderHost.getHostAndPort()}));
setReaderConnection(conn, readerHost);
- switchCurrentConnectionTo(this.readerConnection, this.readerHostSpec);
+ switchCurrentConnectionTo(this.readerConnection.get(), this.readerHostSpec);
}
private boolean isConnectionUsable(final Connection connection) throws SQLException {
return connection != null && !connection.isClosed();
}
+ private long getKeepAliveTimeout(final HostSpec host) {
+ if (this.pluginService.isPooledConnectionProvider(host, properties)) {
+ // Let the connection pool handle the lifetime of the reader connection.
+ return 0;
+ }
+ final long keepAliveMs = CACHED_READER_KEEP_ALIVE_TIMEOUT.getLong(properties);
+ return keepAliveMs > 0 ? System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(keepAliveMs) : 0;
+ }
+
@Override
public void releaseResources() {
closeIdleConnections();
@@ -532,25 +550,37 @@ public void releaseResources() {
private void closeIdleConnections() {
LOGGER.finest(() -> Messages.get("ReadWriteSplittingPlugin.closingInternalConnections"));
- closeConnectionIfIdle(this.readerConnection);
- closeConnectionIfIdle(this.writerConnection);
+ closeReaderConnectionIfIdle(this.readerConnection);
+ closeWriterConnectionIfIdle(this.writerConnection);
}
- void closeConnectionIfIdle(final Connection internalConnection) {
+ void closeReaderConnectionIfIdle(CacheItem readerConnection) {
+ if (readerConnection == null) {
+ return;
+ }
+
final Connection currentConnection = this.pluginService.getCurrentConnection();
+ final Connection readerConnectionCache = readerConnection.get(true);
+
try {
- if (internalConnection != null
- && internalConnection != currentConnection
- && !internalConnection.isClosed()) {
- internalConnection.close();
- if (internalConnection == writerConnection) {
- writerConnection = null;
- }
+ if (isConnectionUsable(readerConnectionCache) && readerConnectionCache != currentConnection) {
+ readerConnectionCache.close();
+ }
+ } catch (SQLException e) {
+ // Do nothing.
+ }
- if (internalConnection == readerConnection) {
- readerConnection = null;
- readerHostSpec = null;
- }
+ this.readerConnection = null;
+ this.readerHostSpec = null;
+ }
+
+ void closeWriterConnectionIfIdle(final Connection internalConnection) {
+ final Connection currentConnection = this.pluginService.getCurrentConnection();
+ try {
+ if (isConnectionUsable(internalConnection)
+ && internalConnection != currentConnection) {
+ internalConnection.close();
+ writerConnection = null;
}
} catch (final SQLException e) {
// ignore
@@ -565,6 +595,6 @@ Connection getWriterConnection() {
}
Connection getReaderConnection() {
- return this.readerConnection;
+ return this.readerConnection == null ? null : this.readerConnection.get();
}
}
diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/srw/SimpleReadWriteSplittingPlugin.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/srw/SimpleReadWriteSplittingPlugin.java
new file mode 100644
index 000000000..c129c17c4
--- /dev/null
+++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/srw/SimpleReadWriteSplittingPlugin.java
@@ -0,0 +1,709 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package software.amazon.jdbc.plugin.srw;
+
+import java.sql.Connection;
+import java.sql.SQLException;
+import java.util.Collections;
+import java.util.EnumSet;
+import java.util.HashSet;
+import java.util.Properties;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+import java.util.logging.Logger;
+import org.checkerframework.checker.nullness.qual.NonNull;
+import software.amazon.jdbc.AwsWrapperProperty;
+import software.amazon.jdbc.HostRole;
+import software.amazon.jdbc.HostSpec;
+import software.amazon.jdbc.HostSpecBuilder;
+import software.amazon.jdbc.JdbcCallable;
+import software.amazon.jdbc.JdbcMethod;
+import software.amazon.jdbc.NodeChangeOptions;
+import software.amazon.jdbc.OldConnectionSuggestedAction;
+import software.amazon.jdbc.PluginService;
+import software.amazon.jdbc.PropertyDefinition;
+import software.amazon.jdbc.cleanup.CanReleaseResources;
+import software.amazon.jdbc.hostavailability.HostAvailability;
+import software.amazon.jdbc.hostlistprovider.HostListProviderService;
+import software.amazon.jdbc.plugin.AbstractConnectionPlugin;
+import software.amazon.jdbc.plugin.failover.FailoverSQLException;
+import software.amazon.jdbc.plugin.readwritesplitting.ReadWriteSplittingSQLException;
+import software.amazon.jdbc.util.FullServicesContainer;
+import software.amazon.jdbc.util.Messages;
+import software.amazon.jdbc.util.RdsUrlType;
+import software.amazon.jdbc.util.RdsUtils;
+import software.amazon.jdbc.util.SqlState;
+import software.amazon.jdbc.util.StringUtils;
+import software.amazon.jdbc.util.WrapperUtils;
+
+public class SimpleReadWriteSplittingPlugin extends AbstractConnectionPlugin
+ implements CanReleaseResources {
+
+ private static final Logger LOGGER = Logger.getLogger(SimpleReadWriteSplittingPlugin.class.getName());
+ private static final Set<String> subscribedMethods =
+ Collections.unmodifiableSet(new HashSet<String>() {
+ {
+ add(JdbcMethod.CONNECT.methodName);
+ add(JdbcMethod.INITHOSTPROVIDER.methodName);
+ add(JdbcMethod.NOTIFYCONNECTIONCHANGED.methodName);
+ add(JdbcMethod.CONNECTION_SETREADONLY.methodName);
+ add(JdbcMethod.CONNECTION_CLEARWARNINGS.methodName);
+ add(JdbcMethod.STATEMENT_EXECUTE.methodName);
+ add(JdbcMethod.STATEMENT_EXECUTEQUERY.methodName);
+ add(JdbcMethod.STATEMENT_EXECUTEBATCH.methodName);
+ add(JdbcMethod.STATEMENT_EXECUTEUPDATE.methodName);
+ add(JdbcMethod.PREPAREDSTATEMENT_EXECUTE.methodName);
+ add(JdbcMethod.PREPAREDSTATEMENT_EXECUTEUPDATE.methodName);
+ add(JdbcMethod.PREPAREDSTATEMENT_EXECUTELARGEUPDATE.methodName);
+ add(JdbcMethod.PREPAREDSTATEMENT_EXECUTEQUERY.methodName);
+ add(JdbcMethod.PREPAREDSTATEMENT_EXECUTEBATCH.methodName);
+ add(JdbcMethod.CALLABLESTATEMENT_EXECUTE.methodName);
+ add(JdbcMethod.CALLABLESTATEMENT_EXECUTEQUERY.methodName);
+ add(JdbcMethod.CALLABLESTATEMENT_EXECUTELARGEUPDATE.methodName);
+ add(JdbcMethod.CALLABLESTATEMENT_EXECUTEBATCH.methodName);
+ add(JdbcMethod.CALLABLESTATEMENT_EXECUTEUPDATE.methodName);
+ add(JdbcMethod.CONNECTION_SETAUTOCOMMIT.methodName);
+ }
+ });
+
+ private final PluginService pluginService;
+ private final Properties properties;
+ private final RdsUtils rdsUtils = new RdsUtils();
+ private final boolean verifyNewConnections;
+ private volatile boolean inReadWriteSplit = false;
+ private HostListProviderService hostListProviderService;
+ private Connection writerConnection;
+ private Connection readerConnection;
+ private final String writeEndpoint;
+ private final String readEndpoint;
+ private HostSpec readEndpointHostSpec;
+ private HostSpec writeEndpointHostSpec;
+ private final HostRole verifyOpenedConnectionType;
+ private final int connectRetryIntervalMs;
+ private final long connectRetryTimeoutMs;
+ private boolean isReaderConnFromInternalPool;
+ private boolean isWriterConnFromInternalPool;
+
+ public static final AwsWrapperProperty SRW_READ_ENDPOINT =
+ new AwsWrapperProperty(
+ "srwReadEndpoint",
+ null,
+ "The read-only endpoint that should be used to connect to a reader.");
+
+ public static final AwsWrapperProperty SRW_WRITE_ENDPOINT =
+ new AwsWrapperProperty(
+ "srwWriteEndpoint",
+ null,
+ "The read-write/cluster endpoint that should be used to connect to the writer.");
+
+ public static final AwsWrapperProperty VERIFY_NEW_SRW_CONNECTIONS =
+ new AwsWrapperProperty(
+ "verifyNewSrwConnections",
+ "true",
+ "Enables role verification for new connections made by the Simple Read/Write Splitting Plugin.",
+ false,
+ new String[] {
+ "true", "false"
+ });
+
+ public static final AwsWrapperProperty SRW_CONNECT_RETRY_TIMEOUT_MS =
+ new AwsWrapperProperty(
+ "srwConnectRetryTimeoutMs",
+ "60000",
+ "Maximum allowed time for the retries opening a connection.");
+
+ public static final AwsWrapperProperty SRW_CONNECT_RETRY_INTERVAL_MS =
+ new AwsWrapperProperty(
+ "srwConnectRetryIntervalMs",
+ "1000",
+ "Time between each retry of opening a connection.");
+
+ public static final AwsWrapperProperty VERIFY_INITIAL_CONNECTION_TYPE =
+ new AwsWrapperProperty(
+ "verifyInitialConnectionType",
+ null,
+ "Force to verify the initial connection to be either a writer or a reader.");
+
+ static {
+ PropertyDefinition.registerPluginProperties(SimpleReadWriteSplittingPlugin.class);
+ }
+
+ SimpleReadWriteSplittingPlugin(final FullServicesContainer servicesContainer, final Properties properties) {
+ this(servicesContainer.getPluginService(), properties);
+ }
+
+ SimpleReadWriteSplittingPlugin(final PluginService pluginService, final Properties properties) {
+ this.writeEndpoint = SRW_WRITE_ENDPOINT.getString(properties);
+ if (StringUtils.isNullOrEmpty(writeEndpoint)) {
+ throw new
+ RuntimeException(
+ Messages.get(
+ "SimpleReadWriteSplittingPlugin.missingRequiredConfigParameter",
+ new Object[] {SRW_WRITE_ENDPOINT.name}));
+ }
+ this.readEndpoint = SRW_READ_ENDPOINT.getString(properties);
+ if (StringUtils.isNullOrEmpty(readEndpoint)) {
+ throw new
+ RuntimeException(
+ Messages.get(
+ "SimpleReadWriteSplittingPlugin.missingRequiredConfigParameter",
+ new Object[] {SRW_READ_ENDPOINT.name}));
+ }
+ this.pluginService = pluginService;
+ this.properties = properties;
+ this.verifyNewConnections = VERIFY_NEW_SRW_CONNECTIONS.getBoolean(properties);
+ this.verifyOpenedConnectionType =
+ HostRole.verifyConnectionTypeFromValue(
+ VERIFY_INITIAL_CONNECTION_TYPE.getString(properties));
+ this.connectRetryIntervalMs = SRW_CONNECT_RETRY_INTERVAL_MS.getInteger(properties);
+ this.connectRetryTimeoutMs = SRW_CONNECT_RETRY_TIMEOUT_MS.getInteger(properties);
+ }
+
+ /**
+ * For testing purposes only.
+ */
+ SimpleReadWriteSplittingPlugin(
+ final PluginService pluginService,
+ final Properties properties,
+ final HostListProviderService hostListProviderService,
+ final Connection writerConnection,
+ final Connection readerConnection,
+ final HostSpec writeEndpointHostSpec,
+ final HostSpec readEndpointHostSpec) {
+ this(pluginService, properties);
+ this.hostListProviderService = hostListProviderService;
+ this.writerConnection = writerConnection;
+ this.readerConnection = readerConnection;
+ this.writeEndpointHostSpec = writeEndpointHostSpec;
+ this.readEndpointHostSpec = readEndpointHostSpec;
+ }
+
+ @Override
+ public Set<String> getSubscribedMethods() {
+ return subscribedMethods;
+ }
+
+ @Override
+ public void initHostProvider(
+ final String driverProtocol,
+ final String initialUrl,
+ final Properties props,
+ final HostListProviderService hostListProviderService,
+ final JdbcCallable<Void> initHostProviderFunc)
+ throws SQLException {
+
+ this.hostListProviderService = hostListProviderService;
+ initHostProviderFunc.call();
+ }
+
+ @Override
+ public OldConnectionSuggestedAction notifyConnectionChanged(
+ final EnumSet<NodeChangeOptions> changes) {
+ try {
+ updateInternalConnectionInfo();
+ } catch (final SQLException e) {
+ // ignore
+ }
+
+ if (this.inReadWriteSplit) {
+ return OldConnectionSuggestedAction.PRESERVE;
+ }
+ return OldConnectionSuggestedAction.NO_OPINION;
+ }
+
+ @Override
+ public Connection connect(
+ final String driverProtocol,
+ final HostSpec hostSpec,
+ final Properties props,
+ final boolean isInitialConnection,
+ final JdbcCallable<Connection, SQLException> connectFunc)
+ throws SQLException {
+
+ if (!isInitialConnection || !this.verifyNewConnections) {
+ // No verification required. Continue with a normal workflow.
+ return connectFunc.call();
+ }
+
+ final RdsUrlType type = this.rdsUtils.identifyRdsType(hostSpec.getHost());
+
+ Connection conn = null;
+ if (type == RdsUrlType.RDS_WRITER_CLUSTER
+ || type == RdsUrlType.RDS_GLOBAL_WRITER_CLUSTER
+ || this.verifyOpenedConnectionType == HostRole.WRITER) {
+ conn = this.getVerifiedConnection(props, hostSpec, HostRole.WRITER, connectFunc);
+ } else if (type == RdsUrlType.RDS_READER_CLUSTER
+ || this.verifyOpenedConnectionType == HostRole.READER) {
+ conn = this.getVerifiedConnection(props, hostSpec, HostRole.READER, connectFunc);
+ }
+
+ if (conn == null) {
+ // Continue with a normal workflow.
+ conn = connectFunc.call();
+ }
+ this.setInitialConnectionHostSpec(conn, hostSpec);
+ return conn;
+ }
+
+ private Connection getVerifiedConnection(
+ final Properties props,
+ final HostSpec hostSpec,
+ final HostRole hostRole,
+ final JdbcCallable<Connection, SQLException> connectFunc)
+ throws SQLException {
+
+ final long endTimeNano = System.nanoTime()
+ + TimeUnit.MILLISECONDS.toNanos(this.connectRetryTimeoutMs);
+
+ Connection candidateConn;
+
+ while (System.nanoTime() < endTimeNano) {
+
+ candidateConn = null;
+
+ try {
+ if (connectFunc != null) {
+ candidateConn = connectFunc.call();
+ } else if (hostSpec != null) {
+ candidateConn = this.pluginService.connect(hostSpec, props, this);
+ } else {
+ // Unable to verify.
+ break;
+ }
+
+ if (candidateConn == null || this.pluginService.getHostRole(candidateConn) != hostRole) {
+ // The connection does not have the desired role. Retry.
+ this.closeConnection(candidateConn);
+ this.delay();
+ continue;
+ }
+
+ // Connection is valid and verified.
+ return candidateConn;
+ } catch (SQLException ex) {
+ this.closeConnection(candidateConn);
+ if (this.pluginService.isLoginException(ex, this.pluginService.getTargetDriverDialect())) {
+ throw WrapperUtils.wrapExceptionIfNeeded(SQLException.class, ex);
+ }
+ this.delay();
+ } catch (Throwable ex) {
+ this.closeConnection(candidateConn);
+ throw ex;
+ }
+ }
+
+ LOGGER.fine(
+ () -> Messages.get("SimpleReadWriteSplittingPlugin.verificationFailed",
+ new Object[] {hostRole, this.connectRetryTimeoutMs}));
+ return null;
+ }
+
+ private void setInitialConnectionHostSpec(Connection conn, HostSpec hostSpec) {
+ if (hostSpec == null) {
+ try {
+ hostSpec = this.pluginService.identifyConnection(conn);
+ } catch (Exception e) {
+ // Ignore error
+ }
+ }
+
+ if (hostSpec != null && hostListProviderService != null) {
+ hostListProviderService.setInitialConnectionHostSpec(hostSpec);
+ }
+ }
+
+ @Override
+ public <T, E extends Exception> T execute(
+ final Class<T> resultClass,
+ final Class<E> exceptionClass,
+ final Object methodInvokeOn,
+ final String methodName,
+ final JdbcCallable<T, E> jdbcMethodFunc,
+ final Object[] args)
+ throws E {
+ final Connection conn = WrapperUtils.getConnectionFromSqlObject(methodInvokeOn);
+ if (conn != null && conn != this.pluginService.getCurrentConnection()) {
+ LOGGER.fine(
+ () -> Messages.get("ReadWriteSplittingPlugin.executingAgainstOldConnection",
+ new Object[] {methodInvokeOn}));
+ return jdbcMethodFunc.call();
+ }
+
+ if (JdbcMethod.CONNECTION_CLEARWARNINGS.methodName.equals(methodName)) {
+ try {
+ if (this.writerConnection != null && !this.writerConnection.isClosed()) {
+ this.writerConnection.clearWarnings();
+ }
+ if (this.readerConnection != null && !this.readerConnection.isClosed()) {
+ this.readerConnection.clearWarnings();
+ }
+ } catch (final SQLException e) {
+ throw WrapperUtils.wrapExceptionIfNeeded(exceptionClass, e);
+ }
+ }
+
+ if (JdbcMethod.CONNECTION_SETREADONLY.methodName.equals(methodName)
+ && args != null
+ && args.length > 0) {
+ try {
+ switchConnectionIfRequired((Boolean) args[0]);
+ } catch (final SQLException e) {
+ throw WrapperUtils.wrapExceptionIfNeeded(exceptionClass, e);
+ }
+ }
+
+ try {
+ return jdbcMethodFunc.call();
+ } catch (final Exception e) {
+ if (e instanceof FailoverSQLException) {
+ LOGGER.finer(
+ () -> Messages.get("ReadWriteSplittingPlugin.failoverExceptionWhileExecutingCommand",
+ new Object[] {methodName}));
+ closeIdleConnections();
+ } else {
+ LOGGER.finest(
+ () -> Messages.get("ReadWriteSplittingPlugin.exceptionWhileExecutingCommand",
+ new Object[] {methodName}));
+ }
+ throw e;
+ }
+ }
+
+ private void updateInternalConnectionInfo() throws SQLException {
+ final Connection currentConnection = this.pluginService.getCurrentConnection();
+ final HostSpec currentHost = this.pluginService.getCurrentHostSpec();
+ if (currentConnection == null || currentHost == null) {
+ return;
+ }
+
+ // Only update internal connection info if connection is to the endpoint and different from internal connection.
+ if (isWriteEndpoint(currentHost) && !currentConnection.equals(this.writerConnection)
+ && (!this.verifyNewConnections || this.pluginService.getHostRole(currentConnection) == HostRole.WRITER)) {
+ setWriterConnection(currentConnection, currentHost);
+ } else if (isReadEndpoint(currentHost) && !currentConnection.equals(this.readerConnection)
+ && (!this.verifyNewConnections || this.pluginService.getHostRole(currentConnection) == HostRole.READER)) {
+ setReaderConnection(currentConnection, currentHost);
+ }
+ }
+
+ private boolean isWriteEndpoint(final @NonNull HostSpec hostSpec) {
+ return this.writeEndpoint.equalsIgnoreCase(hostSpec.getHost())
+ || this.writeEndpoint.equalsIgnoreCase(hostSpec.getHostAndPort());
+ }
+
+ private boolean isReadEndpoint(final @NonNull HostSpec hostSpec) {
+ return this.readEndpoint.equalsIgnoreCase(hostSpec.getHost())
+ || this.readEndpoint.equalsIgnoreCase(hostSpec.getHostAndPort());
+ }
+
+ private void setWriterConnection(final Connection conn, final HostSpec host) {
+ this.writerConnection = conn;
+ this.writeEndpointHostSpec = host;
+ LOGGER.finest(
+ () -> Messages.get(
+ "ReadWriteSplittingPlugin.setWriterConnection",
+ new Object[] {
+ host.getHostAndPort()}));
+ }
+
+ private void setReaderConnection(final Connection conn, final HostSpec host) {
+ this.readerConnection = conn;
+ this.readEndpointHostSpec = host;
+ LOGGER.finest(
+ () -> Messages.get(
+ "ReadWriteSplittingPlugin.setReaderConnection",
+ new Object[] {
+ host.getHostAndPort()}));
+ }
+
+ void switchConnectionIfRequired(final boolean readOnly) throws SQLException {
+ final Connection currentConnection = this.pluginService.getCurrentConnection();
+ if (currentConnection != null && currentConnection.isClosed()) {
+ logAndThrowException(Messages.get("ReadWriteSplittingPlugin.setReadOnlyOnClosedConnection"),
+ SqlState.CONNECTION_NOT_OPEN);
+ }
+
+ final HostSpec currentHost = this.pluginService.getCurrentHostSpec();
+ if (readOnly) {
+ if (!pluginService.isInTransaction() && !isReadEndpoint(currentHost)) {
+ try {
+ switchToReaderConnection();
+ } catch (final SQLException e) {
+ if (!isConnectionUsable(currentConnection)) {
+ logAndThrowException(
+ Messages.get("ReadWriteSplittingPlugin.errorSwitchingToReader", new Object[]{e.getMessage()}),
+ e);
+ }
+ // Failed to switch to the reader endpoint. The current connection will be used as a fallback.
+ LOGGER.fine(() -> Messages.get(
+ "SimpleReadWriteSplittingPlugin.fallbackToCurrentConnection",
+ new Object[] {
+ this.pluginService.getCurrentHostSpec().getHostAndPort(),
+ e.getMessage()}));
+ }
+ }
+ } else {
+ if (!isWriteEndpoint(currentHost) && pluginService.isInTransaction()) {
+ logAndThrowException(
+ Messages.get("ReadWriteSplittingPlugin.setReadOnlyFalseInTransaction"),
+ SqlState.ACTIVE_SQL_TRANSACTION);
+ }
+
+ if (!isWriteEndpoint(currentHost)) {
+ try {
+ switchToWriterConnection();
+ LOGGER.finer(() -> Messages.get("ReadWriteSplittingPlugin.switchedFromReaderToWriter",
+ new Object[] {writeEndpointHostSpec.getHostAndPort()}));
+ } catch (final SQLException e) {
+ logAndThrowException(Messages.get("ReadWriteSplittingPlugin.errorSwitchingToWriter"),
+ e);
+ }
+ }
+ }
+ }
+
+ private void logAndThrowException(final String logMessage) throws SQLException {
+ LOGGER.severe(logMessage);
+ throw new ReadWriteSplittingSQLException(logMessage);
+ }
+
+ private void logAndThrowException(final String logMessage, final SqlState sqlState)
+ throws SQLException {
+ LOGGER.severe(logMessage);
+ throw new ReadWriteSplittingSQLException(logMessage, sqlState.getState());
+ }
+
+ private void logAndThrowException(
+ final String logMessage, final Throwable cause)
+ throws SQLException {
+ LOGGER.fine(logMessage);
+ throw new ReadWriteSplittingSQLException(logMessage, SqlState.CONNECTION_UNABLE_TO_CONNECT.getState(), cause);
+ }
+
+ private void switchToReaderConnection() throws SQLException {
+ final Connection currentConnection = this.pluginService.getCurrentConnection();
+ final HostSpec currentHost = this.pluginService.getCurrentHostSpec();
+ if (isReadEndpoint(currentHost) && isConnectionUsable(currentConnection)) {
+ // Already connected to the read-only endpoint.
+ return;
+ }
+
+ this.inReadWriteSplit = true;
+ if (!isConnectionUsable(this.readerConnection)) {
+ initializeReaderConnection();
+ } else {
+ try {
+ switchCurrentConnectionTo(this.readerConnection, this.readEndpointHostSpec);
+ LOGGER.finer(() -> Messages.get("ReadWriteSplittingPlugin.switchedFromWriterToReader",
+ new Object[] {this.readEndpointHostSpec.getHostAndPort()}));
+ } catch (SQLException e) {
+ if (e.getMessage() != null) {
+ LOGGER.warning(
+ () -> Messages.get("ReadWriteSplittingPlugin.errorSwitchingToCachedReaderWithCause",
+ new Object[] {this.readEndpointHostSpec.getHostAndPort(), e.getMessage()}));
+ } else {
+ LOGGER.warning(() -> Messages.get("ReadWriteSplittingPlugin.errorSwitchingToCachedReader",
+ new Object[] {this.readEndpointHostSpec.getHostAndPort()}));
+ }
+
+ this.readerConnection.close();
+ this.readerConnection = null;
+ initializeReaderConnection();
+ }
+ }
+
+ if (this.isWriterConnFromInternalPool) {
+ this.closeConnectionIfIdle(this.writerConnection);
+ }
+ }
+
+ private void switchToWriterConnection() throws SQLException {
+ final Connection currentConnection = this.pluginService.getCurrentConnection();
+ final HostSpec currentHost = this.pluginService.getCurrentHostSpec();
+ if (isWriteEndpoint(currentHost) && isConnectionUsable(currentConnection)) {
+ // Already connected to the cluster/read-write endpoint.
+ return;
+ }
+
+ this.inReadWriteSplit = true;
+ if (!isConnectionUsable(this.writerConnection)) {
+ initializeWriterConnection();
+ } else {
+ switchCurrentConnectionTo(this.writerConnection, this.writeEndpointHostSpec);
+ }
+
+ if (this.isReaderConnFromInternalPool) {
+ this.closeConnectionIfIdle(this.readerConnection);
+ }
+ }
+
+ private void initializeWriterConnection() throws SQLException {
+ if (this.writeEndpointHostSpec == null) {
+ this.writeEndpointHostSpec = createHostSpec(this.writeEndpoint, HostRole.WRITER);
+ }
+ final Connection conn;
+ if (this.verifyNewConnections) {
+ conn = this.getVerifiedConnection(
+ this.properties, this.writeEndpointHostSpec, HostRole.WRITER, null);
+ } else {
+ conn = this.pluginService.connect(this.writeEndpointHostSpec, this.properties, this);
+ }
+
+ if (conn == null) {
+ logAndThrowException(
+ Messages.get("SimpleReadWriteSplittingPlugin.failedToConnectToWriter",
+ new Object[]{this.writeEndpoint}));
+ }
+
+ setWriterConnection(conn, writeEndpointHostSpec);
+ switchCurrentConnectionTo(this.writerConnection, writeEndpointHostSpec);
+ this.isWriterConnFromInternalPool = Boolean.TRUE.equals(this.pluginService.isPooledConnection());
+ }
+
+ private void initializeReaderConnection() throws SQLException {
+ if (this.readEndpointHostSpec == null) {
+ this.readEndpointHostSpec = createHostSpec(this.readEndpoint, HostRole.READER);
+ }
+ final Connection conn;
+
+ if (this.verifyNewConnections) {
+ conn = this.getVerifiedConnection(
+ this.properties, this.readEndpointHostSpec, HostRole.READER, null);
+ } else {
+ conn = this.pluginService.connect(this.readEndpointHostSpec, this.properties, this);
+ }
+
+ if (conn == null) {
+ logAndThrowException(Messages.get("ReadWriteSplittingPlugin.failedToConnectToReader",
+ new Object[]{this.readEndpoint}),
+ SqlState.CONNECTION_UNABLE_TO_CONNECT);
+ }
+
+ LOGGER.finest(
+ () -> Messages.get("ReadWriteSplittingPlugin.successfullyConnectedToReader",
+ new Object[]{readEndpointHostSpec.getHostAndPort()}));
+
+ // Store reader connection for reuse.
+ setReaderConnection(conn, readEndpointHostSpec);
+ switchCurrentConnectionTo(conn, this.readEndpointHostSpec);
+ this.isReaderConnFromInternalPool = Boolean.TRUE.equals(this.pluginService.isPooledConnection());
+ LOGGER.finer(() -> Messages.get("ReadWriteSplittingPlugin.switchedFromWriterToReader",
+ new Object[] {readEndpoint}));
+ }
+
+ private void switchCurrentConnectionTo(
+ final Connection newConnection,
+ final HostSpec newConnectionHost)
+ throws SQLException {
+
+ final Connection currentConnection = this.pluginService.getCurrentConnection();
+ if (currentConnection == newConnection) {
+ return;
+ }
+ this.pluginService.setCurrentConnection(newConnection, newConnectionHost);
+ LOGGER.finest(() -> Messages.get(
+ "ReadWriteSplittingPlugin.settingCurrentConnection",
+ new Object[] {
+ newConnectionHost.getHostAndPort()}));
+ }
+
+ private HostSpec createHostSpec(String endpoint, HostRole role) {
+ endpoint = endpoint.trim();
+
+ String host = endpoint;
+ int port = this.hostListProviderService.getCurrentHostSpec().getPort();
+ int colonIndex = endpoint.lastIndexOf(":");
+ if (colonIndex != -1 && endpoint.substring(colonIndex + 1).matches("\\d+")) {
+ host = endpoint.substring(0, colonIndex);
+ port = Integer.parseInt(endpoint.substring(colonIndex + 1));
+ }
+
+ return new HostSpecBuilder(this.hostListProviderService.getHostSpecBuilder())
+ .host(host)
+ .port(port)
+ .role(role)
+ .availability(HostAvailability.AVAILABLE)
+ .build();
+ }
+
+ private boolean isConnectionUsable(final Connection connection) throws SQLException {
+ return connection != null && !connection.isClosed();
+ }
+
+ @Override
+ public void releaseResources() {
+ closeIdleConnections();
+ }
+
+ private void closeIdleConnections() {
+ LOGGER.finest(() -> Messages.get("ReadWriteSplittingPlugin.closingInternalConnections"));
+ closeConnectionIfIdle(this.readerConnection);
+ closeConnectionIfIdle(this.writerConnection);
+ this.readerConnection = null;
+ this.writerConnection = null;
+ }
+
+ void closeConnectionIfIdle(final Connection internalConnection) {
+ final Connection currentConnection = this.pluginService.getCurrentConnection();
+ try {
+ if (internalConnection != null
+ && internalConnection != currentConnection
+ && !internalConnection.isClosed()) {
+ internalConnection.close();
+ }
+ } catch (final SQLException e) {
+ // ignore
+ }
+ }
+
+ private void closeConnection(final Connection connection) {
+ if (connection != null) {
+ try {
+ connection.close();
+ } catch (final SQLException ex) {
+ // ignore
+ }
+ }
+ }
+
+ private void delay() {
+ try {
+ TimeUnit.MILLISECONDS.sleep(this.connectRetryIntervalMs);
+ } catch (InterruptedException ex) {
+ Thread.currentThread().interrupt();
+ }
+ }
+
+ /**
+ * Methods for testing purposes only.
+ */
+ Connection getWriterConnection() {
+ return this.writerConnection;
+ }
+
+ Connection getReaderConnection() {
+ return this.readerConnection;
+ }
+
+ HostSpec getReaderHostSpec() {
+ return this.readEndpointHostSpec;
+ }
+
+ HostSpec getWriterHostSpec() {
+ return this.writeEndpointHostSpec;
+ }
+}
diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/srw/SimpleReadWriteSplittingPluginFactory.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/srw/SimpleReadWriteSplittingPluginFactory.java
new file mode 100644
index 000000000..4f0cdba58
--- /dev/null
+++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/srw/SimpleReadWriteSplittingPluginFactory.java
@@ -0,0 +1,40 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package software.amazon.jdbc.plugin.srw;
+
+import java.util.Properties;
+import software.amazon.jdbc.ConnectionPlugin;
+import software.amazon.jdbc.ConnectionPluginFactory;
+import software.amazon.jdbc.PluginService;
+import software.amazon.jdbc.ServicesContainerPluginFactory;
+import software.amazon.jdbc.plugin.failover.FailoverConnectionPlugin;
+import software.amazon.jdbc.util.FullServicesContainer;
+import software.amazon.jdbc.util.Messages;
+
+public class SimpleReadWriteSplittingPluginFactory implements ServicesContainerPluginFactory {
+ @Override
+ public ConnectionPlugin getInstance(final PluginService pluginService, final Properties props) {
+ throw new UnsupportedOperationException(
+ Messages.get(
+ "ServicesContainerPluginFactory.servicesContainerRequired", new Object[] {"SimpleReadWriteSplittingPlugin"}));
+ }
+
+ @Override
+ public ConnectionPlugin getInstance(final FullServicesContainer servicesContainer, final Properties props) {
+ return new SimpleReadWriteSplittingPlugin(servicesContainer, props);
+ }
+}
diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/staledns/AuroraStaleDnsHelper.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/staledns/AuroraStaleDnsHelper.java
index 682c3080f..9af641402 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/plugin/staledns/AuroraStaleDnsHelper.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/staledns/AuroraStaleDnsHelper.java
@@ -25,13 +25,15 @@
import java.util.Map;
import java.util.Properties;
import java.util.logging.Logger;
-import software.amazon.jdbc.HostListProviderService;
import software.amazon.jdbc.HostRole;
import software.amazon.jdbc.HostSpec;
import software.amazon.jdbc.JdbcCallable;
import software.amazon.jdbc.NodeChangeOptions;
import software.amazon.jdbc.PluginService;
+import software.amazon.jdbc.hostlistprovider.HostListProviderService;
+import software.amazon.jdbc.util.LogUtils;
import software.amazon.jdbc.util.Messages;
+import software.amazon.jdbc.util.RdsUrlType;
import software.amazon.jdbc.util.RdsUtils;
import software.amazon.jdbc.util.Utils;
import software.amazon.jdbc.util.telemetry.TelemetryCounter;
@@ -66,7 +68,11 @@ public Connection getVerifiedConnection(
final Properties props,
final JdbcCallable<Connection, SQLException> connectFunc) throws SQLException {
- if (!this.rdsUtils.isWriterClusterDns(hostSpec.getHost())) {
+ final RdsUrlType type = this.rdsUtils.identifyRdsType(hostSpec.getHost());
+
+ if (type != RdsUrlType.RDS_WRITER_CLUSTER
+ && type != RdsUrlType.RDS_GLOBAL_WRITER_CLUSTER) {
+ // It's not a writer cluster endpoint. Continue with a normal workflow.
return connectFunc.call();
}
@@ -96,7 +102,7 @@ public Connection getVerifiedConnection(
this.pluginService.refreshHostList(conn);
}
- LOGGER.finest(() -> Utils.logTopology(this.pluginService.getAllHosts()));
+ LOGGER.finest(() -> LogUtils.logTopology(this.pluginService.getAllHosts()));
if (this.writerHostSpec == null) {
final HostSpec writerCandidate = Utils.getWriter(this.pluginService.getAllHosts());
@@ -144,7 +150,7 @@ public Connection getVerifiedConnection(
Messages.get("AuroraStaleDnsHelper.currentWriterNotAllowed",
new Object[] {
this.writerHostSpec == null ? "" : this.writerHostSpec.getHostAndPort(),
- Utils.logTopology(allowedHosts, "")})
+ LogUtils.logTopology(allowedHosts, "")})
);
}
@@ -178,6 +184,7 @@ public void notifyNodeListChanged(final Map<String, EnumSet<NodeChangeOptions>>
LOGGER.finest(() -> Messages.get("AuroraStaleDnsHelper.reset"));
this.writerHostSpec = null;
this.writerHostAddress = null;
+ return;
}
}
}
diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/staledns/AuroraStaleDnsPlugin.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/staledns/AuroraStaleDnsPlugin.java
index a5babfc3c..080d19990 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/plugin/staledns/AuroraStaleDnsPlugin.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/staledns/AuroraStaleDnsPlugin.java
@@ -25,16 +25,18 @@
import java.util.Properties;
import java.util.Set;
import java.util.logging.Logger;
-import software.amazon.jdbc.HostListProviderService;
import software.amazon.jdbc.HostSpec;
import software.amazon.jdbc.JdbcCallable;
import software.amazon.jdbc.JdbcMethod;
import software.amazon.jdbc.NodeChangeOptions;
import software.amazon.jdbc.PluginService;
+import software.amazon.jdbc.hostlistprovider.HostListProviderService;
import software.amazon.jdbc.plugin.AbstractConnectionPlugin;
/**
- * After Aurora DB cluster fail over is completed and a cluster has elected a new writer node, the corresponding
+ * Deprecated. Use 'initialConnection' plugin instead.
+ *
+ * After Aurora DB cluster fail over is completed and a cluster has elected a new writer node, the corresponding
* cluster (writer) endpoint contains stale data and points to an old writer node. That old writer node plays
* a reader role after fail over and connecting with the cluster endpoint connects to it. In such case a user
* application expects a writer connection but practically gets connected to a reader. Any DML statements fail
diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/strategy/fastestresponse/HostResponseTimeServiceImpl.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/strategy/fastestresponse/HostResponseTimeServiceImpl.java
index 9915391ac..4d90f6d5d 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/plugin/strategy/fastestresponse/HostResponseTimeServiceImpl.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/strategy/fastestresponse/HostResponseTimeServiceImpl.java
@@ -28,7 +28,6 @@
import software.amazon.jdbc.PluginService;
import software.amazon.jdbc.util.FullServicesContainer;
import software.amazon.jdbc.util.Messages;
-import software.amazon.jdbc.util.storage.SlidingExpirationCacheWithCleanupThread;
public class HostResponseTimeServiceImpl implements HostResponseTimeService {
diff --git a/wrapper/src/main/java/software/amazon/jdbc/targetdriverdialect/TargetDriverDialectManager.java b/wrapper/src/main/java/software/amazon/jdbc/targetdriverdialect/TargetDriverDialectManager.java
index 865dbfe7e..47040059c 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/targetdriverdialect/TargetDriverDialectManager.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/targetdriverdialect/TargetDriverDialectManager.java
@@ -70,28 +70,6 @@ public class TargetDriverDialectManager implements TargetDriverDialectProvider {
PropertyDefinition.registerPluginProperties(TargetDriverDialectManager.class);
}
- /**
- * Sets a custom target driver dialect handler.
- *
- * @param targetDriverDialect A custom driver dialect to use.
- *
- * @deprecated Use software.amazon.jdbc.Driver instead
- */
- @Deprecated
- public static void setCustomDialect(final @NonNull TargetDriverDialect targetDriverDialect) {
- software.amazon.jdbc.Driver.setCustomTargetDriverDialect(targetDriverDialect);
- }
-
- /**
- * Resets a custom target driver dialect.
- *
- * @deprecated Use {@link software.amazon.jdbc.Driver#resetCustomTargetDriverDialect()} instead
- */
- @Deprecated
- public static void resetCustomDialect() {
- software.amazon.jdbc.Driver.resetCustomTargetDriverDialect();
- }
-
@Override
public TargetDriverDialect getDialect(
final @NonNull Driver driver,
diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/CacheItem.java b/wrapper/src/main/java/software/amazon/jdbc/util/CacheItem.java
new file mode 100644
index 000000000..31379014b
--- /dev/null
+++ b/wrapper/src/main/java/software/amazon/jdbc/util/CacheItem.java
@@ -0,0 +1,68 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package software.amazon.jdbc.util;
+
+import java.util.Objects;
+
+public class CacheItem<V> {
+
+ final V item;
+ final long expirationTime;
+
+ public CacheItem(final V item, final long expirationTime) {
+ this.item = item;
+ this.expirationTime = expirationTime;
+ }
+
+ public boolean isExpired() {
+ if (expirationTime <= 0) {
+ // No expiration time.
+ return false;
+ }
+ return System.nanoTime() > expirationTime;
+ }
+
+ public V get() {
+ return get(false);
+ }
+
+ public V get(final boolean returnExpired) {
+ return (this.isExpired() && !returnExpired) ? null : item;
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hashCode(item);
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) {
+ return true;
+ }
+ if (!(obj instanceof CacheItem)) {
+ return false;
+ }
+ CacheItem<?> other = (CacheItem<?>) obj;
+ return Objects.equals(this.item, other.item);
+ }
+
+ @Override
+ public String toString() {
+ return "CacheItem [item=" + item + ", expirationTime=" + expirationTime + "]";
+ }
+}
diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/ConnectionUrlParser.java b/wrapper/src/main/java/software/amazon/jdbc/util/ConnectionUrlParser.java
index 435907141..b66f631e4 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/util/ConnectionUrlParser.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/util/ConnectionUrlParser.java
@@ -43,6 +43,11 @@ public class ConnectionUrlParser {
// follows by any char except "/", "?" or "#"
+ "(?:[/?#].*)?"); // Anything starting with either "/", "?" or "#"
+ private static final Pattern URL_WITH_REGION_PATTERN =
+ Pattern.compile(
+ "^(\\[(?<region>.+)\\])?(?<domain>[a-zA-Z0-9\\?\\.\\-]+)(:(?<port>[0-9]+))?$",
+ Pattern.CASE_INSENSITIVE);
+
static final Pattern EMPTY_STRING_IN_QUOTATIONS = Pattern.compile("\"(\\s*)\"");
private static final RdsUtils rdsUtils = new RdsUtils();
@@ -90,6 +95,59 @@ public static HostSpec parseHostPortPair(final String url, final HostRole role,
return getHostSpec(hostPortPair, role, hostSpecBuilderSupplier.get());
}
+ /**
+ * Parse strings in the following formats:
+ * "url", for example: "instance-1.XYZ.us-east-2.rds.amazonaws.com"
+ * "url:port", for example: "instance-1.XYZ.us-east-2.rds.amazonaws.com:9999"
+ * "[region_name]url", for example: "[us-east-2]instance-1.any-domain.com"
+ * "[region_name]url:port", for example: "[us-east-2]instance-1.any-domain.com:9999"
+ *
+ * @param urlWithRegionPrefix Url with region prefix
+ * @param hostSpecBuilderSupplier A host builder supplier
+ * @return A pair of region and HostSpec
+ */
+ public static Pair<String, HostSpec> parseHostPortPairWithRegionPrefix(
+ final String urlWithRegionPrefix,
+ final Supplier<HostSpecBuilder> hostSpecBuilderSupplier) {
+
+ final Matcher matcher = URL_WITH_REGION_PATTERN.matcher(urlWithRegionPrefix);
+ if (!matcher.find()) {
+ throw new IllegalArgumentException(
+ Messages.get(
+ "ConnectionUrlParser.cantParseUrl",
+ new Object[] {urlWithRegionPrefix}));
+ }
+ String awsRegion = matcher.group("region");
+ final String host = matcher.group("domain");
+ final String port = matcher.group("port");
+
+ if (StringUtils.isNullOrEmpty(host)) {
+ throw new IllegalArgumentException(
+ Messages.get(
+ "ConnectionUrlParser.cantParseHost",
+ new Object[] {urlWithRegionPrefix}));
+ }
+
+ if (StringUtils.isNullOrEmpty(awsRegion)) {
+ awsRegion = rdsUtils.getRdsRegion(host);
+ if (StringUtils.isNullOrEmpty(awsRegion)) {
+ throw new IllegalArgumentException(
+ Messages.get(
+ "ConnectionUrlParser.cantParseAwsRegion",
+ new Object[] {urlWithRegionPrefix}));
+ }
+ }
+
+ final RdsUrlType urlType = rdsUtils.identifyRdsType(host);
+
+ // Assign HostRole of READER if using the reader cluster URL, otherwise assume a HostRole of WRITER
+ final HostRole hostRole = RdsUrlType.RDS_READER_CLUSTER.equals(urlType) ? HostRole.READER : HostRole.WRITER;
+ final String[] hostPortPair = StringUtils.isNullOrEmpty(port)
+ ? new String[] { host }
+ : new String[] { host, port };
+ return Pair.create(awsRegion, getHostSpec(hostPortPair, hostRole, hostSpecBuilderSupplier.get()));
+ }
+
private static HostSpec getHostSpec(final String[] hostPortPair, final HostRole hostRole,
final HostSpecBuilder hostSpecBuilder) {
String hostId = rdsUtils.getRdsInstanceId(hostPortPair[0]);
diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/FullServicesContainer.java b/wrapper/src/main/java/software/amazon/jdbc/util/FullServicesContainer.java
index 373b011cc..f406486e3 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/util/FullServicesContainer.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/util/FullServicesContainer.java
@@ -18,9 +18,9 @@
import software.amazon.jdbc.ConnectionPluginManager;
import software.amazon.jdbc.ConnectionProvider;
-import software.amazon.jdbc.HostListProviderService;
import software.amazon.jdbc.PluginManagerService;
import software.amazon.jdbc.PluginService;
+import software.amazon.jdbc.hostlistprovider.HostListProviderService;
import software.amazon.jdbc.util.events.EventPublisher;
import software.amazon.jdbc.util.monitoring.MonitorService;
import software.amazon.jdbc.util.storage.StorageService;
diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/FullServicesContainerImpl.java b/wrapper/src/main/java/software/amazon/jdbc/util/FullServicesContainerImpl.java
index 9f9cb73fc..7bd479caf 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/util/FullServicesContainerImpl.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/util/FullServicesContainerImpl.java
@@ -18,9 +18,9 @@
import software.amazon.jdbc.ConnectionPluginManager;
import software.amazon.jdbc.ConnectionProvider;
-import software.amazon.jdbc.HostListProviderService;
import software.amazon.jdbc.PluginManagerService;
import software.amazon.jdbc.PluginService;
+import software.amazon.jdbc.hostlistprovider.HostListProviderService;
import software.amazon.jdbc.util.events.EventPublisher;
import software.amazon.jdbc.util.monitoring.MonitorService;
import software.amazon.jdbc.util.storage.StorageService;
diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/LogUtils.java b/wrapper/src/main/java/software/amazon/jdbc/util/LogUtils.java
new file mode 100644
index 000000000..932e4a21e
--- /dev/null
+++ b/wrapper/src/main/java/software/amazon/jdbc/util/LogUtils.java
@@ -0,0 +1,55 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package software.amazon.jdbc.util;
+
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+import org.checkerframework.checker.nullness.qual.Nullable;
+import software.amazon.jdbc.HostSpec;
+
+public class LogUtils {
+ public static String logTopology(final @Nullable List<HostSpec> hosts) {
+ return logTopology(hosts, null);
+ }
+
+ public static String logTopology(
+ final @Nullable List<HostSpec> hosts,
+ final @Nullable String messagePrefix) {
+
+ final StringBuilder msg = new StringBuilder();
+ if (hosts == null) {
+ msg.append("<null>");
+ } else {
+ for (final HostSpec host : hosts) {
+ if (msg.length() > 0) {
+ msg.append("\n");
+ }
+ msg.append(" ").append(host == null ? "<null>" : host);
+ }
+ }
+
+ return Messages.get("Utils.topology",
+ new Object[] {messagePrefix == null ? "Topology:" : messagePrefix, msg.toString()});
+ }
+
+ public static String toLogString(Map<String, HostSpec> map) {
+ return map.entrySet().stream()
+ .map(x -> String.format("\t[%s] -> %s", x.getKey(), x.getValue().getHostAndPort()))
+ .collect(Collectors.joining("\n"));
+ }
+}
diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/RdsUrlType.java b/wrapper/src/main/java/software/amazon/jdbc/util/RdsUrlType.java
index dff1e663a..8c3084192 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/util/RdsUrlType.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/util/RdsUrlType.java
@@ -22,8 +22,10 @@ public enum RdsUrlType {
RDS_READER_CLUSTER(true, true),
RDS_CUSTOM_CLUSTER(true, true),
RDS_PROXY(true, false),
+ RDS_PROXY_ENDPOINT(true, false),
RDS_INSTANCE(true, false),
RDS_AURORA_LIMITLESS_DB_SHARD_GROUP(true, false),
+ RDS_GLOBAL_WRITER_CLUSTER(true, true),
OTHER(false, false);
private final boolean isRds;
diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/RdsUtils.java b/wrapper/src/main/java/software/amazon/jdbc/util/RdsUtils.java
index 2da967a9b..c1eec6545 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/util/RdsUtils.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/util/RdsUtils.java
@@ -73,6 +73,20 @@ public class RdsUtils {
// Governmental endpoints
// https://aws.amazon.com/compliance/fips/#FIPS_Endpoints_by_Service
// https://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/s3/model/Region.html
+ //
+ //
+ // Aurora Global Database
+ // https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Concepts.Aurora_Fea_Regions_DB-eng.Feature.GlobalDatabase.html
+ // Global Database Endpoint: <db-cluster-name>.global-<unique-id>.global.rds.amazonaws.com
+ // Example: test-global-db-name.global-123456789012.global.rds.amazonaws.com
+ //
+ //
+ // RDS Proxy
+ // RDS Proxy Endpoint: <proxy-name>.proxy-<unique-id>.<region>.rds.amazonaws.com
+ // Example: test-rds-proxy-name.proxy-123456789012.us-east-2.rds.amazonaws.com
+ //
+ // RDS Proxy Custom Endpoint: <endpoint-name>.endpoint.proxy-<unique-id>.<region>.rds.amazonaws.com
+ // Example: test-custom-endpoint-name.endpoint.proxy-123456789012.us-east-2.rds.amazonaws.com
private static final Pattern AURORA_DNS_PATTERN =
Pattern.compile(
@@ -177,6 +191,38 @@ public class RdsUtils {
".*(?<prefix>-old1)\\..*",
Pattern.CASE_INSENSITIVE);
+ // https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Concepts.Aurora_Fea_Regions_DB-eng.Feature.GlobalDatabase.html
+ private static final Pattern AURORA_GLOBAL_WRITER_DNS_PATTERN =
+ Pattern.compile(
+ "^(?<instance>.+)\\."
+ + "(?<dns>global-)?"
+ + "(?<domain>[a-zA-Z0-9]+\\.global\\.rds\\.amazonaws\\.com\\.?)$",
+ Pattern.CASE_INSENSITIVE);
+
+ private static final Pattern RDS_PROXY_ENDPOINT_DNS_PATTERN =
+ Pattern.compile(
+ "^(?<instance>.+)\\.endpoint\\."
+ + "(?<dns>proxy-)?"
+ + "(?<domain>[a-zA-Z0-9]+\\.(?<region>[a-zA-Z0-9\\-]+)"
+ + "\\.rds\\.amazonaws\\.com\\.?)$",
+ Pattern.CASE_INSENSITIVE);
+
+ private static final Pattern RDS_PROXY_ENDPOINT_CHINA_DNS_PATTERN =
+ Pattern.compile(
+ "^(?