diff --git a/CHANGELOG.md b/CHANGELOG.md index f287cceba..f0393b684 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,28 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/#semantic-versioning-200). +## [3.0.0] - TBD + +### :crab: Breaking Changes + +> [!WARNING]\ +> 3.0 removes the suggested ClusterId functionality ([PR #1570](https://github.com/aws/aws-advanced-jdbc-wrapper/pull/1570)). +> #### Suggested ClusterId Functionality +> Prior to this change, the wrapper would generate a unique cluster ID based on the connection string and the cluster topology; however, in some cases (such as custom endpoints, IP addresses, and CNAME aliases), the wrapper would generate an incorrect identifier. This change was needed to prevent applications with several clusters from accidentally relying on incorrect topology during failover, which could result in the wrapper failing to complete failover successfully. +> #### Migration +> | Number of Database Clusters in Use | Requires Changes | Action Items | +> |-----------------------------------|------------------|--------------| +> | Single database cluster | No | No changes required | +> | Multiple database clusters | Yes | Review all connection strings and add the mandatory `clusterId` parameter ([PR #1476](https://github.com/aws/aws-advanced-jdbc-wrapper/pull/1476)). See the [documentation](https://github.com/aws/aws-advanced-jdbc-wrapper/blob/main/docs/using-the-jdbc-driver/using-plugins/UsingTheFailover2Plugin.md#failover-plugin-v2-configuration-parameters) for `clusterId` parameter configuration | + +> [!WARNING]\ +> 3.0 removes deprecated code ([PR #1572](https://github.com/aws/aws-advanced-jdbc-wrapper/pull/1572)). +> #### Deprecated Code Removal +> Some methods marked as deprecated in version 2.x.x are now removed in 3.0. + + +### :magic_wand: Added +- Added support for Aurora Global Databases, including the Global Database endpoint ([PR #1573](https://github.com/aws/aws-advanced-jdbc-wrapper/pull/1573)). ## [2.6.7] - 2025-11-25 ### :bug: Fixed diff --git a/Maintenance.md b/Maintenance.md index 62a2983cf..20fa5a983 100644 --- a/Maintenance.md +++ b/Maintenance.md @@ -93,4 +93,5 @@ from the updated source after the PRs are merged. | Major Version | Latest Minor Version | Status | Initial Release | Maintenance Window Start | Maintenance Window End | |---------------|----------------------|-------------|-----------------|--------------------------|------------------------| | 1 | 1.0.2 | Maintenance | Oct 5, 2022 | Apr 28, 2023 | Apr 28, 2024 | -| 2 | 2.6.7 | Current | Apr 28, 2023 | N/A | N/A | +| 2 | 2.6.7 | Maintenance | Apr 28, 2023 | Jan 1, 2026 | Dec 31, 2026 | +| 3 | 3.0.0 | Current | Dec 12, 2025 | N/A | N/A | diff --git a/README.md b/README.md index 2cff94a62..189bf1d6a 100644 --- a/README.md +++ b/README.md @@ -52,6 +52,10 @@ With the `failover` plugin, the downtime during certain DB cluster operations, s Visit [this page](./docs/using-the-jdbc-driver/SupportForRDSMultiAzDBCluster.md) for more details. +### Using the AWS Advanced JDBC Wrapper with Amazon Aurora Global Databases + +This driver supports in-region `failover` as well as cross-region `planned failover` and `switchover` of [Amazon Aurora Global Databases](https://aws.amazon.com/rds/aurora/global-database/). 
The wrapper also recognizes the [Global Writer Endpoint](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-global-database-connecting.html) and handles it to minimize potential stale DNS issues. Please check the [failover plugin](./docs/using-the-jdbc-driver/using-plugins/UsingTheFailoverPlugin.md), [failover2 plugin](./docs/using-the-jdbc-driver/using-plugins/UsingTheFailover2Plugin.md), and [Aurora Initial Connection Strategy plugin](./docs/using-the-jdbc-driver/using-plugins/UsingTheAuroraInitialConnectionStrategyPlugin.md) documentation for more information. + ### Plain Amazon RDS databases The AWS Advanced JDBC Wrapper also works with RDS provided databases that are not Aurora. diff --git a/benchmarks/README.md b/benchmarks/README.md index 38e9bf49f..3183c8b6e 100644 --- a/benchmarks/README.md +++ b/benchmarks/README.md @@ -7,5 +7,5 @@ The benchmarks do not measure the performance of target JDBC drivers nor the per ## Usage 1. Build the benchmarks with the following command `../gradlew jmhJar`. 1. the JAR file will be outputted to `build/libs` -2. Run the benchmarks with the following command `java -jar build/libs/benchmarks-2.6.7-jmh.jar`. +2. Run the benchmarks with the following command `java -jar build/libs/benchmarks-3.0.0-jmh.jar`. 1. you may have to update the command based on the exact version of the produced JAR file diff --git a/benchmarks/src/jmh/java/software/amazon/jdbc/benchmarks/ConnectionPluginManagerBenchmarks.java b/benchmarks/src/jmh/java/software/amazon/jdbc/benchmarks/ConnectionPluginManagerBenchmarks.java index 413a37e03..ebdb34355 100644 --- a/benchmarks/src/jmh/java/software/amazon/jdbc/benchmarks/ConnectionPluginManagerBenchmarks.java +++ b/benchmarks/src/jmh/java/software/amazon/jdbc/benchmarks/ConnectionPluginManagerBenchmarks.java @@ -50,10 +50,11 @@ import org.openjdk.jmh.runner.RunnerException; import org.openjdk.jmh.runner.options.Options; import org.openjdk.jmh.runner.options.OptionsBuilder; +import software.amazon.jdbc.ConnectionInfo; import software.amazon.jdbc.ConnectionPluginFactory; import software.amazon.jdbc.ConnectionPluginManager; import software.amazon.jdbc.ConnectionProvider; -import software.amazon.jdbc.HostListProviderService; +import software.amazon.jdbc.hostlistprovider.HostListProviderService; import software.amazon.jdbc.HostSpec; import software.amazon.jdbc.HostSpecBuilder; import software.amazon.jdbc.JdbcMethod; @@ -126,7 +127,7 @@ public void setUpIteration() throws Exception { any(Dialect.class), any(TargetDriverDialect.class), any(HostSpec.class), - any(Properties.class))).thenReturn(mockConnection); + any(Properties.class))).thenReturn(new ConnectionInfo(mockConnection, false)); when(mockTelemetryFactory.openTelemetryContext(anyString(), any())).thenReturn(mockTelemetryContext); when(mockTelemetryFactory.openTelemetryContext(eq(null), any())).thenReturn(mockTelemetryContext); when(mockTelemetryFactory.createCounter(anyString())).thenReturn(mockTelemetryCounter); diff --git a/benchmarks/src/jmh/java/software/amazon/jdbc/benchmarks/PluginBenchmarks.java b/benchmarks/src/jmh/java/software/amazon/jdbc/benchmarks/PluginBenchmarks.java index a9c10b2f6..dda847f73 100644 --- a/benchmarks/src/jmh/java/software/amazon/jdbc/benchmarks/PluginBenchmarks.java +++ b/benchmarks/src/jmh/java/software/amazon/jdbc/benchmarks/PluginBenchmarks.java @@ -48,12 +48,13 @@ import org.openjdk.jmh.runner.RunnerException; import org.openjdk.jmh.runner.options.Options; import org.openjdk.jmh.runner.options.OptionsBuilder; +import software.amazon.jdbc.ConnectionInfo; 
import software.amazon.jdbc.ConnectionPluginManager; import software.amazon.jdbc.ConnectionProvider; import software.amazon.jdbc.ConnectionProviderManager; import software.amazon.jdbc.Driver; import software.amazon.jdbc.HikariPooledConnectionProvider; -import software.amazon.jdbc.HostListProviderService; +import software.amazon.jdbc.hostlistprovider.HostListProviderService; import software.amazon.jdbc.HostSpec; import software.amazon.jdbc.HostSpecBuilder; import software.amazon.jdbc.JdbcMethod; @@ -134,7 +135,7 @@ public void setUpIteration() throws Exception { any(Dialect.class), any(TargetDriverDialect.class), any(HostSpec.class), - any(Properties.class))).thenReturn(mockConnection); + any(Properties.class))).thenReturn(new ConnectionInfo(mockConnection, false)); when(mockConnection.createStatement()).thenReturn(mockStatement); when(mockStatement.executeQuery(anyString())).thenReturn(mockResultSet); when(mockResultSet.next()).thenReturn(true, true, false); diff --git a/benchmarks/src/jmh/java/software/amazon/jdbc/benchmarks/testplugin/BenchmarkPlugin.java b/benchmarks/src/jmh/java/software/amazon/jdbc/benchmarks/testplugin/BenchmarkPlugin.java index ffed10f77..07fede27a 100644 --- a/benchmarks/src/jmh/java/software/amazon/jdbc/benchmarks/testplugin/BenchmarkPlugin.java +++ b/benchmarks/src/jmh/java/software/amazon/jdbc/benchmarks/testplugin/BenchmarkPlugin.java @@ -28,7 +28,7 @@ import java.util.Set; import java.util.logging.Logger; import software.amazon.jdbc.ConnectionPlugin; -import software.amazon.jdbc.HostListProviderService; +import software.amazon.jdbc.hostlistprovider.HostListProviderService; import software.amazon.jdbc.HostRole; import software.amazon.jdbc.HostSpec; import software.amazon.jdbc.HostSpecBuilder; diff --git a/benchmarks/src/jmh/java/software/amazon/jdbc/benchmarks/testplugin/TestConnectionWrapper.java b/benchmarks/src/jmh/java/software/amazon/jdbc/benchmarks/testplugin/TestConnectionWrapper.java index a3c0cd7f2..483d6768c 100644 --- a/benchmarks/src/jmh/java/software/amazon/jdbc/benchmarks/testplugin/TestConnectionWrapper.java +++ b/benchmarks/src/jmh/java/software/amazon/jdbc/benchmarks/testplugin/TestConnectionWrapper.java @@ -20,7 +20,7 @@ import java.util.Properties; import org.checkerframework.checker.nullness.qual.NonNull; import software.amazon.jdbc.ConnectionPluginManager; -import software.amazon.jdbc.HostListProviderService; +import software.amazon.jdbc.hostlistprovider.HostListProviderService; import software.amazon.jdbc.PluginManagerService; import software.amazon.jdbc.PluginService; import software.amazon.jdbc.wrapper.ConnectionWrapper; diff --git a/docs/GettingStarted.md b/docs/GettingStarted.md index d66b844ca..a62a0b053 100644 --- a/docs/GettingStarted.md +++ b/docs/GettingStarted.md @@ -16,7 +16,7 @@ If you are using the AWS Advanced JDBC Wrapper as part of a Gradle project, incl ```gradle dependencies { - implementation group: 'software.amazon.jdbc', name: 'aws-advanced-jdbc-wrapper', version: '2.6.7' + implementation group: 'software.amazon.jdbc', name: 'aws-advanced-jdbc-wrapper', version: '3.0.0' implementation group: 'org.postgresql', name: 'postgresql', version: '42.5.0' } ``` @@ -30,16 +30,16 @@ You can use pre-compiled packages that can be downloaded directly from [GitHub R For example, the following command uses wget to download the wrapper: ```bash -wget https://github.com/aws/aws-advanced-jdbc-wrapper/releases/download/2.6.7/aws-advanced-jdbc-wrapper-2.6.7.jar +wget 
https://github.com/aws/aws-advanced-jdbc-wrapper/releases/download/3.0.0/aws-advanced-jdbc-wrapper-3.0.0.jar ``` Then, the following command adds the AWS Advanced JDBC Wrapper to the CLASSPATH: ```bash -export CLASSPATH=$CLASSPATH:/home/userx/libs/aws-advanced-jdbc-wrapper-2.6.7.jar +export CLASSPATH=$CLASSPATH:/home/userx/libs/aws-advanced-jdbc-wrapper-3.0.0.jar ``` -> **Note**: There is also a JAR suffixed with `-bundle-federated-auth`. It is an Uber JAR that contains the AWS Advanced JDBC Wrapper as well as all the dependencies needed to run the Federated Authentication Plugin. **Our general recommendation is to use the `aws-advanced-jdbc-wrapper-2.6.7.jar` for use cases unrelated to complex Federated Authentication environments**. To learn more, please check out the [Federated Authentication Plugin](./using-the-jdbc-driver/using-plugins/UsingTheFederatedAuthPlugin.md#bundled-uber-jar). +> **Note**: There is also a JAR suffixed with `-bundle-federated-auth`. It is an Uber JAR that contains the AWS Advanced JDBC Wrapper as well as all the dependencies needed to run the Federated Authentication Plugin. **Our general recommendation is to use the `aws-advanced-jdbc-wrapper-3.0.0.jar` for use cases unrelated to complex Federated Authentication environments**. To learn more, please check out the [Federated Authentication Plugin](./using-the-jdbc-driver/using-plugins/UsingTheFederatedAuthPlugin.md#bundled-uber-jar). ### As a Maven Dependency @@ -50,7 +50,7 @@ You can use [Maven's dependency management](https://central.sonatype.com/artifac <groupId>software.amazon.jdbc</groupId> <artifactId>aws-advanced-jdbc-wrapper</artifactId> - <version>2.6.7</version> + <version>3.0.0</version> ``` @@ -61,7 +61,7 @@ You can use [Gradle's dependency management](https://central.sonatype.com/artifa ```gradle dependencies { - implementation group: 'software.amazon.jdbc', name: 'aws-advanced-jdbc-wrapper', version: '2.6.7' + implementation group: 'software.amazon.jdbc', name: 'aws-advanced-jdbc-wrapper', version: '3.0.0' } ``` @@ -69,7 +69,7 @@ To add a Gradle dependency in a Kotlin syntax, use the following configuration: ```kotlin dependencies { - implementation("software.amazon.jdbc:aws-advanced-jdbc-wrapper:2.6.7") + implementation("software.amazon.jdbc:aws-advanced-jdbc-wrapper:3.0.0") } ``` diff --git a/docs/using-the-jdbc-driver/CompatibilityCrossPlugins.md b/docs/using-the-jdbc-driver/CompatibilityCrossPlugins.md index 0f20293f0..744ecce9d 100644 --- a/docs/using-the-jdbc-driver/CompatibilityCrossPlugins.md +++ b/docs/using-the-jdbc-driver/CompatibilityCrossPlugins.md @@ -14,7 +14,7 @@ | [awsSecretsManager](./using-plugins/UsingTheAwsSecretsManagerPlugin.md) | | | | | | [federatedAuth](./using-plugins/UsingTheFederatedAuthPlugin.md) | | | | | | [okta](./using-plugins/UsingTheOktaAuthPlugin.md) | | | | | -| auroraStaleDns | | | | | +| ~~auroraStaleDns~~ | | | | | | [readWriteSplitting](./using-plugins/UsingTheReadWriteSplittingPlugin.md) | | | | | | [auroraConnectionTracker](./using-plugins/UsingTheAuroraConnectionTrackerPlugin.md) | | | | | | [driverMetaData](./using-plugins/UsingTheDriverMetadataConnectionPlugin.md) | | | | | @@ -24,6 +24,7 @@ | [initialConnection](./using-plugins/UsingTheAuroraInitialConnectionStrategyPlugin.md) | | | | | | [limitless](./using-plugins/UsingTheLimitlessConnectionPlugin.md) | | | | | | [bg](./using-plugins/UsingTheBlueGreenPlugin.md) | | | | | +| [srw](./using-plugins/UsingTheSimpleReadWriteSplittingPlugin.md) | | | | |
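To make the compatibility tables above concrete, here is a minimal sketch of how compatible plugins are combined: their codes are listed together in the `wrapperPlugins` connection parameter (documented in [UsingTheJdbcDriver.md](./UsingTheJdbcDriver.md)). The endpoint, database name, and credentials below are illustrative placeholders:

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.util.Properties;

public class CombinedPluginsExample {
  public static void main(String[] args) throws Exception {
    Properties props = new Properties();
    // Compatible plugin codes from the tables above, comma-separated.
    props.setProperty("wrapperPlugins", "readWriteSplitting,failover2,efm2");
    props.setProperty("user", "dbuser");         // placeholder credentials
    props.setProperty("password", "dbpassword"); // placeholder credentials
    try (Connection conn = DriverManager.getConnection(
        "jdbc:aws-wrapper:postgresql://database.cluster-xyz.us-east-2.rds.amazonaws.com:5432/mydb",
        props)) {
      System.out.println("Connected: " + !conn.isClosed());
    }
  }
}
```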
@@ -36,7 +37,7 @@ | [awsSecretsManager](./using-plugins/UsingTheAwsSecretsManagerPlugin.md) | | | | | | [federatedAuth](./using-plugins/UsingTheFederatedAuthPlugin.md) | | | | | | [okta](./using-plugins/UsingTheOktaAuthPlugin.md) | | | | | -| auroraStaleDns | | | | | +| ~~auroraStaleDns~~ | | | | | | [readWriteSplitting](./using-plugins/UsingTheReadWriteSplittingPlugin.md) | | | | | | [auroraConnectionTracker](./using-plugins/UsingTheAuroraConnectionTrackerPlugin.md) | | | | | | [driverMetaData](./using-plugins/UsingTheDriverMetadataConnectionPlugin.md) | | | | | @@ -46,6 +47,7 @@ | [initialConnection](./using-plugins/UsingTheAuroraInitialConnectionStrategyPlugin.md) | | | | | | [limitless](./using-plugins/UsingTheLimitlessConnectionPlugin.md) | | | | | | [bg](./using-plugins/UsingTheBlueGreenPlugin.md) | | | | | +| [srw](./using-plugins/UsingTheSimpleReadWriteSplittingPlugin.md) | | | | |
@@ -54,7 +56,7 @@ | [awsSecretsManager](./using-plugins/UsingTheAwsSecretsManagerPlugin.md) | | | | | | [federatedAuth](./using-plugins/UsingTheFederatedAuthPlugin.md) | | | | | | [okta](./using-plugins/UsingTheOktaAuthPlugin.md) | | | | | -| auroraStaleDns | | | | | +| ~~auroraStaleDns~~ | | | | | | [readWriteSplitting](./using-plugins/UsingTheReadWriteSplittingPlugin.md) | | | | | | [auroraConnectionTracker](./using-plugins/UsingTheAuroraConnectionTrackerPlugin.md) | | | | | | [driverMetaData](./using-plugins/UsingTheDriverMetadataConnectionPlugin.md) | | | | | @@ -64,10 +66,11 @@ | [initialConnection](./using-plugins/UsingTheAuroraInitialConnectionStrategyPlugin.md) | | | | | | [limitless](./using-plugins/UsingTheLimitlessConnectionPlugin.md) | | | | | | [bg](./using-plugins/UsingTheBlueGreenPlugin.md) | | | | | +| [srw](./using-plugins/UsingTheSimpleReadWriteSplittingPlugin.md) | | | | |
-| Plugin codes / Plugin codes | auroraStaleDns | [readWriteSplitting](./using-plugins/UsingTheReadWriteSplittingPlugin.md) | [auroraConnectionTracker](./using-plugins/UsingTheAuroraConnectionTrackerPlugin.md) | [driverMetaData](./using-plugins/UsingTheDriverMetadataConnectionPlugin.md) | +| Plugin codes / Plugin codes | ~~auroraStaleDns~~ | [readWriteSplitting](./using-plugins/UsingTheReadWriteSplittingPlugin.md) | [auroraConnectionTracker](./using-plugins/UsingTheAuroraConnectionTrackerPlugin.md) | [driverMetaData](./using-plugins/UsingTheDriverMetadataConnectionPlugin.md) | |---------------------------------------------------------------------------------------|----------------------------------------------------------|---------------------------------------------------------------------------|-------------------------------------------------------------------------------------|-----------------------------------------------------------------------------| | [readWriteSplitting](./using-plugins/UsingTheReadWriteSplittingPlugin.md) | | | | | | [auroraConnectionTracker](./using-plugins/UsingTheAuroraConnectionTrackerPlugin.md) | | | | | @@ -78,6 +81,7 @@ | [initialConnection](./using-plugins/UsingTheAuroraInitialConnectionStrategyPlugin.md) | | | | | | [limitless](./using-plugins/UsingTheLimitlessConnectionPlugin.md) | | | | | | [bg](./using-plugins/UsingTheBlueGreenPlugin.md) | | | | | +| [srw](./using-plugins/UsingTheSimpleReadWriteSplittingPlugin.md) | | | | |
@@ -88,9 +92,11 @@ | [initialConnection](./using-plugins/UsingTheAuroraInitialConnectionStrategyPlugin.md) | | | | | | [limitless](./using-plugins/UsingTheLimitlessConnectionPlugin.md) | | | | | | [bg](./using-plugins/UsingTheBlueGreenPlugin.md) | | | | | +| [srw](./using-plugins/UsingTheSimpleReadWriteSplittingPlugin.md) | | | | |
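Because these tables gain the new `srw` read-write splitting row, a brief sketch of how read-write splitting is driven may help; switching is requested through `Connection.setReadOnly`, as described for the `readWriteSplitting` plugin. The host, table, and credentials are illustrative placeholders:

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;
import java.util.Properties;

public class ReadWriteSplittingSketch {
  public static void main(String[] args) throws Exception {
    Properties props = new Properties();
    props.setProperty("wrapperPlugins", "readWriteSplitting,failover2,efm2");
    props.setProperty("user", "dbuser");         // placeholder
    props.setProperty("password", "dbpassword"); // placeholder
    try (Connection conn = DriverManager.getConnection(
        "jdbc:aws-wrapper:postgresql://database.cluster-xyz.us-east-2.rds.amazonaws.com:5432/mydb",
        props)) {
      try (Statement writeStmt = conn.createStatement()) {
        writeStmt.executeUpdate("UPDATE example_table SET touched = true"); // runs on the writer
      }
      conn.setReadOnly(true); // the read-write splitting plugin switches to a reader
      try (Statement readStmt = conn.createStatement();
           ResultSet rs = readStmt.executeQuery("SELECT count(*) FROM example_table")) {
        while (rs.next()) {
          System.out.println(rs.getLong(1)); // served by a reader instance
        }
      }
      conn.setReadOnly(false); // switch back to the writer
    }
  }
}
```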
-| Plugin codes / Plugin codes | [limitless](./using-plugins/UsingTheLimitlessConnectionPlugin.md) | [bg](./using-plugins/UsingTheBlueGreenPlugin.md) | -|--------------------------------------------------|-------------------------------------------------------------------|--------------------------------------------------| -| [bg](./using-plugins/UsingTheBlueGreenPlugin.md) | | | +| Plugin codes / Plugin codes | [limitless](./using-plugins/UsingTheLimitlessConnectionPlugin.md) | [bg](./using-plugins/UsingTheBlueGreenPlugin.md) | +|------------------------------------------------------------------|-------------------------------------------------------------------|----------------------------------------------------------| +| [bg](./using-plugins/UsingTheBlueGreenPlugin.md) | | | +| [srw](./using-plugins/UsingTheSimpleReadWriteSplittingPlugin.md) | | | diff --git a/docs/using-the-jdbc-driver/CompatibilityDatabaseTypes.md b/docs/using-the-jdbc-driver/CompatibilityDatabaseTypes.md index e0876f3db..fd46c9206 100644 --- a/docs/using-the-jdbc-driver/CompatibilityDatabaseTypes.md +++ b/docs/using-the-jdbc-driver/CompatibilityDatabaseTypes.md @@ -8,46 +8,48 @@ | customEndpoint | | | | | [efm](./using-plugins/UsingTheHostMonitoringPlugin.md) | | | | | [efm2](./using-plugins/UsingTheHostMonitoringPlugin.md#host-monitoring-plugin-v2) | | | | -| [failover](./using-plugins/UsingTheFailoverPlugin.md) | | | | -| [failover2](./using-plugins/UsingTheFailover2Plugin.md) | | | | +| [failover](./using-plugins/UsingTheFailoverPlugin.md) | | | | +| [failover2](./using-plugins/UsingTheFailover2Plugin.md) | | | | | [iam](./using-plugins/UsingTheIamAuthenticationPlugin.md) | | | | | [awsSecretsManager](./using-plugins/UsingTheAwsSecretsManagerPlugin.md) | | | | | [federatedAuth](./using-plugins/UsingTheFederatedAuthPlugin.md) | | | | | [okta](./using-plugins/UsingTheOktaAuthPlugin.md) | | | | -| auroraStaleDns | | | | -| [readWriteSplitting](./using-plugins/UsingTheReadWriteSplittingPlugin.md) | | | | +| ~~auroraStaleDns~~ | | | | +| [readWriteSplitting](./using-plugins/UsingTheReadWriteSplittingPlugin.md) | | | | +| [srw](./using-plugins/UsingTheSimpleReadWriteSplittingPlugin.md) | | | | | [auroraConnectionTracker](./using-plugins/UsingTheAuroraConnectionTrackerPlugin.md) | | | | | [driverMetaData](./using-plugins/UsingTheDriverMetadataConnectionPlugin.md) | | | | | connectTime | | | | | [dev](./using-plugins/UsingTheDeveloperPlugin.md) | | | | -| fastestResponseStrategy | | | | -| [initialConnection](./using-plugins/UsingTheAuroraInitialConnectionStrategyPlugin.md) | | | | +| fastestResponseStrategy | | | | +| [initialConnection](./using-plugins/UsingTheAuroraInitialConnectionStrategyPlugin.md) | | | | | [limitless](./using-plugins/UsingTheLimitlessConnectionPlugin.md) | | (PostgreSQL only) | | | [bg](./using-plugins/UsingTheBlueGreenPlugin.md) | | | |
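The tables in this document separate Aurora-only plugins from database-agnostic ones. As a minimal sketch of the latter case, the snippet below connects to a self-managed (community) MySQL host, an illustrative placeholder, with only generic plugins enabled:

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.util.Properties;

public class CommunityDatabaseSketch {
  public static void main(String[] args) throws Exception {
    Properties props = new Properties();
    // Aurora-specific plugins such as failover2 are omitted for a community database.
    props.setProperty("wrapperPlugins", "executionTime,logQuery");
    props.setProperty("user", "dbuser");         // placeholder
    props.setProperty("password", "dbpassword"); // placeholder
    try (Connection conn = DriverManager.getConnection(
        "jdbc:aws-wrapper:mysql://community-host.example.com:3306/mydb", props)) {
      System.out.println("Connected: " + !conn.isClosed());
    }
  }
}
```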
-| Plugin codes / Database types | [RDS Multi-AZ DB Instance deployment (2 instances)](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.MultiAZSingleStandby.html)
(MySQL and PG) | [RDS Single-AZ Instance deployment (1 instance)](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.DBInstance.html)
(MySQL and PG) | Community Database
(MySQL and PG) | -|---------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------| -| executionTime | | | | -| logQuery | | | | -| dataCache | | | | -| customEndpoint | | | | -| [efm](./using-plugins/UsingTheHostMonitoringPlugin.md) | | | | -| [efm2](./using-plugins/UsingTheHostMonitoringPlugin.md#host-monitoring-plugin-v2) | | | | -| [failover](./using-plugins/UsingTheFailoverPlugin.md) | | | | -| [failover2](./using-plugins/UsingTheFailover2Plugin.md) | | | | -| [iam](./using-plugins/UsingTheIamAuthenticationPlugin.md) | | | | -| [awsSecretsManager](./using-plugins/UsingTheAwsSecretsManagerPlugin.md) | | | | -| [federatedAuth](./using-plugins/UsingTheFederatedAuthPlugin.md) | | | | -| [okta](./using-plugins/UsingTheOktaAuthPlugin.md) | | | | -| auroraStaleDns | | | | -| [readWriteSplitting](./using-plugins/UsingTheReadWriteSplittingPlugin.md) | | | | -| [auroraConnectionTracker](./using-plugins/UsingTheAuroraConnectionTrackerPlugin.md) | | | | -| [driverMetaData](./using-plugins/UsingTheDriverMetadataConnectionPlugin.md) | | | | -| connectTime | | | | -| [dev](./using-plugins/UsingTheDeveloperPlugin.md) | | | | -| fastestResponseStrategy | | | | -| [initialConnection](./using-plugins/UsingTheAuroraInitialConnectionStrategyPlugin.md) | | | | -| [limitless](./using-plugins/UsingTheLimitlessConnectionPlugin.md) | | | | -| [bg](./using-plugins/UsingTheBlueGreenPlugin.md) | | | | +| Plugin codes / Database types | [RDS Multi-AZ DB Instance deployment (2 instances)](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.MultiAZSingleStandby.html)
(MySQL and PG) | [RDS Single-AZ Instance deployment (1 instance)](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.DBInstance.html)
(MySQL and PG) | Community Database
(MySQL and PG) | +|---------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------| +| executionTime | | | | +| logQuery | | | | +| dataCache | | | | +| customEndpoint | | | | +| [efm](./using-plugins/UsingTheHostMonitoringPlugin.md) | | | | +| [efm2](./using-plugins/UsingTheHostMonitoringPlugin.md#host-monitoring-plugin-v2) | | | | +| [failover](./using-plugins/UsingTheFailoverPlugin.md) | | | | +| [failover2](./using-plugins/UsingTheFailover2Plugin.md) | | | | +| [iam](./using-plugins/UsingTheIamAuthenticationPlugin.md) | | | | +| [awsSecretsManager](./using-plugins/UsingTheAwsSecretsManagerPlugin.md) | | | | +| [federatedAuth](./using-plugins/UsingTheFederatedAuthPlugin.md) | | | | +| [okta](./using-plugins/UsingTheOktaAuthPlugin.md) | | | | +| ~~auroraStaleDns~~ | | | | +| [readWriteSplitting](./using-plugins/UsingTheReadWriteSplittingPlugin.md) | | | | +| [srw](./using-plugins/UsingTheSimpleReadWriteSplittingPlugin.md) | | | (disable `verifyNewSrwConnections`) | +| [auroraConnectionTracker](./using-plugins/UsingTheAuroraConnectionTrackerPlugin.md) | | | | +| [driverMetaData](./using-plugins/UsingTheDriverMetadataConnectionPlugin.md) | | | | +| connectTime | | | | +| [dev](./using-plugins/UsingTheDeveloperPlugin.md) | | | | +| fastestResponseStrategy | | | | +| [initialConnection](./using-plugins/UsingTheAuroraInitialConnectionStrategyPlugin.md) | | | | +| [limitless](./using-plugins/UsingTheLimitlessConnectionPlugin.md) | | | | +| [bg](./using-plugins/UsingTheBlueGreenPlugin.md) | | | | diff --git a/docs/using-the-jdbc-driver/CompatibilityEndpoints.md b/docs/using-the-jdbc-driver/CompatibilityEndpoints.md index 1a68dde96..4394fbfa0 100644 --- a/docs/using-the-jdbc-driver/CompatibilityEndpoints.md +++ b/docs/using-the-jdbc-driver/CompatibilityEndpoints.md @@ -23,22 +23,23 @@ There are many different URL types (endpoints) that can be used with The AWS Adv | logQuery | | | dataCache | | | customEndpoint | | -| [efm](./using-plugins/UsingTheHostMonitoringPlugin.md) | | -| [efm2](./using-plugins/UsingTheHostMonitoringPlugin.md#host-monitoring-plugin-v2) | | -| [failover](./using-plugins/UsingTheFailoverPlugin.md) | | -| [failover2](./using-plugins/UsingTheFailover2Plugin.md) | | -| [iam](./using-plugins/UsingTheIamAuthenticationPlugin.md) | (requires special configuration) | +| [efm](./using-plugins/UsingTheHostMonitoringPlugin.md) | (requires `initialConnection` plugin) | +| [efm2](./using-plugins/UsingTheHostMonitoringPlugin.md#host-monitoring-plugin-v2) | (requires `initialConnection` plugin) | +| [failover](./using-plugins/UsingTheFailoverPlugin.md) | | +| [failover2](./using-plugins/UsingTheFailover2Plugin.md) | | +| [iam](./using-plugins/UsingTheIamAuthenticationPlugin.md) | (requires `initialConnection` plugin) | | [awsSecretsManager](./using-plugins/UsingTheAwsSecretsManagerPlugin.md) | | | [federatedAuth](./using-plugins/UsingTheFederatedAuthPlugin.md) | | | [okta](./using-plugins/UsingTheOktaAuthPlugin.md) | | -| auroraStaleDns | | -| [readWriteSplitting](./using-plugins/UsingTheReadWriteSplittingPlugin.md) | | +| ~~auroraStaleDns~~ | | +| 
[readWriteSplitting](./using-plugins/UsingTheReadWriteSplittingPlugin.md) | | +| [srw](./using-plugins/UsingTheSimpleReadWriteSplittingPlugin.md) | | | [auroraConnectionTracker](./using-plugins/UsingTheAuroraConnectionTrackerPlugin.md) | | | [driverMetaData](./using-plugins/UsingTheDriverMetadataConnectionPlugin.md) | | | connectTime | | | [dev](./using-plugins/UsingTheDeveloperPlugin.md) | | -| fastestResponseStrategy | | -| [initialConnection](./using-plugins/UsingTheAuroraInitialConnectionStrategyPlugin.md) | | +| fastestResponseStrategy | | +| [initialConnection](./using-plugins/UsingTheAuroraInitialConnectionStrategyPlugin.md) | | | [limitless](./using-plugins/UsingTheLimitlessConnectionPlugin.md) | | | [bg](./using-plugins/UsingTheBlueGreenPlugin.md) | | @@ -58,8 +59,9 @@ There are many different URL types (endpoints) that can be used with The AWS Adv | [awsSecretsManager](./using-plugins/UsingTheAwsSecretsManagerPlugin.md) | | | | | | [federatedAuth](./using-plugins/UsingTheFederatedAuthPlugin.md) | | | | | | [okta](./using-plugins/UsingTheOktaAuthPlugin.md) | | | | | -| auroraStaleDns | | | | | +| ~~auroraStaleDns~~ | | | | | | [readWriteSplitting](./using-plugins/UsingTheReadWriteSplittingPlugin.md) | | | | | +| [srw](./using-plugins/UsingTheSimpleReadWriteSplittingPlugin.md) | | | | | | [auroraConnectionTracker](./using-plugins/UsingTheAuroraConnectionTrackerPlugin.md) | | | | | | [driverMetaData](./using-plugins/UsingTheDriverMetadataConnectionPlugin.md) | | | | | | connectTime | | | | | @@ -85,8 +87,9 @@ There are many different URL types (endpoints) that can be used with The AWS Adv | [awsSecretsManager](./using-plugins/UsingTheAwsSecretsManagerPlugin.md) | | | | | | [federatedAuth](./using-plugins/UsingTheFederatedAuthPlugin.md) | | | | | | [okta](./using-plugins/UsingTheOktaAuthPlugin.md) | | | | | -| auroraStaleDns | | | | | +| ~~auroraStaleDns~~ | | | | | | [readWriteSplitting](./using-plugins/UsingTheReadWriteSplittingPlugin.md) | | | | | +| [srw](./using-plugins/UsingTheSimpleReadWriteSplittingPlugin.md) | | | | | | [auroraConnectionTracker](./using-plugins/UsingTheAuroraConnectionTrackerPlugin.md) | | | | | | [driverMetaData](./using-plugins/UsingTheDriverMetadataConnectionPlugin.md) | | | | | | connectTime | | | | | @@ -113,8 +116,9 @@ There are many different URL types (endpoints) that can be used with The AWS Adv | [awsSecretsManager](./using-plugins/UsingTheAwsSecretsManagerPlugin.md) | (requires special configuration) | (requires special configuration) | | [federatedAuth](./using-plugins/UsingTheFederatedAuthPlugin.md) | (requires special configuration) | (requires special configuration) | | [okta](./using-plugins/UsingTheOktaAuthPlugin.md) | (requires special configuration) | (requires special configuration) | -| auroraStaleDns | | | +| ~~auroraStaleDns~~ | | | | [readWriteSplitting](./using-plugins/UsingTheReadWriteSplittingPlugin.md) | | | +| [srw](./using-plugins/UsingTheSimpleReadWriteSplittingPlugin.md) | | | | [auroraConnectionTracker](./using-plugins/UsingTheAuroraConnectionTrackerPlugin.md) | | | | [driverMetaData](./using-plugins/UsingTheDriverMetadataConnectionPlugin.md) | | | | connectTime | | | diff --git a/docs/using-the-jdbc-driver/DatabaseDialects.md b/docs/using-the-jdbc-driver/DatabaseDialects.md index 16d3e6205..a315f0437 100644 --- a/docs/using-the-jdbc-driver/DatabaseDialects.md +++ b/docs/using-the-jdbc-driver/DatabaseDialects.md @@ -16,12 +16,14 @@ The AWS Advanced JDBC Wrapper is a wrapper that requires an underlying driver, a Dialect codes 
specify what kind of database any connections will be made to. | Dialect Code Reference | Value | Database | -| ---------------------------- | ---------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------- | -| `AURORA_MYSQL` | `aurora-mysql` | Aurora MySQL | +| ---------------------------- | ---------------------------- |----------------------------------------------------------------------------------------------------------------------------------------------------| +| `AURORA_MYSQL` | `aurora-mysql` | [Aurora MySQL](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_GettingStartedAurora.html) | +| `GLOBAL_AURORA_MYSQL` | `global-aurora-mysql` | [Aurora Global Database MySQL](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-global-database-getting-started.html) | | `RDS_MULTI_AZ_MYSQL_CLUSTER` | `rds-multi-az-mysql-cluster` | [Amazon RDS MySQL Multi-AZ DB Cluster Deployments](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/multi-az-db-clusters-concepts.html) | | `RDS_MYSQL` | `rds-mysql` | Amazon RDS MySQL | | `MYSQL` | `mysql` | MySQL | -| `AURORA_PG` | `aurora-pg` | Aurora PostgreSQL | +| `AURORA_PG` | `aurora-pg` | [Aurora PostgreSQL](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_GettingStartedAurora.html) | +| `GLOBAL_AURORA_PG` | `global-aurora-pg` | [Aurora Global Database PostgreSQL](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-global-database-getting-started.html) | | `RDS_MULTI_AZ_PG_CLUSTER` | `rds-multi-az-pg-cluster` | [Amazon RDS PostgreSQL Multi-AZ DB Cluster Deployments](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/multi-az-db-clusters-concepts.html) | | `RDS_PG` | `rds-pg` | Amazon RDS PostgreSQL | | `PG` | `pg` | PostgreSQL | diff --git a/docs/using-the-jdbc-driver/SupportForRDSMultiAzDBCluster.md b/docs/using-the-jdbc-driver/SupportForRDSMultiAzDBCluster.md index a9583d1f4..ce1d7f926 100644 --- a/docs/using-the-jdbc-driver/SupportForRDSMultiAzDBCluster.md +++ b/docs/using-the-jdbc-driver/SupportForRDSMultiAzDBCluster.md @@ -8,6 +8,14 @@ The process of using the AWS Advanced JDBC Wrapper with RDS Multi-AZ DB Cluster ### MySQL +There are extra permissions that must be granted to all non-administrative users who need database access. Without proper access, these users cannot utilize many of the wrapper's advanced features, including failover and blue/green deployment support. To grant the necessary permissions to non-administrative users, execute the following statement: + +```sql +GRANT SELECT ON mysql.rds_topology TO 'non-admin-username'@'%' +``` + +Since granting these permissions manually introduces significant operational overhead, see the [Granting Permissions to Non-admin User in MySQL](./using-plugins/GrantingPermissionsToNonAdminUserInMySQL.md) guide to simplify this process. + Preparing a connection with MySQL in a Multi-AZ Cluster remains the same as before: ```java @@ -24,6 +32,12 @@ Per AWS documentation and [this blog post](https://aws.amazon.com/blogs/database CREATE EXTENSION rds_tools; ``` +Execute permission on the extension's functions must be granted to all non-administrative users who need database access. Without access to `rds_tools`, non-admin users cannot utilize many of the driver's advanced features, including failover support. 
To grant the necessary permissions to non-administrative users, execute the following statement: + +```sql +GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA rds_tools TO non-admin-username; +``` + Then, prepare the connection with: ```java diff --git a/docs/using-the-jdbc-driver/UsingTheJdbcDriver.md b/docs/using-the-jdbc-driver/UsingTheJdbcDriver.md index 829b4f38a..c11155ec0 100644 --- a/docs/using-the-jdbc-driver/UsingTheJdbcDriver.md +++ b/docs/using-the-jdbc-driver/UsingTheJdbcDriver.md @@ -5,6 +5,10 @@ The JDBC Wrapper also supports [connection pooling](./DataSource.md#Using-the-Aw ## Using the AWS Advanced JDBC Wrapper with plain RDS databases It is possible to use the AWS Advanced JDBC Wrapper with plain RDS databases, but individual features may or may not be compatible. For example, failover handling and enhanced failure monitoring are not compatible with plain RDS databases and the relevant plugins must be disabled. Plugins can be enabled or disabled as seen in the [Connection Plugin Manager Parameters](#connection-plugin-manager-parameters) section. Please note that some plugins have been enabled by default. Plugin compatibility can be verified in the [plugins table](#list-of-available-plugins). +## Using the AWS Advanced JDBC Wrapper to access multiple database clusters +> [!WARNING]\ +> If connecting to multiple database clusters within a single application, each connection string must set the `clusterId` property. The property value must be the same for all connections to the same cluster, and connections to different clusters must have different `clusterId` values. If the `clusterId` is omitted, you may experience various issues. For more information, please see the [AWS Advanced JDBC Wrapper Parameters](https://github.com/aws/aws-advanced-jdbc-wrapper/blob/main/docs/using-the-jdbc-driver/UsingTheJdbcDriver.md#aws-advanced-jdbc-wrapper-parameters) section. + ## Using the AWS Advanced JDBC Wrapper with custom endpoints and other non-standard URLs > [!WARNING]\ > If connecting using a non-standard RDS URL (e.g. a custom endpoint, ip address, rds proxy, or custom domain URL), the clusterId property must be set. If the `clusterId` is omitted when using a non-standard RDS URL, you may experience various issues. For more information, please see the [AWS Advanced JDBC Wrapper Parameters](https://github.com/aws/aws-advanced-jdbc-wrapper/blob/main/docs/using-the-jdbc-driver/UsingTheJdbcDriver.md#aws-advanced-jdbc-wrapper-parameters) section. @@ -78,7 +82,7 @@ These parameters are applicable to any instance of the AWS Advanced JDBC Wrapper | Parameter | Value | Required | Description | Default Value | |---------------------------------------------------|-----------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------| -| `clusterId` | `String` | If connecting using a non-standard RDS URL (e.g. an IP address, custom endpoint, rds proxy, or custom domain URL): Yes

Otherwise: No

:warning:If `clusterId` is omitted when using a non-standard RDS URL, you may experience various issues. | A unique identifier for the cluster. Connections with the same cluster id share a cluster topology cache. | None | +| `clusterId` | `String` | If connecting to multiple database clusters within a single application: Yes

Otherwise: No

:warning:If `clusterId` is omitted when multiple clusters are in use, you may experience various issues. | A unique identifier for the cluster. Connections with the same cluster id share a cluster topology cache. For applications that only use a single cluster, this parameter is optional and defaults to `1`. When multiple database clusters are in use, this parameter becomes mandatory and each connection string must include it. The value can be any number or string; all connection strings associated with the same database cluster must use identical `clusterId` values, while connection strings belonging to different database clusters must specify distinct values. Example values: `1`, `2`, `1234`, `abc-1`, `abc-2`. | `1` | | `wrapperLoggerLevel` | `String` | No | Logger level of the AWS Advanced JDBC Wrapper.

If it is used, it must be one of the following values: `OFF`, `SEVERE`, `WARNING`, `INFO`, `CONFIG`, `FINE`, `FINER`, `FINEST`, `ALL`. | `null` | | `database` | `String` | No | Database name. | `null` | | `user` | `String` | No | Database username. | `null` | @@ -94,7 +98,7 @@ These parameters are applicable to any instance of the AWS Advanced JDBC Wrapper | `resetSessionStateOnClose` | `Boolean` | No | Enables resetting the session state before closing connection. | `true` | | `rollbackOnSwitch` | `Boolean` | No | Enables rolling back a current transaction, if any in effect, before switching to a new connection. | `true` | | `awsProfile` | `String` | No | Allows users to specify a profile name for AWS credentials. This parameter is used by plugins that require AWS credentials, like the [IAM Authentication Connection Plugin](./using-plugins/UsingTheIamAuthenticationPlugin.md) and the [AWS Secrets Manager Connection Plugin](./using-plugins/UsingTheAwsSecretsManagerPlugin.md). | `null` | -| `enableGreenNodeReplacement` | `Boolean` | No | Enables replacing a green node host name with the original host name when the green host DNS doesn't exist anymore after a blue/green switchover. Refer to [Overview of Amazon RDS Blue/Green Deployments](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/blue-green-deployments-overview.html) for more details about green and blue nodes. | `false` | +| ~~`enableGreenNodeReplacement`~~ | `Boolean` | No | **Deprecated. Use `bg` plugin instead.** Enables replacing a green node host name with the original host name when the green host DNS doesn't exist anymore after a blue/green switchover. Refer to [Overview of Amazon RDS Blue/Green Deployments](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/blue-green-deployments-overview.html) for more details about green and blue nodes. | `false` | | `wrapperCaseSensitive`,
`wrappercasesensitive` | `Boolean` | No | Allows the driver to change case sensitivity for parameter names in the connection string and in connection properties. Set parameter to `false` to allow case-insensitive parameter names. | `true` | | `skipWrappingForPackages` | `String` | No | Register Java package names (separated by comma) which will be left unwrapped. This setting modifies all future connections established by the driver, not just a particular connection. | `com.pgvector` | @@ -107,7 +111,7 @@ Plugins are loaded and managed through the Connection Plugin Manager and may be | Parameter | Value | Required | Description | Default Value | |-----------------------------------|-----------|----------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------| -| `wrapperPlugins` | `String` | No | Comma separated list of connection plugin codes.

Example: `failover,efm2` | `auroraConnectionTracker,failover2,efm2` | +| `wrapperPlugins` | `String` | No | Comma separated list of connection plugin codes.

Example: `failover,efm2` | `initialConnection,auroraConnectionTracker,failover2,efm2` | | `autoSortWrapperPluginOrder` | `Boolean` | No | Allows the AWS Advanced JDBC Wrapper to sort connection plugins to prevent plugin misconfiguration. Allows a user to provide a custom plugin order if needed. | `true` | | `wrapperProfileName` | `String` | No | Driver configuration profile name. Instead of listing plugin codes with `wrapperPlugins`, the driver profile can be set with this parameter.

Example: See [below](#configuration-profiles). | `null` | @@ -207,12 +211,13 @@ The AWS Advanced JDBC Wrapper has several built-in plugins that are available to | [AWS Secrets Manager Connection Plugin](./using-plugins/UsingTheAwsSecretsManagerPlugin.md) | `awsSecretsManager` | Any database | Enables fetching database credentials from the AWS Secrets Manager service. | [Jackson Databind](https://central.sonatype.com/artifact/com.fasterxml.jackson.core/jackson-databind)
[AWS Secrets Manager](https://central.sonatype.com/artifact/software.amazon.awssdk/secretsmanager) | | [Federated Authentication Plugin](./using-plugins/UsingTheFederatedAuthPlugin.md) | `federatedAuth` | Aurora, RDS Multi-AZ DB Cluster | Enables users to authenticate using Federated Identity and then connect to their Amazon Aurora Cluster using AWS Identity and Access Management (IAM). | [Jackson Databind](https://central.sonatype.com/artifact/com.fasterxml.jackson.core/jackson-databind)
[AWS Java SDK RDS v2.7.x](https://central.sonatype.com/artifact/software.amazon.awssdk/rds)
[AWS Java SDK STS v2.7.x](https://central.sonatype.com/artifact/software.amazon.awssdk/sts) | | [Okta Authentication Plugin](./using-plugins/UsingTheOktaAuthPlugin.md) | `okta` | Aurora, RDS Multi-AZ DB Cluster | Enables users to authenticate using Federated Identity and then connect to their Amazon Aurora Cluster using AWS Identity and Access Management (IAM). | [Jackson Databind](https://central.sonatype.com/artifact/com.fasterxml.jackson.core/jackson-databind)
[AWS Java SDK RDS v2.7.x](https://central.sonatype.com/artifact/software.amazon.awssdk/rds)
[AWS Java SDK STS v2.7.x](https://central.sonatype.com/artifact/software.amazon.awssdk/sts) | -| Aurora Stale DNS Plugin | `auroraStaleDns` | Aurora | Prevents incorrectly opening a new connection to an old writer node when DNS records have not yet updated after a recent failover event.

:warning:**Note:** Contrary to `failover` plugin, `auroraStaleDns` plugin doesn't implement failover support itself. It helps to eliminate opening wrong connections to an old writer node after cluster failover is completed.

:warning:**Note:** This logic is already included in `failover` plugin so you can omit using both plugins at the same time. | None | +| ~~Aurora Stale DNS Plugin~~ | `auroraStaleDns` | Aurora | **Deprecated**. Use `initialConnection` plugin instead.

Prevents incorrectly opening a new connection to an old writer node when DNS records have not yet updated after a recent failover event.

:warning:**Note:** Unlike the `failover` plugin, the `auroraStaleDns` plugin doesn't implement failover support itself; it only helps avoid opening connections to an old writer node after cluster failover has completed.

:warning:**Note:** This logic is already included in the `failover` plugin, so there is no need to use both plugins at the same time. | None | | [Aurora Connection Tracker Plugin](./using-plugins/UsingTheAuroraConnectionTrackerPlugin.md) | `auroraConnectionTracker` | Aurora, RDS Multi-AZ DB Cluster | Tracks all the opened connections. In the event of a cluster failover, the plugin will close all the impacted connections to the node. This plugin is enabled by default. | None | | [Driver Metadata Connection Plugin](./using-plugins/UsingTheDriverMetadataConnectionPlugin.md) | `driverMetaData` | Any database | Allows user application to override the return value of `DatabaseMetaData#getDriverName` | None | | [Read Write Splitting Plugin](./using-plugins/UsingTheReadWriteSplittingPlugin.md) | `readWriteSplitting` | Aurora | Enables read write splitting functionality where users can switch between database reader and writer instances. | None | +| [Simple Read Write Splitting Plugin](./using-plugins/UsingTheSimpleReadWriteSplittingPlugin.md) | `srw` | Any database | Enables read write splitting functionality where users can switch between reader and writer endpoints. | None | | [Developer Plugin](./using-plugins/UsingTheDeveloperPlugin.md) | `dev` | Any database | Helps developers test various everyday scenarios including rare events like network outages and database cluster failover. The plugin allows injecting and raising an expected exception, then verifying how applications handle it. | None | -| [Aurora Initial Connection Strategy](./using-plugins/UsingTheAuroraInitialConnectionStrategyPlugin.md) | `initialConnection` | Aurora | Allows users to configure their initial connection strategy to reader cluster endpoints. | None | +| [Aurora Initial Connection Strategy](./using-plugins/UsingTheAuroraInitialConnectionStrategyPlugin.md) | `initialConnection` | Aurora | Allows users to configure their initial connection strategy to reader cluster endpoints. Prevents incorrectly opening a new connection to an old writer node when DNS records have not yet updated after a recent failover event.

This plugin is **strongly** recommended when using the cluster writer endpoint, cluster reader endpoint, or Global Database endpoint in the connection string.

:warning:**Note:** Unlike the `failover` and `failover2` plugins, the `initialConnection` plugin doesn't implement failover support itself; it only helps avoid opening connections to an old writer node after cluster failover has completed. | None | | [Limitless Connection Plugin](./using-plugins/UsingTheLimitlessConnectionPlugin.md) | `limitless` | Aurora | Enables client-side load-balancing of Transaction Routers on Amazon Aurora Limitless Databases. | None | | Fastest Response Strategy Plugin | `fastestResponseStrategy` | Aurora | When read-write splitting is enabled, this plugin selects the reader to switch to based on the host with the fastest response time. The plugin achieves this by periodically monitoring the hosts' response times and storing the fastest host in a cache. **Note:** the `readerHostSelectorStrategy` parameter must be set to `fastestResponse` in the user-defined connection properties in order to enable this plugin. See [reader selection strategies](./ReaderSelectionStrategies.md). | None | | [Blue/Green Deployment Plugin](./using-plugins/UsingTheBlueGreenPlugin.md) | `bg` | Aurora,
RDS Instance | Enables client-side Blue/Green Deployment support. | None | @@ -236,7 +241,7 @@ To use a snapshot build in your project, check the following examples. More info <groupId>software.amazon.jdbc</groupId> <artifactId>aws-advanced-jdbc-wrapper</artifactId> - <version>2.6.8-SNAPSHOT</version> + <version>3.0.0-SNAPSHOT</version> diff --git a/docs/using-the-jdbc-driver/using-plugins/UsingTheAuroraInitialConnectionStrategyPlugin.md b/docs/using-the-jdbc-driver/using-plugins/UsingTheAuroraInitialConnectionStrategyPlugin.md index 70f23c974..9483f7989 100644 --- a/docs/using-the-jdbc-driver/using-plugins/UsingTheAuroraInitialConnectionStrategyPlugin.md +++ b/docs/using-the-jdbc-driver/using-plugins/UsingTheAuroraInitialConnectionStrategyPlugin.md @@ -5,6 +5,8 @@ When this plugin is enabled, if the initial connection is to a reader cluster en This plugin also helps retrieve connections more reliably. When a user connects to a cluster endpoint, the actual instance for a new connection is resolved by DNS. During failover, the cluster elects another instance to be the writer. While DNS is updating, which can take up to 40-60 seconds, if a user tries to connect to the cluster endpoint, they may be connecting to an old node. This plugin helps by replacing the out of date endpoint if DNS is updating. +When using an Aurora Global Database, users have the option to use an [Aurora Global Writer Endpoint](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-global-database-connecting.html). The Global Writer Endpoint simplifies application configuration. However, like the cluster writer endpoint mentioned above, it can also be affected by DNS updates. The Aurora Initial Connection Strategy Plugin recognizes an Aurora Global Writer Endpoint and substitutes it with the current writer endpoint. + 
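As a minimal sketch of the scenario described in the paragraph above, the snippet below connects through a Global Writer Endpoint with the `initialConnection` plugin enabled. The endpoint name and credentials are illustrative placeholders, and the plugin list is an assumption rather than a prescribed configuration:

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.util.Properties;

public class GlobalWriterEndpointSketch {
  public static void main(String[] args) throws Exception {
    Properties props = new Properties();
    // initialConnection substitutes the Global Writer Endpoint with the current writer.
    props.setProperty("wrapperPlugins", "initialConnection,failover2,efm2");
    props.setProperty("user", "dbuser");         // placeholder
    props.setProperty("password", "dbpassword"); // placeholder
    try (Connection conn = DriverManager.getConnection(
        "jdbc:aws-wrapper:postgresql://my-global-db.global-xyz.global.rds.amazonaws.com:5432/mydb",
        props)) {
      System.out.println("Connected through the Global Writer Endpoint");
    }
  }
}
```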
## Enabling the Aurora Initial Connection Strategy Plugin diff --git a/docs/using-the-jdbc-driver/using-plugins/UsingTheFailover2Plugin.md b/docs/using-the-jdbc-driver/using-plugins/UsingTheFailover2Plugin.md index a00399b3f..17a42f457 100644 --- a/docs/using-the-jdbc-driver/using-plugins/UsingTheFailover2Plugin.md +++ b/docs/using-the-jdbc-driver/using-plugins/UsingTheFailover2Plugin.md @@ -59,16 +59,16 @@ In addition to the parameters that you can configure for the underlying driver, |---------------------------------------|:--------:|:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------:|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | `failoverMode` | String | No | Defines a mode for failover process. Failover process may prioritize nodes with different roles and connect to them. Possible values:

- `strict-writer` - Failover process follows writer node and connects to a new writer when it changes.
- `reader-or-writer` - During failover, the driver tries to connect to any available/accessible reader node. If no reader is available, the driver will connect to a writer node. This logic mimics the logic of the Aurora read-only cluster endpoint.
- `strict-reader` - During failover, the driver tries to connect to any available reader node. If no reader is available, the driver raises an error. Reader failover to a writer node will only be allowed for single-node clusters. This logic mimics the logic of the Aurora read-only cluster endpoint.

If this parameter is omitted, default value depends on connection url. For Aurora read-only cluster endpoint, it's set to `reader-or-writer`. Otherwise, it's `strict-writer`. | Default value depends on connection url. For Aurora read-only cluster endpoint, it's set to `reader-or-writer`. Otherwise, it's `strict-writer`. | | `clusterInstanceHostPattern` | String | If connecting using an IP address or custom domain URL: Yes

Otherwise: No | This parameter is not required unless connecting to an AWS RDS cluster via an IP address or custom domain URL. In those cases, this parameter specifies the cluster instance DNS pattern that will be used to build a complete instance endpoint. A "?" character in this pattern should be used as a placeholder for the DB instance identifiers of the instances in the cluster. See [here](#host-pattern) for more information.

Example: `?.my-domain.com`, `any-subdomain.?.my-domain.com:9999`

Use case Example: If your cluster instance endpoints follow this pattern:`instanceIdentifier1.customHost`, `instanceIdentifier2.customHost`, etc. and you want your initial connection to be to `customHost:1234`, then your connection string should look like this: `jdbc:aws-wrapper:mysql://customHost:1234/test?clusterInstanceHostPattern=?.customHost` | If the provided connection string is not an IP address or custom domain, the JDBC Driver will automatically acquire the cluster instance host pattern from the customer-provided connection string. | -| `clusterTopologyRefreshRateMs` | Integer | No | Cluster topology refresh rate in milliseconds when a cluster is not in failover. It refers to the regular, slow monitoring rate ex``plained above. | `30000` | +| `globalClusterInstanceHostPatterns` | String | For Global Databases: Yes

Otherwise: No | This parameter is similar to the `clusterInstanceHostPattern` parameter but it provides a comma-separated list of instance host patterns. This parameter is required for Aurora Global Databases. The list should contain host patterns for each region of the global database. Each host pattern can be based on an RDS instance endpoint or a custom user domain name. If a custom domain name is used, the instance template pattern should be prefixed with the AWS region name in square brackets (`[]`).

The parameter is ignored for other types of databases (Aurora Clusters, RDS Clusters, plain RDS databases, etc.).

Example: for an Aurora Global Database spanning the AWS regions `us-east-2` and `us-west-2`, the parameter value should be set to `?.XYZ1.us-east-2.rds.amazonaws.com,?.XYZ2.us-west-2.rds.amazonaws.com`. Please note that the endpoint identifiers differ between AWS regions (`XYZ1` and `XYZ2` in the example above).

Example: if using custom domain names, the parameter value should be similar to `[us-east-2]?.customHost,[us-west-2]?.anotherCustomHost`. The port can also be provided: `[us-east-2]?.customHost:8888,[us-west-2]?.anotherCustomHost:9999` | | +| `clusterTopologyRefreshRateMs` | Integer | No | Cluster topology refresh rate in milliseconds when a cluster is not in failover. It refers to the regular, slow monitoring rate explained above. | `30000` | | `failoverTimeoutMs` | Integer | No | Maximum allowed time in milliseconds to attempt reconnecting to a new writer or reader instance after a cluster failover is initiated. | `300000` | | `clusterTopologyHighRefreshRateMs` | Integer | No | Interval of time in milliseconds to wait between attempts to update cluster topology after the writer has come back online following a failover event. It corresponds to the increased monitoring rate described earlier. Usually, the topology monitoring component uses this increased monitoring rate for 30s after a new writer was detected. | `100` | | `failoverReaderHostSelectorStrategy` | String | No | Strategy used to select a reader node during failover. For more information on the available reader selection strategies, see this [table](../ReaderSelectionStrategies.md). | `random` | -| `clusterId` | `String` | If connecting using a non-standard RDS URL (e.g. an IP address, custom endpoint, rds proxy, or custom domain URL): Yes

Otherwise: No

:warning:If `clusterId` is omitted when using a non-standard RDS URL, you may experience various issues. | A unique identifier for the cluster. Connections with the same cluster id share a cluster topology cache. | None | +| `clusterId` | `String` | If connecting to multiple database clusters within a single application: Yes

Otherwise: No

| A unique identifier for the cluster. Connections with the same cluster id share a cluster topology cache. This parameter is optional and defaults to `1`. When supporting multiple database clusters, this parameter becomes mandatory. Each connection string must include the `clusterId` parameter with a value that can be any number or string. However, all connection strings associated with the same database cluster must use identical `clusterId` values, while connection strings belonging to different database clusters must specify distinct values. Example values: `1`, `2`, `1234`, `abc-1`, `abc-2`. | `1` | | `telemetryFailoverAdditionalTopTrace` | Boolean | No | Allows the driver to produce an additional telemetry span associated with failover. Such span helps to facilitate telemetry analysis in AWS CloudWatch. | `false` | | `skipFailoverOnInterruptedThread` | Boolean | No | Enable to skip failover if the current thread is interrupted. This may leave the Connection in an invalid state so the Connection should be disposed. | `false` | - Please refer to the original [Failover Plugin](./UsingTheFailoverPlugin.md) for more details about error codes, configurations, connection pooling and sample codes. ### Sample Code diff --git a/docs/using-the-jdbc-driver/using-plugins/UsingTheFailoverPlugin.md b/docs/using-the-jdbc-driver/using-plugins/UsingTheFailoverPlugin.md index 0fd47cb2f..c898a869d 100644 --- a/docs/using-the-jdbc-driver/using-plugins/UsingTheFailoverPlugin.md +++ b/docs/using-the-jdbc-driver/using-plugins/UsingTheFailoverPlugin.md @@ -23,20 +23,19 @@ Verify plugin compatibility within your driver configuration using the [compatib ### Failover Parameters In addition to the parameters that you can configure for the underlying driver, you can pass the following parameters to the AWS Advanced JDBC Wrapper through the connection URL to specify additional failover behavior. -| Parameter | Value | Required | Description | Default Value | -|----------------------------------------|:--------:|:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------:|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `failoverMode` | String | No | Defines a mode for failover process.
Failover process may prioritize nodes with different roles and connect to them. Possible values:

- `strict-writer` - Failover process follows writer node and connects to a new writer when it changes.
- `reader-or-writer` - During failover, the driver tries to connect to any available/accessible reader node. If no reader is available, the driver will connect to a writer node. This logic mimics the logic of the Aurora read-only cluster endpoint.
- `strict-reader` - During failover, the driver tries to connect to any available reader node. If no reader is available, the driver raises an error. Reader failover to a writer node will only be allowed for single-node clusters. This logic mimics the logic of the Aurora read-only cluster endpoint.

If this parameter is omitted, default value depends on connection url. For Aurora read-only cluster endpoint, it's set to `reader-or-writer`. Otherwise, it's `strict-writer`. | Default value depends on connection url. For Aurora read-only cluster endpoint, it's set to `reader-or-writer`. Otherwise, it's `strict-writer`. | -| `clusterId` | `String` | If connecting using a non-standard RDS URL (e.g. an IP address, custom endpoint, rds proxy, or custom domain URL): Yes

Otherwise: No

:warning:If `clusterId` is omitted when using a non-standard RDS URL, you may experience various issues. | A unique identifier for the cluster. Connections with the same cluster id share a cluster topology cache. | None | -| `clusterInstanceHostPattern` | String | If connecting using an IP address or custom domain URL: Yes

Otherwise: No | This parameter is not required unless connecting to an AWS RDS cluster via an IP address or custom domain URL. In those cases, this parameter specifies the cluster instance DNS pattern that will be used to build a complete instance endpoint. A "?" character in this pattern should be used as a placeholder for the DB instance identifiers of the instances in the cluster. See [here](#host-pattern) for more information.

Example: `?.my-domain.com`, `any-subdomain.?.my-domain.com:9999`

Use case Example: If your cluster instance endpoints follow this pattern:`instanceIdentifier1.customHost`, `instanceIdentifier2.customHost`, etc. and you want your initial connection to be to `customHost:1234`, then your connection string should look like this: `jdbc:aws-wrapper:mysql://customHost:1234/test?clusterInstanceHostPattern=?.customHost` | If the provided connection string is not an IP address or custom domain, the JDBC Driver will automatically acquire the cluster instance host pattern from the customer-provided connection string. | -| `enableClusterAwareFailover` | Boolean | No | Set to `true` to enable the fast failover behavior offered by the AWS Advanced JDBC Wrapper. Set to `false` for simple JDBC connections that do not require fast failover functionality. | `true` | -| `failoverClusterTopologyRefreshRateMs` | Integer | No | Cluster topology refresh rate in milliseconds during a writer failover process. During the writer failover process, cluster topology may be refreshed at a faster pace than normal to speed up discovery of the newly promoted writer. | `2000` | -| `failoverReaderConnectTimeoutMs` | Integer | No | Maximum allowed time in milliseconds to attempt to connect to a reader instance during a reader failover process. | `30000` | -| `failoverTimeoutMs` | Integer | No | Maximum allowed time in milliseconds to attempt reconnecting to a new writer or reader instance after a cluster failover is initiated. | `300000` | -| `failoverWriterReconnectIntervalMs` | Integer | No | Interval of time in milliseconds to wait between attempts to reconnect to a failed writer during a writer failover process. | `2000` | -| `enableConnectFailover` | Boolean | No | Enables/disables cluster-aware failover if the initial connection to the database fails due to a network exception. Note that this may result in a connection to a different instance in the cluster than was specified by the URL. | `false` | -| `skipFailoverOnInterruptedThread` | Boolean | No | Enable to skip failover if the current thread is interrupted. This may leave the Connection in an invalid state so the Connection should be disposed. | `false` | -| ~~`keepSessionStateOnFailover`~~ | Boolean | No | This parameter is no longer available. If specified, it will be ignored by the driver. See [Session State](../SessionState.md) for more details. | `false` | -| ~~`enableFailoverStrictReader`~~ | Boolean | No | This parameter is no longer available and, if specified, it will be ignored by the driver. See `failoverMode` (`reader-or-writer` or `strict-reader`) for more details. 
| | +| Parameter | Value | Required | Description | Default Value | +|----------------------------------------|:--------:|:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------:|:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `failoverMode` | String | No | Defines a mode for the failover process. The failover process may prioritize nodes with different roles and connect to them. Possible values:

- `strict-writer` - The failover process follows the writer node and connects to the new writer when it changes.
- `reader-or-writer` - During failover, the driver tries to connect to any available/accessible reader node. If no reader is available, the driver will connect to a writer node. This mimics the behavior of the Aurora read-only cluster endpoint.
- `strict-reader` - During failover, the driver tries to connect to any available reader node. If no reader is available, the driver raises an error. Reader failover to a writer node will only be allowed for single-node clusters. This mimics the behavior of the Aurora read-only cluster endpoint.

If this parameter is omitted, the default value depends on the connection URL. For the Aurora read-only cluster endpoint, it's set to `reader-or-writer`. Otherwise, it's `strict-writer`. | The default value depends on the connection URL. For the Aurora read-only cluster endpoint, it's set to `reader-or-writer`. Otherwise, it's `strict-writer`. | +| `clusterId` | `String` | If connecting to multiple database clusters within a single application: Yes

Otherwise: No

:warning: If `clusterId` is omitted, you may experience various issues. | A unique identifier for the cluster. Connections with the same cluster id share a cluster topology cache. For applications using a single database cluster, this parameter is optional and defaults to `1`. When supporting multiple database clusters, this parameter becomes mandatory. Each connection string must include the `clusterId` parameter with a value that can be any number or string. However, all connection strings associated with the same database cluster must use identical `clusterId` values, while connection strings belonging to different database clusters must specify distinct values. Example values: `1`, `2`, `1234`, `abc-1`, `abc-2`. | `1` | +| `clusterInstanceHostPattern` | String | If connecting using an IP address or custom domain URL: Yes

Otherwise: No | This parameter is not required unless connecting to an AWS RDS cluster via an IP address or custom domain URL. In those cases, this parameter specifies the cluster instance DNS pattern that will be used to build a complete instance endpoint. A "?" character in this pattern should be used as a placeholder for the DB instance identifiers of the instances in the cluster. See [here](#host-pattern) for more information.

Example: `?.my-domain.com`, `any-subdomain.?.my-domain.com:9999`

Use case example: If your cluster instance endpoints follow this pattern: `instanceIdentifier1.customHost`, `instanceIdentifier2.customHost`, etc., and you want your initial connection to be to `customHost:1234`, then your connection string should look like this: `jdbc:aws-wrapper:mysql://customHost:1234/test?clusterInstanceHostPattern=?.customHost` | If the provided connection string is not an IP address or custom domain, the JDBC Driver will automatically acquire the cluster instance host pattern from the customer-provided connection string. | +| `globalClusterInstanceHostPatterns` | String | For Global Databases: Yes

Otherwise: No | This parameter is similar to the `clusterInstanceHostPattern` parameter but provides a comma-separated list of instance host patterns. This parameter is required for Aurora Global Databases. The list should contain a host pattern for each region of the global database. Each host pattern can be based on an RDS instance endpoint or a custom user domain name. If a custom domain name is used, the instance template pattern should be prefixed with the AWS region name in square brackets (`[]`).

The parameter is ignored for other types of databases (Aurora Clusters, RDS Clusters, plain RDS databases, etc.).

Example: for an Aurora Global Database with two AWS regions `us-east-2` and `us-west-2`, the parameter value should be `?.XYZ1.us-east-2.rds.amazonaws.com,?.XYZ2.us-west-2.rds.amazonaws.com`. Please note that the identifiers differ between AWS regions (`XYZ1` and `XYZ2` in the example above).

If custom domain names are used, the parameter value can be `[us-east-2]?.customHost,[us-west-2]?.anotherCustomHost`. A port can also be provided: `[us-east-2]?.customHost:8888,[us-west-2]?.anotherCustomHost:9999` | | +| `enableClusterAwareFailover` | Boolean | No | Set to `true` to enable the fast failover behavior offered by the AWS Advanced JDBC Wrapper. Set to `false` for simple JDBC connections that do not require fast failover functionality. | `true` | +| `failoverClusterTopologyRefreshRateMs` | Integer | No | Cluster topology refresh rate in milliseconds during a writer failover process. During the writer failover process, cluster topology may be refreshed at a faster pace than normal to speed up discovery of the newly promoted writer. | `2000` | +| `failoverReaderConnectTimeoutMs` | Integer | No | Maximum allowed time in milliseconds to attempt to connect to a reader instance during a reader failover process. | `30000` | +| `failoverTimeoutMs` | Integer | No | Maximum allowed time in milliseconds to attempt reconnecting to a new writer or reader instance after a cluster failover is initiated. | `300000` | +| `failoverWriterReconnectIntervalMs` | Integer | No | Interval of time in milliseconds to wait between attempts to reconnect to a failed writer during a writer failover process. | `2000` | +| `enableConnectFailover` | Boolean | No | Enables/disables cluster-aware failover if the initial connection to the database fails due to a network exception. Note that this may result in a connection to a different instance in the cluster than was specified by the URL. | `false` | +| `skipFailoverOnInterruptedThread` | Boolean | No | Enable to skip failover if the current thread is interrupted. This may leave the Connection in an invalid state so the Connection should be disposed. | `false` | ## Host Pattern When connecting to Aurora clusters, the [`clusterInstanceHostPattern`](#failover-parameters) parameter is required if the connection string does not provide enough information about the database cluster domain name. If the Aurora cluster endpoint is used directly, the AWS Advanced JDBC Wrapper will recognize the standard Aurora domain name and can re-build a proper Aurora instance name when needed. In cases where the connection string uses an IP address, a custom domain name, or localhost, the driver won't know how to build a proper domain name for a database instance endpoint. For example, if a custom domain was being used and the cluster instance endpoints followed a pattern of `instanceIdentifier1.customHost`, `instanceIdentifier2.customHost`, etc., the driver would need to know how to construct the instance endpoints using the specified custom domain. Since there isn't enough information from the custom domain alone to create the instance endpoints, you should set the `clusterInstanceHostPattern` to `?.customHost`, making the connection string `jdbc:aws-wrapper:postgresql://customHost:1234/test?clusterInstanceHostPattern=?.customHost`. Refer to [this diagram](../../images/failover_behavior.png) about AWS Advanced JDBC Wrapper behavior during failover for different connection URLs and more details and examples.
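+To make the `clusterId` requirement from the [failover parameters](#failover-parameters) table concrete, here is a minimal sketch of connection strings for an application that uses two database clusters. The host names and `clusterId` values below are illustrative placeholders, not real endpoints:
+
+```
+# All connections to the first cluster share one id...
+jdbc:aws-wrapper:postgresql://db-a.cluster-XYZ1.us-east-2.rds.amazonaws.com:5432/test?clusterId=cluster-a
+# ...and all connections to the second cluster share a different one.
+jdbc:aws-wrapper:postgresql://db-b.cluster-XYZ2.us-east-2.rds.amazonaws.com:5432/test?clusterId=cluster-b
+```
+
+Because connections with the same `clusterId` share a topology cache, reusing one id across different clusters would mix their topologies during failover.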
diff --git a/docs/using-the-jdbc-driver/using-plugins/UsingTheFederatedAuthPlugin.md b/docs/using-the-jdbc-driver/using-plugins/UsingTheFederatedAuthPlugin.md index 1599e7b2d..7746e757f 100644 --- a/docs/using-the-jdbc-driver/using-plugins/UsingTheFederatedAuthPlugin.md +++ b/docs/using-the-jdbc-driver/using-plugins/UsingTheFederatedAuthPlugin.md @@ -34,7 +34,7 @@ This JAR is a drop-in ready solution and is **recommended for customers who do n As this plugin has a number of transitive dependencies, the goal of this JAR is to eliminate the need to manually source all the dependencies and avoid potential issues with managing them. In that spirit, the dependencies in this JAR are shaded with the prefix `shaded` to avoid potential package conflicts with pre-existing packages in your environment. -It is important to note that the Uber JAR is bundled with the AWS Java RDS SDK and is larger (**15 MB**) than our `aws-advanced-jdbc-wrapper-2.6.7.jar`. So please take that into account when deciding if this solution is for you. +It is important to note that the Uber JAR is bundled with the AWS Java RDS SDK and is larger (**15 MB**) than our `aws-advanced-jdbc-wrapper-3.0.0.jar`. So please take that into account when deciding if this solution is for you. If you would like to download and install the bundled Uber JAR, follow these [instructions](../../GettingStarted.md#direct-download-and-installation). diff --git a/docs/using-the-jdbc-driver/using-plugins/UsingTheIamAuthenticationPlugin.md b/docs/using-the-jdbc-driver/using-plugins/UsingTheIamAuthenticationPlugin.md index ee56dbc60..995cbdc33 100644 --- a/docs/using-the-jdbc-driver/using-plugins/UsingTheIamAuthenticationPlugin.md +++ b/docs/using-the-jdbc-driver/using-plugins/UsingTheIamAuthenticationPlugin.md @@ -42,12 +42,13 @@ IAM database authentication use is limited to certain database engines. For more GRANT rds_iam TO db_userx;` 4. Add the plugin code `iam` to the [`wrapperPlugins`](../UsingTheJdbcDriver.md#connection-plugin-manager-parameters) parameter value. -| Parameter | Value | Required | Description | Example Value | -|-------------------|:-------:|:--------:|:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------| -| `iamDefaultPort` | String | No | This property will override the default port that is used to generate the IAM token. The default port is determined based on the underlying driver protocol. For now, there is support for `jdbc:postgresql:` and `jdbc:mysql:`. Target drivers with different protocols will require users to provide a default port. | `1234` | -| `iamHost` | String | No | This property will override the default hostname that is used to generate the IAM token. The default hostname is derived from the connection string. This parameter is required when users are connecting with custom endpoints. | `database.cluster-hash.us-east-1.rds.amazonaws.com` | -| `iamRegion` | String | No | This property will override the default region that is used to generate the IAM token. The default region is parsed from the connection string. | `us-east-2` | -| `iamExpiration` | Integer | No | This property determines how long an IAM token is kept in the driver cache before a new one is generated. 
The default expiration time is set to be 14 minutes and 30 seconds. Note that IAM database authentication tokens have a lifetime of 15 minutes. | `600` | +| Parameter | Value | Required | Description | Example Value | +|------------------------------|:-------:|:--------:|:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------| +| `iamDefaultPort` | String | No | This property will override the default port that is used to generate the IAM token. The default port is determined based on the underlying driver protocol. For now, there is support for `jdbc:postgresql:` and `jdbc:mysql:`. Target drivers with different protocols will require users to provide a default port. | `1234` | +| `iamHost` | String | No | This property will override the default hostname that is used to generate the IAM token. The default hostname is derived from the connection string. This parameter is required when users are connecting with custom endpoints. | `database.cluster-hash.us-east-1.rds.amazonaws.com` | +| `iamRegion` | String | No | This property will override the default region that is used to generate the IAM token. The default region is parsed from the connection string. | `us-east-2` | +| `iamExpiration` | Integer | No | This property determines how long an IAM token is kept in the driver cache before a new one is generated. The default expiration time is set to be 14 minutes and 30 seconds. Note that IAM database authentication tokens have a lifetime of 15 minutes. | `600` | +| `iamAccessTokenPropertyName` | String | No | This property allows you to override the property name used for passing the IAM access token. Some underlying drivers may require a specific property name for IAM authentication. Default value is `password`. | `password`, `accessToken` | ## Sample code [AwsIamAuthenticationPostgresqlExample.java](../../../examples/AWSDriverExample/src/main/java/software/amazon/AwsIamAuthenticationPostgresqlExample.java)
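+The new `iamAccessTokenPropertyName` parameter above can be set like any other connection property. A minimal sketch, assuming a hypothetical target driver that reads the token from an `accessToken` property instead of the default `password`:
+
+```java
+final Properties props = new Properties();
+props.setProperty(PropertyDefinition.PLUGINS.name, "iam");
+// Hypothetical property name expected by the target driver; most drivers accept the default "password".
+props.setProperty("iamAccessTokenPropertyName", "accessToken");
+```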
diff --git a/docs/using-the-jdbc-driver/using-plugins/UsingTheReadWriteSplittingPlugin.md b/docs/using-the-jdbc-driver/using-plugins/UsingTheReadWriteSplittingPlugin.md index 743d0e07a..fcee08d7d 100644 --- a/docs/using-the-jdbc-driver/using-plugins/UsingTheReadWriteSplittingPlugin.md +++ b/docs/using-the-jdbc-driver/using-plugins/UsingTheReadWriteSplittingPlugin.md @@ -8,7 +8,7 @@ The read/write splitting plugin is not loaded by default. To load the plugin, in ``` final Properties properties = new Properties(); -properties.setProperty(PropertyDefinition.PLUGINS.name, "readWriteSplitting,failover,efm"); +properties.setProperty(PropertyDefinition.PLUGINS.name, "readWriteSplitting,failover2,efm2"); ``` If you would like to use the read/write splitting plugin without the failover plugin, make sure you have the `readWriteSplitting` plugin in the `wrapperPlugins` property, and that the failover plugin is not part of it. @@ -30,7 +30,10 @@ The read/write splitting plugin is not currently supported for non-Aurora cluste > [!WARNING]\ > If internal connection pools are enabled, database passwords may not be verified with every connection request. The initial connection request for each database instance in the cluster will verify the password, but subsequent requests may return a cached pool connection without re-verifying the password. This behavior is inherent to the nature of connection pools in general and not a bug with the driver. `ConnectionProviderManager.releaseResources` can be called to close all pools and remove all cached pool connections. See [InternalConnectionPoolPasswordWarning.java](../../../examples/AWSDriverExample/src/main/java/software/amazon/InternalConnectionPoolPasswordWarning.java) for more details. -Whenever `setReadOnly(true)` is first called on a `Connection` object, the read/write plugin will internally open a new physical connection to a reader. After this first call, the physical reader connection will be cached for the given `Connection`. Future calls to `setReadOnly `on the same `Connection` object will not require opening a new physical connection. However, calling `setReadOnly(true)` for the first time on a new `Connection` object will require the plugin to establish another new physical connection to a reader. If your application frequently calls `setReadOnly`, you can enable internal connection pooling to improve performance. When enabled, the wrapper driver will maintain an internal connection pool for each instance in the cluster. This allows the read/write plugin to reuse connections that were established by `setReadOnly` calls on previous `Connection` objects. +Whenever `setReadOnly(true)` is first called on a `Connection` object, the read/write plugin will internally open a new physical connection to a reader. After this first call, the physical reader connection will be cached for the given `Connection`. By default, this cached connection will never expire, meaning all subsequent `setReadOnly(true)` calls on the same `Connection` object will keep reusing the same reader connection. +If your application frequently calls `setReadOnly`, this may have a performance impact. There are two ways to improve performance: +1. You can enable internal connection pooling to improve performance. When enabled, the wrapper driver will maintain an internal connection pool for each instance in the cluster. This allows the Read/Write Splitting plugin to reuse connections that were established by `setReadOnly` calls on previous `Connection` objects. +2. 
You can also use the [`cachedReaderKeepAliveTimeoutMs` connection parameter](#reader-keep-alive-timeout). This sets an expiration time on the reader connection. When `setReadOnly(true)` is called and the reader connection has expired, the plugin will create a new reader connection using the specified [reader selection strategy](#reader-selection). > [!NOTE]\ > Initial connections to a cluster URL will not be pooled. The driver does not pool cluster URLs because it can be problematic to pool a URL that resolves to different instances over time. The main benefit of internal connection pools is when setReadOnly is called. When setReadOnly is called (regardless of the initial connection URL), an internal pool will be created for the writer/reader that the plugin switches to and connections for that instance can be reused in the future. @@ -87,6 +90,16 @@ To indicate which selection strategy to use, the `readerHostSelectorStrategy` co props.setProperty(ReadWriteSplittingPlugin.READER_HOST_SELECTOR_STRATEGY.name, "leastConnections"); ``` +## Reader keep-alive timeout +If no connection pool is used, reader connections created by calls to `setReadOnly(true)` will be cached for the entire lifetime of the `Connection` object. This may have a negative performance impact if your application makes frequent calls to `setReadOnly(true)` on the same `Connection` object, as all read traffic for that `Connection` will be directed to a single reader instance. +To improve performance, you can specify a timeout for the cached reader connection using `cachedReaderKeepAliveTimeoutMs`. Once the reader has expired, the next call to `setReadOnly(true)` will create a new reader connection determined by the reader host selection strategy. + +```java +final Properties properties = new Properties(); +properties.setProperty("cachedReaderKeepAliveTimeoutMs", "600000"); +``` +> [!NOTE]\ +> If a connection pool is used, this setting is ignored and the lifespan of this cached connection object will be handled by the connection pool instead. ## Limitations diff --git a/docs/using-the-jdbc-driver/using-plugins/UsingTheSimpleReadWriteSplittingPlugin.md b/docs/using-the-jdbc-driver/using-plugins/UsingTheSimpleReadWriteSplittingPlugin.md new file mode 100644 index 000000000..f42e65eb8 --- /dev/null +++ b/docs/using-the-jdbc-driver/using-plugins/UsingTheSimpleReadWriteSplittingPlugin.md @@ -0,0 +1,90 @@ +# Simple Read/Write Splitting Plugin + +The Simple Read/Write Splitting Plugin adds functionality to switch between endpoints via calls to the `Connection#setReadOnly` method. Based on the values provided in the properties, upon calling `setReadOnly(true)`, the plugin will connect to the specified endpoint for read operations. When `setReadOnly(false)` is called, the plugin will connect to the specified endpoint for write operations. Future calls to `setReadOnly` will switch between the established writer and reader connections according to the boolean argument you supply to the `setReadOnly` method. + +The plugin will use the current connection, which may be the writer or initial connection, as a fallback if the reader connection is unable to be established, or if connection verification is enabled and the connection is not to a reader host. + +The plugin does not rely on cluster topology. It relies purely on the provided endpoints and their DNS resolution. + +## Loading the Simple Read/Write Splitting Plugin + +The Simple Read/Write Splitting Plugin is not loaded by default. 
To load the plugin, include it in the `wrapperPlugins` connection parameter. + +``` +final Properties properties = new Properties(); +properties.setProperty(PropertyDefinition.PLUGINS.name, "srw"); +properties.setProperty("srwWriteEndpoint", "test-db.cluster-XYZ.us-east-2.rds.amazonaws.com"); +properties.setProperty("srwReadEndpoint", "test-db.cluster-ro-XYZ.us-east-2.rds.amazonaws.com"); +``` + +## Simple Read/Write Splitting Plugin Parameters +| Parameter | Value | Required | Description | Default Value | Example Values | +|-------------------------------|:-------:|:--------:|:------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------|--------------------------------------------------------------| +| `srwWriteEndpoint` | String | Yes | The endpoint to connect to when `setReadOnly(false)` is called. | `null` | `.cluster-..rds.amazonaws.com` | +| `srwReadEndpoint` | String | Yes | The endpoint to connect to when `setReadOnly(true)` is called. | `null` | `.cluster-ro-..rds.amazonaws.com` | +| `verifyNewSrwConnections` | Boolean | No | Enables writer/reader verification for new connections made by the Simple Read/Write Splitting Plugin. More details below. | `true` | `true`, `false` | +| `verifyInitialConnectionType` | String | No | If `verifyNewSrwConnections` is set to `true`, this parameter will verify the initial opened connection to be either a writer or a reader. More details below. | `null` | `writer`, `reader` | +| `srwConnectRetryTimeoutMs` | Integer | No | If `verifyNewSrwConnections` is set to `true`, this parameter sets the maximum allowed time in milliseconds for retrying connection attempts. More details below. | `60000` | `60000` | +| `srwConnectRetryIntervalMs` | Integer | No | If `verifyNewSrwConnections` is set to `true`, this parameter sets the time delay in milliseconds between each retry of opening a connection. More details below. | `1000` | `1000` | + +## How the Simple Read/Write Splitting Plugin Verifies Connections + +The property `verifyNewSrwConnections` is enabled by default. This means that when new connections are made with the Simple Read/Write Splitting Plugin, a query is sent to the new connection to verify its role. If the connection cannot be verified as having the correct role—that is, a write connection is not connected to a writer, or a read connection is not connected to a reader—the plugin will retry the connection up to the time limit of `srwConnectRetryTimeoutMs`. + +The values of `srwConnectRetryTimeoutMs` and `srwConnectRetryIntervalMs` control the timing and aggressiveness of the plugin's retries. + +Additionally, to consistently ensure the role of connections made with the plugin, the plugin also provides role verification for the initial connection. When connecting with an RDS writer cluster or reader cluster endpoint, the plugin will retry the initial connection up to `srwConnectRetryTimeoutMs` until it has verified the intended role of the endpoint. +If it is unable to return a verified initial connection, it will log a message and continue with the normal workflow of the other plugins. +When connecting with custom endpoints and other non-standard URLs, role verification on the initial connection can also be triggered by providing the expected role through the `verifyInitialConnectionType` parameter. Set this to `writer` or `reader` accordingly. 
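+For example, a configuration that verifies the initial connection as a writer and tightens the retry window might look like the following sketch; the endpoints are placeholders and the timing values are arbitrary examples, not recommendations:
+
+```
+final Properties properties = new Properties();
+properties.setProperty(PropertyDefinition.PLUGINS.name, "srw");
+properties.setProperty("srwWriteEndpoint", "test-db.cluster-XYZ.us-east-2.rds.amazonaws.com");
+properties.setProperty("srwReadEndpoint", "test-db.cluster-ro-XYZ.us-east-2.rds.amazonaws.com");
+// Also verify the role of the initial connection, not just connections made by setReadOnly.
+properties.setProperty("verifyInitialConnectionType", "writer");
+// Retry role verification for up to 30 seconds, once every 500 ms.
+properties.setProperty("srwConnectRetryTimeoutMs", "30000");
+properties.setProperty("srwConnectRetryIntervalMs", "500");
+```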
+ +## Limitations When Verifying Connections + +#### Non-RDS clusters +The verification step determines the role of the connection by executing a query against it. If the endpoint is not part of an Aurora or RDS cluster, the plugin will not be able to verify the role, so `verifyNewSrwConnections` must be set to `false`. + +#### Autocommit +The verification logic results in errors such as `Cannot change transaction read-only property in the middle of a transaction` from the underlying driver when: +- autocommit is set to false +- setReadOnly is called +- as part of setReadOnly, a new connection is opened +- that connection's role is verified + +This is a result of the plugin executing the role-verification query against a new connection, and when autocommit is false, this opens a transaction. + +If autocommit is essential to a workflow, either ensure the plugin has connected to the desired target connection of the setReadOnly query before setting autocommit to false or disable `verifyNewSrwConnections`. Examples of the former can be found in the [Simple Read/Write Splitting Examples](UsingTheSimpleReadWriteSplittingPlugin.md#examples). + +## Using the Simple Read/Write Splitting Plugin with RDS Proxy + +RDS Proxy provides connection pooling and management that significantly improves application scalability by reducing database connection overhead and enabling thousands of concurrent connections through +connection multiplexing. Connecting exclusively through the proxy endpoint ensures consistent connection management, automatic failover handling, and centralized monitoring, while protecting the underlying database from connection exhaustion +and providing a stable abstraction layer that remains consistent even when database topology changes. By providing the read/write endpoint and a read-only endpoint to the Simple Read/Write Splitting Plugin, the AWS JDBC Driver will connect using +these endpoints any time setReadOnly is called. + +To take full advantage of the benefits of RDS Proxy, it is recommended to only connect through RDS Proxy endpoints. See [Using the AWS JDBC Driver with RDS Proxy](./../../../README.md#rds-proxy) for limitations. + +## Using the Simple Read/Write Splitting Plugin against non-RDS clusters + +The Simple Read/Write Splitting Plugin can be used to switch between any two endpoints. If the endpoints do not direct to an RDS cluster, ensure the property `verifyNewSrwConnections` is set to `false`. See [Limitations of verifyNewSrwConnections](UsingTheSimpleReadWriteSplittingPlugin.md#non-rds-clusters) for details. + +## Limitations + +### General plugin limitations + +When a Statement or ResultSet is created, it is internally bound to the database connection established at that moment. There is no standard JDBC functionality to change the internal connection used by Statement or ResultSet objects. Consequently, even if the read/write plugin switches the internal connection, any Statements/ResultSets created before this will continue using the old database connection. This bypasses the desired functionality provided by the plugin. To prevent these scenarios, an exception will be thrown if your code uses any Statements/ResultSets created before a change in internal connection. To solve this problem, please ensure you create new Statement/ResultSet objects after switching between the writer/reader. + +Verify plugin compatibility within your driver configuration using the [compatibility guide](../Compatibility.md). 
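+To illustrate the safe pattern described above, the following sketch creates Statement objects only after the `setReadOnly` call that selects the connection they should run on; the connection string, properties, and `bank_test` table are the illustrative ones from the examples linked below:
+
+```
+try (Connection conn = DriverManager.getConnection(CONNECTION_STRING, props)) {
+  conn.setReadOnly(false);
+  try (Statement writeStmt = conn.createStatement()) {
+    writeStmt.executeUpdate("UPDATE bank_test SET account_balance = 300 WHERE id = 0");
+  }
+
+  conn.setReadOnly(true);
+  // Do not reuse writeStmt here; it is bound to the previous internal connection.
+  try (Statement readStmt = conn.createStatement();
+      ResultSet rs = readStmt.executeQuery("SELECT * FROM bank_test")) {
+    // process results
+  }
+}
+```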
+ +### Failover + +Immediately following a failover event, due to DNS caching, an RDS cluster endpoint may connect to the previous writer, and the read-only endpoint may connect to the new writer instance. + +To avoid stale DNS connections, enable `verifyNewSrwConnections`, as this will retry the connection until the role has been verified. Service for Aurora clusters is typically restored in less than 60 seconds, and often less than 30 seconds. RDS Proxy endpoints to Aurora databases can update in as little as 3 seconds. Depending on your configuration and cluster availability `srwConnectRetryTimeoutMs` and `srwConnectRetryIntervalMs` may be set to customize the timing of the retries. + +Following failover, endpoints that point to specific instances will be impacted if their target instance was demoted to a reader or promoted to a writer. The Simple Read/Write Splitting Plugin always connects to the endpoint provided in the initial connection properties when `setReadOnly` is called. We suggest using endpoints that return connections with a specific role such as cluster or read-only endpoints, or using the [Read/Write Splitting Plugin](UsingTheReadWriteSplittingPlugin.md) to connect to instances based on the cluster's current topology. + +### Session state + +The plugin supports session state transfer when switching connections. All attributes mentioned in [Session State](../SessionState.md) are automatically transferred to a new connection. + +## Examples +[SimpleReadWriteSplittingPostgresExample.java](../../../examples/AWSDriverExample/src/main/java/software/amazon/SimpleReadWriteSplittingPostgresExample.java) and [SimpleReadWriteSplittingMySQLExample.java](../../../examples/AWSDriverExample/src/main/java/software/amazon/SimpleReadWriteSplittingMySQLExample.java) demonstrate how to enable and configure Simple Read/Write Splitting with the AWS JDBC Driver. diff --git a/examples/AWSDriverExample/src/main/java/software/amazon/SimpleReadWriteSplittingMySQLExample.java b/examples/AWSDriverExample/src/main/java/software/amazon/SimpleReadWriteSplittingMySQLExample.java new file mode 100644 index 000000000..445e17000 --- /dev/null +++ b/examples/AWSDriverExample/src/main/java/software/amazon/SimpleReadWriteSplittingMySQLExample.java @@ -0,0 +1,142 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package software.amazon; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.Properties; +import software.amazon.jdbc.PropertyDefinition; +import software.amazon.jdbc.plugin.failover.FailoverFailedSQLException; +import software.amazon.jdbc.plugin.failover.FailoverSuccessSQLException; +import software.amazon.jdbc.plugin.failover.TransactionStateUnknownSQLException; + + +public class SimpleReadWriteSplittingMySQLExample { + + // User configures connection properties here + public static final String MYSQL_CONNECTION_STRING = + "jdbc:aws-wrapper:mysql://test-db.cluster-XYZ.us-east-2.rds.amazonaws.com:3306/srwExample"; + private static final String USERNAME = "username"; + private static final String PASSWORD = "password"; + private static final String SRW_WRITE_ENDPOINT = "test-db.cluster-XYZ.us-east-2.rds.amazonaws.com"; + private static final String SRW_READ_ENDPOINT = "test-db.cluster-ro-XYZ.us-east-2.rds.amazonaws.com"; + + public static void main(String[] args) throws SQLException { + + final Properties props = new Properties(); + + // Enable srw, failover, and efm2 plugins and set properties + props.setProperty(PropertyDefinition.PLUGINS.name, "srw,failover2,efm2"); + props.setProperty(PropertyDefinition.USER.name, USERNAME); + props.setProperty(PropertyDefinition.PASSWORD.name, PASSWORD); + props.setProperty("srwWriteEndpoint", SRW_WRITE_ENDPOINT); + props.setProperty("srwReadEndpoint", SRW_READ_ENDPOINT); + + /* Setup Step: Open connection and create tables - uncomment this section to create table and test values */ + // try (final Connection connection = DriverManager.getConnection(MYSQL_CONNECTION_STRING, props)) { + // setInitialSessionSettings(connection); + // executeWithFailoverHandling(connection, + // "CREATE TABLE bank_test (id int primary key, name varchar(40), account_balance int)"); + // executeWithFailoverHandling(connection, + // "INSERT INTO bank_test VALUES (0, 'Jane Doe', 200), (1, 'John Smith', 200), (2, 'Sally Smith', 200), (3, 'Joe Smith', 200)"); + // } + + // Example Step: Open connection and perform transaction + try (final Connection conn = DriverManager.getConnection(MYSQL_CONNECTION_STRING, props)) { + setInitialSessionSettings(conn); + + // Establish the internal reader connection before setting autocommit to false. + conn.setReadOnly(true); + // Switch back to the cluster endpoint to perform write operations. + conn.setReadOnly(false); + + // Begin business transaction + conn.setAutoCommit(false); + + // Example business transaction + executeWithFailoverHandling( + conn, + "UPDATE bank_test SET account_balance=account_balance - 100 WHERE name='Jane Doe'"); + executeWithFailoverHandling( + conn, + "UPDATE bank_test SET account_balance=account_balance + 100 WHERE name='John Smith'"); + + // Commit business transaction + conn.commit(); + // Internally switch to a reader connection + conn.setReadOnly(true); + + for (int i = 0; i < 4; i++) { + executeWithFailoverHandling(conn, "SELECT * FROM bank_test WHERE id = " + i); + } + + } catch (FailoverFailedSQLException e) { + // User application should open a new connection, check the results of the failed transaction and re-run it if + // needed. 
See: + // https://github.com/aws/aws-advanced-jdbc-wrapper/blob/main/docs/using-the-jdbc-driver/using-plugins/UsingTheFailoverPlugin.md#08001---unable-to-establish-sql-connection + throw e; + } catch (TransactionStateUnknownSQLException e) { + // User application should check the status of the failed transaction and restart it if needed. See: + // https://github.com/aws/aws-advanced-jdbc-wrapper/blob/main/docs/using-the-jdbc-driver/using-plugins/UsingTheFailoverPlugin.md#08007---transaction-resolution-unknown + throw e; + } catch (SQLException e) { + // Unexpected exception unrelated to failover. This should be handled by the user application. + throw e; + } + } + + public static void processResults(ResultSet results) { + // User can process results as needed + } + + public static void setInitialSessionSettings(Connection conn) throws SQLException { + try (Statement stmt1 = conn.createStatement()) { + // User can edit settings + stmt1.executeUpdate("SET time_zone = 'UTC'"); + } + } + + public static void executeWithFailoverHandling(Connection conn, String query) throws SQLException { + try (Statement stmt = conn.createStatement()) { + boolean hasResults = stmt.execute(query); + if (hasResults) { + processResults(stmt.getResultSet()); + } + } catch (FailoverFailedSQLException e) { + // Connection failed, and JDBC wrapper failed to reconnect to a new instance. + throw e; + } catch (FailoverSuccessSQLException e) { + // Query execution failed and JDBC wrapper successfully failed over to a new elected writer node. + // Reconfigure the connection + setInitialSessionSettings(conn); + // Re-run query + try (Statement stmt = conn.createStatement()) { + boolean hasResults = stmt.execute(query); + if (hasResults) { + processResults(stmt.getResultSet()); + } + } + } catch (TransactionStateUnknownSQLException e) { + // Connection failed while executing a business transaction. + // Transaction status is unknown. The driver has successfully reconnected to a new writer. + throw e; + } + } +} diff --git a/examples/AWSDriverExample/src/main/java/software/amazon/SimpleReadWriteSplittingPostgresExample.java b/examples/AWSDriverExample/src/main/java/software/amazon/SimpleReadWriteSplittingPostgresExample.java new file mode 100644 index 000000000..9125e1604 --- /dev/null +++ b/examples/AWSDriverExample/src/main/java/software/amazon/SimpleReadWriteSplittingPostgresExample.java @@ -0,0 +1,142 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package software.amazon; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.Properties; +import software.amazon.jdbc.PropertyDefinition; +import software.amazon.jdbc.plugin.failover.FailoverFailedSQLException; +import software.amazon.jdbc.plugin.failover.FailoverSuccessSQLException; +import software.amazon.jdbc.plugin.failover.TransactionStateUnknownSQLException; + + +public class SimpleReadWriteSplittingPostgresExample { + + // User configures connection properties here + public static final String POSTGRESQL_CONNECTION_STRING = + "jdbc:aws-wrapper:postgresql://test-db.cluster-XYZ.us-east-2.rds.amazonaws.com:5432/srwExample"; + private static final String USERNAME = "username"; + private static final String PASSWORD = "password"; + private static final String SRW_WRITE_ENDPOINT = "test-db.cluster-XYZ.us-east-2.rds.amazonaws.com"; + private static final String SRW_READ_ENDPOINT = "test-db.cluster-ro-XYZ.us-east-2.rds.amazonaws.com"; + + public static void main(String[] args) throws SQLException { + + final Properties props = new Properties(); + + // Enable srw, failover, and efm2 plugins and set properties + props.setProperty(PropertyDefinition.PLUGINS.name, "srw,failover2,efm2"); + props.setProperty(PropertyDefinition.USER.name, USERNAME); + props.setProperty(PropertyDefinition.PASSWORD.name, PASSWORD); + props.setProperty("srwWriteEndpoint", SRW_WRITE_ENDPOINT); + props.setProperty("srwReadEndpoint", SRW_READ_ENDPOINT); + + /* Setup Step: Open connection and create tables - uncomment this section to create table and test values */ + // try (final Connection connection = DriverManager.getConnection(POSTGRESQL_CONNECTION_STRING, props)) { + // setInitialSessionSettings(connection); + // executeWithFailoverHandling(connection, + // "CREATE TABLE bank_test (id int primary key, name varchar(40), account_balance int)"); + // executeWithFailoverHandling(connection, + // "INSERT INTO bank_test VALUES (0, 'Jane Doe', 200), (1, 'John Smith', 200), (2, 'Sally Smith', 200), (3, 'Joe Smith', 200)"); + // } + + // Example Step: Open connection and perform transaction + try (final Connection conn = DriverManager.getConnection(POSTGRESQL_CONNECTION_STRING, props)) { + setInitialSessionSettings(conn); + + // Establish the internal reader connection before setting autocommit to false. + conn.setReadOnly(true); + // Switch back to the cluster endpoint to perform write operations. + conn.setReadOnly(false); + + // Begin business transaction + conn.setAutoCommit(false); + + // Example business transaction + executeWithFailoverHandling( + conn, + "UPDATE bank_test SET account_balance=account_balance - 100 WHERE name='Jane Doe'"); + executeWithFailoverHandling( + conn, + "UPDATE bank_test SET account_balance=account_balance + 100 WHERE name='John Smith'"); + + // Commit business transaction + conn.commit(); + // Internally switch to a reader connection + conn.setReadOnly(true); + + for (int i = 0; i < 4; i++) { + executeWithFailoverHandling(conn, "SELECT * FROM bank_test WHERE id = " + i); + } + + } catch (FailoverFailedSQLException e) { + // User application should open a new connection, check the results of the failed transaction and re-run it if + // needed. 
See: + // https://github.com/aws/aws-advanced-jdbc-wrapper/blob/main/docs/using-the-jdbc-driver/using-plugins/UsingTheFailoverPlugin.md#08001---unable-to-establish-sql-connection + throw e; + } catch (TransactionStateUnknownSQLException e) { + // User application should check the status of the failed transaction and restart it if needed. See: + // https://github.com/aws/aws-advanced-jdbc-wrapper/blob/main/docs/using-the-jdbc-driver/using-plugins/UsingTheFailoverPlugin.md#08007---transaction-resolution-unknown + throw e; + } catch (SQLException e) { + // Unexpected exception unrelated to failover. This should be handled by the user application. + throw e; + } + } + + public static void processResults(ResultSet results) { + // User can process results as needed + } + + public static void setInitialSessionSettings(Connection conn) throws SQLException { + try (Statement stmt1 = conn.createStatement()) { + // User can edit settings + stmt1.executeUpdate("SET TIME ZONE 'UTC'"); + } + } + + public static void executeWithFailoverHandling(Connection conn, String query) throws SQLException { + try (Statement stmt = conn.createStatement()) { + boolean hasResults = stmt.execute(query); + if (hasResults) { + processResults(stmt.getResultSet()); + } + } catch (FailoverFailedSQLException e) { + // Connection failed, and JDBC wrapper failed to reconnect to a new instance. + throw e; + } catch (FailoverSuccessSQLException e) { + // Query execution failed and JDBC wrapper successfully failed over to a new elected writer node. + // Reconfigure the connection + setInitialSessionSettings(conn); + // Re-run query + try (Statement stmt = conn.createStatement()) { + boolean hasResults = stmt.execute(query); + if (hasResults) { + processResults(stmt.getResultSet()); + } + } + } catch (TransactionStateUnknownSQLException e) { + // Connection failed while executing a business transaction. + // Transaction status is unknown. The driver has successfully reconnected to a new writer. 
+ throw e; + } + } +} diff --git a/examples/SpringBootHikariExample/README.md b/examples/SpringBootHikariExample/README.md index d90addb82..2aafdb6dd 100644 --- a/examples/SpringBootHikariExample/README.md +++ b/examples/SpringBootHikariExample/README.md @@ -4,7 +4,7 @@ In this tutorial, you will set up a Spring Boot application using Hikari and the > Note: this tutorial was written using the following technologies: > - Spring Boot 2.7.0 -> - AWS Advanced JDBC Wrapper 2.6.7 +> - AWS Advanced JDBC Wrapper 3.0.0 > - Postgresql 42.5.4 > - Java 8 diff --git a/examples/SpringHibernateExample/README.md b/examples/SpringHibernateExample/README.md index 4b95f95c3..f82ab3d41 100644 --- a/examples/SpringHibernateExample/README.md +++ b/examples/SpringHibernateExample/README.md @@ -5,7 +5,7 @@ In this tutorial, you will set up a Spring Boot and Hibernate application with t > Note: this tutorial was written using the following technologies: > - Spring Boot 2.7.1 > - Hibernate -> - AWS Advanced JDBC Wrapper 2.6.7 +> - AWS Advanced JDBC Wrapper 3.0.0 > - Postgresql 42.5.4 > - Gradle 7 > - Java 11 diff --git a/examples/SpringTxFailoverExample/README.md b/examples/SpringTxFailoverExample/README.md index 53364492a..d9fda1949 100644 --- a/examples/SpringTxFailoverExample/README.md +++ b/examples/SpringTxFailoverExample/README.md @@ -4,7 +4,7 @@ In this tutorial, you will set up a Spring Boot application using the AWS Advanc > Note: this tutorial was written using the following technologies: > - Spring Boot 2.7.0 -> - AWS Advanced JDBC Wrapper 2.6.7 +> - AWS Advanced JDBC Wrapper 3.0.0 > - Postgresql 42.5.4 > - Java 8 @@ -127,12 +127,8 @@ spring: max-lifetime: 1260000 auto-commit: false maximum-pool-size: 3 - data-source-properties: - keepSessionStateOnFailover: true ``` -Please also note the use of the [`keepSessionStateOnFailover`](https://github.com/aws/aws-advanced-jdbc-wrapper/blob/main/docs/using-the-jdbc-driver/using-plugins/UsingTheFailoverPlugin.md#failover-parameters) property. When failover occurs, the connection's auto commit value is reset to true. When the auto commit value is set to false or transactions are used, further operations such as a rollback or commit on the same connection will cause errors. This parameter is used when connections cannot be reconfigured manually as seen in this [example](https://github.com/aws/aws-advanced-jdbc-wrapper/tree/main/examples/AWSDriverExample/src/main/java/software/amazon/PgFailoverSample.java). - ## Step 4: Set up a data access object Set up a simple data access object (DAO) interface and implementation. The data access object will be responsible for executing any queries. In this tutorial, only a get method will be included, but other methods are available within the sample code. 
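+As a rough sketch of the shape such a DAO can take (the names and query below are illustrative, not the tutorial's exact code):
+
+```java
+import java.util.List;
+import java.util.Map;
+
+public interface ExampleDao {
+  // Executes the given SELECT and returns each row as a map of column names to values.
+  List<Map<String, Object>> getAll(String query);
+}
+```
+
+The implementation can delegate to Spring's `JdbcTemplate`, which runs its queries through the wrapper-backed `DataSource` configured above.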
diff --git a/examples/SpringTxFailoverExample/src/main/resources/application.yml b/examples/SpringTxFailoverExample/src/main/resources/application.yml index 31f8417a8..1d46238b4 100644 --- a/examples/SpringTxFailoverExample/src/main/resources/application.yml +++ b/examples/SpringTxFailoverExample/src/main/resources/application.yml @@ -9,5 +9,4 @@ spring: max-lifetime: 1260000 auto-commit: false maximum-pool-size: 3 - data-source-properties: - keepSessionStateOnFailover: true + diff --git a/examples/SpringWildflyExample/README.md b/examples/SpringWildflyExample/README.md index bcf6f8d71..016a6706e 100644 --- a/examples/SpringWildflyExample/README.md +++ b/examples/SpringWildflyExample/README.md @@ -5,7 +5,7 @@ In this tutorial, you will set up a Wildfly and Spring Boot application with the > Note: this tutorial was written using the following technologies: > - Spring Boot 2.7.1 > - Wildfly 26.1.1 Final -> - AWS Advanced JDBC Wrapper 2.6.7 +> - AWS Advanced JDBC Wrapper 3.0.0 > - Postgresql 42.5.4 > - Gradle 7 > - Java 11 diff --git a/examples/SpringWildflyExample/wildfly/modules/software/amazon/jdbc/main/module.xml b/examples/SpringWildflyExample/wildfly/modules/software/amazon/jdbc/main/module.xml index c264ba335..cda59a492 100644 --- a/examples/SpringWildflyExample/wildfly/modules/software/amazon/jdbc/main/module.xml +++ b/examples/SpringWildflyExample/wildfly/modules/software/amazon/jdbc/main/module.xml @@ -19,7 +19,7 @@ -<resource-root path="aws-advanced-jdbc-wrapper-2.6.7.jar"/> +<resource-root path="aws-advanced-jdbc-wrapper-3.0.0.jar"/> diff --git a/examples/VertxExample/README.md b/examples/VertxExample/README.md index 6b954516c..7975c13be 100644 --- a/examples/VertxExample/README.md +++ b/examples/VertxExample/README.md @@ -3,7 +3,7 @@ In this tutorial, you will set up a Vert.x application with the AWS Advanced JDBC Wrapper, and use the driver to execute some simple database operations on an Aurora PostgreSQL database. > Note: this tutorial was written using the following technologies: -> - AWS Advanced JDBC Wrapper 2.6.7 +> - AWS Advanced JDBC Wrapper 3.0.0 > - PostgreSQL 42.5.4 > - Java 8 > - Vert.x 4.4.2 diff --git a/gradle.properties b/gradle.properties index 0c0667eff..a7c788727 100644 --- a/gradle.properties +++ b/gradle.properties @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License.
-aws-advanced-jdbc-wrapper.version.major=2 -aws-advanced-jdbc-wrapper.version.minor=6 -aws-advanced-jdbc-wrapper.version.subminor=7 +aws-advanced-jdbc-wrapper.version.major=3 +aws-advanced-jdbc-wrapper.version.minor=0 +aws-advanced-jdbc-wrapper.version.subminor=0 snapshot=false nexus.publish=true diff --git a/wrapper/build.gradle.kts b/wrapper/build.gradle.kts index 0e6fb29f1..2e1a99bbd 100644 --- a/wrapper/build.gradle.kts +++ b/wrapper/build.gradle.kts @@ -283,8 +283,21 @@ tasks.shadowJar { archiveBaseName.set("aws-advanced-jdbc-wrapper") archiveClassifier.set("bundle-federated-auth") + duplicatesStrategy = DuplicatesStrategy.EXCLUDE - mergeServiceFiles("META-INF") + from("${layout.buildDirectory.get()}/META-INF/services/") { + into("META-INF/services/") + } + + mergeServiceFiles() + + manifest { + attributes("Implementation-Title" to "AWS Advanced JDBC Wrapper") + attributes("Implementation-Version" to project.version) + attributes("Implementation-Vendor" to "Amazon Web Services") + attributes("Export-Package" to + "software.amazon.jdbc.*,shaded.software.amazon.awssdk.*,shaded.org.apache.http.*,shaded.org.jsoup.*") + } relocate("au", "shaded.au") relocate("com", "shaded.com") diff --git a/wrapper/src/main/java/software/amazon/jdbc/BlockingHostListProvider.java b/wrapper/src/main/java/software/amazon/jdbc/BlockingHostListProvider.java index 9fe7e40fb..31f3d1182 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/BlockingHostListProvider.java +++ b/wrapper/src/main/java/software/amazon/jdbc/BlockingHostListProvider.java @@ -19,6 +19,7 @@ import java.sql.SQLException; import java.util.List; import java.util.concurrent.TimeoutException; +import software.amazon.jdbc.hostlistprovider.HostListProvider; public interface BlockingHostListProvider extends HostListProvider { diff --git a/wrapper/src/main/java/software/amazon/jdbc/C3P0PooledConnectionProvider.java b/wrapper/src/main/java/software/amazon/jdbc/C3P0PooledConnectionProvider.java index dc4aa70eb..0a2fb5841 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/C3P0PooledConnectionProvider.java +++ b/wrapper/src/main/java/software/amazon/jdbc/C3P0PooledConnectionProvider.java @@ -79,7 +79,7 @@ public HostSpec getHostSpecByStrategy(@NonNull List hosts, @NonNull Ho } @Override - public Connection connect(@NonNull String protocol, @NonNull Dialect dialect, + public @NonNull ConnectionInfo connect(@NonNull String protocol, @NonNull Dialect dialect, @NonNull TargetDriverDialect targetDriverDialect, @NonNull HostSpec hostSpec, @NonNull Properties props) throws SQLException { final Properties copy = PropertyUtils.copyProperties(props); @@ -93,7 +93,7 @@ public Connection connect(@NonNull String protocol, @NonNull Dialect dialect, ds.setPassword(copy.getProperty(PropertyDefinition.PASSWORD.name)); - return ds.getConnection(); + return new ConnectionInfo(ds.getConnection(), true); } protected ComboPooledDataSource createDataSource( diff --git a/wrapper/src/test/java/integration/container/aurora/TestAuroraHostListProvider.java b/wrapper/src/main/java/software/amazon/jdbc/ConnectionInfo.java similarity index 54% rename from wrapper/src/test/java/integration/container/aurora/TestAuroraHostListProvider.java rename to wrapper/src/main/java/software/amazon/jdbc/ConnectionInfo.java index c35f6b0f8..ca253e6da 100644 --- a/wrapper/src/test/java/integration/container/aurora/TestAuroraHostListProvider.java +++ b/wrapper/src/main/java/software/amazon/jdbc/ConnectionInfo.java @@ -14,20 +14,28 @@ * limitations under the License. 
*/ -package integration.container.aurora; +package software.amazon.jdbc; -import java.util.Properties; -import software.amazon.jdbc.hostlistprovider.AuroraHostListProvider; -import software.amazon.jdbc.util.FullServicesContainer; +import java.sql.Connection; -public class TestAuroraHostListProvider extends AuroraHostListProvider { +public class ConnectionInfo { + private final Connection connection; + private final boolean isPooled; - public TestAuroraHostListProvider( - FullServicesContainer servicesContainer, Properties properties, String originalUrl) { - super(properties, originalUrl, servicesContainer, "", "", ""); + public ConnectionInfo(Connection connection, boolean isPooled) { + this.connection = connection; + this.isPooled = isPooled; } - public static void clearCache() { - AuroraHostListProvider.clearAll(); + public ConnectionInfo(Connection connection) { + this(connection, false); + } + + public Connection getConnection() { + return connection; + } + + public boolean isPooled() { + return isPooled; } } diff --git a/wrapper/src/main/java/software/amazon/jdbc/ConnectionPlugin.java b/wrapper/src/main/java/software/amazon/jdbc/ConnectionPlugin.java index d2d72b05c..ad271f2ad 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/ConnectionPlugin.java +++ b/wrapper/src/main/java/software/amazon/jdbc/ConnectionPlugin.java @@ -23,6 +23,7 @@ import java.util.Map; import java.util.Properties; import java.util.Set; +import software.amazon.jdbc.hostlistprovider.HostListProviderService; /** * Interface for connection plugins. This class implements ways to execute a JDBC method and to clean up resources used diff --git a/wrapper/src/main/java/software/amazon/jdbc/ConnectionPluginChainBuilder.java b/wrapper/src/main/java/software/amazon/jdbc/ConnectionPluginChainBuilder.java index 952b00936..b0104cda6 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/ConnectionPluginChainBuilder.java +++ b/wrapper/src/main/java/software/amazon/jdbc/ConnectionPluginChainBuilder.java @@ -48,6 +48,7 @@ import software.amazon.jdbc.plugin.iam.IamAuthConnectionPluginFactory; import software.amazon.jdbc.plugin.limitless.LimitlessConnectionPluginFactory; import software.amazon.jdbc.plugin.readwritesplitting.ReadWriteSplittingPluginFactory; +import software.amazon.jdbc.plugin.srw.SimpleReadWriteSplittingPluginFactory; import software.amazon.jdbc.plugin.staledns.AuroraStaleDnsPluginFactory; import software.amazon.jdbc.plugin.strategy.fastestresponse.FastestResponseStrategyPluginFactory; import software.amazon.jdbc.profile.ConfigurationProfile; @@ -80,6 +81,7 @@ public class ConnectionPluginChainBuilder { put("okta", new OktaAuthPluginFactory()); put("auroraStaleDns", new AuroraStaleDnsPluginFactory()); put("readWriteSplitting", new ReadWriteSplittingPluginFactory()); + put("srw", new SimpleReadWriteSplittingPluginFactory()); put("auroraConnectionTracker", new AuroraConnectionTrackerPluginFactory()); put("driverMetaData", new DriverMetaDataConnectionPluginFactory()); put("connectTime", new ConnectTimeConnectionPluginFactory()); @@ -107,6 +109,7 @@ public class ConnectionPluginChainBuilder { put(AuroraStaleDnsPluginFactory.class, 500); put(BlueGreenConnectionPluginFactory.class, 550); put(ReadWriteSplittingPluginFactory.class, 600); + put(SimpleReadWriteSplittingPluginFactory.class, 610); put(FailoverConnectionPluginFactory.class, 700); put(software.amazon.jdbc.plugin.failover2.FailoverConnectionPluginFactory.class, 710); put(HostMonitoringConnectionPluginFactory.class, 800); @@ -126,7 +129,7 @@ public class 
ConnectionPluginChainBuilder { protected static final ConcurrentMap, ConnectionPluginFactory> pluginFactoriesByClass = new ConcurrentHashMap<>(); - protected static final String DEFAULT_PLUGINS = "auroraConnectionTracker,failover2,efm2"; + protected static final String DEFAULT_PLUGINS = "initialConnection,auroraConnectionTracker,failover2,efm2"; /* Internal class used for plugin factory sorting. It holds a reference to a plugin diff --git a/wrapper/src/main/java/software/amazon/jdbc/ConnectionPluginManager.java b/wrapper/src/main/java/software/amazon/jdbc/ConnectionPluginManager.java index 2697c5b03..33f7618b9 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/ConnectionPluginManager.java +++ b/wrapper/src/main/java/software/amazon/jdbc/ConnectionPluginManager.java @@ -30,6 +30,7 @@ import org.checkerframework.checker.nullness.qual.NonNull; import org.checkerframework.checker.nullness.qual.Nullable; import software.amazon.jdbc.cleanup.CanReleaseResources; +import software.amazon.jdbc.hostlistprovider.HostListProviderService; import software.amazon.jdbc.plugin.AuroraConnectionTrackerPlugin; import software.amazon.jdbc.plugin.AuroraInitialConnectionStrategyPlugin; import software.amazon.jdbc.plugin.AwsSecretsManagerConnectionPlugin; diff --git a/wrapper/src/main/java/software/amazon/jdbc/ConnectionProvider.java b/wrapper/src/main/java/software/amazon/jdbc/ConnectionProvider.java index 8fddcf269..6967a673c 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/ConnectionProvider.java +++ b/wrapper/src/main/java/software/amazon/jdbc/ConnectionProvider.java @@ -78,10 +78,10 @@ HostSpec getHostSpecByStrategy( * @param targetDriverDialect the target driver dialect * @param hostSpec the HostSpec containing the host-port information for the host to connect to * @param props the Properties to use for the connection - * @return {@link Connection} resulting from the given connection information + * @return {@link ConnectionInfo} resulting from the given connection information * @throws SQLException if an error occurs */ - Connection connect( + @NonNull ConnectionInfo connect( @NonNull String protocol, @NonNull Dialect dialect, @NonNull TargetDriverDialect targetDriverDialect, diff --git a/wrapper/src/main/java/software/amazon/jdbc/ConnectionProviderManager.java b/wrapper/src/main/java/software/amazon/jdbc/ConnectionProviderManager.java index a8d1e6100..014acf082 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/ConnectionProviderManager.java +++ b/wrapper/src/main/java/software/amazon/jdbc/ConnectionProviderManager.java @@ -45,24 +45,10 @@ public ConnectionProviderManager( this.effectiveConnProvider = effectiveConnProvider; } - /** - * Setter that can optionally be called to request a non-default {@link ConnectionProvider}. The - * requested ConnectionProvider will be used to establish future connections unless it does not - * support a requested URL, in which case the default ConnectionProvider will be used. See - * {@link ConnectionProvider#acceptsUrl} for more info. - * - * @param connProvider the {@link ConnectionProvider} to use to establish new connections - * @deprecated Use {@link Driver#setCustomConnectionProvider(ConnectionProvider)} instead. - */ - @Deprecated - public static void setConnectionProvider(ConnectionProvider connProvider) { - Driver.setCustomConnectionProvider(connProvider); - } - /** * Get the {@link ConnectionProvider} to use to establish a connection using the given driver * protocol, host details, and properties. 
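For example, under the
   * 3.0 contract shown above, a custom provider's {@code connect} returns a {@link ConnectionInfo}
   * wrapping the physical {@link Connection} together with a flag that reports whether the
   * connection came from a pool (hypothetical sketch; {@code myPool} stands in for any pooling
   * {@code javax.sql.DataSource} and is not part of this changeset):
   * <pre>{@code
   * public ConnectionInfo connect(String protocol, Dialect dialect,
   *     TargetDriverDialect targetDriverDialect, HostSpec hostSpec, Properties props)
   *     throws SQLException {
   *   Connection conn = myPool.getConnection(); // assumed: a pooling javax.sql.DataSource
   *   if (conn == null) {
   *     throw new SQLLoginException(Messages.get("ConnectionProvider.noConnection"));
   *   }
   *   return new ConnectionInfo(conn, true); // true: the connection came from a pool,
   *                                          // mirroring the Hikari and C3P0 providers
   * }
   * }</pre>
   * <p>
   *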
If a non-default ConnectionProvider has been set using - * {@link #setConnectionProvider} and {@link ConnectionProvider#acceptsUrl} returns true, the + * {@link Driver#setCustomConnectionProvider} and {@link ConnectionProvider#acceptsUrl} returns true, the * non-default ConnectionProvider will be returned. Otherwise, the default ConnectionProvider will * be returned. See {@link ConnectionProvider#acceptsUrl} for more info. * @@ -163,18 +149,6 @@ public HostSpec getHostSpecByStrategy(List hosts, HostRole role, Strin return this.defaultProvider.getHostSpecByStrategy(hosts, role, strategy, props); } - /** - * Clears the non-default {@link ConnectionProvider} if it has been set. The default - * ConnectionProvider will be used if the non-default ConnectionProvider has not been set or has - * been cleared. - * - * @deprecated Use {@link Driver#resetCustomConnectionProvider()} instead - */ - @Deprecated - public static void resetProvider() { - Driver.resetCustomConnectionProvider(); - } - /** * Releases any resources held by the available {@link ConnectionProvider} instances. */ @@ -185,29 +159,6 @@ public static void releaseResources() { } } - /** - * Sets a custom connection initialization function. It'll be used - * for every brand-new database connection. - * - * @param func A function that initialize a new connection - * - * @deprecated @see Driver#setConnectionInitFunc(ConnectionInitFunc) - */ - @Deprecated - public static void setConnectionInitFunc(final @NonNull ConnectionInitFunc func) { - Driver.setConnectionInitFunc(func); - } - - /** - * Resets a custom connection initialization function. - * - * @deprecated Use {@link Driver#resetConnectionInitFunc()} instead - */ - @Deprecated - public static void resetConnectionInitFunc() { - Driver.resetConnectionInitFunc(); - } - public void initConnection( final @Nullable Connection connection, final @NonNull String protocol, diff --git a/wrapper/src/main/java/software/amazon/jdbc/DataSourceConnectionProvider.java b/wrapper/src/main/java/software/amazon/jdbc/DataSourceConnectionProvider.java index 6c5afcda3..d12eeefad 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/DataSourceConnectionProvider.java +++ b/wrapper/src/main/java/software/amazon/jdbc/DataSourceConnectionProvider.java @@ -110,11 +110,11 @@ public HostSpec getHostSpecByStrategy( * @param protocol The connection protocol (example "jdbc:mysql://") * @param hostSpec The HostSpec containing the host-port information for the host to connect to * @param props The Properties to use for the connection - * @return {@link Connection} resulting from the given connection information + * @return {@link ConnectionInfo} resulting from the given connection information * @throws SQLException if an error occurs */ @Override - public Connection connect( + public @NonNull ConnectionInfo connect( final @NonNull String protocol, final @NonNull Dialect dialect, final @NonNull TargetDriverDialect targetDriverDialect, @@ -151,7 +151,7 @@ public Connection connect( throw new SQLLoginException(Messages.get("ConnectionProvider.noConnection")); } - return conn; + return new ConnectionInfo(conn, false); } protected Connection openConnection( diff --git a/wrapper/src/main/java/software/amazon/jdbc/Driver.java b/wrapper/src/main/java/software/amazon/jdbc/Driver.java index 62b81cf11..e67193d55 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/Driver.java +++ b/wrapper/src/main/java/software/amazon/jdbc/Driver.java @@ -427,10 +427,8 @@ public static void resetConnectionInitFunc() { public static void 
clearCaches() {
     CoreServicesContainer.getInstance().getStorageService().clearAll();
     RdsUtils.clearCache();
-    RdsHostListProvider.clearAll();
     PluginServiceImpl.clearCache();
     DialectManager.resetEndpointCache();
-    MonitoringRdsHostListProvider.clearCache();
     CustomEndpointMonitorImpl.clearCache();
     OpenedConnectionTracker.clearCache();
     AwsSecretsManagerCacheHolder.clearCache();
diff --git a/wrapper/src/main/java/software/amazon/jdbc/DriverConnectionProvider.java b/wrapper/src/main/java/software/amazon/jdbc/DriverConnectionProvider.java
index e2a04e399..1cd8891b3 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/DriverConnectionProvider.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/DriverConnectionProvider.java
@@ -108,11 +108,11 @@ public HostSpec getHostSpecByStrategy(
    * @param targetDriverDialect The target driver dialect
    * @param hostSpec The HostSpec containing the host-port information for the host to connect to
    * @param props The Properties to use for the connection
-   * @return {@link Connection} resulting from the given connection information
+   * @return {@link ConnectionInfo} resulting from the given connection information
    * @throws SQLException if an error occurs
    */
   @Override
-  public Connection connect(
+  public @NonNull ConnectionInfo connect(
       final @NonNull String protocol,
       final @NonNull Dialect dialect,
       final @NonNull TargetDriverDialect targetDriverDialect,
@@ -197,7 +197,7 @@ public Connection connect(
     if (conn == null) {
       throw new SQLLoginException(Messages.get("ConnectionProvider.noConnection"));
     }
-    return conn;
+    return new ConnectionInfo(conn, false);
   }
 
   @Override
diff --git a/wrapper/src/main/java/software/amazon/jdbc/HikariPooledConnectionProvider.java b/wrapper/src/main/java/software/amazon/jdbc/HikariPooledConnectionProvider.java
index f0d58c841..25c608d31 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/HikariPooledConnectionProvider.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/HikariPooledConnectionProvider.java
@@ -241,7 +241,7 @@ public HostSpec getHostSpecByStrategy(
   }
 
   @Override
-  public Connection connect(
+  public @NonNull ConnectionInfo connect(
       @NonNull String protocol,
       @NonNull Dialect dialect,
       @NonNull TargetDriverDialect targetDriverDialect,
@@ -286,7 +286,7 @@ public Connection connect(
 
     ds.setPassword(copy.getProperty(PropertyDefinition.PASSWORD.name));
 
-    return ds.getConnection();
+    return new ConnectionInfo(ds.getConnection(), true);
   }
 
   // The pool key should always be retrieved using this method, because the username
diff --git a/wrapper/src/main/java/software/amazon/jdbc/HostRole.java b/wrapper/src/main/java/software/amazon/jdbc/HostRole.java
index 3f9be267b..9fd3d1369 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/HostRole.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/HostRole.java
@@ -16,8 +16,27 @@
 
 package software.amazon.jdbc;
 
+import java.util.HashMap;
+import java.util.Map;
+
 public enum HostRole {
   UNKNOWN,
   WRITER,
-  READER
+  READER;
+
+  private static final Map<String, HostRole> nameToVerifyConnectionTypeValue =
+      // Does not map to UNKNOWN, as it is not a valid verification type option.
+      new HashMap<String, HostRole>() {
+        {
+          put("writer", WRITER);
+          put("reader", READER);
+        }
+      };
+
+  public static HostRole verifyConnectionTypeFromValue(String value) {
+    if (value == null) {
+      return null;
+    }
+    return nameToVerifyConnectionTypeValue.get(value.toLowerCase());
+  }
 }
diff --git a/wrapper/src/main/java/software/amazon/jdbc/HostSpec.java b/wrapper/src/main/java/software/amazon/jdbc/HostSpec.java
index 63197de04..5c3babb53 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/HostSpec.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/HostSpec.java
@@ -38,13 +38,13 @@ public class HostSpec {
 
   protected final String host;
   protected final int port;
+  protected final HostRole role;
+  protected final Timestamp lastUpdateTime;
+  protected final Set<String> aliases = ConcurrentHashMap.newKeySet();
+  protected final Set<String> allAliases = ConcurrentHashMap.newKeySet();
   protected volatile HostAvailability availability;
-  protected HostRole role;
-  protected Set<String> aliases = ConcurrentHashMap.newKeySet();
-  protected Set<String> allAliases = ConcurrentHashMap.newKeySet();
   protected long weight; // Greater than or equal to 0; the lesser the weight, the healthier the node.
   protected String hostId;
-  protected Timestamp lastUpdateTime;
   protected HostAvailabilityStrategy hostAvailabilityStrategy;
 
   private HostSpec(
@@ -205,14 +205,14 @@ public Set<String> asAliases() {
   }
 
   public String toString() {
-    return String.format("HostSpec@%s [host=%s, port=%d, %s, %s, weight=%d, %s]",
+    return String.format("HostSpec@%s [hostId=%s, host=%s, port=%d, %s, %s, weight=%d, %s]",
         Integer.toHexString(System.identityHashCode(this)),
-        this.host, this.port, this.role, this.availability, this.weight, this.lastUpdateTime);
+        this.hostId, this.host, this.port, this.role, this.availability, this.weight, this.lastUpdateTime);
   }
 
   @Override
   public int hashCode() {
-    return Objects.hash(this.host, this.port, this.availability, this.role, this.weight, this.lastUpdateTime);
+    return Objects.hash(this.host, this.port, this.role);
   }
 
   @Override
diff --git a/wrapper/src/main/java/software/amazon/jdbc/PartialPluginService.java b/wrapper/src/main/java/software/amazon/jdbc/PartialPluginService.java
index 677140dda..b01cc7e51 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/PartialPluginService.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/PartialPluginService.java
@@ -42,11 +42,14 @@ import software.amazon.jdbc.exceptions.ExceptionManager;
 import software.amazon.jdbc.hostavailability.HostAvailability;
 import software.amazon.jdbc.hostavailability.HostAvailabilityStrategyFactory;
+import software.amazon.jdbc.hostlistprovider.HostListProvider;
+import software.amazon.jdbc.hostlistprovider.HostListProviderService;
 import software.amazon.jdbc.hostlistprovider.StaticHostListProvider;
 import software.amazon.jdbc.profile.ConfigurationProfile;
 import software.amazon.jdbc.states.SessionStateService;
 import software.amazon.jdbc.targetdriverdialect.TargetDriverDialect;
 import software.amazon.jdbc.util.FullServicesContainer;
+import software.amazon.jdbc.util.LogUtils;
 import software.amazon.jdbc.util.Messages;
 import software.amazon.jdbc.util.Utils;
 import software.amazon.jdbc.util.storage.CacheMap;
@@ -132,7 +135,7 @@ public PartialPluginService(
         ?
this.configurationProfile.getExceptionHandler() : null; - HostListProviderSupplier supplier = this.dbDialect.getHostListProvider(); + HostListProviderSupplier supplier = this.dbDialect.getHostListProviderSupplier(); this.hostListProvider = supplier.getProvider(this.props, this.originalUrl, this.servicesContainer); } @@ -159,7 +162,7 @@ public HostSpec getCurrentHostSpec() { Messages.get("PluginServiceImpl.currentHostNotAllowed", new Object[] { currentHostSpec == null ? "" : currentHostSpec.getHostAndPort(), - Utils.logTopology(allowedHosts, "")}) + LogUtils.logTopology(allowedHosts, "")}) ); } @@ -189,12 +192,6 @@ public String getOriginalUrl() { return this.originalUrl; } - @Override - public void setAllowedAndBlockedHosts(AllowedAndBlockedHosts allowedAndBlockedHosts) { - throw new UnsupportedOperationException( - Messages.get("PartialPluginService.unexpectedMethodCall", new Object[] {"setAllowedAndBlockedHosts"})); - } - @Override public boolean acceptsStrategy(HostRole role, String strategy) throws SQLException { throw new UnsupportedOperationException( @@ -218,17 +215,12 @@ public HostRole getHostRole(Connection conn) throws SQLException { return this.hostListProvider.getHostRole(conn); } - @Override - @Deprecated - public ConnectionProvider getConnectionProvider() { - return this.pluginManager.defaultConnProvider; - } - @Override public ConnectionProvider getDefaultConnectionProvider() { return this.connectionProviderManager.getDefaultProvider(); } + @Deprecated public boolean isPooledConnectionProvider(HostSpec host, Properties props) { final ConnectionProvider connectionProvider = this.connectionProviderManager.getConnectionProvider(this.driverProtocol, host, props); @@ -540,11 +532,6 @@ public void releaseResources() { Messages.get("PartialPluginService.unexpectedMethodCall", new Object[] {"releaseResources"})); } - @Override - public boolean isNetworkException(Throwable throwable) { - return this.isNetworkException(throwable, this.targetDriverDialect); - } - @Override public boolean isNetworkException(final Throwable throwable, @Nullable TargetDriverDialect targetDriverDialect) { if (this.exceptionHandler != null) { @@ -561,11 +548,6 @@ public boolean isNetworkException(final String sqlState) { return this.exceptionManager.isNetworkException(this.dbDialect, sqlState); } - @Override - public boolean isLoginException(Throwable throwable) { - return this.isLoginException(throwable, this.targetDriverDialect); - } - @Override public boolean isLoginException(final Throwable throwable, @Nullable TargetDriverDialect targetDriverDialect) { if (this.exceptionHandler != null) { @@ -582,6 +564,22 @@ public boolean isLoginException(final String sqlState) { return this.exceptionManager.isLoginException(this.dbDialect, sqlState); } + @Override + public boolean isReadOnlyConnectionException(@Nullable String sqlState, @Nullable Integer errorCode) { + if (this.exceptionHandler != null) { + return this.exceptionHandler.isReadOnlyConnectionException(sqlState, errorCode); + } + return this.exceptionManager.isReadOnlyConnectionException(this.dbDialect, sqlState, errorCode); + } + + @Override + public boolean isReadOnlyConnectionException(Throwable throwable, @Nullable TargetDriverDialect targetDriverDialect) { + if (this.exceptionHandler != null) { + return this.exceptionHandler.isReadOnlyConnectionException(throwable, targetDriverDialect); + } + return this.exceptionManager.isReadOnlyConnectionException(this.dbDialect, throwable, targetDriverDialect); + } + @Override public Dialect getDialect() { 
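     // Database dialect (engine-specific queries and behavior) resolved for the current connection.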
     return this.dbDialect;
@@ -668,37 +666,32 @@ public <T> T getPlugin(final Class<T> pluginClazz) {
     return null;
   }
 
-  @Override
-  public <T> void setStatus(Class<T> clazz, @Nullable T status, boolean clusterBound) {
-    throw new UnsupportedOperationException(
-        Messages.get("PartialPluginService.unexpectedMethodCall", new Object[] {"setStatus"}));
+  public boolean isPluginInUse(final Class<? extends ConnectionPlugin> pluginClazz) {
+    try {
+      return this.pluginManager.isWrapperFor(pluginClazz);
+    } catch (SQLException e) {
+      return false;
+    }
   }
 
   @Override
-  public <T> void setStatus(Class<T> clazz, @Nullable T status, String key) {
+  public Boolean isPooledConnection() {
+    // This service implementation doesn't support call context.
     throw new UnsupportedOperationException(
-        Messages.get("PartialPluginService.unexpectedMethodCall", new Object[] {"setStatus"}));
+        Messages.get("PartialPluginService.unexpectedMethodCall", new Object[] {"isPooledConnection"}));
   }
 
   @Override
-  public <T> T getStatus(@NonNull Class<T> clazz, boolean clusterBound) {
-    throw new UnsupportedOperationException(
-        Messages.get("PartialPluginService.unexpectedMethodCall", new Object[] {"getStatus"}));
+  public void setIsPooledConnection(Boolean pooledConnection) {
+    // This service implementation doesn't support call context.
+    // Do nothing.
   }
 
   @Override
-  public <T> T getStatus(@NonNull Class<T> clazz, String key) {
+  public void resetCallContext() {
+    // This service implementation doesn't support call context.
     throw new UnsupportedOperationException(
-        Messages.get("PartialPluginService.unexpectedMethodCall", new Object[] {"getStatus"}));
-  }
-
-  @Override
-  public boolean isPluginInUse(final Class<? extends ConnectionPlugin> pluginClazz) {
-    try {
-      return this.pluginManager.isWrapperFor(pluginClazz);
-    } catch (SQLException e) {
-      return false;
-    }
+        Messages.get("PartialPluginService.unexpectedMethodCall", new Object[] {"resetCallContext"}));
   }
 
   @Override
diff --git a/wrapper/src/main/java/software/amazon/jdbc/PluginManagerService.java b/wrapper/src/main/java/software/amazon/jdbc/PluginManagerService.java
index 79c9168da..2965fe821 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/PluginManagerService.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/PluginManagerService.java
@@ -16,7 +16,13 @@
 
 package software.amazon.jdbc;
 
+import org.checkerframework.checker.nullness.qual.Nullable;
+
 public interface PluginManagerService {
 
   void setInTransaction(boolean inTransaction);
+
+  void setIsPooledConnection(@Nullable Boolean pooledConnection);
+
+  void resetCallContext();
 }
diff --git a/wrapper/src/main/java/software/amazon/jdbc/PluginService.java b/wrapper/src/main/java/software/amazon/jdbc/PluginService.java
index b01679aba..bc1e232f2 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/PluginService.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/PluginService.java
@@ -28,6 +28,7 @@ import software.amazon.jdbc.dialect.Dialect;
 import software.amazon.jdbc.exceptions.ExceptionHandler;
 import software.amazon.jdbc.hostavailability.HostAvailability;
+import software.amazon.jdbc.hostlistprovider.HostListProvider;
 import software.amazon.jdbc.states.SessionStateService;
 import software.amazon.jdbc.targetdriverdialect.TargetDriverDialect;
 import software.amazon.jdbc.util.telemetry.TelemetryFactory;
@@ -84,15 +85,6 @@ EnumSet<NodeChangeOptions> setCurrentConnection(
 
   String getOriginalUrl();
 
-  /**
-   * Set the collection of hosts that should be allowed and/or blocked for connections.
-   *
-   * @param allowedAndBlockedHosts An object defining the allowed and blocked sets of hosts.
- * @deprecated use StorageService#set(key, allowedAndBlockedHosts) instead. - */ - @Deprecated - void setAllowedAndBlockedHosts(AllowedAndBlockedHosts allowedAndBlockedHosts); - /** * Returns a boolean indicating if the available {@link ConnectionProvider} or * {@link ConnectionPlugin} instances support the selection of a host with the requested role and @@ -240,11 +232,9 @@ Connection forceConnect( HostSpecBuilder getHostSpecBuilder(); - @Deprecated - ConnectionProvider getConnectionProvider(); - ConnectionProvider getDefaultConnectionProvider(); + @Deprecated boolean isPooledConnectionProvider(HostSpec host, Properties props); String getDriverProtocol(); @@ -259,13 +249,20 @@ Connection forceConnect( T getPlugin(final Class pluginClazz); - void setStatus(final Class clazz, final @Nullable T status, final boolean clusterBound); - - void setStatus(final Class clazz, final @Nullable T status, final String key); - - T getStatus(final @NonNull Class clazz, final boolean clusterBound); + boolean isPluginInUse(final Class pluginClazz); - T getStatus(final @NonNull Class clazz, final String key); + // JDBC call context functions - boolean isPluginInUse(final Class pluginClazz); + /** + * Retrieves details about the most recent {@link PluginService#connect} or + * {@link PluginService#forceConnect} calls. Specifically indicates whether the + * returned connection was obtained from a connection pool or newly created. + * + *
<p>
Note: The {@link ConnectionPlugin} must process or store this information during + * the current JDBC call, as these details will be reset before the next JDBC call + * is processed, or another {@link PluginService#connect} or {@link PluginService#forceConnect} + * is made. + * + */ + @Nullable Boolean isPooledConnection(); } diff --git a/wrapper/src/main/java/software/amazon/jdbc/PluginServiceImpl.java b/wrapper/src/main/java/software/amazon/jdbc/PluginServiceImpl.java index b6a5c318c..75e9ceff2 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/PluginServiceImpl.java +++ b/wrapper/src/main/java/software/amazon/jdbc/PluginServiceImpl.java @@ -47,12 +47,15 @@ import software.amazon.jdbc.exceptions.ExceptionManager; import software.amazon.jdbc.hostavailability.HostAvailability; import software.amazon.jdbc.hostavailability.HostAvailabilityStrategyFactory; +import software.amazon.jdbc.hostlistprovider.HostListProvider; +import software.amazon.jdbc.hostlistprovider.HostListProviderService; import software.amazon.jdbc.hostlistprovider.StaticHostListProvider; import software.amazon.jdbc.profile.ConfigurationProfile; import software.amazon.jdbc.states.SessionStateService; import software.amazon.jdbc.states.SessionStateServiceImpl; import software.amazon.jdbc.targetdriverdialect.TargetDriverDialect; import software.amazon.jdbc.util.FullServicesContainer; +import software.amazon.jdbc.util.LogUtils; import software.amazon.jdbc.util.Messages; import software.amazon.jdbc.util.Utils; import software.amazon.jdbc.util.storage.CacheMap; @@ -92,6 +95,9 @@ public class PluginServiceImpl implements PluginService, CanReleaseResources, protected final ReentrantLock connectionSwitchLock = new ReentrantLock(); + // JDBC call context members + protected Boolean pooledConnection = null; + public PluginServiceImpl( @NonNull final FullServicesContainer servicesContainer, @NonNull final Properties props, @@ -189,7 +195,7 @@ public HostSpec getCurrentHostSpec() { Messages.get("PluginServiceImpl.currentHostNotAllowed", new Object[] { currentHostSpec == null ? 
"" : currentHostSpec.getHostAndPort(), - Utils.logTopology(allowedHosts, "")}) + LogUtils.logTopology(allowedHosts, "")}) ); } @@ -219,12 +225,6 @@ public String getOriginalUrl() { return this.originalUrl; } - @Override - @Deprecated - public void setAllowedAndBlockedHosts(AllowedAndBlockedHosts allowedAndBlockedHosts) { - this.servicesContainer.getStorageService().set(this.initialConnectionHostSpec.getHost(), allowedAndBlockedHosts); - } - @Override public boolean acceptsStrategy(HostRole role, String strategy) throws SQLException { return this.pluginManager.acceptsStrategy(role, strategy); @@ -245,17 +245,12 @@ public HostRole getHostRole(Connection conn) throws SQLException { return this.hostListProvider.getHostRole(conn); } - @Override - @Deprecated - public ConnectionProvider getConnectionProvider() { - return this.pluginManager.defaultConnProvider; - } - @Override public ConnectionProvider getDefaultConnectionProvider() { return this.connectionProviderManager.getDefaultProvider(); } + @Deprecated public boolean isPooledConnectionProvider(HostSpec host, Properties props) { final ConnectionProvider connectionProvider = this.connectionProviderManager.getConnectionProvider(this.driverProtocol, host, props); @@ -664,12 +659,6 @@ public void releaseResources() { } } - @Override - @Deprecated - public boolean isNetworkException(Throwable throwable) { - return this.isNetworkException(throwable, this.targetDriverDialect); - } - @Override public boolean isNetworkException(final Throwable throwable, @Nullable TargetDriverDialect targetDriverDialect) { if (this.exceptionHandler != null) { @@ -686,12 +675,6 @@ public boolean isNetworkException(final String sqlState) { return this.exceptionManager.isNetworkException(this.dialect, sqlState); } - @Override - @Deprecated - public boolean isLoginException(Throwable throwable) { - return this.isLoginException(throwable, this.targetDriverDialect); - } - @Override public boolean isLoginException(final Throwable throwable, @Nullable TargetDriverDialect targetDriverDialect) { if (this.exceptionHandler != null) { @@ -708,6 +691,22 @@ public boolean isLoginException(final String sqlState) { return this.exceptionManager.isLoginException(this.dialect, sqlState); } + @Override + public boolean isReadOnlyConnectionException(@Nullable String sqlState, @Nullable Integer errorCode) { + if (this.exceptionHandler != null) { + return this.exceptionHandler.isReadOnlyConnectionException(sqlState, errorCode); + } + return this.exceptionManager.isReadOnlyConnectionException(this.dialect, sqlState, errorCode); + } + + @Override + public boolean isReadOnlyConnectionException(Throwable throwable, @Nullable TargetDriverDialect targetDriverDialect) { + if (this.exceptionHandler != null) { + return this.exceptionHandler.isReadOnlyConnectionException(throwable, targetDriverDialect); + } + return this.exceptionManager.isReadOnlyConnectionException(this.dialect, throwable, targetDriverDialect); + } + @Override public Dialect getDialect() { return this.dialect; @@ -728,7 +727,7 @@ public void updateDialect(final @NonNull Connection connection) throws SQLExcept return; } - final HostListProviderSupplier supplier = this.dialect.getHostListProvider(); + final HostListProviderSupplier supplier = this.dialect.getHostListProviderSupplier(); this.setHostListProvider(supplier.getProvider(this.props, this.originalUrl, this.servicesContainer)); this.refreshHostList(connection); } @@ -806,57 +805,27 @@ public static void clearCache() { hostAvailabilityExpiringCache.clear(); } - @Deprecated // 
Use StorageService#set instead. - public void setStatus(final Class clazz, final @Nullable T status, final boolean clusterBound) { - String clusterId = null; - if (clusterBound) { - try { - clusterId = this.hostListProvider.getClusterId(); - } catch (Exception ex) { - // do nothing - } - } - this.setStatus(clazz, status, clusterId); - } - - @Deprecated // Use StorageService#set instead. - public void setStatus(final Class clazz, final @Nullable T status, final String key) { - final String cacheKey = this.getStatusCacheKey(clazz, key); - if (status == null) { - statusesExpiringCache.remove(cacheKey); - } else { - statusesExpiringCache.put(cacheKey, status, DEFAULT_STATUS_CACHE_EXPIRE_NANO); - } - } - - @Deprecated // Use StorageService#get instead. - public T getStatus(final @NonNull Class clazz, final boolean clusterBound) { - String clusterId = null; - if (clusterBound) { - try { - clusterId = this.hostListProvider.getClusterId(); - } catch (Exception ex) { - // do nothing - } + public boolean isPluginInUse(final Class pluginClazz) { + try { + return this.pluginManager.isWrapperFor(pluginClazz); + } catch (SQLException e) { + return false; } - return this.getStatus(clazz, clusterId); } - @Deprecated // Use StorageService#get instead. - public T getStatus(final @NonNull Class clazz, String key) { - return clazz.cast(statusesExpiringCache.get(this.getStatusCacheKey(clazz, key))); + @Override + public Boolean isPooledConnection() { + return this.pooledConnection; } - protected String getStatusCacheKey(final Class clazz, final String key) { - return String.format("%s::%s", key == null ? "" : key.trim().toLowerCase(), clazz.getName()); + @Override + public void setIsPooledConnection(Boolean isPooledConnection) { + this.pooledConnection = isPooledConnection; } - public boolean isPluginInUse(final Class pluginClazz) { - try { - return this.pluginManager.isWrapperFor(pluginClazz); - } catch (SQLException e) { - return false; - } + @Override + public void resetCallContext() { + this.pooledConnection = null; } @Override diff --git a/wrapper/src/main/java/software/amazon/jdbc/dialect/AuroraMysqlDialect.java b/wrapper/src/main/java/software/amazon/jdbc/dialect/AuroraMysqlDialect.java index e64352899..8ae253cbd 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/dialect/AuroraMysqlDialect.java +++ b/wrapper/src/main/java/software/amazon/jdbc/dialect/AuroraMysqlDialect.java @@ -17,69 +17,39 @@ package software.amazon.jdbc.dialect; import java.sql.Connection; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Statement; import java.util.Collections; import java.util.List; import software.amazon.jdbc.PluginService; -import software.amazon.jdbc.hostlistprovider.AuroraHostListProvider; +import software.amazon.jdbc.hostlistprovider.AuroraTopologyUtils; +import software.amazon.jdbc.hostlistprovider.RdsHostListProvider; +import software.amazon.jdbc.hostlistprovider.TopologyUtils; import software.amazon.jdbc.hostlistprovider.monitoring.MonitoringRdsHostListProvider; import software.amazon.jdbc.plugin.failover2.FailoverConnectionPlugin; -public class AuroraMysqlDialect extends MysqlDialect implements BlueGreenDialect { +public class AuroraMysqlDialect extends MysqlDialect implements TopologyDialect, BlueGreenDialect { - private static final String TOPOLOGY_QUERY = + protected static final String AURORA_VERSION_EXISTS_QUERY = "SHOW VARIABLES LIKE 'aurora_version'"; + protected static final String TOPOLOGY_QUERY = "SELECT SERVER_ID, CASE WHEN SESSION_ID = 'MASTER_SESSION_ID' THEN TRUE 
ELSE FALSE END, " + "CPU, REPLICA_LAG_IN_MILLISECONDS, LAST_UPDATE_TIMESTAMP " + "FROM information_schema.replica_host_status " - // filter out nodes that haven't been updated in the last 5 minutes + // filter out instances that have not been updated in the last 5 minutes + "WHERE time_to_sec(timediff(now(), LAST_UPDATE_TIMESTAMP)) <= 300 OR SESSION_ID = 'MASTER_SESSION_ID' "; - private static final String IS_WRITER_QUERY = + protected static final String INSTANCE_ID_QUERY = "SELECT @@aurora_server_id, @@aurora_server_id"; + protected static final String WRITER_ID_QUERY = "SELECT SERVER_ID FROM information_schema.replica_host_status " - + "WHERE SESSION_ID = 'MASTER_SESSION_ID' AND SERVER_ID = @@aurora_server_id"; + + "WHERE SESSION_ID = 'MASTER_SESSION_ID' AND SERVER_ID = @@aurora_server_id"; + protected static final String IS_READER_QUERY = "SELECT @@innodb_read_only"; - private static final String NODE_ID_QUERY = "SELECT @@aurora_server_id"; - private static final String IS_READER_QUERY = "SELECT @@innodb_read_only"; - - private static final String BG_STATUS_QUERY = - "SELECT * FROM mysql.rds_topology"; - - private static final String TOPOLOGY_TABLE_EXIST_QUERY = + protected static final String BG_TOPOLOGY_EXISTS_QUERY = "SELECT 1 AS tmp FROM information_schema.tables WHERE" + " table_schema = 'mysql' AND table_name = 'rds_topology'"; + protected static final String BG_STATUS_QUERY = "SELECT * FROM mysql.rds_topology"; @Override public boolean isDialect(final Connection connection) { - Statement stmt = null; - ResultSet rs = null; - try { - stmt = connection.createStatement(); - rs = stmt.executeQuery("SHOW VARIABLES LIKE 'aurora_version'"); - if (rs.next()) { - // If variable with such name is presented then it means it's an Aurora cluster - return true; - } - } catch (final SQLException ex) { - // ignore - } finally { - if (stmt != null) { - try { - stmt.close(); - } catch (SQLException ex) { - // ignore - } - } - if (rs != null) { - try { - rs.close(); - } catch (SQLException ex) { - // ignore - } - } - } - return false; + return dialectUtils.checkExistenceQueries(connection, AURORA_VERSION_EXISTS_QUERY); } @Override @@ -88,45 +58,44 @@ public boolean isDialect(final Connection connection) { } @Override - public HostListProviderSupplier getHostListProvider() { + public HostListProviderSupplier getHostListProviderSupplier() { return (properties, initialUrl, servicesContainer) -> { final PluginService pluginService = servicesContainer.getPluginService(); + final TopologyUtils topologyUtils = new AuroraTopologyUtils(this, pluginService.getHostSpecBuilder()); if (pluginService.isPluginInUse(FailoverConnectionPlugin.class)) { - return new MonitoringRdsHostListProvider( - properties, - initialUrl, - servicesContainer, - TOPOLOGY_QUERY, - NODE_ID_QUERY, - IS_READER_QUERY, - IS_WRITER_QUERY); + return new MonitoringRdsHostListProvider(topologyUtils, properties, initialUrl, servicesContainer); } - return new AuroraHostListProvider( - properties, - initialUrl, - servicesContainer, - TOPOLOGY_QUERY, - NODE_ID_QUERY, - IS_READER_QUERY); + return new RdsHostListProvider(topologyUtils, properties, initialUrl, servicesContainer); }; } @Override - public String getBlueGreenStatusQuery() { - return BG_STATUS_QUERY; + public String getTopologyQuery() { + return TOPOLOGY_QUERY; + } + + @Override + public String getInstanceIdQuery() { + return INSTANCE_ID_QUERY; + } + + @Override + public String getWriterIdQuery() { + return WRITER_ID_QUERY; + } + + @Override + public String getIsReaderQuery() { + 
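// Query used to check whether the connected instance is currently a reader.
+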
return IS_READER_QUERY; } @Override public boolean isBlueGreenStatusAvailable(final Connection connection) { - try { - try (Statement statement = connection.createStatement(); - ResultSet rs = statement.executeQuery(TOPOLOGY_TABLE_EXIST_QUERY)) { - return rs.next(); - } - } catch (SQLException ex) { - return false; - } + return dialectUtils.checkExistenceQueries(connection, BG_TOPOLOGY_EXISTS_QUERY); } + @Override + public String getBlueGreenStatusQuery() { + return BG_STATUS_QUERY; + } } - diff --git a/wrapper/src/main/java/software/amazon/jdbc/dialect/AuroraPgDialect.java b/wrapper/src/main/java/software/amazon/jdbc/dialect/AuroraPgDialect.java index c81d85f70..e43f30cb8 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/dialect/AuroraPgDialect.java +++ b/wrapper/src/main/java/software/amazon/jdbc/dialect/AuroraPgDialect.java @@ -20,53 +20,53 @@ import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; +import java.util.Arrays; +import java.util.List; import java.util.logging.Logger; import software.amazon.jdbc.PluginService; -import software.amazon.jdbc.hostlistprovider.AuroraHostListProvider; +import software.amazon.jdbc.hostlistprovider.AuroraTopologyUtils; +import software.amazon.jdbc.hostlistprovider.RdsHostListProvider; +import software.amazon.jdbc.hostlistprovider.TopologyUtils; import software.amazon.jdbc.hostlistprovider.monitoring.MonitoringRdsHostListProvider; import software.amazon.jdbc.plugin.failover2.FailoverConnectionPlugin; import software.amazon.jdbc.util.DriverInfo; +import software.amazon.jdbc.util.Messages; -/** - * Suitable for the following AWS PG configurations. - * - Regional Cluster - */ -public class AuroraPgDialect extends PgDialect implements AuroraLimitlessDialect, BlueGreenDialect { - private static final Logger LOGGER = Logger.getLogger(AuroraPgDialect.class.getName()); +public class AuroraPgDialect extends PgDialect implements TopologyDialect, AuroraLimitlessDialect, BlueGreenDialect { - private static final String extensionsSql = + protected static final String AURORA_UTILS_EXIST_QUERY = "SELECT (setting LIKE '%aurora_stat_utils%') AS aurora_stat_utils " + "FROM pg_catalog.pg_settings " + "WHERE name OPERATOR(pg_catalog.=) 'rds.extensions'"; - - private static final String topologySql = "SELECT 1 FROM pg_catalog.aurora_replica_status() LIMIT 1"; - - private static final String TOPOLOGY_QUERY = + protected static final String TOPOLOGY_EXISTS_QUERY = "SELECT 1 FROM pg_catalog.aurora_replica_status() LIMIT 1"; + protected static final String TOPOLOGY_QUERY = "SELECT SERVER_ID, CASE WHEN SESSION_ID OPERATOR(pg_catalog.=) 'MASTER_SESSION_ID' THEN TRUE ELSE FALSE END, " + "CPU, COALESCE(REPLICA_LAG_IN_MSEC, 0), LAST_UPDATE_TIMESTAMP " + "FROM pg_catalog.aurora_replica_status() " - // filter out nodes that haven't been updated in the last 5 minutes + // filter out instances that haven't been updated in the last 5 minutes + "WHERE EXTRACT(" + "EPOCH FROM(pg_catalog.NOW() OPERATOR(pg_catalog.-) LAST_UPDATE_TIMESTAMP)) OPERATOR(pg_catalog.<=) 300 " + "OR SESSION_ID OPERATOR(pg_catalog.=) 'MASTER_SESSION_ID' " + "OR LAST_UPDATE_TIMESTAMP IS NULL"; - private static final String IS_WRITER_QUERY = + protected static final String INSTANCE_ID_QUERY = + "SELECT pg_catalog.aurora_db_instance_identifier(), pg_catalog.aurora_db_instance_identifier()"; + protected static final String WRITER_ID_QUERY = "SELECT SERVER_ID FROM pg_catalog.aurora_replica_status() " + "WHERE SESSION_ID OPERATOR(pg_catalog.=) 'MASTER_SESSION_ID' " + "AND SERVER_ID 
OPERATOR(pg_catalog.=) pg_catalog.aurora_db_instance_identifier()"; + protected static final String IS_READER_QUERY = "SELECT pg_catalog.pg_is_in_recovery()"; - private static final String NODE_ID_QUERY = "SELECT pg_catalog.aurora_db_instance_identifier()"; - private static final String IS_READER_QUERY = "SELECT pg_catalog.pg_is_in_recovery()"; protected static final String LIMITLESS_ROUTER_ENDPOINT_QUERY = "select router_endpoint, load from pg_catalog.aurora_limitless_router_endpoints()"; - private static final String BG_STATUS_QUERY = + protected static final String BG_TOPOLOGY_EXISTS_QUERY = + "SELECT 'pg_catalog.get_blue_green_fast_switchover_metadata'::regproc"; + protected static final String BG_STATUS_QUERY = "SELECT * FROM " - + "pg_catalog.get_blue_green_fast_switchover_metadata('aws_jdbc_driver-" + DriverInfo.DRIVER_VERSION + "')"; + + "pg_catalog.get_blue_green_fast_switchover_metadata('aws_jdbc_driver-" + DriverInfo.DRIVER_VERSION + "')"; - private static final String TOPOLOGY_TABLE_EXIST_QUERY = - "SELECT 'pg_catalog.get_blue_green_fast_switchover_metadata'::regproc"; + private static final Logger LOGGER = Logger.getLogger(AuroraPgDialect.class.getName()); @Override public boolean isDialect(final Connection connection) { @@ -74,112 +74,80 @@ public boolean isDialect(final Connection connection) { return false; } - Statement stmt = null; - ResultSet rs = null; boolean hasExtensions = false; - boolean hasTopology = false; - try { - stmt = connection.createStatement(); - rs = stmt.executeQuery(extensionsSql); - if (rs.next()) { - final boolean auroraUtils = rs.getBoolean("aurora_stat_utils"); - LOGGER.finest(() -> String.format("auroraUtils: %b", auroraUtils)); - if (auroraUtils) { - hasExtensions = true; - } - } - } catch (SQLException ex) { - // ignore - } finally { - if (stmt != null) { - try { - stmt.close(); - } catch (SQLException ex) { - // ignore - } + try (Statement stmt = connection.createStatement(); + ResultSet rs = stmt.executeQuery(AURORA_UTILS_EXIST_QUERY)) { + if (!rs.next()) { + return false; } - if (rs != null) { - try { - rs.close(); - } catch (SQLException ex) { - // ignore - } + + final boolean auroraUtils = rs.getBoolean("aurora_stat_utils"); + LOGGER.finest(Messages.get("AuroraPgDialect.auroraUtils", new Object[] {auroraUtils})); + if (auroraUtils) { + hasExtensions = true; } + } catch (SQLException ex) { + return false; } + if (!hasExtensions) { return false; } - try { - stmt = connection.createStatement(); - rs = stmt.executeQuery(topologySql); - if (rs.next()) { - LOGGER.finest(() -> "hasTopology: true"); - hasTopology = true; - } - } catch (final SQLException ex) { - // ignore - } finally { - if (stmt != null) { - try { - stmt.close(); - } catch (SQLException ex) { - // ignore - } - } - if (rs != null) { - try { - rs.close(); - } catch (SQLException ex) { - // ignore - } - } - } - return hasExtensions && hasTopology; + + return dialectUtils.checkExistenceQueries(connection, TOPOLOGY_EXISTS_QUERY); } @Override - public HostListProviderSupplier getHostListProvider() { + public List getDialectUpdateCandidates() { + return Arrays.asList(DialectCodes.GLOBAL_AURORA_PG, + DialectCodes.RDS_MULTI_AZ_PG_CLUSTER, + DialectCodes.RDS_PG); + } + + @Override + public HostListProviderSupplier getHostListProviderSupplier() { return (properties, initialUrl, servicesContainer) -> { final PluginService pluginService = servicesContainer.getPluginService(); + final TopologyUtils topologyUtils = new AuroraTopologyUtils(this, pluginService.getHostSpecBuilder()); if 
(pluginService.isPluginInUse(FailoverConnectionPlugin.class)) { - return new MonitoringRdsHostListProvider( - properties, - initialUrl, - servicesContainer, - TOPOLOGY_QUERY, - NODE_ID_QUERY, - IS_READER_QUERY, - IS_WRITER_QUERY); + return new MonitoringRdsHostListProvider(topologyUtils, properties, initialUrl, servicesContainer); } - return new AuroraHostListProvider( - properties, - initialUrl, - servicesContainer, - TOPOLOGY_QUERY, - NODE_ID_QUERY, - IS_READER_QUERY); + return new RdsHostListProvider(topologyUtils, properties, initialUrl, servicesContainer); }; } + @Override + public String getTopologyQuery() { + return TOPOLOGY_QUERY; + } + + @Override + public String getInstanceIdQuery() { + return INSTANCE_ID_QUERY; + } + + @Override + public String getWriterIdQuery() { + return WRITER_ID_QUERY; + } + + @Override + public String getIsReaderQuery() { + return IS_READER_QUERY; + } + @Override public String getLimitlessRouterEndpointQuery() { return LIMITLESS_ROUTER_ENDPOINT_QUERY; } @Override - public String getBlueGreenStatusQuery() { - return BG_STATUS_QUERY; + public boolean isBlueGreenStatusAvailable(final Connection connection) { + return dialectUtils.checkExistenceQueries(connection, BG_TOPOLOGY_EXISTS_QUERY); } @Override - public boolean isBlueGreenStatusAvailable(final Connection connection) { - try { - try (Statement statement = connection.createStatement(); - ResultSet rs = statement.executeQuery(TOPOLOGY_TABLE_EXIST_QUERY)) { - return rs.next(); - } - } catch (SQLException ex) { - return false; - } + public String getBlueGreenStatusQuery() { + return BG_STATUS_QUERY; } } diff --git a/wrapper/src/main/java/software/amazon/jdbc/dialect/BlueGreenDialect.java b/wrapper/src/main/java/software/amazon/jdbc/dialect/BlueGreenDialect.java index ce1b678d3..a5e34f150 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/dialect/BlueGreenDialect.java +++ b/wrapper/src/main/java/software/amazon/jdbc/dialect/BlueGreenDialect.java @@ -19,7 +19,7 @@ import java.sql.Connection; public interface BlueGreenDialect { - String getBlueGreenStatusQuery(); - boolean isBlueGreenStatusAvailable(final Connection connection); + + String getBlueGreenStatusQuery(); } diff --git a/wrapper/src/main/java/software/amazon/jdbc/dialect/Dialect.java b/wrapper/src/main/java/software/amazon/jdbc/dialect/Dialect.java index 367db7d25..5f09aae0b 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/dialect/Dialect.java +++ b/wrapper/src/main/java/software/amazon/jdbc/dialect/Dialect.java @@ -26,22 +26,23 @@ import software.amazon.jdbc.plugin.failover.FailoverRestriction; public interface Dialect { - int getDefaultPort(); - ExceptionHandler getExceptionHandler(); + boolean isDialect(Connection connection); - String getHostAliasQuery(); + int getDefaultPort(); - String getServerVersionQuery(); + List getDialectUpdateCandidates(); - boolean isDialect(Connection connection); + ExceptionHandler getExceptionHandler(); - List getDialectUpdateCandidates(); + HostListProviderSupplier getHostListProviderSupplier(); - HostListProviderSupplier getHostListProvider(); + String getHostAliasQuery(); void prepareConnectProperties( final @NonNull Properties connectProperties, final @NonNull String protocol, final @NonNull HostSpec hostSpec); EnumSet getFailoverRestrictions(); + + String getServerVersionQuery(); } diff --git a/wrapper/src/main/java/software/amazon/jdbc/dialect/DialectCodes.java b/wrapper/src/main/java/software/amazon/jdbc/dialect/DialectCodes.java index 74c48a67c..47c0de3c5 100644 --- 
a/wrapper/src/main/java/software/amazon/jdbc/dialect/DialectCodes.java +++ b/wrapper/src/main/java/software/amazon/jdbc/dialect/DialectCodes.java @@ -17,12 +17,14 @@ package software.amazon.jdbc.dialect; public class DialectCodes { + public static final String GLOBAL_AURORA_MYSQL = "global-aurora-mysql"; public static final String AURORA_MYSQL = "aurora-mysql"; public static final String RDS_MYSQL = "rds-mysql"; public static final String MYSQL = "mysql"; // https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/multi-az-db-clusters-concepts.html public static final String RDS_MULTI_AZ_MYSQL_CLUSTER = "rds-multi-az-mysql-cluster"; + public static final String GLOBAL_AURORA_PG = "global-aurora-pg"; public static final String AURORA_PG = "aurora-pg"; public static final String RDS_PG = "rds-pg"; // https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/multi-az-db-clusters-concepts.html diff --git a/wrapper/src/main/java/software/amazon/jdbc/dialect/DialectManager.java b/wrapper/src/main/java/software/amazon/jdbc/dialect/DialectManager.java index d29a1b3dd..ed7f4e71e 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/dialect/DialectManager.java +++ b/wrapper/src/main/java/software/amazon/jdbc/dialect/DialectManager.java @@ -57,10 +57,12 @@ public class DialectManager implements DialectProvider { put(DialectCodes.PG, new PgDialect()); put(DialectCodes.MARIADB, new MariaDbDialect()); put(DialectCodes.RDS_MYSQL, new RdsMysqlDialect()); - put(DialectCodes.RDS_MULTI_AZ_MYSQL_CLUSTER, new RdsMultiAzDbClusterMysqlDialect()); + put(DialectCodes.RDS_MULTI_AZ_MYSQL_CLUSTER, new MultiAzClusterMysqlDialect()); put(DialectCodes.RDS_PG, new RdsPgDialect()); - put(DialectCodes.RDS_MULTI_AZ_PG_CLUSTER, new RdsMultiAzDbClusterPgDialect()); + put(DialectCodes.RDS_MULTI_AZ_PG_CLUSTER, new MultiAzClusterPgDialect()); + put(DialectCodes.GLOBAL_AURORA_MYSQL, new GlobalAuroraMysqlDialect()); put(DialectCodes.AURORA_MYSQL, new AuroraMysqlDialect()); + put(DialectCodes.GLOBAL_AURORA_PG, new GlobalAuroraPgDialect()); put(DialectCodes.AURORA_PG, new AuroraPgDialect()); put(DialectCodes.UNKNOWN, new UnknownDialect()); } @@ -73,7 +75,7 @@ public class DialectManager implements DialectProvider { */ protected static final long ENDPOINT_CACHE_EXPIRATION = TimeUnit.HOURS.toNanos(24); - // Map of host name, or url, by dialect code. + // Keys are host names or URLs, values are dialect codes. protected static final CacheMap knownEndpointDialects = new CacheMap<>(); private final RdsUtils rdsHelper = new RdsUtils(); @@ -92,28 +94,6 @@ public DialectManager(PluginService pluginService) { this.pluginService = pluginService; } - /** - * Sets a custom dialect handler. - * - * @param dialect A custom dialect to use. - * - * @deprecated Use software.amazon.jdbc.Driver instead - */ - @Deprecated - public static void setCustomDialect(final @NonNull Dialect dialect) { - Driver.setCustomDialect(dialect); - } - - /** - * Resets a custom dialect handler. 
- * - * @deprecated Use software.amazon.jdbc.Driver instead - */ - @Deprecated - public static void resetCustomDialect() { - Driver.resetCustomDialect(); - } - public static void resetEndpointCache() { knownEndpointDialects.clear(); } @@ -149,8 +129,7 @@ public Dialect getDialect( this.logCurrentDialect(); return userDialect; } else { - throw new SQLException( - Messages.get("DialectManager.unknownDialectCode", new Object[] {dialectCode})); + throw new SQLException(Messages.get("DialectManager.unknownDialectCode", new Object[] {dialectCode})); } } @@ -160,13 +139,19 @@ public Dialect getDialect( String host = url; final List hosts = this.connectionUrlParser.getHostsFromConnectionUrl( - url, true, pluginService::getHostSpecBuilder); + url, true, pluginService::getHostSpecBuilder); if (!Utils.isNullOrEmpty(hosts)) { host = hosts.get(0).getHost(); } if (driverProtocol.contains("mysql")) { RdsUrlType type = this.rdsHelper.identifyRdsType(host); + if (type == RdsUrlType.RDS_GLOBAL_WRITER_CLUSTER) { + this.canUpdate = false; + this.dialectCode = DialectCodes.GLOBAL_AURORA_MYSQL; + this.dialect = knownDialectsByCode.get(DialectCodes.GLOBAL_AURORA_MYSQL); + return this.dialect; + } if (type.isRdsCluster()) { this.canUpdate = true; this.dialectCode = DialectCodes.AURORA_MYSQL; @@ -195,6 +180,12 @@ public Dialect getDialect( this.dialect = knownDialectsByCode.get(DialectCodes.AURORA_PG); return this.dialect; } + if (RdsUrlType.RDS_GLOBAL_WRITER_CLUSTER.equals(type)) { + this.canUpdate = false; + this.dialectCode = DialectCodes.GLOBAL_AURORA_PG; + this.dialect = knownDialectsByCode.get(DialectCodes.GLOBAL_AURORA_PG); + return this.dialect; + } if (type.isRdsCluster()) { this.canUpdate = true; this.dialectCode = DialectCodes.AURORA_PG; @@ -246,9 +237,10 @@ public Dialect getDialect( for (String dialectCandidateCode : dialectCandidates) { Dialect dialectCandidate = knownDialectsByCode.get(dialectCandidateCode); if (dialectCandidate == null) { - throw new SQLException( - Messages.get("DialectManager.unknownDialectCode", new Object[] {dialectCandidateCode})); + throw new SQLException(Messages.get( + "DialectManager.unknownDialectCode", new Object[] {dialectCandidateCode})); } + boolean isDialect = dialectCandidate.isDialect(connection); if (isDialect) { this.canUpdate = false; @@ -278,9 +270,8 @@ public Dialect getDialect( } private void logCurrentDialect() { - LOGGER.finest(() -> String.format("Current dialect: %s, %s, canUpdate: %b", - this.dialectCode, - this.dialect == null ? "" : this.dialect, - this.canUpdate)); + LOGGER.finest(Messages.get( + "DialectManager.currentDialect", + new Object[] {this.dialectCode, this.dialect == null ? "" : this.dialect, this.canUpdate})); } } diff --git a/wrapper/src/main/java/software/amazon/jdbc/dialect/DialectUtils.java b/wrapper/src/main/java/software/amazon/jdbc/dialect/DialectUtils.java new file mode 100644 index 000000000..a09480cd7 --- /dev/null +++ b/wrapper/src/main/java/software/amazon/jdbc/dialect/DialectUtils.java @@ -0,0 +1,46 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package software.amazon.jdbc.dialect; + +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; + +public class DialectUtils { + /** + * Given a series of existence queries, returns true if they all execute successfully and contain at least one record. + * Otherwise, returns false. + * + * @param conn the connection to use for executing the queries. + * @param existenceQueries the queries to check for existing records. + * @return true if all queries execute successfully and return at least one record, false otherwise. + */ + public boolean checkExistenceQueries(Connection conn, String... existenceQueries) { + for (String existenceQuery : existenceQueries) { + try (Statement stmt = conn.createStatement(); ResultSet rs = stmt.executeQuery(existenceQuery)) { + if (!rs.next()) { + return false; + } + } catch (SQLException e) { + return false; + } + } + + return true; + } +} diff --git a/wrapper/src/main/java/software/amazon/jdbc/dialect/GlobalAuroraMysqlDialect.java b/wrapper/src/main/java/software/amazon/jdbc/dialect/GlobalAuroraMysqlDialect.java new file mode 100644 index 000000000..334757af9 --- /dev/null +++ b/wrapper/src/main/java/software/amazon/jdbc/dialect/GlobalAuroraMysqlDialect.java @@ -0,0 +1,97 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package software.amazon.jdbc.dialect; + +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.Collections; +import java.util.List; +import software.amazon.jdbc.PluginService; +import software.amazon.jdbc.hostlistprovider.GlobalAuroraHostListProvider; +import software.amazon.jdbc.hostlistprovider.GlobalAuroraTopologyUtils; +import software.amazon.jdbc.hostlistprovider.monitoring.MonitoringGlobalAuroraHostListProvider; +import software.amazon.jdbc.plugin.failover2.FailoverConnectionPlugin; + +public class GlobalAuroraMysqlDialect extends AuroraMysqlDialect implements GlobalAuroraTopologyDialect { + + protected static final String GLOBAL_STATUS_TABLE_EXISTS_QUERY = + "SELECT 1 AS tmp FROM information_schema.tables WHERE" + + " upper(table_schema) = 'INFORMATION_SCHEMA' AND upper(table_name) = 'AURORA_GLOBAL_DB_STATUS'"; + protected static final String GLOBAL_INSTANCE_STATUS_EXISTS_QUERY = + "SELECT 1 AS tmp FROM information_schema.tables WHERE" + + " upper(table_schema) = 'INFORMATION_SCHEMA' AND upper(table_name) = 'AURORA_GLOBAL_DB_INSTANCE_STATUS'"; + + protected static final String GLOBAL_TOPOLOGY_QUERY = + "SELECT SERVER_ID, CASE WHEN SESSION_ID = 'MASTER_SESSION_ID' THEN TRUE ELSE FALSE END, " + + "VISIBILITY_LAG_IN_MSEC, AWS_REGION " + + "FROM information_schema.aurora_global_db_instance_status "; + + protected static final String REGION_COUNT_QUERY = "SELECT count(1) FROM information_schema.aurora_global_db_status"; + protected static final String REGION_BY_INSTANCE_ID_QUERY = + "SELECT AWS_REGION FROM information_schema.aurora_global_db_instance_status WHERE SERVER_ID = ?"; + + + @Override + public boolean isDialect(final Connection connection) { + if (!dialectUtils.checkExistenceQueries( + connection, GLOBAL_STATUS_TABLE_EXISTS_QUERY, GLOBAL_INSTANCE_STATUS_EXISTS_QUERY)) { + return false; + } + + try (Statement stmt = connection.createStatement(); + ResultSet rs = stmt.executeQuery(REGION_COUNT_QUERY)) { + if (!rs.next()) { + return false; + } + + int awsRegionCount = rs.getInt(1); + return awsRegionCount > 1; + } catch (final SQLException ex) { + return false; + } + } + + @Override + public List getDialectUpdateCandidates() { + return Collections.emptyList(); + } + + @Override + public HostListProviderSupplier getHostListProviderSupplier() { + return (properties, initialUrl, servicesContainer) -> { + final PluginService pluginService = servicesContainer.getPluginService(); + final GlobalAuroraTopologyUtils topologyUtils = + new GlobalAuroraTopologyUtils(this, pluginService.getHostSpecBuilder()); + if (pluginService.isPluginInUse(FailoverConnectionPlugin.class)) { + return new MonitoringGlobalAuroraHostListProvider(topologyUtils, properties, initialUrl, servicesContainer); + } + return new GlobalAuroraHostListProvider(topologyUtils, properties, initialUrl, servicesContainer); + }; + } + + @Override + public String getTopologyQuery() { + return GLOBAL_TOPOLOGY_QUERY; + } + + @Override + public String getRegionByInstanceIdQuery() { + return REGION_BY_INSTANCE_ID_QUERY; + } +} diff --git a/wrapper/src/main/java/software/amazon/jdbc/dialect/GlobalAuroraPgDialect.java b/wrapper/src/main/java/software/amazon/jdbc/dialect/GlobalAuroraPgDialect.java new file mode 100644 index 000000000..7c060c800 --- /dev/null +++ b/wrapper/src/main/java/software/amazon/jdbc/dialect/GlobalAuroraPgDialect.java @@ -0,0 +1,112 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
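`GlobalAuroraMysqlDialect` above only claims a connection when the `aurora_global_db_*` tables exist and the status table reports more than one region; a single-region cluster keeps the regular Aurora MySQL dialect. A standalone restatement of that rule, assuming direct JDBC access:

```java
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;

public final class GlobalDetectionSketch {
  public static boolean isGlobalAuroraMysql(Connection conn) {
    try (Statement stmt = conn.createStatement();
        ResultSet rs = stmt.executeQuery(
            "SELECT count(1) FROM information_schema.aurora_global_db_status")) {
      return rs.next() && rs.getInt(1) > 1; // one row per region in the global cluster
    } catch (SQLException e) {
      return false; // table missing or not accessible -> not a global cluster
    }
  }
}
```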
+ * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package software.amazon.jdbc.dialect; + +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.Collections; +import java.util.List; +import java.util.logging.Logger; +import software.amazon.jdbc.PluginService; +import software.amazon.jdbc.hostlistprovider.GlobalAuroraHostListProvider; +import software.amazon.jdbc.hostlistprovider.GlobalAuroraTopologyUtils; +import software.amazon.jdbc.hostlistprovider.monitoring.MonitoringGlobalAuroraHostListProvider; +import software.amazon.jdbc.plugin.failover2.FailoverConnectionPlugin; +import software.amazon.jdbc.util.Messages; + +public class GlobalAuroraPgDialect extends AuroraPgDialect implements GlobalAuroraTopologyDialect { + + protected static final String GLOBAL_STATUS_FUNC_EXISTS_QUERY = "select 'aurora_global_db_status'::regproc"; + protected static final String GLOBAL_INSTANCE_STATUS_FUNC_EXISTS_QUERY = + "select 'aurora_global_db_instance_status'::regproc"; + + protected static final String GLOBAL_TOPOLOGY_QUERY = + "SELECT SERVER_ID, CASE WHEN SESSION_ID = 'MASTER_SESSION_ID' THEN TRUE ELSE FALSE END, " + + "VISIBILITY_LAG_IN_MSEC, AWS_REGION " + + "FROM aurora_global_db_instance_status()"; + + protected static final String REGION_COUNT_QUERY = "SELECT count(1) FROM aurora_global_db_status()"; + protected static final String REGION_BY_INSTANCE_ID_QUERY = + "SELECT AWS_REGION FROM aurora_global_db_instance_status() WHERE SERVER_ID = ?"; + + private static final Logger LOGGER = Logger.getLogger(GlobalAuroraPgDialect.class.getName()); + + @Override + public boolean isDialect(final Connection connection) { + try { + try (Statement stmt = connection.createStatement(); + ResultSet rs = stmt.executeQuery(AURORA_UTILS_EXIST_QUERY)) { + if (!rs.next()) { + return false; + } + + final boolean auroraUtils = rs.getBoolean("aurora_stat_utils"); + LOGGER.finest(Messages.get("AuroraPgDialect.auroraUtils", new Object[] {auroraUtils})); + if (!auroraUtils) { + return false; + } + } + + if (!dialectUtils.checkExistenceQueries( + connection, GLOBAL_STATUS_FUNC_EXISTS_QUERY, GLOBAL_INSTANCE_STATUS_FUNC_EXISTS_QUERY)) { + return false; + } + + try (Statement stmt = connection.createStatement(); + ResultSet rs = stmt.executeQuery(REGION_COUNT_QUERY)) { + if (!rs.next()) { + return false; + } + + int awsRegionCount = rs.getInt(1); + return awsRegionCount > 1; + } + } catch (final SQLException ex) { + return false; + } + } + + @Override + public List getDialectUpdateCandidates() { + return Collections.emptyList(); + } + + @Override + public HostListProviderSupplier getHostListProviderSupplier() { + return (properties, initialUrl, servicesContainer) -> { + final PluginService pluginService = servicesContainer.getPluginService(); + final GlobalAuroraTopologyUtils topologyUtils = + new GlobalAuroraTopologyUtils(this, pluginService.getHostSpecBuilder()); + if (pluginService.isPluginInUse(FailoverConnectionPlugin.class)) { + 
return new MonitoringGlobalAuroraHostListProvider(topologyUtils, properties, initialUrl, servicesContainer); + } + return new GlobalAuroraHostListProvider(topologyUtils, properties, initialUrl, servicesContainer); + }; + } + + @Override + public String getTopologyQuery() { + return GLOBAL_TOPOLOGY_QUERY; + } + + @Override + public String getRegionByInstanceIdQuery() { + return REGION_BY_INSTANCE_ID_QUERY; + } +} diff --git a/wrapper/src/main/java/software/amazon/jdbc/dialect/GlobalAuroraTopologyDialect.java b/wrapper/src/main/java/software/amazon/jdbc/dialect/GlobalAuroraTopologyDialect.java new file mode 100644 index 000000000..11db48dff --- /dev/null +++ b/wrapper/src/main/java/software/amazon/jdbc/dialect/GlobalAuroraTopologyDialect.java @@ -0,0 +1,21 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package software.amazon.jdbc.dialect; + +public interface GlobalAuroraTopologyDialect extends TopologyDialect { + String getRegionByInstanceIdQuery(); +} diff --git a/wrapper/src/main/java/software/amazon/jdbc/dialect/HostListProviderSupplier.java b/wrapper/src/main/java/software/amazon/jdbc/dialect/HostListProviderSupplier.java index 0dfe44dc5..bee378f9f 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/dialect/HostListProviderSupplier.java +++ b/wrapper/src/main/java/software/amazon/jdbc/dialect/HostListProviderSupplier.java @@ -18,7 +18,7 @@ import java.util.Properties; import org.checkerframework.checker.nullness.qual.NonNull; -import software.amazon.jdbc.HostListProvider; +import software.amazon.jdbc.hostlistprovider.HostListProvider; import software.amazon.jdbc.util.FullServicesContainer; @FunctionalInterface diff --git a/wrapper/src/main/java/software/amazon/jdbc/dialect/MariaDbDialect.java b/wrapper/src/main/java/software/amazon/jdbc/dialect/MariaDbDialect.java index 3b368a8a1..58453f6fa 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/dialect/MariaDbDialect.java +++ b/wrapper/src/main/java/software/amazon/jdbc/dialect/MariaDbDialect.java @@ -32,45 +32,23 @@ import software.amazon.jdbc.plugin.failover.FailoverRestriction; public class MariaDbDialect implements Dialect { + + protected static final String VERSION_QUERY = "SELECT VERSION()"; + protected static final String HOST_ALIAS_QUERY = "SELECT CONCAT(@@hostname, ':', @@port)"; + + private static MariaDBExceptionHandler mariaDBExceptionHandler; + private static final EnumSet NO_FAILOVER_RESTRICTIONS = + EnumSet.noneOf(FailoverRestriction.class); private static final List dialectUpdateCandidates = Arrays.asList( DialectCodes.AURORA_MYSQL, DialectCodes.RDS_MULTI_AZ_MYSQL_CLUSTER, DialectCodes.RDS_MYSQL, DialectCodes.MYSQL); - private static MariaDBExceptionHandler mariaDBExceptionHandler; - - private static final EnumSet NO_RESTRICTIONS = EnumSet.noneOf(FailoverRestriction.class); - - @Override - public int getDefaultPort() { - return 3306; - } - - @Override - public ExceptionHandler getExceptionHandler() { - if 
(mariaDBExceptionHandler == null) { - mariaDBExceptionHandler = new MariaDBExceptionHandler(); - } - return mariaDBExceptionHandler; - } - - @Override - public String getHostAliasQuery() { - return "SELECT CONCAT(@@hostname, ':', @@port)"; - } - - @Override - public String getServerVersionQuery() { - return "SELECT VERSION()"; - } @Override public boolean isDialect(final Connection connection) { - Statement stmt = null; - ResultSet rs = null; - try { - stmt = connection.createStatement(); - rs = stmt.executeQuery(this.getServerVersionQuery()); + try (Statement stmt = connection.createStatement(); + ResultSet rs = stmt.executeQuery(VERSION_QUERY)) { while (rs.next()) { final String columnValue = rs.getString(1); if (columnValue != null && columnValue.toLowerCase().contains("mariadb")) { @@ -78,32 +56,31 @@ public boolean isDialect(final Connection connection) { } } } catch (final SQLException ex) { - // ignore - } finally { - if (stmt != null) { - try { - stmt.close(); - } catch (SQLException ex) { - // ignore - } - } - if (rs != null) { - try { - rs.close(); - } catch (SQLException ex) { - // ignore - } - } + return false; } + return false; } + @Override + public int getDefaultPort() { + return 3306; + } + @Override public List getDialectUpdateCandidates() { return dialectUpdateCandidates; } - public HostListProviderSupplier getHostListProvider() { + @Override + public ExceptionHandler getExceptionHandler() { + if (mariaDBExceptionHandler == null) { + mariaDBExceptionHandler = new MariaDBExceptionHandler(); + } + return mariaDBExceptionHandler; + } + + public HostListProviderSupplier getHostListProviderSupplier() { return (properties, initialUrl, servicesContainer) -> new ConnectionStringHostListProvider(properties, initialUrl, servicesContainer.getHostListProviderService()); } @@ -116,6 +93,16 @@ public void prepareConnectProperties( @Override public EnumSet getFailoverRestrictions() { - return NO_RESTRICTIONS; + return NO_FAILOVER_RESTRICTIONS; + } + + @Override + public String getServerVersionQuery() { + return VERSION_QUERY; + } + + @Override + public String getHostAliasQuery() { + return HOST_ALIAS_QUERY; } } diff --git a/wrapper/src/main/java/software/amazon/jdbc/dialect/MultiAzClusterDialect.java b/wrapper/src/main/java/software/amazon/jdbc/dialect/MultiAzClusterDialect.java new file mode 100644 index 000000000..4dc0a584d --- /dev/null +++ b/wrapper/src/main/java/software/amazon/jdbc/dialect/MultiAzClusterDialect.java @@ -0,0 +1,23 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package software.amazon.jdbc.dialect; + +public interface MultiAzClusterDialect extends TopologyDialect { + String getWriterIdQuery(); + + String getWriterIdColumnName(); +} diff --git a/wrapper/src/main/java/software/amazon/jdbc/dialect/RdsMultiAzDbClusterMysqlDialect.java b/wrapper/src/main/java/software/amazon/jdbc/dialect/MultiAzClusterMysqlDialect.java similarity index 50% rename from wrapper/src/main/java/software/amazon/jdbc/dialect/RdsMultiAzDbClusterMysqlDialect.java rename to wrapper/src/main/java/software/amazon/jdbc/dialect/MultiAzClusterMysqlDialect.java index 930cf1631..9fe7d3b03 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/dialect/RdsMultiAzDbClusterMysqlDialect.java +++ b/wrapper/src/main/java/software/amazon/jdbc/dialect/MultiAzClusterMysqlDialect.java @@ -26,65 +26,56 @@ import org.checkerframework.checker.nullness.qual.NonNull; import software.amazon.jdbc.HostSpec; import software.amazon.jdbc.PluginService; -import software.amazon.jdbc.hostlistprovider.RdsMultiAzDbClusterListProvider; -import software.amazon.jdbc.hostlistprovider.monitoring.MonitoringRdsMultiAzHostListProvider; +import software.amazon.jdbc.hostlistprovider.MultiAzTopologyUtils; +import software.amazon.jdbc.hostlistprovider.RdsHostListProvider; +import software.amazon.jdbc.hostlistprovider.TopologyUtils; +import software.amazon.jdbc.hostlistprovider.monitoring.MonitoringRdsHostListProvider; import software.amazon.jdbc.plugin.failover.FailoverRestriction; import software.amazon.jdbc.plugin.failover2.FailoverConnectionPlugin; import software.amazon.jdbc.util.DriverInfo; import software.amazon.jdbc.util.RdsUtils; import software.amazon.jdbc.util.StringUtils; -public class RdsMultiAzDbClusterMysqlDialect extends MysqlDialect { +public class MultiAzClusterMysqlDialect extends MysqlDialect implements MultiAzClusterDialect { - private static final String TOPOLOGY_QUERY = "SELECT id, endpoint, port FROM mysql.rds_topology"; - - private static final String TOPOLOGY_TABLE_EXIST_QUERY = + protected static final String REPORT_HOST_EXISTS_QUERY = "SHOW VARIABLES LIKE 'report_host'"; + protected static final String TOPOLOGY_TABLE_EXISTS_QUERY = "SELECT 1 AS tmp FROM information_schema.tables WHERE" - + " table_schema = 'mysql' AND table_name = 'rds_topology'"; - - // For reader nodes, the query returns a writer node ID. For a writer node, the query returns no data. - private static final String FETCH_WRITER_NODE_QUERY = "SHOW REPLICA STATUS"; - - private static final String FETCH_WRITER_NODE_QUERY_COLUMN_NAME = "Source_Server_Id"; - - private static final String NODE_ID_QUERY = "SELECT @@server_id"; - private static final String IS_READER_QUERY = "SELECT @@read_only"; - - private static final EnumSet RDS_MULTI_AZ_RESTRICTIONS = + + " table_schema = 'mysql' AND table_name = 'rds_topology'"; + protected static final String TOPOLOGY_QUERY = "SELECT id, endpoint, port FROM mysql.rds_topology"; + + // This query returns both instanceId and instanceName. + // For example: "1845128080", "test-multiaz-instance-1" + protected static final String INSTANCE_ID_QUERY = "SELECT id, SUBSTRING_INDEX(endpoint, '.', 1)" + + " FROM mysql.rds_topology" + + " WHERE id = @@server_id"; + // For reader instances, this query returns a writer instance ID. For a writer instance, this query returns no data. 
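The `MultiAzClusterDialect` accessors introduced above let a single shared host list provider stay query-agnostic. A hedged sketch of how the writer-id query defined just below (`SHOW REPLICA STATUS` with the `Source_Server_Id` column) can be consumed to decide whether the current instance is the writer:

```java
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;

final class WriterCheckSketch {
  // Dialect-supplied values, inlined here from MultiAzClusterMysqlDialect.
  static final String WRITER_ID_QUERY = "SHOW REPLICA STATUS";
  static final String WRITER_ID_COLUMN = "Source_Server_Id";

  static boolean isWriter(Connection conn) throws SQLException {
    try (Statement stmt = conn.createStatement();
        ResultSet rs = stmt.executeQuery(WRITER_ID_QUERY)) {
      if (!rs.next()) {
        return true; // the writer has no replication source, so the query returns no rows
      }
      String writerId = rs.getString(WRITER_ID_COLUMN);
      return writerId == null || writerId.isEmpty();
    }
  }
}
```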
+ protected static final String WRITER_ID_QUERY = "SHOW REPLICA STATUS"; + protected static final String WRITER_ID_QUERY_COLUMN_NAME = "Source_Server_Id"; + protected static final String IS_READER_QUERY = "SELECT @@read_only"; + + private static final EnumSet FAILOVER_RESTRICTIONS = EnumSet.of(FailoverRestriction.DISABLE_TASK_A, FailoverRestriction.ENABLE_WRITER_IN_TASK_B); protected final RdsUtils rdsUtils = new RdsUtils(); @Override public boolean isDialect(final Connection connection) { - try { - try (Statement stmt = connection.createStatement(); - ResultSet rs = stmt.executeQuery(TOPOLOGY_TABLE_EXIST_QUERY)) { - if (!rs.next()) { - return false; - } - } - - try (Statement stmt = connection.createStatement(); - ResultSet rs = stmt.executeQuery(TOPOLOGY_QUERY)) { - if (!rs.next()) { - return false; - } - } + if (!dialectUtils.checkExistenceQueries(connection, TOPOLOGY_TABLE_EXISTS_QUERY, TOPOLOGY_QUERY)) { + return false; + } - try (Statement stmt = connection.createStatement(); - ResultSet rs = stmt.executeQuery("SHOW VARIABLES LIKE 'report_host'")) { - if (!rs.next()) { - return false; - } - final String reportHost = rs.getString(2); // get variable value; expected value is IP address - return !StringUtils.isNullOrEmpty(reportHost); + try (Statement stmt = connection.createStatement(); + ResultSet rs = stmt.executeQuery(REPORT_HOST_EXISTS_QUERY)) { + if (!rs.next()) { + return false; } + final String reportHost = rs.getString(2); // Expected value is an IP address + return !StringUtils.isNullOrEmpty(reportHost); } catch (final SQLException ex) { - // ignore + return false; } - return false; } @Override @@ -93,31 +84,14 @@ public boolean isDialect(final Connection connection) { } @Override - public HostListProviderSupplier getHostListProvider() { + public HostListProviderSupplier getHostListProviderSupplier() { return (properties, initialUrl, servicesContainer) -> { final PluginService pluginService = servicesContainer.getPluginService(); + final TopologyUtils topologyUtils = new MultiAzTopologyUtils(this, pluginService.getHostSpecBuilder()); if (pluginService.isPluginInUse(FailoverConnectionPlugin.class)) { - return new MonitoringRdsMultiAzHostListProvider( - properties, - initialUrl, - servicesContainer, - TOPOLOGY_QUERY, - NODE_ID_QUERY, - IS_READER_QUERY, - FETCH_WRITER_NODE_QUERY, - FETCH_WRITER_NODE_QUERY_COLUMN_NAME); - - } else { - return new RdsMultiAzDbClusterListProvider( - properties, - initialUrl, - servicesContainer, - TOPOLOGY_QUERY, - NODE_ID_QUERY, - IS_READER_QUERY, - FETCH_WRITER_NODE_QUERY, - FETCH_WRITER_NODE_QUERY_COLUMN_NAME); + return new MonitoringRdsHostListProvider(topologyUtils, properties, initialUrl, servicesContainer); } + return new RdsHostListProvider(topologyUtils, properties, initialUrl, servicesContainer); }; } @@ -134,6 +108,31 @@ public void prepareConnectProperties( @Override public EnumSet getFailoverRestrictions() { - return RDS_MULTI_AZ_RESTRICTIONS; + return FAILOVER_RESTRICTIONS; + } + + @Override + public String getTopologyQuery() { + return TOPOLOGY_QUERY; + } + + @Override + public String getInstanceIdQuery() { + return INSTANCE_ID_QUERY; + } + + @Override + public String getIsReaderQuery() { + return IS_READER_QUERY; + } + + @Override + public String getWriterIdQuery() { + return WRITER_ID_QUERY; + } + + @Override + public String getWriterIdColumnName() { + return WRITER_ID_QUERY_COLUMN_NAME; } } diff --git a/wrapper/src/main/java/software/amazon/jdbc/dialect/RdsMultiAzDbClusterPgDialect.java 
b/wrapper/src/main/java/software/amazon/jdbc/dialect/MultiAzClusterPgDialect.java similarity index 55% rename from wrapper/src/main/java/software/amazon/jdbc/dialect/RdsMultiAzDbClusterPgDialect.java rename to wrapper/src/main/java/software/amazon/jdbc/dialect/MultiAzClusterPgDialect.java index eb3796adb..ba3600710 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/dialect/RdsMultiAzDbClusterPgDialect.java +++ b/wrapper/src/main/java/software/amazon/jdbc/dialect/MultiAzClusterPgDialect.java @@ -21,56 +21,48 @@ import java.sql.SQLException; import java.sql.Statement; import java.util.List; -import java.util.logging.Logger; import software.amazon.jdbc.PluginService; import software.amazon.jdbc.exceptions.ExceptionHandler; import software.amazon.jdbc.exceptions.MultiAzDbClusterPgExceptionHandler; -import software.amazon.jdbc.hostlistprovider.RdsMultiAzDbClusterListProvider; -import software.amazon.jdbc.hostlistprovider.monitoring.MonitoringRdsMultiAzHostListProvider; +import software.amazon.jdbc.hostlistprovider.MultiAzTopologyUtils; +import software.amazon.jdbc.hostlistprovider.RdsHostListProvider; +import software.amazon.jdbc.hostlistprovider.TopologyUtils; +import software.amazon.jdbc.hostlistprovider.monitoring.MonitoringRdsHostListProvider; import software.amazon.jdbc.plugin.failover2.FailoverConnectionPlugin; import software.amazon.jdbc.util.DriverInfo; -public class RdsMultiAzDbClusterPgDialect extends PgDialect { +public class MultiAzClusterPgDialect extends PgDialect implements MultiAzClusterDialect { - private static final Logger LOGGER = Logger.getLogger(RdsMultiAzDbClusterPgDialect.class.getName()); - - private static MultiAzDbClusterPgExceptionHandler exceptionHandler; - - private static final String TOPOLOGY_QUERY = + protected static final String IS_RDS_CLUSTER_QUERY = + "SELECT multi_az_db_cluster_source_dbi_resource_id FROM rds_tools.multi_az_db_cluster_source_dbi_resource_id()"; + protected static final String TOPOLOGY_QUERY = "SELECT id, endpoint, port FROM rds_tools.show_topology('aws_jdbc_driver-" + DriverInfo.DRIVER_VERSION + "')"; - // For reader nodes, the query should return a writer node ID. For a writer node, the query should return no data. - private static final String FETCH_WRITER_NODE_QUERY = + // This query returns both instanceId and instanceName. + // For example: "db-WQFQKBTL2LQUPIEFIFBGENS4ZQ", "test-multiaz-instance-1" + protected static final String INSTANCE_ID_QUERY = + "SELECT id, SUBSTRING(endpoint FROM 0 FOR POSITION('.' IN endpoint))" + + " FROM rds_tools.show_topology()" + + " WHERE id OPERATOR(pg_catalog.=) rds_tools.dbi_resource_id()"; + // For reader instances, this query should return a writer instance ID. + // For a writer instance, this query should return no data. 
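For the PG variant, the reader check defined just below maps directly to `pg_is_in_recovery()`, which is true on a reader (standby) and false on the writer. A minimal companion sketch:

```java
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;

final class PgRoleCheckSketch {
  static boolean isReader(Connection conn) throws SQLException {
    try (Statement stmt = conn.createStatement();
        ResultSet rs = stmt.executeQuery("SELECT pg_catalog.pg_is_in_recovery()")) {
      return rs.next() && rs.getBoolean(1); // true on a standby, false on the writer
    }
  }
}
```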
+ protected static final String WRITER_ID_QUERY = "SELECT multi_az_db_cluster_source_dbi_resource_id FROM rds_tools.multi_az_db_cluster_source_dbi_resource_id()" + " WHERE multi_az_db_cluster_source_dbi_resource_id OPERATOR(pg_catalog.!=)" + " (SELECT dbi_resource_id FROM rds_tools.dbi_resource_id())"; + protected static final String WRITER_ID_QUERY_COLUMN_NAME = "multi_az_db_cluster_source_dbi_resource_id"; + protected static final String IS_READER_QUERY = "SELECT pg_catalog.pg_is_in_recovery()"; - private static final String IS_RDS_CLUSTER_QUERY = - "SELECT multi_az_db_cluster_source_dbi_resource_id FROM rds_tools.multi_az_db_cluster_source_dbi_resource_id()"; - - private static final String FETCH_WRITER_NODE_QUERY_COLUMN_NAME = "multi_az_db_cluster_source_dbi_resource_id"; - - private static final String NODE_ID_QUERY = "SELECT dbi_resource_id FROM rds_tools.dbi_resource_id()"; - - private static final String IS_READER_QUERY = "SELECT pg_catalog.pg_is_in_recovery()"; - - @Override - public ExceptionHandler getExceptionHandler() { - if (exceptionHandler == null) { - exceptionHandler = new MultiAzDbClusterPgExceptionHandler(); - } - return exceptionHandler; - } + private static MultiAzDbClusterPgExceptionHandler exceptionHandler; @Override public boolean isDialect(final Connection connection) { try (Statement stmt = connection.createStatement(); - ResultSet rs = stmt.executeQuery(IS_RDS_CLUSTER_QUERY)) { + ResultSet rs = stmt.executeQuery(IS_RDS_CLUSTER_QUERY)) { return rs.next() && rs.getString(1) != null; } catch (final SQLException ex) { - // ignore + return false; } - return false; } @Override @@ -79,32 +71,48 @@ public boolean isDialect(final Connection connection) { } @Override - public HostListProviderSupplier getHostListProvider() { + public ExceptionHandler getExceptionHandler() { + if (exceptionHandler == null) { + exceptionHandler = new MultiAzDbClusterPgExceptionHandler(); + } + return exceptionHandler; + } + + @Override + public HostListProviderSupplier getHostListProviderSupplier() { return (properties, initialUrl, servicesContainer) -> { final PluginService pluginService = servicesContainer.getPluginService(); + final TopologyUtils topologyUtils = new MultiAzTopologyUtils(this, pluginService.getHostSpecBuilder()); if (pluginService.isPluginInUse(FailoverConnectionPlugin.class)) { - return new MonitoringRdsMultiAzHostListProvider( - properties, - initialUrl, - servicesContainer, - TOPOLOGY_QUERY, - NODE_ID_QUERY, - IS_READER_QUERY, - FETCH_WRITER_NODE_QUERY, - FETCH_WRITER_NODE_QUERY_COLUMN_NAME); - - } else { - - return new RdsMultiAzDbClusterListProvider( - properties, - initialUrl, - servicesContainer, - TOPOLOGY_QUERY, - NODE_ID_QUERY, - IS_READER_QUERY, - FETCH_WRITER_NODE_QUERY, - FETCH_WRITER_NODE_QUERY_COLUMN_NAME); + return new MonitoringRdsHostListProvider(topologyUtils, properties, initialUrl, servicesContainer); } + + return new RdsHostListProvider(topologyUtils, properties, initialUrl, servicesContainer); }; } + + @Override + public String getTopologyQuery() { + return TOPOLOGY_QUERY; + } + + @Override + public String getInstanceIdQuery() { + return INSTANCE_ID_QUERY; + } + + @Override + public String getIsReaderQuery() { + return IS_READER_QUERY; + } + + @Override + public String getWriterIdQuery() { + return WRITER_ID_QUERY; + } + + @Override + public String getWriterIdColumnName() { + return WRITER_ID_QUERY_COLUMN_NAME; + } } diff --git a/wrapper/src/main/java/software/amazon/jdbc/dialect/MysqlDialect.java 
b/wrapper/src/main/java/software/amazon/jdbc/dialect/MysqlDialect.java index de9f181d3..e21bd06d5 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/dialect/MysqlDialect.java +++ b/wrapper/src/main/java/software/amazon/jdbc/dialect/MysqlDialect.java @@ -33,45 +33,24 @@ public class MysqlDialect implements Dialect { + protected static final String VERSION_QUERY = "SHOW VARIABLES LIKE 'version_comment'"; + protected static final String HOST_ALIAS_QUERY = "SELECT CONCAT(@@hostname, ':', @@port)"; + private static MySQLExceptionHandler mySQLExceptionHandler; + private static final EnumSet NO_FAILOVER_RESTRICTIONS = + EnumSet.noneOf(FailoverRestriction.class); private static final List dialectUpdateCandidates = Arrays.asList( - DialectCodes.RDS_MULTI_AZ_MYSQL_CLUSTER, + DialectCodes.GLOBAL_AURORA_MYSQL, DialectCodes.AURORA_MYSQL, + DialectCodes.RDS_MULTI_AZ_MYSQL_CLUSTER, DialectCodes.RDS_MYSQL ); - private static MySQLExceptionHandler mySQLExceptionHandler; - - private static final EnumSet NO_RESTRICTIONS = EnumSet.noneOf(FailoverRestriction.class); - - @Override - public int getDefaultPort() { - return 3306; - } - - @Override - public ExceptionHandler getExceptionHandler() { - if (mySQLExceptionHandler == null) { - mySQLExceptionHandler = new MySQLExceptionHandler(); - } - return mySQLExceptionHandler; - } - @Override - public String getHostAliasQuery() { - return "SELECT CONCAT(@@hostname, ':', @@port)"; - } - - @Override - public String getServerVersionQuery() { - return "SHOW VARIABLES LIKE 'version_comment'"; - } + protected final DialectUtils dialectUtils = new DialectUtils(); @Override public boolean isDialect(final Connection connection) { - Statement stmt = null; - ResultSet rs = null; - try { - stmt = connection.createStatement(); - rs = stmt.executeQuery(this.getServerVersionQuery()); + try (Statement stmt = connection.createStatement(); + ResultSet rs = stmt.executeQuery(VERSION_QUERY)) { while (rs.next()) { final String columnValue = rs.getString(2); if (columnValue != null && columnValue.toLowerCase().contains("mysql")) { @@ -79,32 +58,31 @@ public boolean isDialect(final Connection connection) { } } } catch (final SQLException ex) { - // ignore - } finally { - if (stmt != null) { - try { - stmt.close(); - } catch (SQLException ex) { - // ignore - } - } - if (rs != null) { - try { - rs.close(); - } catch (SQLException ex) { - // ignore - } - } + return false; } + return false; } + @Override + public int getDefaultPort() { + return 3306; + } + @Override public List getDialectUpdateCandidates() { return dialectUpdateCandidates; } - public HostListProviderSupplier getHostListProvider() { + @Override + public ExceptionHandler getExceptionHandler() { + if (mySQLExceptionHandler == null) { + mySQLExceptionHandler = new MySQLExceptionHandler(); + } + return mySQLExceptionHandler; + } + + public HostListProviderSupplier getHostListProviderSupplier() { return (properties, initialUrl, servicesContainer) -> new ConnectionStringHostListProvider(properties, initialUrl, servicesContainer.getHostListProviderService()); } @@ -117,6 +95,16 @@ public void prepareConnectProperties( @Override public EnumSet getFailoverRestrictions() { - return NO_RESTRICTIONS; + return NO_FAILOVER_RESTRICTIONS; + } + + @Override + public String getServerVersionQuery() { + return VERSION_QUERY; + } + + @Override + public String getHostAliasQuery() { + return HOST_ALIAS_QUERY; } } diff --git a/wrapper/src/main/java/software/amazon/jdbc/dialect/PgDialect.java 
b/wrapper/src/main/java/software/amazon/jdbc/dialect/PgDialect.java index 075cf242d..abf33ee56 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/dialect/PgDialect.java +++ b/wrapper/src/main/java/software/amazon/jdbc/dialect/PgDialect.java @@ -36,20 +36,37 @@ */ public class PgDialect implements Dialect { + protected static final String PG_PROC_EXISTS_QUERY = "SELECT 1 FROM pg_catalog.pg_proc LIMIT 1"; + protected static final String VERSION_QUERY = "SELECT 'version', pg_catalog.VERSION()"; + protected static final String HOST_ALIAS_QUERY = + "SELECT pg_catalog.CONCAT(pg_catalog.inet_server_addr(), ':', pg_catalog.inet_server_port())"; + + private static PgExceptionHandler pgExceptionHandler; + private static final EnumSet NO_FAILOVER_RESTRICTIONS = + EnumSet.noneOf(FailoverRestriction.class); private static final List dialectUpdateCandidates = Arrays.asList( + DialectCodes.GLOBAL_AURORA_PG, DialectCodes.AURORA_PG, DialectCodes.RDS_MULTI_AZ_PG_CLUSTER, DialectCodes.RDS_PG); - private static PgExceptionHandler pgExceptionHandler; + protected final DialectUtils dialectUtils = new DialectUtils(); - private static final EnumSet NO_RESTRICTIONS = EnumSet.noneOf(FailoverRestriction.class); + @Override + public boolean isDialect(final Connection connection) { + return dialectUtils.checkExistenceQueries(connection, PG_PROC_EXISTS_QUERY); + } @Override public int getDefaultPort() { return 5432; } + @Override + public List getDialectUpdateCandidates() { + return dialectUpdateCandidates; + } + @Override public ExceptionHandler getExceptionHandler() { if (pgExceptionHandler == null) { @@ -59,53 +76,7 @@ public ExceptionHandler getExceptionHandler() { } @Override - public String getHostAliasQuery() { - return "SELECT pg_catalog.CONCAT(pg_catalog.inet_server_addr(), ':', pg_catalog.inet_server_port())"; - } - - @Override - public String getServerVersionQuery() { - return "SELECT 'version', pg_catalog.VERSION()"; - } - - @Override - public boolean isDialect(final Connection connection) { - Statement stmt = null; - ResultSet rs = null; - try { - stmt = connection.createStatement(); - rs = stmt.executeQuery("SELECT 1 FROM pg_catalog.pg_proc LIMIT 1"); - if (rs.next()) { - return true; - } - } catch (final SQLException ex) { - // ignore - } finally { - if (stmt != null) { - try { - stmt.close(); - } catch (SQLException ex) { - // ignore - } - } - if (rs != null) { - try { - rs.close(); - } catch (SQLException ex) { - // ignore - } - } - } - return false; - } - - @Override - public List getDialectUpdateCandidates() { - return dialectUpdateCandidates; - } - - @Override - public HostListProviderSupplier getHostListProvider() { + public HostListProviderSupplier getHostListProviderSupplier() { return (properties, initialUrl, servicesContainer) -> new ConnectionStringHostListProvider(properties, initialUrl, servicesContainer.getHostListProviderService()); } @@ -118,6 +89,16 @@ public void prepareConnectProperties( @Override public EnumSet getFailoverRestrictions() { - return NO_RESTRICTIONS; + return NO_FAILOVER_RESTRICTIONS; + } + + @Override + public String getServerVersionQuery() { + return VERSION_QUERY; + } + + @Override + public String getHostAliasQuery() { + return HOST_ALIAS_QUERY; } } diff --git a/wrapper/src/main/java/software/amazon/jdbc/dialect/RdsMysqlDialect.java b/wrapper/src/main/java/software/amazon/jdbc/dialect/RdsMysqlDialect.java index 22e010ea7..1e173c772 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/dialect/RdsMysqlDialect.java +++ 
b/wrapper/src/main/java/software/amazon/jdbc/dialect/RdsMysqlDialect.java @@ -26,15 +26,16 @@ public class RdsMysqlDialect extends MysqlDialect implements BlueGreenDialect { - private static final String BG_STATUS_QUERY = - "SELECT * FROM mysql.rds_topology"; - - private static final String TOPOLOGY_TABLE_EXIST_QUERY = + protected static final String REPORT_HOST_EXISTS_QUERY = "SHOW VARIABLES LIKE 'report_host'"; + protected static final String TOPOLOGY_TABLE_EXISTS_QUERY = "SELECT 1 AS tmp FROM information_schema.tables WHERE" + " table_schema = 'mysql' AND table_name = 'rds_topology'"; + protected static final String BG_STATUS_QUERY = "SELECT * FROM mysql.rds_topology"; + private static final List dialectUpdateCandidates = Arrays.asList( DialectCodes.AURORA_MYSQL, + DialectCodes.GLOBAL_AURORA_MYSQL, DialectCodes.RDS_MULTI_AZ_MYSQL_CLUSTER); @Override @@ -53,50 +54,34 @@ public boolean isDialect(final Connection connection) { // | Variable_name | value | // |-----------------|---------------------| // | version_comment | Source distribution | - // If super.idDialect returns true there is no need to check for RdsMysqlDialect. + // If super.isDialect returns true there is no need to check for RdsMysqlDialect. return false; } - Statement stmt = null; - ResultSet rs = null; - - try { - stmt = connection.createStatement(); - rs = stmt.executeQuery(this.getServerVersionQuery()); - if (!rs.next()) { - return false; - } - final String columnValue = rs.getString(2); - if (!"Source distribution".equalsIgnoreCase(columnValue)) { - return false; - } - rs.close(); - rs = stmt.executeQuery("SHOW VARIABLES LIKE 'report_host'"); - if (!rs.next()) { - return false; - } - final String reportHost = rs.getString(2); // get variable value; expected empty value - return StringUtils.isNullOrEmpty(reportHost); + try (Statement stmt = connection.createStatement()) { + try (ResultSet rs = stmt.executeQuery(VERSION_QUERY)) { + if (!rs.next()) { + return false; + } - } catch (final SQLException ex) { - // ignore - } finally { - if (stmt != null) { - try { - stmt.close(); - } catch (SQLException ex) { - // ignore + final String columnValue = rs.getString(2); + if (!"Source distribution".equalsIgnoreCase(columnValue)) { + return false; } } - if (rs != null) { - try { - rs.close(); - } catch (SQLException ex) { - // ignore + + try (ResultSet rs = stmt.executeQuery(REPORT_HOST_EXISTS_QUERY)) { + if (!rs.next()) { + return false; } + + final String reportHost = rs.getString(2); // An empty value is expected + return StringUtils.isNullOrEmpty(reportHost); } + + } catch (final SQLException ex) { + return false; } - return false; } @Override @@ -105,19 +90,12 @@ public List getDialectUpdateCandidates() { } @Override - public String getBlueGreenStatusQuery() { - return BG_STATUS_QUERY; + public boolean isBlueGreenStatusAvailable(final Connection connection) { + return dialectUtils.checkExistenceQueries(connection, TOPOLOGY_TABLE_EXISTS_QUERY); } @Override - public boolean isBlueGreenStatusAvailable(final Connection connection) { - try { - try (Statement statement = connection.createStatement(); - ResultSet rs = statement.executeQuery(TOPOLOGY_TABLE_EXIST_QUERY)) { - return rs.next(); - } - } catch (SQLException ex) { - return false; - } + public String getBlueGreenStatusQuery() { + return BG_STATUS_QUERY; } } diff --git a/wrapper/src/main/java/software/amazon/jdbc/dialect/RdsPgDialect.java b/wrapper/src/main/java/software/amazon/jdbc/dialect/RdsPgDialect.java index d59b9f2eb..62a52d019 100644 --- 
a/wrapper/src/main/java/software/amazon/jdbc/dialect/RdsPgDialect.java +++ b/wrapper/src/main/java/software/amazon/jdbc/dialect/RdsPgDialect.java @@ -24,6 +24,7 @@ import java.util.List; import java.util.logging.Logger; import software.amazon.jdbc.util.DriverInfo; +import software.amazon.jdbc.util.Messages; /** * Suitable for the following AWS PG configurations. @@ -33,60 +34,42 @@ */ public class RdsPgDialect extends PgDialect implements BlueGreenDialect { - private static final Logger LOGGER = Logger.getLogger(RdsPgDialect.class.getName()); - - private static final List dialectUpdateCandidates = Arrays.asList( - DialectCodes.RDS_MULTI_AZ_PG_CLUSTER, - DialectCodes.AURORA_PG); - - private static final String extensionsSql = "SELECT (setting LIKE '%rds_tools%') AS rds_tools, " + protected static final String EXTENSIONS_EXIST_SQL = "SELECT (setting LIKE '%rds_tools%') AS rds_tools, " + "(setting LIKE '%aurora_stat_utils%') AS aurora_stat_utils " + "FROM pg_catalog.pg_settings " + "WHERE name OPERATOR(pg_catalog.=) 'rds.extensions'"; + protected static final String TOPOLOGY_TABLE_EXISTS_QUERY = + "SELECT 'rds_tools.show_topology'::regproc"; - private static final String BG_STATUS_QUERY = + protected static final String BG_STATUS_QUERY = "SELECT * FROM rds_tools.show_topology('aws_jdbc_driver-" + DriverInfo.DRIVER_VERSION + "')"; - private static final String TOPOLOGY_TABLE_EXIST_QUERY = - "SELECT 'rds_tools.show_topology'::regproc"; + private static final Logger LOGGER = Logger.getLogger(RdsPgDialect.class.getName()); + private static final List dialectUpdateCandidates = Arrays.asList( + DialectCodes.RDS_MULTI_AZ_PG_CLUSTER, + DialectCodes.GLOBAL_AURORA_PG, + DialectCodes.AURORA_PG); @Override public boolean isDialect(final Connection connection) { if (!super.isDialect(connection)) { return false; } - Statement stmt = null; - ResultSet rs = null; - try { - stmt = connection.createStatement(); - rs = stmt.executeQuery(extensionsSql); + try (Statement stmt = connection.createStatement(); + ResultSet rs = stmt.executeQuery(EXTENSIONS_EXIST_SQL)) { while (rs.next()) { final boolean rdsTools = rs.getBoolean("rds_tools"); final boolean auroraUtils = rs.getBoolean("aurora_stat_utils"); - LOGGER.finest(() -> String.format("rdsTools: %b, auroraUtils: %b", rdsTools, auroraUtils)); + LOGGER.finest(Messages.get("RdsPgDialect.rdsToolsAuroraUtils", new Object[] {rdsTools, auroraUtils})); if (rdsTools && !auroraUtils) { return true; } } } catch (final SQLException ex) { - // ignore - } finally { - if (stmt != null) { - try { - stmt.close(); - } catch (SQLException ex) { - // ignore - } - } - if (rs != null) { - try { - rs.close(); - } catch (SQLException ex) { - // ignore - } - } + return false; } + return false; } @@ -96,19 +79,12 @@ public List getDialectUpdateCandidates() { } @Override - public String getBlueGreenStatusQuery() { - return BG_STATUS_QUERY; + public boolean isBlueGreenStatusAvailable(final Connection connection) { + return dialectUtils.checkExistenceQueries(connection, TOPOLOGY_TABLE_EXISTS_QUERY); } @Override - public boolean isBlueGreenStatusAvailable(final Connection connection) { - try { - try (Statement statement = connection.createStatement(); - ResultSet rs = statement.executeQuery(TOPOLOGY_TABLE_EXIST_QUERY)) { - return rs.next(); - } - } catch (SQLException ex) { - return false; - } + public String getBlueGreenStatusQuery() { + return BG_STATUS_QUERY; } } diff --git a/wrapper/src/main/java/software/amazon/jdbc/dialect/TopologyDialect.java 
b/wrapper/src/main/java/software/amazon/jdbc/dialect/TopologyDialect.java new file mode 100644 index 000000000..e7aa0f4d2 --- /dev/null +++ b/wrapper/src/main/java/software/amazon/jdbc/dialect/TopologyDialect.java @@ -0,0 +1,27 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package software.amazon.jdbc.dialect; + +public interface TopologyDialect extends Dialect { + String getTopologyQuery(); + + String getInstanceIdQuery(); + + String getWriterIdQuery(); + + String getIsReaderQuery(); +} diff --git a/wrapper/src/main/java/software/amazon/jdbc/dialect/UnknownDialect.java b/wrapper/src/main/java/software/amazon/jdbc/dialect/UnknownDialect.java index 65b9eb544..067261242 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/dialect/UnknownDialect.java +++ b/wrapper/src/main/java/software/amazon/jdbc/dialect/UnknownDialect.java @@ -31,6 +31,8 @@ public class UnknownDialect implements Dialect { private static final List dialectUpdateCandidates = Arrays.asList( + DialectCodes.GLOBAL_AURORA_PG, + DialectCodes.GLOBAL_AURORA_MYSQL, DialectCodes.AURORA_PG, DialectCodes.AURORA_MYSQL, DialectCodes.RDS_MULTI_AZ_PG_CLUSTER, @@ -80,7 +82,7 @@ public List getDialectUpdateCandidates() { } @Override - public HostListProviderSupplier getHostListProvider() { + public HostListProviderSupplier getHostListProviderSupplier() { return (properties, initialUrl, servicesContainer) -> new ConnectionStringHostListProvider(properties, initialUrl, servicesContainer.getHostListProviderService()); } diff --git a/wrapper/src/main/java/software/amazon/jdbc/exceptions/AbstractPgExceptionHandler.java b/wrapper/src/main/java/software/amazon/jdbc/exceptions/AbstractPgExceptionHandler.java index 22e9cf1be..9b8337f5c 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/exceptions/AbstractPgExceptionHandler.java +++ b/wrapper/src/main/java/software/amazon/jdbc/exceptions/AbstractPgExceptionHandler.java @@ -23,16 +23,13 @@ import software.amazon.jdbc.util.StringUtils; public abstract class AbstractPgExceptionHandler implements ExceptionHandler { + + protected static final String READ_ONLY_CONNECTION_SQLSTATE = "25006"; + public abstract List getNetworkErrors(); public abstract List getAccessErrors(); - @Override - @Deprecated - public boolean isNetworkException(Throwable throwable) { - return this.isNetworkException(throwable, null); - } - @Override public boolean isNetworkException(final Throwable throwable, @Nullable TargetDriverDialect targetDriverDialect) { Throwable exception = throwable; @@ -68,12 +65,6 @@ public boolean isNetworkException(final String sqlState) { return false; } - @Override - @Deprecated - public boolean isLoginException(final Throwable throwable) { - return this.isLoginException(throwable, null); - } - @Override public boolean isLoginException(final Throwable throwable, @Nullable TargetDriverDialect targetDriverDialect) { Throwable exception = throwable; @@ -107,4 +98,36 @@ public boolean 
isLoginException(final String sqlState) { } return getAccessErrors().contains(sqlState); } + + @Override + public boolean isReadOnlyConnectionException( + final @Nullable String sqlState, final @Nullable Integer errorCode) { + return READ_ONLY_CONNECTION_SQLSTATE.equals(sqlState); + } + + @Override + public boolean isReadOnlyConnectionException( + final Throwable throwable, @Nullable TargetDriverDialect targetDriverDialect) { + + Throwable exception = throwable; + + while (exception != null) { + String sqlState = null; + Integer errorCode = null; + if (exception instanceof SQLException) { + sqlState = ((SQLException) exception).getSQLState(); + errorCode = ((SQLException) exception).getErrorCode(); + } else if (targetDriverDialect != null) { + sqlState = targetDriverDialect.getSQLState(exception); + } + + if (isReadOnlyConnectionException(sqlState, errorCode)) { + return true; + } + + exception = exception.getCause(); + } + + return false; + } } diff --git a/wrapper/src/main/java/software/amazon/jdbc/exceptions/ExceptionHandler.java b/wrapper/src/main/java/software/amazon/jdbc/exceptions/ExceptionHandler.java index ab886ece1..16b79f2c2 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/exceptions/ExceptionHandler.java +++ b/wrapper/src/main/java/software/amazon/jdbc/exceptions/ExceptionHandler.java @@ -21,33 +21,15 @@ public interface ExceptionHandler { - /** - * The method determines whether provided throwable is about any network issues. - * - * @param throwable A throwable object to check. - * @return true, if a provided throwable object is network-related. - * - * @deprecated Use similar method below that accepts throwable and target driver dialect. - */ - @Deprecated - boolean isNetworkException(Throwable throwable); - boolean isNetworkException(Throwable throwable, @Nullable TargetDriverDialect targetDriverDialect); boolean isNetworkException(String sqlState); boolean isLoginException(String sqlState); - /** - * The method determines whether provided throwable is about any login or authentication issues. - * - * @param throwable A throwable object to check. - * @return true, if a provided throwable object is related to authentication. + boolean isLoginException(Throwable throwable, @Nullable TargetDriverDialect targetDriverDialect); - * @deprecated Use similar method below that accepts throwable and target driver dialect. - */ - @Deprecated - boolean isLoginException(Throwable throwable); + boolean isReadOnlyConnectionException(final @Nullable String sqlState, final @Nullable Integer errorCode); - boolean isLoginException(Throwable throwable, @Nullable TargetDriverDialect targetDriverDialect); + boolean isReadOnlyConnectionException(Throwable throwable, @Nullable TargetDriverDialect targetDriverDialect); } diff --git a/wrapper/src/main/java/software/amazon/jdbc/exceptions/ExceptionManager.java b/wrapper/src/main/java/software/amazon/jdbc/exceptions/ExceptionManager.java index b6f6f5a64..3f7c45c07 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/exceptions/ExceptionManager.java +++ b/wrapper/src/main/java/software/amazon/jdbc/exceptions/ExceptionManager.java @@ -16,34 +16,13 @@ package software.amazon.jdbc.exceptions; +import org.checkerframework.checker.nullness.qual.Nullable; import software.amazon.jdbc.Driver; import software.amazon.jdbc.dialect.Dialect; import software.amazon.jdbc.targetdriverdialect.TargetDriverDialect; public class ExceptionManager { - /** - * Sets a custom exception handler. - * - * @param exceptionHandler A custom exception handler to use. 
- * - * @deprecated Use software.amazon.jdbc.Driver instead - */ - @Deprecated - public static void setCustomHandler(final ExceptionHandler exceptionHandler) { - Driver.setCustomExceptionHandler(exceptionHandler); - } - - /** - * Resets a custom exception handler. - * - * @deprecated Use software.amazon.jdbc.Driver instead - */ - @Deprecated - public static void resetCustomHandler() { - Driver.resetCustomExceptionHandler(); - } - public boolean isLoginException( final Dialect dialect, final Throwable throwable, final TargetDriverDialect targetDriverDialect) { final ExceptionHandler handler = getHandler(dialect); @@ -66,6 +45,18 @@ public boolean isNetworkException(final Dialect dialect, final String sqlState) return handler.isNetworkException(sqlState); } + public boolean isReadOnlyConnectionException( + final Dialect dialect, final Throwable throwable, final TargetDriverDialect targetDriverDialect) { + final ExceptionHandler handler = getHandler(dialect); + return handler.isReadOnlyConnectionException(throwable, targetDriverDialect); + } + + public boolean isReadOnlyConnectionException( + final Dialect dialect, final @Nullable String sqlState, final @Nullable Integer errorCode) { + final ExceptionHandler handler = getHandler(dialect); + return handler.isReadOnlyConnectionException(sqlState, errorCode); + } + private ExceptionHandler getHandler(final Dialect dialect) { final ExceptionHandler customHandler = Driver.getCustomExceptionHandler(); return customHandler != null ? customHandler : dialect.getExceptionHandler(); diff --git a/wrapper/src/main/java/software/amazon/jdbc/exceptions/GenericExceptionHandler.java b/wrapper/src/main/java/software/amazon/jdbc/exceptions/GenericExceptionHandler.java index cc34175db..a49dea5c4 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/exceptions/GenericExceptionHandler.java +++ b/wrapper/src/main/java/software/amazon/jdbc/exceptions/GenericExceptionHandler.java @@ -38,12 +38,6 @@ public class GenericExceptionHandler implements ExceptionHandler { "08" ); - @Override - @Deprecated - public boolean isNetworkException(Throwable throwable) { - return this.isNetworkException(throwable, null); - } - @Override public boolean isNetworkException(final Throwable throwable, @Nullable TargetDriverDialect targetDriverDialect) { Throwable exception = throwable; @@ -79,12 +73,6 @@ public boolean isNetworkException(final String sqlState) { return false; } - @Override - @Deprecated - public boolean isLoginException(Throwable throwable) { - return this.isLoginException(throwable, null); - } - @Override public boolean isLoginException(final Throwable throwable, TargetDriverDialect targetDriverDialect) { Throwable exception = throwable; @@ -115,4 +103,15 @@ public boolean isLoginException(final Throwable throwable, TargetDriverDialect t public boolean isLoginException(final String sqlState) { return ACCESS_ERRORS.contains(sqlState); } + + @Override + public boolean isReadOnlyConnectionException(@Nullable String sqlState, @Nullable Integer errorCode) { + return false; + } + + @Override + public boolean isReadOnlyConnectionException( + Throwable throwable, @Nullable TargetDriverDialect targetDriverDialect) { + return false; + } } diff --git a/wrapper/src/main/java/software/amazon/jdbc/exceptions/MySQLExceptionHandler.java b/wrapper/src/main/java/software/amazon/jdbc/exceptions/MySQLExceptionHandler.java index a504f5074..feb98fe38 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/exceptions/MySQLExceptionHandler.java +++ 
b/wrapper/src/main/java/software/amazon/jdbc/exceptions/MySQLExceptionHandler.java
@@ -17,6 +17,9 @@
 package software.amazon.jdbc.exceptions;
 
 import java.sql.SQLException;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Set;
 import org.checkerframework.checker.nullness.qual.Nullable;
 import software.amazon.jdbc.targetdriverdialect.TargetDriverDialect;
 import software.amazon.jdbc.util.StringUtils;
@@ -27,11 +30,10 @@ public class MySQLExceptionHandler implements ExceptionHandler {
   public static final String SET_NETWORK_TIMEOUT_ON_CLOSED_CONNECTION =
       "setNetworkTimeout cannot be called on a closed connection";
 
-  @Override
-  @Deprecated
-  public boolean isNetworkException(Throwable throwable) {
-    return this.isNetworkException(throwable, null);
-  }
+  private static final Set<Integer> SQLSTATE_READ_ONLY_CONNECTION = new HashSet<>(Arrays.asList(
+      1290, // The MySQL server is running with the --read-only option, so it cannot execute this statement
+      1836 // Running in read-only mode
+  ));
 
   @Override
   public boolean isNetworkException(final Throwable throwable, @Nullable TargetDriverDialect targetDriverDialect) {
@@ -74,12 +76,6 @@ public boolean isNetworkException(final String sqlState) {
     return sqlState.startsWith("08");
   }
 
-  @Override
-  @Deprecated
-  public boolean isLoginException(Throwable throwable) {
-    return this.isLoginException(throwable, null);
-  }
-
   @Override
   public boolean isLoginException(final Throwable throwable, @Nullable TargetDriverDialect targetDriverDialect) {
     Throwable exception = throwable;
@@ -115,6 +111,39 @@ public boolean isLoginException(final String sqlState) {
     return SQLSTATE_ACCESS_ERROR.equals(sqlState);
   }
 
+  @Override
+  public boolean isReadOnlyConnectionException(
+      final @Nullable String sqlState, final @Nullable Integer errorCode) {
+    // HY000 - generic SQL state; use error code for more specific information
+    return "HY000".equals(sqlState) && errorCode != null && (SQLSTATE_READ_ONLY_CONNECTION.contains(errorCode));
+  }
+
+  @Override
+  public boolean isReadOnlyConnectionException(
+      final Throwable throwable, @Nullable TargetDriverDialect targetDriverDialect) {
+
+    Throwable exception = throwable;
+
+    while (exception != null) {
+      String sqlState = null;
+      Integer errorCode = null;
+      if (exception instanceof SQLException) {
+        sqlState = ((SQLException) exception).getSQLState();
+        errorCode = ((SQLException) exception).getErrorCode();
+      } else if (targetDriverDialect != null) {
+        sqlState = targetDriverDialect.getSQLState(exception);
+      }
+
+      if (isReadOnlyConnectionException(sqlState, errorCode)) {
+        return true;
+      }
+
+      exception = exception.getCause();
+    }
+
+    return false;
+  }
+
   private boolean isHikariMariaDbNetworkException(final SQLException sqlException) {
     return sqlException.getSQLState().equals(SQLSTATE_SYNTAX_ERROR_OR_ACCESS_VIOLATION)
         && sqlException.getMessage().contains(SET_NETWORK_TIMEOUT_ON_CLOSED_CONNECTION);
diff --git a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/AuroraHostListProvider.java b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/AuroraHostListProvider.java
deleted file mode 100644
index fc53f9e1d..000000000
--- a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/AuroraHostListProvider.java
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
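Both read-only handlers walk the cause chain so errors wrapped by connection pools or driver internals are still recognized. A standalone sketch of the combined rules (PG SQLSTATE 25006, `read_only_sql_transaction`; MySQL HY000 with vendor codes 1290/1836), using only constants copied from this diff:

```java
import java.sql.SQLException;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

final class ReadOnlyCheckSketch {
  private static final Set<Integer> MYSQL_READ_ONLY_CODES = new HashSet<>(Arrays.asList(1290, 1836));

  static boolean isReadOnly(SQLException e) {
    // Walk the cause chain the same way the handlers do: a read-only error can
    // be wrapped by pool or driver exceptions.
    for (Throwable t = e; t != null; t = t.getCause()) {
      if (!(t instanceof SQLException)) {
        continue;
      }
      SQLException sqlEx = (SQLException) t;
      if ("25006".equals(sqlEx.getSQLState())) {
        return true; // PostgreSQL: read_only_sql_transaction
      }
      if ("HY000".equals(sqlEx.getSQLState()) && MYSQL_READ_ONLY_CODES.contains(sqlEx.getErrorCode())) {
        return true; // MySQL: generic SQL state, disambiguated by vendor error code
      }
    }
    return false;
  }
}
```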
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package software.amazon.jdbc.hostlistprovider;
-
-
-import java.util.Properties;
-import java.util.logging.Logger;
-import software.amazon.jdbc.util.FullServicesContainer;
-
-
-public class AuroraHostListProvider extends RdsHostListProvider {
-
-  static final Logger LOGGER = Logger.getLogger(AuroraHostListProvider.class.getName());
-
-  public AuroraHostListProvider(
-      final Properties properties,
-      final String originalUrl,
-      final FullServicesContainer servicesContainer,
-      final String topologyQuery,
-      final String nodeIdQuery,
-      final String isReaderQuery) {
-    super(properties,
-        originalUrl,
-        servicesContainer,
-        topologyQuery,
-        nodeIdQuery,
-        isReaderQuery);
-  }
-}
diff --git a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/AuroraTopologyUtils.java b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/AuroraTopologyUtils.java
new file mode 100644
index 000000000..f62415ff9
--- /dev/null
+++ b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/AuroraTopologyUtils.java
@@ -0,0 +1,95 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package software.amazon.jdbc.hostlistprovider;
+
+import java.sql.Connection;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.sql.Timestamp;
+import java.time.Instant;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.logging.Logger;
+import org.checkerframework.checker.nullness.qual.Nullable;
+import software.amazon.jdbc.HostSpec;
+import software.amazon.jdbc.HostSpecBuilder;
+import software.amazon.jdbc.dialect.TopologyDialect;
+import software.amazon.jdbc.util.Messages;
+import software.amazon.jdbc.util.StringUtils;
+
+public class AuroraTopologyUtils extends TopologyUtils {
+  private static final Logger LOGGER = Logger.getLogger(AuroraTopologyUtils.class.getName());
+
+  public AuroraTopologyUtils(TopologyDialect dialect, HostSpecBuilder hostSpecBuilder) {
+    super(dialect, hostSpecBuilder);
+  }
+
+  @Override
+  protected @Nullable List<HostSpec> getHosts(
+      Connection conn, ResultSet rs, HostSpec initialHostSpec, HostSpec instanceTemplate) throws SQLException {
+    // Data in the result set is ordered by last update time, so the latest records are last.
+    // We add hosts to a map to ensure newer records are not overwritten by older ones.
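A toy illustration of the dedup step the comment above describes: rows arrive oldest-first, so inserting into a map keyed by host name leaves the newest record for each instance in place.

```java
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

final class TopologyDedupSketch {
  public static void main(String[] args) {
    List<String[]> rows = new ArrayList<>();        // {hostName, lastUpdateTime}
    rows.add(new String[] {"instance-1", "10:00"}); // stale row
    rows.add(new String[] {"instance-2", "10:05"});
    rows.add(new String[] {"instance-1", "10:07"}); // newer row wins
    Map<String, String[]> byHost = new LinkedHashMap<>();
    for (String[] row : rows) {
      byHost.put(row[0], row);                      // later (newer) rows overwrite earlier ones
    }
    System.out.println(byHost.size() + " unique hosts"); // prints "2 unique hosts"
  }
}
```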
+ Map hostsMap = new HashMap<>(); + while (rs.next()) { + try { + HostSpec host = createHost(rs, initialHostSpec, instanceTemplate); + hostsMap.put(host.getHost(), host); + } catch (Exception e) { + LOGGER.finest(Messages.get("TopologyUtils.errorProcessingQueryResults", new Object[] {e.getMessage()})); + return null; + } + } + + return new ArrayList<>(hostsMap.values()); + } + + @Override + public boolean isWriterInstance(final Connection connection) throws SQLException { + try (final Statement stmt = connection.createStatement()) { + try (final ResultSet rs = stmt.executeQuery(this.dialect.getWriterIdQuery())) { + if (rs.next()) { + return !StringUtils.isNullOrEmpty(rs.getString(1)); + } + } + } + + return false; + } + + protected HostSpec createHost(ResultSet rs, HostSpec initialHostSpec, HostSpec instanceTemplate) throws SQLException { + // According to the topology query the result set should contain 4 columns: + // instance ID, 1/0 (writer/reader), CPU utilization, instance lag in time. + String hostName = rs.getString(1); + final boolean isWriter = rs.getBoolean(2); + final double cpuUtilization = rs.getDouble(3); + final double instanceLag = rs.getDouble(4); + Timestamp lastUpdateTime; + try { + lastUpdateTime = rs.getTimestamp(5); + } catch (Exception e) { + lastUpdateTime = Timestamp.from(Instant.now()); + } + + // Calculate weight based on instance lag in time and CPU utilization. + final long weight = Math.round(instanceLag) * 100L + Math.round(cpuUtilization); + + return createHost(hostName, hostName, isWriter, weight, lastUpdateTime, initialHostSpec, instanceTemplate); + } +} diff --git a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/ConnectionStringHostListProvider.java b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/ConnectionStringHostListProvider.java index 80f55bdad..426ea3963 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/ConnectionStringHostListProvider.java +++ b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/ConnectionStringHostListProvider.java @@ -25,7 +25,6 @@ import java.util.logging.Logger; import org.checkerframework.checker.nullness.qual.NonNull; import software.amazon.jdbc.AwsWrapperProperty; -import software.amazon.jdbc.HostListProviderService; import software.amazon.jdbc.HostRole; import software.amazon.jdbc.HostSpec; import software.amazon.jdbc.util.ConnectionUrlParser; @@ -36,7 +35,6 @@ public class ConnectionStringHostListProvider implements StaticHostListProvider private static final Logger LOGGER = Logger.getLogger(ConnectionStringHostListProvider.class.getName()); final List hostList = new ArrayList<>(); - Properties properties; private boolean isInitialized = false; private final boolean isSingleWriterConnectionString; private final ConnectionUrlParser connectionUrlParser; @@ -75,11 +73,12 @@ private void init() throws SQLException { } this.hostList.addAll( this.connectionUrlParser.getHostsFromConnectionUrl(this.initialUrl, this.isSingleWriterConnectionString, - () -> this.hostListProviderService.getHostSpecBuilder())); + this.hostListProviderService::getHostSpecBuilder)); if (this.hostList.isEmpty()) { throw new SQLException(Messages.get("ConnectionStringHostListProvider.parsedListEmpty", new Object[] {this.initialUrl})); } + this.hostListProviderService.setInitialConnectionHostSpec(this.hostList.get(0)); this.isInitialized = true; } diff --git a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/DynamicHostListProvider.java 
b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/DynamicHostListProvider.java index 451c047f3..09d321c41 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/DynamicHostListProvider.java +++ b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/DynamicHostListProvider.java @@ -16,9 +16,7 @@ package software.amazon.jdbc.hostlistprovider; -import software.amazon.jdbc.HostListProvider; - -// A marker interface for providers that can fetch a host list, and it changes depending on database status -// A good example of such provider would be DB cluster provider (Aurora DB clusters, patroni DB clusters, etc.) -// where cluster topology (nodes, their roles, their statuses) changes over time. +// A marker interface for providers that can fetch a host list reflecting the current database topology. +// Examples include providers for Aurora or Multi-AZ clusters, where the cluster topology, status, and instance roles +// change over time. public interface DynamicHostListProvider extends HostListProvider { } diff --git a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/GlobalAuroraHostListProvider.java b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/GlobalAuroraHostListProvider.java new file mode 100644 index 000000000..682ebb31f --- /dev/null +++ b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/GlobalAuroraHostListProvider.java @@ -0,0 +1,73 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package software.amazon.jdbc.hostlistprovider; + +import java.sql.Connection; +import java.sql.SQLException; +import java.util.List; +import java.util.Map; +import java.util.Properties; +import java.util.logging.Logger; +import software.amazon.jdbc.AwsWrapperProperty; +import software.amazon.jdbc.HostSpec; +import software.amazon.jdbc.PropertyDefinition; +import software.amazon.jdbc.util.FullServicesContainer; +import software.amazon.jdbc.util.RdsUtils; + +public class GlobalAuroraHostListProvider extends RdsHostListProvider { + + public static final AwsWrapperProperty GLOBAL_CLUSTER_INSTANCE_HOST_PATTERNS = + new AwsWrapperProperty( + "globalClusterInstanceHostPatterns", + null, + "Comma-separated list of the cluster instance DNS patterns that will be used to " + + "build a complete instance endpoints. " + + "A \"?\" character in these patterns should be used as a placeholder for cluster instance names. " + + "This parameter is required for Global Aurora Databases. 
" + + "Each region in the Global Aurora Database should be specified in the list."); + + protected final RdsUtils rdsUtils = new RdsUtils(); + protected final GlobalAuroraTopologyUtils topologyUtils; + + protected Map instanceTemplatesByRegion; + + static { + PropertyDefinition.registerPluginProperties(GlobalAuroraHostListProvider.class); + } + + public GlobalAuroraHostListProvider( + GlobalAuroraTopologyUtils topologyUtils, Properties properties, String originalUrl, + FullServicesContainer servicesContainer) { + super(topologyUtils, properties, originalUrl, servicesContainer); + this.topologyUtils = topologyUtils; + } + + @Override + protected void initSettings() throws SQLException { + super.initSettings(); + + String instanceTemplates = GLOBAL_CLUSTER_INSTANCE_HOST_PATTERNS.getString(properties); + this.instanceTemplatesByRegion = + this.topologyUtils.parseInstanceTemplates(instanceTemplates, this::validateHostPatternSetting); + } + + @Override + protected List queryForTopology(final Connection conn) throws SQLException { + init(); + return this.topologyUtils.queryForTopology(conn, this.initialHostSpec, this.instanceTemplatesByRegion); + } +} diff --git a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/GlobalAuroraTopologyUtils.java b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/GlobalAuroraTopologyUtils.java new file mode 100644 index 000000000..4a5d5eeea --- /dev/null +++ b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/GlobalAuroraTopologyUtils.java @@ -0,0 +1,152 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package software.amazon.jdbc.hostlistprovider; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.SQLSyntaxErrorException; +import java.sql.Statement; +import java.sql.Timestamp; +import java.time.Instant; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.function.Consumer; +import java.util.logging.Logger; +import java.util.stream.Collectors; +import org.checkerframework.checker.nullness.qual.Nullable; +import software.amazon.jdbc.HostSpec; +import software.amazon.jdbc.HostSpecBuilder; +import software.amazon.jdbc.dialect.GlobalAuroraTopologyDialect; +import software.amazon.jdbc.util.ConnectionUrlParser; +import software.amazon.jdbc.util.LogUtils; +import software.amazon.jdbc.util.Messages; +import software.amazon.jdbc.util.Pair; +import software.amazon.jdbc.util.StringUtils; + +public class GlobalAuroraTopologyUtils extends AuroraTopologyUtils { + private static final Logger LOGGER = Logger.getLogger(GlobalAuroraTopologyUtils.class.getName()); + + protected final GlobalAuroraTopologyDialect dialect; + + public GlobalAuroraTopologyUtils(GlobalAuroraTopologyDialect dialect, HostSpecBuilder hostSpecBuilder) { + super(dialect, hostSpecBuilder); + this.dialect = dialect; + } + + public @Nullable List queryForTopology( + Connection conn, HostSpec initialHostSpec, Map instanceTemplatesByRegion) + throws SQLException { + int originalNetworkTimeout = setNetworkTimeout(conn); + try (final Statement stmt = conn.createStatement(); + final ResultSet rs = stmt.executeQuery(this.dialect.getTopologyQuery())) { + if (rs.getMetaData().getColumnCount() == 0) { + // We expect at least 4 columns. Note that the server may return 0 columns if failover has occurred. + LOGGER.finest(Messages.get("TopologyUtils.unexpectedTopologyQueryColumnCount")); + return null; + } + + return this.verifyWriter(this.getHosts(rs, initialHostSpec, instanceTemplatesByRegion)); + } catch (final SQLSyntaxErrorException e) { + throw new SQLException(Messages.get("TopologyUtils.invalidQuery"), e); + } finally { + if (originalNetworkTimeout == 0 && !conn.isClosed()) { + conn.setNetworkTimeout(networkTimeoutExecutor, originalNetworkTimeout); + } + } + } + + protected @Nullable List getHosts( + ResultSet rs, HostSpec initialHostSpec, Map instanceTemplatesByRegion) throws SQLException { + // Data in the result set is ordered by last update time, so the latest records are last. + // We add hosts to a map to ensure newer records are not overwritten by older ones. + Map hostsMap = new HashMap<>(); + while (rs.next()) { + try { + HostSpec host = createHost(rs, initialHostSpec, instanceTemplatesByRegion); + hostsMap.put(host.getHost(), host); + } catch (Exception e) { + LOGGER.finest(Messages.get("TopologyUtils.errorProcessingQueryResults", new Object[] {e.getMessage()})); + return null; + } + } + + return new ArrayList<>(hostsMap.values()); + } + + protected HostSpec createHost( + ResultSet rs, HostSpec initialHostSpec, Map instanceTemplatesByRegion) + throws SQLException { + // According to the topology query the result set should contain 4 columns: + // instance ID, 1/0 (writer/reader), node lag in time (msec), AWS region. 
+    String hostName = rs.getString(1);
+    final boolean isWriter = rs.getBoolean(2);
+    final float nodeLag = rs.getFloat(3);
+    final String awsRegion = rs.getString(4);
+
+    // Calculate weight based on node lag in time.
+    final long weight = Math.round(nodeLag) * 100L;
+
+    final HostSpec instanceTemplate = instanceTemplatesByRegion.get(awsRegion);
+    if (instanceTemplate == null) {
+      throw new SQLException(Messages.get(
+          "GlobalAuroraTopologyMonitor.cannotFindRegionTemplate", new Object[] {awsRegion}));
+    }
+
+    return createHost(
+        hostName, hostName, isWriter, weight, Timestamp.from(Instant.now()), initialHostSpec, instanceTemplate);
+  }
+
+  public @Nullable String getRegion(String instanceId, Connection conn) throws SQLException {
+    try (final PreparedStatement stmt = conn.prepareStatement(this.dialect.getRegionByInstanceIdQuery())) {
+      stmt.setString(1, instanceId);
+      try (final ResultSet rs = stmt.executeQuery()) {
+        if (rs.next()) {
+          String awsRegion = rs.getString(1);
+          return StringUtils.isNullOrEmpty(awsRegion) ? null : awsRegion;
+        }
+      }
+    }
+
+    return null;
+  }
+
+  public Map<String, HostSpec> parseInstanceTemplates(String instanceTemplatesString, Consumer<String> hostValidator)
+      throws SQLException {
+    if (StringUtils.isNullOrEmpty(instanceTemplatesString)) {
+      throw new SQLException(Messages.get("GlobalAuroraTopologyUtils.globalClusterInstanceHostPatternsRequired"));
+    }
+
+    Map<String, HostSpec> instanceTemplates = Arrays.stream(instanceTemplatesString.split(","))
+        .map(x -> ConnectionUrlParser.parseHostPortPairWithRegionPrefix(x.trim(), () -> hostSpecBuilder))
+        .collect(Collectors.toMap(
+            Pair::getValue1,
+            v -> {
+              hostValidator.accept(v.getValue2().getHost());
+              return v.getValue2();
+            }));
+    LOGGER.finest(Messages.get(
+        "GlobalAuroraTopologyUtils.detectedGdbPatterns",
+        new Object[] {LogUtils.toLogString(instanceTemplates)}));
+
+    return instanceTemplates;
+  }
+}
diff --git a/wrapper/src/main/java/software/amazon/jdbc/HostListProvider.java b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/HostListProvider.java
similarity index 89%
rename from wrapper/src/main/java/software/amazon/jdbc/HostListProvider.java
rename to wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/HostListProvider.java
index 0aa93714a..206a35415 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/HostListProvider.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/HostListProvider.java
@@ -14,11 +14,14 @@
  * limitations under the License.
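A quick worked check of the weight formula in `createHost` above, alongside the Aurora variant from earlier in this diff that also folds in CPU utilization. The values are made up for illustration:

```java
public class TopologyWeightExample {
  public static void main(String[] args) {
    double nodeLag = 2.4;          // replica lag reported by the topology query
    double cpuUtilization = 37.6;  // percent; Aurora variant only

    long globalWeight = Math.round(nodeLag) * 100L;                              // 2 * 100 = 200
    long auroraWeight = Math.round(nodeLag) * 100L + Math.round(cpuUtilization); // 200 + 38 = 238

    System.out.println(globalWeight + " " + auroraWeight); // prints: 200 238
  }
}
```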
*/ -package software.amazon.jdbc; +package software.amazon.jdbc.hostlistprovider; import java.sql.Connection; import java.sql.SQLException; import java.util.List; +import org.checkerframework.checker.nullness.qual.Nullable; +import software.amazon.jdbc.HostRole; +import software.amazon.jdbc.HostSpec; public interface HostListProvider { @@ -40,6 +43,7 @@ public interface HostListProvider { */ HostRole getHostRole(Connection connection) throws SQLException; + @Nullable HostSpec identifyConnection(Connection connection) throws SQLException; String getClusterId() throws UnsupportedOperationException, SQLException; diff --git a/wrapper/src/main/java/software/amazon/jdbc/HostListProviderService.java b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/HostListProviderService.java similarity index 89% rename from wrapper/src/main/java/software/amazon/jdbc/HostListProviderService.java rename to wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/HostListProviderService.java index b2f6b5353..0413cb423 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/HostListProviderService.java +++ b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/HostListProviderService.java @@ -14,9 +14,11 @@ * limitations under the License. */ -package software.amazon.jdbc; +package software.amazon.jdbc.hostlistprovider; import java.sql.Connection; +import software.amazon.jdbc.HostSpec; +import software.amazon.jdbc.HostSpecBuilder; import software.amazon.jdbc.dialect.Dialect; public interface HostListProviderService { diff --git a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/MultiAzTopologyUtils.java b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/MultiAzTopologyUtils.java new file mode 100644 index 000000000..1e1045002 --- /dev/null +++ b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/MultiAzTopologyUtils.java @@ -0,0 +1,117 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
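Because `HostListProvider` and `HostListProviderService` moved from `software.amazon.jdbc` to `software.amazon.jdbc.hostlistprovider` in the hunks above, downstream code compiled against 2.x only needs its imports updated. A minimal compile-check sketch (the class name is hypothetical):

```java
import software.amazon.jdbc.hostlistprovider.HostListProvider;        // was software.amazon.jdbc.HostListProvider
import software.amazon.jdbc.hostlistprovider.HostListProviderService; // was software.amazon.jdbc.HostListProviderService

public class ImportMigrationExample {
  // Referencing the types is enough to verify the new imports resolve.
  private HostListProvider provider;
  private HostListProviderService service;
}
```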
+ */ + +package software.amazon.jdbc.hostlistprovider; + +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.sql.Timestamp; +import java.time.Instant; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.logging.Logger; +import org.checkerframework.checker.nullness.qual.Nullable; +import software.amazon.jdbc.HostSpec; +import software.amazon.jdbc.HostSpecBuilder; +import software.amazon.jdbc.dialect.MultiAzClusterDialect; +import software.amazon.jdbc.util.Messages; +import software.amazon.jdbc.util.StringUtils; + +public class MultiAzTopologyUtils extends TopologyUtils { + private static final Logger LOGGER = Logger.getLogger(MultiAzTopologyUtils.class.getName()); + + protected final MultiAzClusterDialect dialect; + + public MultiAzTopologyUtils(MultiAzClusterDialect dialect, HostSpecBuilder hostSpecBuilder) { + super(dialect, hostSpecBuilder); + this.dialect = dialect; + } + + @Override + protected @Nullable List getHosts( + Connection conn, ResultSet rs, HostSpec initialHostSpec, HostSpec instanceTemplate) + throws SQLException { + String writerId = this.getWriterId(conn); + + // Data in the result set is ordered by last update time, so the latest records are last. + // We add hosts to a map to ensure newer records are not overwritten by older ones. + Map hostsMap = new HashMap<>(); + while (rs.next()) { + try { + HostSpec host = createHost(rs, initialHostSpec, instanceTemplate, writerId); + hostsMap.put(host.getHost(), host); + } catch (Exception e) { + LOGGER.finest(Messages.get("TopologyUtils.errorProcessingQueryResults", new Object[]{e.getMessage()})); + return null; + } + } + + return new ArrayList<>(hostsMap.values()); + } + + @Override + public boolean isWriterInstance(final Connection connection) throws SQLException { + try (final Statement stmt = connection.createStatement()) { + + try (final ResultSet rs = stmt.executeQuery(this.dialect.getWriterIdQuery())) { + // When connected to a writer, the result is empty, otherwise it contains a single row. 
+ return !rs.next(); + } + } + } + + protected @Nullable String getWriterId(Connection connection) throws SQLException { + try (final Statement stmt = connection.createStatement()) { + try (final ResultSet rs = stmt.executeQuery(this.dialect.getWriterIdQuery())) { + if (rs.next()) { + String writerId = rs.getString(this.dialect.getWriterIdColumnName()); + if (!StringUtils.isNullOrEmpty(writerId)) { + return writerId; + } + } + } + + // The writer ID is only returned when connected to a reader, so if the query does not return a value, it + // means we are connected to a writer + try (final ResultSet rs = stmt.executeQuery(this.dialect.getInstanceIdQuery())) { + if (rs.next()) { + return rs.getString(1); + } + } + } + + return null; + } + + protected HostSpec createHost( + final ResultSet rs, + final HostSpec initialHostSpec, + final HostSpec instanceTemplate, + final @Nullable String writerId) throws SQLException { + + String endpoint = rs.getString("endpoint"); // "instance-name.XYZ.us-west-2.rds.amazonaws.com" + String instanceName = endpoint.substring(0, endpoint.indexOf(".")); // "instance-name" + String hostId = rs.getString("id"); // "1034958454" + final boolean isWriter = hostId.equals(writerId); + + return createHost( + hostId, instanceName, isWriter, 0, Timestamp.from(Instant.now()), initialHostSpec, instanceTemplate); + } +} diff --git a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/RdsHostListProvider.java b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/RdsHostListProvider.java index 738eebcc3..4bd523e39 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/RdsHostListProvider.java +++ b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/RdsHostListProvider.java @@ -17,45 +17,29 @@ package software.amazon.jdbc.hostlistprovider; import java.sql.Connection; -import java.sql.ResultSet; import java.sql.SQLException; -import java.sql.SQLSyntaxErrorException; -import java.sql.Statement; -import java.sql.Timestamp; -import java.time.Instant; import java.util.ArrayList; import java.util.Collections; -import java.util.Comparator; -import java.util.HashMap; import java.util.List; -import java.util.Map; -import java.util.Map.Entry; import java.util.Objects; import java.util.Properties; -import java.util.UUID; -import java.util.concurrent.Executor; import java.util.concurrent.TimeUnit; import java.util.concurrent.locks.ReentrantLock; import java.util.logging.Logger; -import java.util.stream.Collectors; -import org.checkerframework.checker.nullness.qual.NonNull; import org.checkerframework.checker.nullness.qual.Nullable; import software.amazon.jdbc.AwsWrapperProperty; -import software.amazon.jdbc.HostListProviderService; import software.amazon.jdbc.HostRole; import software.amazon.jdbc.HostSpec; import software.amazon.jdbc.HostSpecBuilder; import software.amazon.jdbc.PropertyDefinition; -import software.amazon.jdbc.hostavailability.HostAvailability; import software.amazon.jdbc.util.ConnectionUrlParser; import software.amazon.jdbc.util.FullServicesContainer; +import software.amazon.jdbc.util.LogUtils; import software.amazon.jdbc.util.Messages; +import software.amazon.jdbc.util.Pair; import software.amazon.jdbc.util.RdsUrlType; import software.amazon.jdbc.util.RdsUtils; -import software.amazon.jdbc.util.StringUtils; -import software.amazon.jdbc.util.SynchronousExecutor; import software.amazon.jdbc.util.Utils; -import software.amazon.jdbc.util.storage.CacheMap; public class RdsHostListProvider implements DynamicHostListProvider { @@ -70,10 
+54,10 @@ public class RdsHostListProvider implements DynamicHostListProvider {
           + "after which it will be updated during the next interaction with the connection.");
 
   public static final AwsWrapperProperty CLUSTER_ID = new AwsWrapperProperty(
-      "clusterId", "",
+      "clusterId", "1",
       "A unique identifier for the cluster. "
           + "Connections with the same cluster id share a cluster topology cache. "
-          + "If unspecified, a cluster id is automatically created for AWS RDS clusters.");
+          + "If unspecified, the cluster id defaults to '1'.");
 
   public static final AwsWrapperProperty CLUSTER_INSTANCE_HOST_PATTERN =
       new AwsWrapperProperty(
@@ -84,20 +68,17 @@ public class RdsHostListProvider implements DynamicHostListProvider {
           + "This pattern is required to be specified for IP address or custom domain connections to AWS RDS "
           + "clusters. Otherwise, if unspecified, the pattern will be automatically created for AWS RDS clusters.");
 
-  protected static final Executor networkTimeoutExecutor = new SynchronousExecutor();
   protected static final RdsUtils rdsHelper = new RdsUtils();
   protected static final ConnectionUrlParser connectionUrlParser = new ConnectionUrlParser();
   protected static final int defaultTopologyQueryTimeoutMs = 5000;
-  protected static final long suggestedClusterIdRefreshRateNano = TimeUnit.MINUTES.toNanos(10);
-  protected static final CacheMap<String, String> suggestedPrimaryClusterIdCache = new CacheMap<>();
-  protected static final CacheMap<String, Boolean> primaryClusterIdCache = new CacheMap<>();
 
+  protected final ReentrantLock lock = new ReentrantLock();
+  protected final Properties properties;
+  protected final String originalUrl;
   protected final FullServicesContainer servicesContainer;
   protected final HostListProviderService hostListProviderService;
-  protected final String originalUrl;
-  protected final String topologyQuery;
-  protected final String nodeIdQuery;
-  protected final String isReaderQuery;
+  protected final TopologyUtils topologyUtils;
+
   protected RdsUrlType rdsUrlType;
   protected long refreshRateNano = CLUSTER_TOPOLOGY_REFRESH_RATE_MS.defaultValue != null
       ? TimeUnit.MILLISECONDS.toNanos(Long.parseLong(CLUSTER_TOPOLOGY_REFRESH_RATE_MS.defaultValue))
@@ -105,37 +86,25 @@ public class RdsHostListProvider implements DynamicHostListProvider {
   protected List<HostSpec> hostList = new ArrayList<>();
   protected List<HostSpec> initialHostList = new ArrayList<>();
   protected HostSpec initialHostSpec;
-
-  protected final ReentrantLock lock = new ReentrantLock();
   protected String clusterId;
-  protected HostSpec clusterInstanceTemplate;
-
-  // A primary clusterId is a clusterId that is based off of a cluster endpoint URL
-  // (rather than a GUID or a value provided by the user).
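Tying this hunk back to the 3.0 migration notes: with the suggested-clusterId machinery deleted and the default now a shared "1", applications that talk to more than one cluster should set a distinct `clusterId` per data source. A hedged sketch with placeholder endpoints and credentials:

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.util.Properties;

public class ClusterIdExample {
  public static void main(String[] args) throws Exception {
    Properties clusterA = new Properties();
    clusterA.setProperty("user", "admin");
    clusterA.setProperty("password", "secret");
    clusterA.setProperty("clusterId", "orders-cluster");  // distinct id: separate topology cache

    Properties clusterB = new Properties();
    clusterB.setProperty("user", "admin");
    clusterB.setProperty("password", "secret");
    clusterB.setProperty("clusterId", "billing-cluster");

    try (Connection a = DriverManager.getConnection(
             "jdbc:aws-wrapper:mysql://orders.cluster-xyz.us-east-1.rds.amazonaws.com/db", clusterA);
         Connection b = DriverManager.getConnection(
             "jdbc:aws-wrapper:mysql://billing.cluster-abc.us-east-1.rds.amazonaws.com/db", clusterB)) {
      System.out.println(a.isValid(5) && b.isValid(5));
    }
  }
}
```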
- protected boolean isPrimaryClusterId; + protected HostSpec instanceTemplate; protected volatile boolean isInitialized = false; - protected Properties properties; - static { PropertyDefinition.registerPluginProperties(RdsHostListProvider.class); } public RdsHostListProvider( + final TopologyUtils topologyUtils, final Properties properties, final String originalUrl, - final FullServicesContainer servicesContainer, - final String topologyQuery, - final String nodeIdQuery, - final String isReaderQuery) { + final FullServicesContainer servicesContainer) { + this.topologyUtils = topologyUtils; this.properties = properties; this.originalUrl = originalUrl; this.servicesContainer = servicesContainer; this.hostListProviderService = servicesContainer.getHostListProviderService(); - this.topologyQuery = topologyQuery; - this.nodeIdQuery = nodeIdQuery; - this.isReaderQuery = isReaderQuery; } protected void init() throws SQLException { @@ -148,81 +117,54 @@ protected void init() throws SQLException { if (this.isInitialized) { return; } - - // initial topology is based on connection string - this.initialHostList = - connectionUrlParser.getHostsFromConnectionUrl(this.originalUrl, false, - this.hostListProviderService::getHostSpecBuilder); - if (this.initialHostList == null || this.initialHostList.isEmpty()) { - throw new SQLException(Messages.get("RdsHostListProvider.parsedListEmpty", - new Object[] {this.originalUrl})); - } - this.initialHostSpec = this.initialHostList.get(0); - this.hostListProviderService.setInitialConnectionHostSpec(this.initialHostSpec); - - this.clusterId = UUID.randomUUID().toString(); - this.isPrimaryClusterId = false; - this.refreshRateNano = - TimeUnit.MILLISECONDS.toNanos(CLUSTER_TOPOLOGY_REFRESH_RATE_MS.getInteger(properties)); - - HostSpecBuilder hostSpecBuilder = this.hostListProviderService.getHostSpecBuilder(); - String clusterInstancePattern = CLUSTER_INSTANCE_HOST_PATTERN.getString(this.properties); - if (clusterInstancePattern != null) { - this.clusterInstanceTemplate = - ConnectionUrlParser.parseHostPortPair(clusterInstancePattern, () -> hostSpecBuilder); - } else { - this.clusterInstanceTemplate = - hostSpecBuilder - .host(rdsHelper.getRdsInstanceHostPattern(this.initialHostSpec.getHost())) - .hostId(this.initialHostSpec.getHostId()) - .port(this.initialHostSpec.getPort()) - .build(); - } - - validateHostPatternSetting(this.clusterInstanceTemplate.getHost()); - - this.rdsUrlType = rdsHelper.identifyRdsType(this.initialHostSpec.getHost()); - - final String clusterIdSetting = CLUSTER_ID.getString(this.properties); - if (!StringUtils.isNullOrEmpty(clusterIdSetting)) { - this.clusterId = clusterIdSetting; - } else if (rdsUrlType == RdsUrlType.RDS_PROXY) { - // Each proxy is associated with a single cluster, so it's safe to use RDS Proxy Url as cluster - // identification - this.clusterId = this.initialHostSpec.getUrl(); - } else if (rdsUrlType.isRds()) { - final ClusterSuggestedResult clusterSuggestedResult = - getSuggestedClusterId(this.initialHostSpec.getHostAndPort()); - if (clusterSuggestedResult != null && !StringUtils.isNullOrEmpty(clusterSuggestedResult.clusterId)) { - this.clusterId = clusterSuggestedResult.clusterId; - this.isPrimaryClusterId = clusterSuggestedResult.isPrimaryClusterId; - } else { - final String clusterRdsHostUrl = - rdsHelper.getRdsClusterHostUrl(this.initialHostSpec.getHost()); - if (!StringUtils.isNullOrEmpty(clusterRdsHostUrl)) { - this.clusterId = this.clusterInstanceTemplate.isPortSpecified() - ? 
String.format("%s:%s", clusterRdsHostUrl, this.clusterInstanceTemplate.getPort()) - : clusterRdsHostUrl; - this.isPrimaryClusterId = true; - primaryClusterIdCache.put(this.clusterId, true, suggestedClusterIdRefreshRateNano); - } - } - } - + this.initSettings(); this.isInitialized = true; } finally { lock.unlock(); } } + protected void initSettings() throws SQLException { + // The initial topology is based on the connection string. + this.initialHostList = + connectionUrlParser.getHostsFromConnectionUrl(this.originalUrl, false, + this.hostListProviderService::getHostSpecBuilder); + if (this.initialHostList == null || this.initialHostList.isEmpty()) { + throw new SQLException(Messages.get("RdsHostListProvider.parsedListEmpty", new Object[] {this.originalUrl})); + } + this.initialHostSpec = this.initialHostList.get(0); + this.hostListProviderService.setInitialConnectionHostSpec(this.initialHostSpec); + + this.clusterId = CLUSTER_ID.getString(this.properties); + this.refreshRateNano = + TimeUnit.MILLISECONDS.toNanos(CLUSTER_TOPOLOGY_REFRESH_RATE_MS.getInteger(properties)); + + HostSpecBuilder hostSpecBuilder = this.hostListProviderService.getHostSpecBuilder(); + String clusterInstancePattern = CLUSTER_INSTANCE_HOST_PATTERN.getString(this.properties); + if (clusterInstancePattern != null) { + this.instanceTemplate = + ConnectionUrlParser.parseHostPortPair(clusterInstancePattern, () -> hostSpecBuilder); + } else { + this.instanceTemplate = + hostSpecBuilder + .host(rdsHelper.getRdsInstanceHostPattern(this.initialHostSpec.getHost())) + .hostId(this.initialHostSpec.getHostId()) + .port(this.initialHostSpec.getPort()) + .build(); + } + + validateHostPatternSetting(this.instanceTemplate.getHost()); + this.rdsUrlType = rdsHelper.identifyRdsType(this.initialHostSpec.getHost()); + } + /** * Get cluster topology. It may require an extra call to database to fetch the latest topology. A * cached copy of topology is returned if it's not yet outdated (controlled by {@link * #refreshRateNano}). * - * @param conn A connection to database to fetch the latest topology, if needed. + * @param conn A connection to database to fetch the latest topology, if needed. * @param forceUpdate If true, it forces a service to ignore cached copy of topology and to fetch - * a fresh one. + * a fresh one. * @return a list of hosts that describes cluster topology. A writer is always at position 0. * Returns an empty list if isn't available or is invalid (doesn't contain a writer). * @throws SQLException if errors occurred while retrieving the topology. @@ -230,43 +172,18 @@ protected void init() throws SQLException { protected FetchTopologyResult getTopology(final Connection conn, final boolean forceUpdate) throws SQLException { init(); - final String suggestedPrimaryClusterId = suggestedPrimaryClusterIdCache.get(this.clusterId); - - // Change clusterId by accepting a suggested one - if (!StringUtils.isNullOrEmpty(suggestedPrimaryClusterId) - && !this.clusterId.equals(suggestedPrimaryClusterId)) { - - final String oldClusterId = this.clusterId; - this.clusterId = suggestedPrimaryClusterId; - this.isPrimaryClusterId = true; - this.clusterIdChanged(oldClusterId); - } - final List storedHosts = this.getStoredTopology(); - - // This clusterId is a primary one and is about to create a new entry in the cache. - // When a primary entry is created it needs to be suggested for other (non-primary) entries. - // Remember a flag to do suggestion after cache is updated. 
- final boolean needToSuggest = storedHosts == null && this.isPrimaryClusterId; - if (storedHosts == null || forceUpdate) { - - // need to re-fetch topology - + // We need to re-fetch topology. if (conn == null) { - // can't fetch the latest topology since no connection - // return original hosts parsed from connection string + // We cannot fetch the latest topology since we do not have access to a connection, so we return the original + // hosts parsed from the connection string. return new FetchTopologyResult(false, this.initialHostList); } - // fetch topology from the DB - final List hosts = queryForTopology(conn); - + final List hosts = this.queryForTopology(conn); if (!Utils.isNullOrEmpty(hosts)) { this.servicesContainer.getStorageService().set(this.clusterId, new Topology(hosts)); - if (needToSuggest) { - this.suggestPrimaryCluster(hosts); - } return new FetchTopologyResult(false, hosts); } } @@ -274,78 +191,11 @@ protected FetchTopologyResult getTopology(final Connection conn, final boolean f if (storedHosts == null) { return new FetchTopologyResult(false, this.initialHostList); } else { - // use cached data + // Return the cached data. return new FetchTopologyResult(true, storedHosts); } } - protected void clusterIdChanged(final String oldClusterId) throws SQLException { - // do nothing - } - - protected ClusterSuggestedResult getSuggestedClusterId(final String url) { - Map entries = this.servicesContainer.getStorageService().getEntries(Topology.class); - if (entries == null) { - return null; - } - - for (final Entry entry : entries.entrySet()) { - final String key = entry.getKey(); // clusterId - final List hosts = entry.getValue().getHosts(); - final boolean isPrimaryCluster = primaryClusterIdCache.get(key, false, - suggestedClusterIdRefreshRateNano); - if (key.equals(url)) { - return new ClusterSuggestedResult(url, isPrimaryCluster); - } - if (hosts == null) { - continue; - } - for (final HostSpec host : hosts) { - if (host.getHostAndPort().equals(url)) { - LOGGER.finest(() -> Messages.get("RdsHostListProvider.suggestedClusterId", - new Object[] {key, url})); - return new ClusterSuggestedResult(key, isPrimaryCluster); - } - } - } - return null; - } - - protected void suggestPrimaryCluster(final @NonNull List primaryClusterHosts) { - if (Utils.isNullOrEmpty(primaryClusterHosts)) { - return; - } - - Map entries = this.servicesContainer.getStorageService().getEntries(Topology.class); - if (entries == null) { - return; - } - - for (final Entry entry : entries.entrySet()) { - final String clusterId = entry.getKey(); - final List clusterHosts = entry.getValue().getHosts(); - final boolean isPrimaryCluster = primaryClusterIdCache.get(clusterId, false, - suggestedClusterIdRefreshRateNano); - final String suggestedPrimaryClusterId = suggestedPrimaryClusterIdCache.get(clusterId); - if (isPrimaryCluster - || !StringUtils.isNullOrEmpty(suggestedPrimaryClusterId) - || Utils.isNullOrEmpty(clusterHosts)) { - continue; - } - - // The entry is non-primary - for (final HostSpec host : clusterHosts) { - if (Utils.containsHostAndPort(primaryClusterHosts, host.getHostAndPort())) { - // Instance on this cluster matches with one of the instance on primary cluster - // Suggest the primary clusterId to this entry - suggestedPrimaryClusterIdCache.put(clusterId, this.clusterId, - suggestedClusterIdRefreshRateNano); - break; - } - } - } - } - /** * Obtain a cluster topology from database. 
* @@ -354,142 +204,8 @@ protected void suggestPrimaryCluster(final @NonNull List primaryCluste * @throws SQLException if errors occurred while retrieving the topology. */ protected List queryForTopology(final Connection conn) throws SQLException { - int networkTimeout = -1; - try { - networkTimeout = conn.getNetworkTimeout(); - // The topology query is not monitored by the EFM plugin, so it needs a socket timeout - if (networkTimeout == 0) { - conn.setNetworkTimeout(networkTimeoutExecutor, defaultTopologyQueryTimeoutMs); - } - } catch (SQLException e) { - LOGGER.warning(() -> Messages.get("RdsHostListProvider.errorGettingNetworkTimeout", - new Object[] {e.getMessage()})); - } - - try (final Statement stmt = conn.createStatement(); - final ResultSet resultSet = stmt.executeQuery(this.topologyQuery)) { - return processQueryResults(resultSet); - } catch (final SQLSyntaxErrorException e) { - throw new SQLException(Messages.get("RdsHostListProvider.invalidQuery"), e); - } finally { - if (networkTimeout == 0 && !conn.isClosed()) { - conn.setNetworkTimeout(networkTimeoutExecutor, networkTimeout); - } - } - } - - /** - * Form a list of hosts from the results of the topology query. - * - * @param resultSet The results of the topology query - * @return a list of {@link HostSpec} objects representing - * the topology that was returned by the - * topology query. The list will be empty if the topology query returned an invalid topology - * (no writer instance). - */ - private List processQueryResults(final ResultSet resultSet) throws SQLException { - - final HashMap hostMap = new HashMap<>(); - - // Data is result set is ordered by last updated time so the latest records go last. - // When adding hosts to a map, the newer records replace the older ones. - while (resultSet.next()) { - final HostSpec host = createHost(resultSet); - hostMap.put(host.getHost(), host); - } - - final List hosts = new ArrayList<>(); - final List writers = new ArrayList<>(); - - for (final HostSpec host : hostMap.values()) { - if (host.getRole() != HostRole.WRITER) { - hosts.add(host); - } else { - writers.add(host); - } - } - - int writerCount = writers.size(); - - if (writerCount == 0) { - LOGGER.severe( - () -> Messages.get( - "RdsHostListProvider.invalidTopology")); - hosts.clear(); - } else if (writerCount == 1) { - hosts.add(writers.get(0)); - } else { - // Take the latest updated writer node as the current writer. All others will be ignored. - List sortedWriters = writers.stream() - .sorted(Comparator.comparing(HostSpec::getLastUpdateTime, Comparator.nullsLast(Comparator.reverseOrder()))) - .collect(Collectors.toList()); - hosts.add(sortedWriters.get(0)); - } - - return hosts; - } - - /** - * Creates an instance of HostSpec which captures details about a connectable host. - * - * @param resultSet the result set from querying the topology - * @return a {@link HostSpec} instance for a specific instance from the cluster - * @throws SQLException If unable to retrieve the hostName from the result set - */ - protected HostSpec createHost(final ResultSet resultSet) throws SQLException { - // According to the topology query the result set - // should contain 4 columns: node ID, 1/0 (writer/reader), CPU utilization, node lag in time. 
- String hostName = resultSet.getString(1); - final boolean isWriter = resultSet.getBoolean(2); - final double cpuUtilization = resultSet.getDouble(3); - final double nodeLag = resultSet.getDouble(4); - Timestamp lastUpdateTime; - try { - lastUpdateTime = resultSet.getTimestamp(5); - } catch (Exception e) { - lastUpdateTime = Timestamp.from(Instant.now()); - } - - // Calculate weight based on node lag in time and CPU utilization. - final long weight = Math.round(nodeLag) * 100L + Math.round(cpuUtilization); - - return createHost(hostName, isWriter, weight, lastUpdateTime); - } - - protected HostSpec createHost( - String host, - final boolean isWriter, - final long weight, - final Timestamp lastUpdateTime) { - - host = host == null ? "?" : host; - final String endpoint = getHostEndpoint(host); - final int port = this.clusterInstanceTemplate.isPortSpecified() - ? this.clusterInstanceTemplate.getPort() - : this.initialHostSpec.getPort(); - - final HostSpec hostSpec = this.hostListProviderService.getHostSpecBuilder() - .host(endpoint) - .port(port) - .role(isWriter ? HostRole.WRITER : HostRole.READER) - .availability(HostAvailability.AVAILABLE) - .weight(weight) - .lastUpdateTime(lastUpdateTime) - .build(); - hostSpec.addAlias(host); - hostSpec.setHostId(host); - return hostSpec; - } - - /** - * Build a host dns endpoint based on host/node name. - * - * @param nodeName A host name. - * @return Host dns endpoint - */ - protected String getHostEndpoint(final String nodeName) { - final String host = this.clusterInstanceTemplate.getHost(); - return host.replace("?", nodeName); + init(); + return this.topologyUtils.queryForTopology(conn, this.initialHostSpec, this.instanceTemplate); } /** @@ -503,14 +219,6 @@ protected String getHostEndpoint(final String nodeName) { return topology == null ? null : topology.getHosts(); } - /** - * Clear topology cache for all clusters. - */ - public static void clearAll() { - primaryClusterIdCache.clear(); - suggestedPrimaryClusterIdCache.clear(); - } - /** * Clear topology cache for the current cluster. */ @@ -531,7 +239,7 @@ public List refresh(final Connection connection) throws SQLException { : this.hostListProviderService.getCurrentConnection(); final FetchTopologyResult results = getTopology(currentConnection, false); - LOGGER.finest(() -> Utils.logTopology(results.hosts, results.isCachedData ? "[From cache] Topology:" : null)); + LOGGER.finest(() -> LogUtils.logTopology(results.hosts, results.isCachedData ? "[From cache] Topology:" : null)); this.hostList = results.hosts; return Collections.unmodifiableList(hostList); @@ -550,7 +258,7 @@ public List forceRefresh(final Connection connection) throws SQLExcept : this.hostListProviderService.getCurrentConnection(); final FetchTopologyResult results = getTopology(currentConnection, true); - LOGGER.finest(() -> Utils.logTopology(results.hosts)); + LOGGER.finest(() -> LogUtils.logTopology(results.hosts)); this.hostList = results.hosts; return Collections.unmodifiableList(this.hostList); } @@ -560,30 +268,22 @@ public RdsUrlType getRdsUrlType() throws SQLException { return this.rdsUrlType; } - private void validateHostPatternSetting(final String hostPattern) { + protected void validateHostPatternSetting(final String hostPattern) { if (!rdsHelper.isDnsPatternValid(hostPattern)) { - // "Invalid value for the 'clusterInstanceHostPattern' configuration setting - the host - // pattern must contain a '?' 
- // character as a placeholder for the DB instance identifiers of the instances in the cluster" final String message = Messages.get("RdsHostListProvider.invalidPattern"); LOGGER.severe(message); throw new RuntimeException(message); } final RdsUrlType rdsUrlType = rdsHelper.identifyRdsType(hostPattern); - if (rdsUrlType == RdsUrlType.RDS_PROXY) { - // "An RDS Proxy url can't be used as the 'clusterInstanceHostPattern' configuration setting." - final String message = - Messages.get("RdsHostListProvider.clusterInstanceHostPatternNotSupportedForRDSProxy"); + if (rdsUrlType == RdsUrlType.RDS_PROXY || rdsUrlType == RdsUrlType.RDS_PROXY_ENDPOINT) { + final String message = Messages.get("RdsHostListProvider.clusterInstanceHostPatternNotSupportedForRDSProxy"); LOGGER.severe(message); throw new RuntimeException(message); } if (rdsUrlType == RdsUrlType.RDS_CUSTOM_CLUSTER) { - // "An RDS Custom Cluster endpoint can't be used as the 'clusterInstanceHostPattern' - // configuration setting." - final String message = - Messages.get("RdsHostListProvider.clusterInstanceHostPatternNotSupportedForRdsCustom"); + final String message = Messages.get("RdsHostListProvider.clusterInstanceHostPatternNotSupportedForRdsCustom"); LOGGER.severe(message); throw new RuntimeException(message); } @@ -602,64 +302,54 @@ public FetchTopologyResult(final boolean isCachedData, final List host @Override public HostRole getHostRole(Connection conn) throws SQLException { - try (final Statement stmt = conn.createStatement(); - final ResultSet rs = stmt.executeQuery(this.isReaderQuery)) { - if (rs.next()) { - boolean isReader = rs.getBoolean(1); - return isReader ? HostRole.READER : HostRole.WRITER; - } - } catch (SQLException e) { - throw new SQLException(Messages.get("RdsHostListProvider.errorGettingHostRole"), e); - } - - throw new SQLException(Messages.get("RdsHostListProvider.errorGettingHostRole")); + init(); + return this.topologyUtils.getHostRole(conn); } @Override - public HostSpec identifyConnection(Connection connection) throws SQLException { - try (final Statement stmt = connection.createStatement(); - final ResultSet resultSet = stmt.executeQuery(this.nodeIdQuery)) { - if (resultSet.next()) { - final String instanceName = resultSet.getString(1); + public @Nullable HostSpec identifyConnection(Connection connection) throws SQLException { + init(); + try { + Pair instanceIds = this.topologyUtils.getInstanceId(connection); + if (instanceIds == null) { + throw new SQLException(Messages.get("RdsHostListProvider.errorIdentifyConnection")); + } - List topology = this.refresh(connection); + List topology = this.refresh(connection); + boolean isForcedRefresh = false; + if (topology == null) { + topology = this.forceRefresh(connection); + isForcedRefresh = true; + } - boolean isForcedRefresh = false; - if (topology == null) { - topology = this.forceRefresh(connection); - isForcedRefresh = true; - } + if (topology == null) { + return null; + } + String instanceName = instanceIds.getValue2(); + HostSpec foundHost = topology + .stream() + .filter(host -> Objects.equals(instanceName, host.getHostId())) + .findAny() + .orElse(null); + + if (foundHost == null && !isForcedRefresh) { + topology = this.forceRefresh(connection); if (topology == null) { return null; } - HostSpec foundHost = topology + foundHost = topology .stream() .filter(host -> Objects.equals(instanceName, host.getHostId())) .findAny() .orElse(null); - - if (foundHost == null && !isForcedRefresh) { - topology = this.forceRefresh(connection); - if (topology == null) { - 
return null; - } - - foundHost = topology - .stream() - .filter(host -> Objects.equals(instanceName, host.getHostId())) - .findAny() - .orElse(null); - } - - return foundHost; } + + return foundHost; } catch (final SQLException e) { throw new SQLException(Messages.get("RdsHostListProvider.errorIdentifyConnection"), e); } - - throw new SQLException(Messages.get("RdsHostListProvider.errorIdentifyConnection")); } @Override @@ -667,15 +357,4 @@ public String getClusterId() throws UnsupportedOperationException, SQLException init(); return this.clusterId; } - - public static class ClusterSuggestedResult { - - public String clusterId; - public boolean isPrimaryClusterId; - - public ClusterSuggestedResult(final String clusterId, final boolean isPrimaryClusterId) { - this.clusterId = clusterId; - this.isPrimaryClusterId = isPrimaryClusterId; - } - } } diff --git a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/RdsMultiAzDbClusterListProvider.java b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/RdsMultiAzDbClusterListProvider.java deleted file mode 100644 index a63323176..000000000 --- a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/RdsMultiAzDbClusterListProvider.java +++ /dev/null @@ -1,208 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package software.amazon.jdbc.hostlistprovider; - -import java.sql.Connection; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.SQLSyntaxErrorException; -import java.sql.Statement; -import java.sql.Timestamp; -import java.time.Instant; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Properties; -import java.util.logging.Logger; -import software.amazon.jdbc.HostRole; -import software.amazon.jdbc.HostSpec; -import software.amazon.jdbc.hostavailability.HostAvailability; -import software.amazon.jdbc.util.FullServicesContainer; -import software.amazon.jdbc.util.Messages; - -public class RdsMultiAzDbClusterListProvider extends RdsHostListProvider { - private final String fetchWriterNodeQuery; - private final String fetchWriterNodeQueryHeader; - static final Logger LOGGER = Logger.getLogger(RdsMultiAzDbClusterListProvider.class.getName()); - - public RdsMultiAzDbClusterListProvider( - final Properties properties, - final String originalUrl, - final FullServicesContainer servicesContainer, - final String topologyQuery, - final String nodeIdQuery, - final String isReaderQuery, - final String fetchWriterNodeQuery, - final String fetchWriterNodeQueryHeader - ) { - super(properties, - originalUrl, - servicesContainer, - topologyQuery, - nodeIdQuery, - isReaderQuery); - this.fetchWriterNodeQuery = fetchWriterNodeQuery; - this.fetchWriterNodeQueryHeader = fetchWriterNodeQueryHeader; - } - - /** - * Obtain a cluster topology from database. - * - * @param conn A connection to database to fetch the latest topology. 
- * @return a list of {@link HostSpec} objects representing the topology - * @throws SQLException if errors occurred while retrieving the topology. - */ - protected List queryForTopology(final Connection conn) throws SQLException { - int networkTimeout = -1; - try { - networkTimeout = conn.getNetworkTimeout(); - // The topology query is not monitored by the EFM plugin, so it needs a socket timeout - if (networkTimeout == 0) { - conn.setNetworkTimeout(networkTimeoutExecutor, defaultTopologyQueryTimeoutMs); - } - } catch (SQLException e) { - LOGGER.warning(() -> Messages.get("RdsHostListProvider.errorGettingNetworkTimeout", - new Object[] {e.getMessage()})); - } - - try { - final Statement stmt = conn.createStatement(); - String writerNodeId = processWriterNodeId(stmt.executeQuery(this.fetchWriterNodeQuery)); - if (writerNodeId == null) { - final ResultSet nodeIdResultSet = stmt.executeQuery(this.nodeIdQuery); - while (nodeIdResultSet.next()) { - writerNodeId = nodeIdResultSet.getString(1); - } - } - final ResultSet topologyResultSet = stmt.executeQuery(this.topologyQuery); - return processTopologyQueryResults(topologyResultSet, writerNodeId); - } catch (final SQLSyntaxErrorException e) { - throw new SQLException(Messages.get("RdsHostListProvider.invalidQuery"), e); - } finally { - if (networkTimeout == 0 && !conn.isClosed()) { - conn.setNetworkTimeout(networkTimeoutExecutor, networkTimeout); - } - } - } - - /** - * Get writer node ID. - * - * @param fetchWriterNodeResultSet A ResultSet of writer node query - * @return String The ID of a writer node - * @throws SQLException if errors occurred while retrieving the topology - */ - private String processWriterNodeId(final ResultSet fetchWriterNodeResultSet) throws SQLException { - String writerNodeId = null; - if (fetchWriterNodeResultSet.next()) { - writerNodeId = fetchWriterNodeResultSet.getString(fetchWriterNodeQueryHeader); - } - return writerNodeId; - } - - /** - * Form a list of hosts from the results of the topology query. - * - * @param topologyResultSet The results of the topology query - * @param writerNodeId The writer node ID - * @return a list of {@link HostSpec} objects representing - * the topology that was returned by the - * topology query. The list will be empty if the topology query returned an invalid topology - * (no writer instance). - */ - private List processTopologyQueryResults( - final ResultSet topologyResultSet, - final String writerNodeId) throws SQLException { - - final HashMap hostMap = new HashMap<>(); - - // Data is result set is ordered by last updated time so the latest records go last. - // When adding hosts to a map, the newer records replace the older ones. - while (topologyResultSet.next()) { - final HostSpec host = createHost(topologyResultSet, writerNodeId); - hostMap.put(host.getHost(), host); - } - - final List hosts = new ArrayList<>(); - final List writers = new ArrayList<>(); - - for (final HostSpec host : hostMap.values()) { - if (host.getRole() != HostRole.WRITER) { - hosts.add(host); - } else { - writers.add(host); - } - } - - int writerCount = writers.size(); - - if (writerCount == 0) { - LOGGER.severe(() -> Messages.get("RdsHostListProvider.invalidTopology")); - hosts.clear(); - } else { - hosts.add(writers.get(0)); - } - - return hosts; - } - - /** - * Creates an instance of HostSpec which captures details about a connectable host. 
- * - * @param resultSet the result set from querying the topology - * @return a {@link HostSpec} instance for a specific instance from the cluster - * @throws SQLException If unable to retrieve the hostName from the result set - */ - private HostSpec createHost(final ResultSet resultSet, final String writerNodeId) throws SQLException { - - String hostName = resultSet.getString("endpoint"); // "instance-name.XYZ.us-west-2.rds.amazonaws.com" - String instanceName = hostName.substring(0, hostName.indexOf(".")); // "instance-name" - - // "instance-name.XYZ.us-west-2.rds.amazonaws.com" based on cluster instance template - final String endpoint = getHostEndpoint(instanceName); - - String hostId = resultSet.getString("id"); - int queryPort = resultSet.getInt("port"); - final int port = this.clusterInstanceTemplate.isPortSpecified() - ? this.clusterInstanceTemplate.getPort() - : queryPort; - final boolean isWriter = hostId.equals(writerNodeId); - - final HostSpec hostSpec = this.hostListProviderService.getHostSpecBuilder() - .host(endpoint) - .hostId(hostId) - .port(port) - .role(isWriter ? HostRole.WRITER : HostRole.READER) - .availability(HostAvailability.AVAILABLE) - .weight(0) - .lastUpdateTime(Timestamp.from(Instant.now())) - .build(); - hostSpec.addAlias(hostName); - return hostSpec; - } - - /** - * Build a host dns endpoint based on host/node name. - * - * @param nodeName A host name. - * @return Host dns endpoint - */ - protected String getHostEndpoint(final String nodeName) { - final String host = this.clusterInstanceTemplate.getHost(); - return host.replace("?", nodeName); - } -} diff --git a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/StaticHostListProvider.java b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/StaticHostListProvider.java index b37eb4cc3..13e646a03 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/StaticHostListProvider.java +++ b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/StaticHostListProvider.java @@ -16,8 +16,6 @@ package software.amazon.jdbc.hostlistprovider; -import software.amazon.jdbc.HostListProvider; - -// A marker interface for providers that fetch node lists, and it never changes since after. -// An example of such provider is a provider that use connection string as a source. +// A marker interface for providers that fetch host lists that do not change over time. +// An example is a provider that uses a connection string to determine the host list. public interface StaticHostListProvider extends HostListProvider {} diff --git a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/TopologyUtils.java b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/TopologyUtils.java new file mode 100644 index 000000000..5a6a2ef68 --- /dev/null +++ b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/TopologyUtils.java @@ -0,0 +1,235 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
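For orientation while reading the deletions above: the replacement logic lives in `MultiAzTopologyUtils.getWriterId` earlier in this diff, where the writer-id query returns a row only on readers, so an empty result means the connection is the writer itself. A driver-agnostic sketch of that two-step check; the SQL strings are placeholders for the `MultiAzClusterDialect` queries, not real statements:

```java
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;

public class MultiAzWriterCheckSketch {
  // Placeholder queries; the real strings come from the dialect.
  private static final String WRITER_ID_QUERY = "SELECT /* dialect.getWriterIdQuery() */ 1";
  private static final String INSTANCE_ID_QUERY = "SELECT /* dialect.getInstanceIdQuery() */ 1";

  static String getWriterId(Connection conn) throws SQLException {
    try (Statement stmt = conn.createStatement()) {
      // On a reader, this query returns the writer's id.
      try (ResultSet rs = stmt.executeQuery(WRITER_ID_QUERY)) {
        if (rs.next()) {
          return rs.getString(1);
        }
      }
      // Empty result: we are on the writer, so the writer id is our own instance id.
      try (ResultSet rs = stmt.executeQuery(INSTANCE_ID_QUERY)) {
        return rs.next() ? rs.getString(1) : null;
      }
    }
  }
}
```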
+ */ + + +package software.amazon.jdbc.hostlistprovider; + +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.SQLSyntaxErrorException; +import java.sql.Statement; +import java.sql.Timestamp; +import java.util.ArrayList; +import java.util.Comparator; +import java.util.List; +import java.util.concurrent.Executor; +import java.util.logging.Logger; +import java.util.stream.Collectors; +import org.checkerframework.checker.nullness.qual.Nullable; +import software.amazon.jdbc.HostRole; +import software.amazon.jdbc.HostSpec; +import software.amazon.jdbc.HostSpecBuilder; +import software.amazon.jdbc.dialect.TopologyDialect; +import software.amazon.jdbc.hostavailability.HostAvailability; +import software.amazon.jdbc.util.Messages; +import software.amazon.jdbc.util.Pair; +import software.amazon.jdbc.util.SynchronousExecutor; + +/** + * An abstract class defining utility methods that can be used to retrieve and process a variety of database topology + * information. This class can be overridden to define logic specific to various database engine deployments + * (e.g. Aurora, Multi-AZ, Global Aurora etc.). + */ +public abstract class TopologyUtils { + private static final Logger LOGGER = Logger.getLogger(TopologyUtils.class.getName()); + protected static final int DEFAULT_QUERY_TIMEOUT_MS = 1000; + + protected final Executor networkTimeoutExecutor = new SynchronousExecutor(); + protected final TopologyDialect dialect; + protected final HostSpecBuilder hostSpecBuilder; + + public TopologyUtils( + TopologyDialect dialect, + HostSpecBuilder hostSpecBuilder) { + this.dialect = dialect; + this.hostSpecBuilder = hostSpecBuilder; + } + + /** + * Query the database for information for each instance in the database topology. + * + * @param conn the connection to use to query the database. + * @param initialHostSpec the {@link HostSpec} that was used to initially connect. + * @param instanceTemplate the template {@link HostSpec} to use when constructing new {@link HostSpec} objects from + * the data returned by the topology query. + * @return a list of {@link HostSpec} objects representing the results of the topology query. + * @throws SQLException if an error occurs when executing the topology or processing the results. + */ + public @Nullable List queryForTopology(Connection conn, HostSpec initialHostSpec, HostSpec instanceTemplate) + throws SQLException { + int originalNetworkTimeout = setNetworkTimeout(conn); + try (final Statement stmt = conn.createStatement(); + final ResultSet rs = stmt.executeQuery(this.dialect.getTopologyQuery())) { + if (rs.getMetaData().getColumnCount() == 0) { + // We expect at least 4 columns. Note that the server may return 0 columns if failover has occurred. + LOGGER.finest(Messages.get("TopologyUtils.unexpectedTopologyQueryColumnCount")); + return null; + } + + return this.verifyWriter(this.getHosts(conn, rs, initialHostSpec, instanceTemplate)); + } catch (final SQLSyntaxErrorException e) { + throw new SQLException(Messages.get("TopologyUtils.invalidQuery"), e); + } finally { + if (originalNetworkTimeout == 0 && !conn.isClosed()) { + conn.setNetworkTimeout(networkTimeoutExecutor, originalNetworkTimeout); + } + } + } + + protected int setNetworkTimeout(Connection conn) { + int networkTimeout = -1; + try { + networkTimeout = conn.getNetworkTimeout(); + // The topology query is not monitored by the EFM plugin, so it needs a socket timeout. 
+ if (networkTimeout == 0) { + conn.setNetworkTimeout(this.networkTimeoutExecutor, DEFAULT_QUERY_TIMEOUT_MS); + } + } catch (SQLException e) { + LOGGER.warning(() -> Messages.get("TopologyUtils.errorGettingNetworkTimeout", new Object[] {e.getMessage()})); + } + return networkTimeout; + } + + protected abstract @Nullable List getHosts( + Connection conn, ResultSet rs, HostSpec initialHostSpec, HostSpec instanceTemplate) throws SQLException; + + protected @Nullable List verifyWriter(@Nullable List allHosts) { + if (allHosts == null) { + return null; + } + + List hosts = new ArrayList<>(); + List writers = new ArrayList<>(); + for (HostSpec host : allHosts) { + if (HostRole.WRITER == host.getRole()) { + writers.add(host); + } else { + hosts.add(host); + } + } + + int writerCount = writers.size(); + if (writerCount == 0) { + return null; + } else if (writerCount == 1) { + hosts.add(writers.get(0)); + } else { + // Assume the latest updated writer instance is the current writer. Other potential writers will be ignored. + List sortedWriters = writers.stream() + .sorted(Comparator.comparing(HostSpec::getLastUpdateTime, Comparator.nullsLast(Comparator.reverseOrder()))) + .collect(Collectors.toList()); + hosts.add(sortedWriters.get(0)); + } + + return hosts; + } + + /** + * Creates a {@link HostSpec} from the given topology information. + * + * @param instanceId the database instance identifier, e.g. "mydb-instance-1". + * @param isWriter true if this is a writer instance, false for reader. + * @param weight the instance weight for load balancing. + * @param lastUpdateTime the timestamp of the last update to this instance's information. + * @param initialHostSpec the original host specification used for connecting. + * @param instanceTemplate the template used to construct the new {@link HostSpec}. + * @return a {@link HostSpec} representing the given information. + */ + public HostSpec createHost( + String instanceId, + String instanceName, + final boolean isWriter, + final long weight, + final Timestamp lastUpdateTime, + final HostSpec initialHostSpec, + final HostSpec instanceTemplate) { + instanceName = instanceName == null ? "?" : instanceName; + final String endpoint = instanceTemplate.getHost().replace("?", instanceName); + final int port = instanceTemplate.isPortSpecified() + ? instanceTemplate.getPort() + : initialHostSpec.getPort(); + + final HostSpec hostSpec = this.hostSpecBuilder + .hostId(instanceId) + .host(endpoint) + .port(port) + .role(isWriter ? HostRole.WRITER : HostRole.READER) + .availability(HostAvailability.AVAILABLE) + .weight(weight) + .lastUpdateTime(lastUpdateTime) + .build(); + hostSpec.addAlias(instanceName); + hostSpec.setHostId(instanceName); + return hostSpec; + } + + /** + * Identifies instances across different database types using instanceId and instanceName values. + * + *
p>
Database types handle these identifiers differently: + * - Aurora: Uses the instance name as both instanceId and instanceName + * Example: "test-instance-1" for both values + * - RDS Cluster: Uses distinct values for instanceId and instanceName + * Example: + * instanceId: "db-WQFQKBTL2LQUPIEFIFBGENS4ZQ" + * instanceName: "test-multiaz-instance-1" + */ + public @Nullable Pair getInstanceId(final Connection connection) { + try { + try (final Statement stmt = connection.createStatement(); + final ResultSet rs = stmt.executeQuery(this.dialect.getInstanceIdQuery())) { + if (rs.next()) { + return Pair.create(rs.getString(1), rs.getString(2)); + } + } + } catch (SQLException ex) { + return null; + } + + return null; + } + + /** + * Evaluate whether the given connection is to a writer instance. + * + * @param connection the connection to evaluate. + * @return true if the connection is to a writer instance, false otherwise. + * @throws SQLException if an exception occurs when querying the database or processing the database response. + */ + public abstract boolean isWriterInstance(Connection connection) throws SQLException; + + /** + * Evaluate the database role of the given connection, either {@link HostRole#WRITER} or {@link HostRole#READER}. + * + * @param conn the connection to evaluate. + * @return the database role of the given connection. + * @throws SQLException if an exception occurs when querying the database or processing the database response. + */ + public HostRole getHostRole(Connection conn) throws SQLException { + try (final Statement stmt = conn.createStatement(); + final ResultSet rs = stmt.executeQuery(this.dialect.getIsReaderQuery())) { + if (rs.next()) { + boolean isReader = rs.getBoolean(1); + return isReader ? HostRole.READER : HostRole.WRITER; + } + } catch (SQLException e) { + throw new SQLException(Messages.get("TopologyUtils.errorGettingHostRole"), e); + } + + throw new SQLException(Messages.get("TopologyUtils.errorGettingHostRole")); + } +} diff --git a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/ClusterTopologyMonitor.java b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/ClusterTopologyMonitor.java index 482323f92..51b456939 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/ClusterTopologyMonitor.java +++ b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/ClusterTopologyMonitor.java @@ -29,8 +29,6 @@ public interface ClusterTopologyMonitor extends Monitor, EventSubscriber { boolean canDispose(); - void setClusterId(final String clusterId); - List forceRefresh(final boolean writerImportant, final long timeoutMs) throws SQLException, TimeoutException; diff --git a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/ClusterTopologyMonitorImpl.java b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/ClusterTopologyMonitorImpl.java index 5800c993b..845ef1c5f 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/ClusterTopologyMonitorImpl.java +++ b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/ClusterTopologyMonitorImpl.java @@ -17,12 +17,8 @@ package software.amazon.jdbc.hostlistprovider.monitoring; import java.sql.Connection; -import java.sql.ResultSet; import java.sql.SQLException; import java.sql.SQLSyntaxErrorException; -import java.sql.Statement; -import java.sql.Timestamp; -import java.time.Instant; import java.util.ArrayList; import java.util.Collections; import 
java.util.Comparator; @@ -40,21 +36,21 @@ import java.util.concurrent.locks.ReentrantLock; import java.util.logging.Level; import java.util.logging.Logger; -import java.util.stream.Collectors; import org.checkerframework.checker.nullness.qual.NonNull; import org.checkerframework.checker.nullness.qual.Nullable; import software.amazon.jdbc.HostRole; import software.amazon.jdbc.HostSpec; import software.amazon.jdbc.PropertyDefinition; -import software.amazon.jdbc.hostavailability.HostAvailability; import software.amazon.jdbc.hostlistprovider.Topology; +import software.amazon.jdbc.hostlistprovider.TopologyUtils; import software.amazon.jdbc.util.ExecutorFactory; import software.amazon.jdbc.util.FullServicesContainer; +import software.amazon.jdbc.util.LogUtils; import software.amazon.jdbc.util.Messages; +import software.amazon.jdbc.util.Pair; import software.amazon.jdbc.util.PropertyUtils; import software.amazon.jdbc.util.RdsUtils; import software.amazon.jdbc.util.ServiceUtility; -import software.amazon.jdbc.util.StringUtils; import software.amazon.jdbc.util.SynchronousExecutor; import software.amazon.jdbc.util.Utils; import software.amazon.jdbc.util.events.Event; @@ -69,38 +65,22 @@ public class ClusterTopologyMonitorImpl extends AbstractMonitor implements Clust protected static final Executor networkTimeoutExecutor = new SynchronousExecutor(); protected static final RdsUtils rdsHelper = new RdsUtils(); protected static final long monitorTerminationTimeoutSec = 30; - - protected static final int defaultTopologyQueryTimeoutMs = 1000; protected static final int closeConnectionNetworkTimeoutMs = 500; - protected static final int defaultConnectionTimeoutMs = 5000; protected static final int defaultSocketTimeoutMs = 5000; - // Keep monitoring topology with a high rate for 30s after failover. + // Keep monitoring topology at a high rate for 30s after failover. 
protected static final long highRefreshPeriodAfterPanicNano = TimeUnit.SECONDS.toNanos(30); protected static final long ignoreTopologyRequestNano = TimeUnit.SECONDS.toNanos(10); - protected final long refreshRateNano; - protected final long highRefreshRateNano; - protected final FullServicesContainer servicesContainer; - protected final Properties properties; - protected final Properties monitoringProperties; - protected final HostSpec initialHostSpec; - protected final String topologyQuery; - protected final String nodeIdQuery; - protected final String writerTopologyQuery; - protected final HostSpec clusterInstanceTemplate; - - protected String clusterId; protected final AtomicReference writerHostSpec = new AtomicReference<>(null); protected final AtomicReference monitoringConnection = new AtomicReference<>(null); - protected boolean isVerifiedWriterConnection = false; - protected long highRefreshRateEndTimeNano = 0; + protected final Object topologyUpdated = new Object(); protected final AtomicBoolean requestToUpdateTopology = new AtomicBoolean(false); protected final AtomicLong ignoreNewTopologyRequestsEndTimeNano = new AtomicLong(-1); protected final ConcurrentHashMap submittedNodes = new ConcurrentHashMap<>(); - protected ExecutorService nodeExecutorService = null; + protected final ReentrantLock nodeExecutorLock = new ReentrantLock(); protected final AtomicBoolean nodeThreadsStop = new AtomicBoolean(false); protected final AtomicReference nodeThreadsWriterConnection = new AtomicReference<>(null); @@ -108,29 +88,39 @@ public class ClusterTopologyMonitorImpl extends AbstractMonitor implements Clust protected final AtomicReference nodeThreadsReaderConnection = new AtomicReference<>(null); protected final AtomicReference> nodeThreadsLatestTopology = new AtomicReference<>(null); + protected final long refreshRateNano; + protected final long highRefreshRateNano; + protected final TopologyUtils topologyUtils; + protected final FullServicesContainer servicesContainer; + protected final Properties properties; + protected final Properties monitoringProperties; + protected final HostSpec initialHostSpec; + protected final HostSpec instanceTemplate; + + protected ExecutorService nodeExecutorService = null; + protected boolean isVerifiedWriterConnection = false; + protected long highRefreshRateEndTimeNano = 0; + protected String clusterId; + public ClusterTopologyMonitorImpl( final FullServicesContainer servicesContainer, + final TopologyUtils topologyUtils, final String clusterId, final HostSpec initialHostSpec, final Properties properties, - final HostSpec clusterInstanceTemplate, + final HostSpec instanceTemplate, final long refreshRateNano, - final long highRefreshRateNano, - final String topologyQuery, - final String writerTopologyQuery, - final String nodeIdQuery) { + final long highRefreshRateNano) { super(monitorTerminationTimeoutSec); - this.clusterId = clusterId; this.servicesContainer = servicesContainer; + this.topologyUtils = topologyUtils; + this.clusterId = clusterId; this.initialHostSpec = initialHostSpec; - this.clusterInstanceTemplate = clusterInstanceTemplate; + this.instanceTemplate = instanceTemplate; this.properties = properties; this.refreshRateNano = refreshRateNano; this.highRefreshRateNano = highRefreshRateNano; - this.topologyQuery = topologyQuery; - this.writerTopologyQuery = writerTopologyQuery; - this.nodeIdQuery = nodeIdQuery; this.monitoringProperties = PropertyUtils.copyProperties(properties); this.properties.stringPropertyNames().stream() @@ -159,11 +149,6 @@ 
public boolean canDispose() { return true; } - @Override - public void setClusterId(String clusterId) { - this.clusterId = clusterId; - } - @Override public List forceRefresh(final boolean shouldVerifyWriter, final long timeoutMs) throws SQLException, TimeoutException { @@ -171,10 +156,11 @@ public List forceRefresh(final boolean shouldVerifyWriter, final long if (this.ignoreNewTopologyRequestsEndTimeNano.get() > 0 && System.nanoTime() < this.ignoreNewTopologyRequestsEndTimeNano.get()) { - // Previous failover has just completed. We can use results of it without triggering a new topology update. + // A previous failover event has completed recently. + // We can use the results of it without triggering a new topology update. List currentHosts = getStoredHosts(); LOGGER.finest(() -> - Utils.logTopology(currentHosts, Messages.get("ClusterTopologyMonitorImpl.ignoringTopologyRequest"))); + LogUtils.logTopology(currentHosts, Messages.get("ClusterTopologyMonitorImpl.ignoringTopologyRequest"))); if (currentHosts != null) { return currentHosts; } @@ -191,13 +177,12 @@ public List forceRefresh(final boolean shouldVerifyWriter, final long @Override public List forceRefresh(@Nullable Connection connection, final long timeoutMs) throws SQLException, TimeoutException { - if (this.isVerifiedWriterConnection) { - // Push monitoring thread to refresh topology with a verified connection + // Get the monitoring thread to refresh the topology using a verified connection. return this.waitTillTopologyGetsUpdated(timeoutMs); } - // Otherwise use provided unverified connection to update topology + // Otherwise, use the provided unverified connection to update the topology. return this.fetchTopologyAndUpdateCache(connection); } @@ -208,12 +193,13 @@ protected List waitTillTopologyGetsUpdated(final long timeoutMs) throw synchronized (this.requestToUpdateTopology) { this.requestToUpdateTopology.set(true); - // Notify monitoring thread (that might be sleeping) that topology should be refreshed immediately. + // Notify the monitoring thread, which may be sleeping, that topology should be refreshed immediately. this.requestToUpdateTopology.notifyAll(); } if (timeoutMs == 0) { - LOGGER.finest(() -> Utils.logTopology(currentHosts, Messages.get("ClusterTopologyMonitorImpl.timeoutSetToZero"))); + LOGGER.finest(() -> + LogUtils.logTopology(currentHosts, Messages.get("ClusterTopologyMonitorImpl.timeoutSetToZero"))); return currentHosts; } @@ -236,9 +222,8 @@ protected List waitTillTopologyGetsUpdated(final long timeoutMs) throw } if (System.nanoTime() >= end) { - throw new TimeoutException(Messages.get( - "ClusterTopologyMonitorImpl.topologyNotUpdated", - new Object[]{timeoutMs})); + throw new TimeoutException( + Messages.get("ClusterTopologyMonitorImpl.topologyNotUpdated", new Object[] {timeoutMs})); } return latestHosts; @@ -254,7 +239,7 @@ public void stop() { this.nodeThreadsStop.set(true); this.shutdownNodeExecutorService(); - // It breaks a waiting/sleeping cycles in monitoring thread + // This code interrupts the waiting/sleeping cycle in the monitoring thread. 
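+    // Setting the flag and calling notifyAll() below wakes the monitoring thread if it is
+    // sleeping in delay(), so the stop request is observed promptly.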
synchronized (this.requestToUpdateTopology) { this.requestToUpdateTopology.set(true); this.requestToUpdateTopology.notifyAll(); @@ -277,7 +262,7 @@ public void monitor() throws Exception { try { LOGGER.finest(() -> Messages.get( "ClusterTopologyMonitorImpl.startMonitoringThread", - new Object[]{this.clusterId, this.initialHostSpec.getHost()})); + new Object[] {this.clusterId, this.initialHostSpec.getHost()})); this.servicesContainer.getEventPublisher().subscribe( this, Collections.singleton(MonitorResetEvent.class)); @@ -290,7 +275,7 @@ public void monitor() throws Exception { if (this.submittedNodes.isEmpty()) { LOGGER.finest(() -> Messages.get("ClusterTopologyMonitorImpl.startingNodeMonitoringThreads")); - // start node threads + // Start node monitors. this.nodeThreadsStop.set(false); this.nodeThreadsWriterConnection.set(null); this.nodeThreadsReaderConnection.set(null); @@ -299,7 +284,7 @@ public void monitor() throws Exception { List hosts = getStoredHosts(); if (hosts == null) { - // need any connection to get topology + // Use any available connection to get the topology. hosts = this.openAnyConnectionAndUpdateTopology(); } @@ -330,19 +315,17 @@ public void monitor() throws Exception { throw exceptionList.get(0); } } - // It's not possible to call shutdown() on this.nodeExecutorService since more node may be added later. + // We do not call nodeExecutorService.shutdown() here since more node monitors may be submitted later. } - // otherwise let's try it again the next round - + // We will try again in the next iteration. } else { - // node threads are running - // check if writer is already detected + // The node monitors are running, so we check if the writer has been detected. final Connection writerConnection = this.nodeThreadsWriterConnection.get(); final HostSpec writerConnectionHostSpec = this.nodeThreadsWriterHostSpec.get(); if (writerConnection != null && writerConnectionHostSpec != null) { LOGGER.finest(() -> Messages.get( "ClusterTopologyMonitorImpl.writerPickedUpFromNodeMonitors", - new Object[]{writerConnectionHostSpec})); + new Object[] {writerConnectionHostSpec})); this.closeConnection(this.monitoringConnection); this.monitoringConnection.set(writerConnection); @@ -364,7 +347,7 @@ public void monitor() throws Exception { continue; } else { - // update node threads with new nodes in the topology + // Update node monitors with the new instances in the topology List hosts = this.nodeThreadsLatestTopology.get(); if (hosts != null && !this.nodeThreadsStop.get()) { for (HostSpec hostSpec : hosts) { @@ -388,7 +371,7 @@ public void monitor() throws Exception { throw exceptionList.get(0); } } - // It's not possible to call shutdown() on this.nodeExecutorService since more node may be added later. + // We do not call nodeExecutorService.shutdown() here since more node monitors may be submitted later. } } } @@ -396,8 +379,7 @@ public void monitor() throws Exception { this.delay(true); } else { - // regular mode (not panic mode) - + // We are in regular mode (not panic mode). if (!this.submittedNodes.isEmpty()) { this.shutdownNodeExecutorService(); this.submittedNodes.clear(); @@ -405,8 +387,7 @@ public void monitor() throws Exception { final List hosts = this.fetchTopologyAndUpdateCache(this.monitoringConnection.get()); if (hosts == null) { - // can't get topology - // let's switch to panic mode + // Attempt to fetch topology failed, so we switch to panic mode. 
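+          // In panic mode, node monitoring workers probe each known instance in parallel
+          // until one of them is verified as the writer (see NodeMonitoringWorker).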
this.closeConnection(this.monitoringConnection); this.isVerifiedWriterConnection = false; continue; @@ -416,9 +397,9 @@ public void monitor() throws Exception { this.highRefreshRateEndTimeNano = 0; } - // Do not log topology while in high refresh rate. It's noisy! + // We avoid logging the topology while using the high refresh rate because it is too noisy. if (this.highRefreshRateEndTimeNano == 0) { - LOGGER.finest(() -> Utils.logTopology(getStoredHosts())); + LOGGER.finest(() -> LogUtils.logTopology(getStoredHosts())); } this.delay(false); @@ -433,14 +414,14 @@ public void monitor() throws Exception { } catch (final InterruptedException intEx) { Thread.currentThread().interrupt(); } catch (final Exception ex) { - // this should not be reached; log and exit thread + // This should not be reached. if (LOGGER.isLoggable(Level.FINEST)) { - // We want to print full trace stack of the exception. + // We want to print the full trace stack of the exception. LOGGER.log( Level.FINEST, Messages.get( "ClusterTopologyMonitorImpl.exceptionDuringMonitoringStop", - new Object[]{this.initialHostSpec.getHost()}), + new Object[] {this.initialHostSpec.getHost()}), ex); } @@ -458,7 +439,7 @@ public void monitor() throws Exception { LOGGER.finest(() -> Messages.get( "ClusterTopologyMonitorImpl.stopMonitoringThread", - new Object[]{this.initialHostSpec.getHost()})); + new Object[] {this.initialHostSpec.getHost()})); } } @@ -522,7 +503,7 @@ protected void shutdownNodeExecutorService() { this.nodeExecutorService.shutdownNow(); } } catch (InterruptedException e) { - // do nothing + // Do nothing. } this.nodeExecutorService = null; @@ -559,21 +540,20 @@ protected List openAnyConnectionAndUpdateTopology() { Connection conn; - // open a new connection + // Open a new connection. 
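+    // forceConnect opens the connection through the driver's force-connect pipeline, so the
+    // monitoring connection is not subject to the enhanced behavior that plugins add to
+    // regular connect calls.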
try { conn = this.servicesContainer.getPluginService().forceConnect(this.initialHostSpec, this.monitoringProperties); } catch (SQLException ex) { - // can't connect return null; } if (this.monitoringConnection.compareAndSet(null, conn)) { LOGGER.finest(() -> Messages.get( "ClusterTopologyMonitorImpl.openedMonitoringConnection", - new Object[]{this.initialHostSpec.getHost()})); + new Object[] {this.initialHostSpec.getHost()})); try { - if (!StringUtils.isNullOrEmpty(this.getWriterNodeId(this.monitoringConnection.get()))) { + if (this.topologyUtils.isWriterInstance(this.monitoringConnection.get())) { this.isVerifiedWriterConnection = true; writerVerifiedByThisThread = true; @@ -581,24 +561,27 @@ protected List openAnyConnectionAndUpdateTopology() { this.writerHostSpec.set(this.initialHostSpec); LOGGER.finest(() -> Messages.get( "ClusterTopologyMonitorImpl.writerMonitoringConnection", - new Object[]{this.writerHostSpec.get().getHost()})); + new Object[] {this.writerHostSpec.get().getHost()})); } else { - final String nodeId = this.getNodeId(this.monitoringConnection.get()); - if (!StringUtils.isNullOrEmpty(nodeId)) { - this.writerHostSpec.set(this.createHost(nodeId, true, 0, null)); + final Pair pair = this.topologyUtils.getInstanceId(this.monitoringConnection.get()); + if (pair != null) { + HostSpec instanceTemplate = this.getInstanceTemplate(pair.getValue2(), this.monitoringConnection.get()); + HostSpec writerHost = this.topologyUtils.createHost( + pair.getValue1(), pair.getValue2(), true, 0, null, this.initialHostSpec, instanceTemplate); + this.writerHostSpec.set(writerHost); LOGGER.finest(() -> Messages.get( - "ClusterTopologyMonitorImpl.writerMonitoringConnection", - new Object[]{this.writerHostSpec.get().getHost()})); + "ClusterTopologyMonitorImpl.writerMonitoringConnection", + new Object[] {this.writerHostSpec.get().getHost()})); } } } } catch (SQLException ex) { - // do nothing + // Do nothing. } } else { - // monitoring connection has already been set by other thread - // close new connection as we don't need it + // The monitoring connection has already been detected by another thread. We close the new connection since it + // is not needed anymore. this.closeConnection(conn); } } @@ -614,8 +597,7 @@ protected List openAnyConnectionAndUpdateTopology() { } if (hosts == null) { - // can't get topology; it might be something's wrong with a connection - // close connection + // Attempt to fetch topology failed. There might be something wrong with the connection, so we close it here. this.closeConnection(this.monitoringConnection); this.isVerifiedWriterConnection = false; } @@ -623,18 +605,8 @@ protected List openAnyConnectionAndUpdateTopology() { return hosts; } - protected String getNodeId(final Connection connection) { - try { - try (final Statement stmt = connection.createStatement(); - final ResultSet resultSet = stmt.executeQuery(this.nodeIdQuery)) { - if (resultSet.next()) { - return resultSet.getString(1); - } - } - } catch (SQLException ex) { - // do nothing - } - return null; + protected HostSpec getInstanceTemplate(String nodeId, Connection connection) throws SQLException { + return this.instanceTemplate; } protected void closeConnection(final @Nullable Connection connection) { @@ -643,12 +615,12 @@ protected void closeConnection(final @Nullable Connection connection) { try { connection.setNetworkTimeout(networkTimeoutExecutor, closeConnectionNetworkTimeoutMs); } catch (SQLException ex) { - // do nothing + // Do nothing. 
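+        // Failing to shorten the network timeout is not fatal; we still attempt to close
+        // the connection below.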
} connection.close(); } } catch (final SQLException ex) { - // ignore + // Do nothing. } } @@ -657,7 +629,7 @@ protected void closeConnection(final @NonNull AtomicReference connec this.closeConnection(connection); } - // Sleep that can be easily interrupted + // Sleep method that can be easily interrupted. protected void delay(boolean useHighRefreshRate) throws InterruptedException { if (this.highRefreshRateEndTimeNano > 0 && System.nanoTime() < this.highRefreshRateEndTimeNano) { useHighRefreshRate = true; @@ -687,12 +659,16 @@ protected void delay(boolean useHighRefreshRate) throws InterruptedException { } return hosts; } catch (SQLException ex) { - // do nothing - LOGGER.finest(() -> Messages.get("ClusterTopologyMonitorImpl.errorFetchingTopology", new Object[]{ex})); + LOGGER.finest(() -> Messages.get("ClusterTopologyMonitorImpl.errorFetchingTopology", new Object[] {ex})); } + return null; } + protected List queryForTopology(Connection connection) throws SQLException { + return this.topologyUtils.queryForTopology(connection, this.initialHostSpec, this.instanceTemplate); + } + protected void updateTopologyCache(final @NonNull List hosts) { synchronized (this.requestToUpdateTopology) { this.servicesContainer.getStorageService().set(this.clusterId, new Topology(hosts)); @@ -717,158 +693,6 @@ protected void clearTopologyCache() { } } - // Returns a writer node ID if connected to a writer node. Returns null otherwise. - protected String getWriterNodeId(final Connection connection) throws SQLException { - try (final Statement stmt = connection.createStatement()) { - try (final ResultSet resultSet = stmt.executeQuery(this.writerTopologyQuery)) { - if (resultSet.next()) { - return resultSet.getString(1); - } - } - } - return null; - } - - protected @Nullable List queryForTopology(final Connection conn) throws SQLException { - int networkTimeout = -1; - try { - networkTimeout = conn.getNetworkTimeout(); - // The topology query is not monitored by the EFM plugin, so it needs a socket timeout - if (networkTimeout == 0) { - conn.setNetworkTimeout(networkTimeoutExecutor, defaultTopologyQueryTimeoutMs); - } - } catch (SQLException e) { - LOGGER.warning(() -> Messages.get("ClusterTopologyMonitorImpl.errorGettingNetworkTimeout", - new Object[] {e.getMessage()})); - } - - final String suggestedWriterNodeId = this.getSuggestedWriterNodeId(conn); - try (final Statement stmt = conn.createStatement(); - final ResultSet resultSet = stmt.executeQuery(this.topologyQuery)) { - return this.processQueryResults(resultSet, suggestedWriterNodeId); - } catch (final SQLSyntaxErrorException e) { - throw new SQLException(Messages.get("ClusterTopologyMonitorImpl.invalidQuery"), e); - } finally { - if (networkTimeout == 0 && !conn.isClosed()) { - conn.setNetworkTimeout(networkTimeoutExecutor, networkTimeout); - } - } - } - - protected String getSuggestedWriterNodeId(final Connection connection) throws SQLException { - // Aurora topology query can detect a writer for itself so it doesn't need any suggested writer node ID. - return null; // intentionally null - } - - protected @Nullable List processQueryResults( - final ResultSet resultSet, - final String suggestedWriterNodeId) throws SQLException { - - final HashMap hostMap = new HashMap<>(); - - if (resultSet.getMetaData().getColumnCount() == 0) { - // We expect at least 4 columns. Note that the server may return 0 columns if failover has occurred. 
- LOGGER.finest(() -> Messages.get("ClusterTopologyMonitorImpl.unexpectedTopologyQueryColumnCount")); - return null; - } - - // Data is result set is ordered by last updated time so the latest records go last. - // When adding hosts to a map, the newer records replace the older ones. - while (resultSet.next()) { - try { - final HostSpec host = createHost(resultSet, suggestedWriterNodeId); - hostMap.put(host.getHost(), host); - } catch (Exception e) { - LOGGER.finest(() -> - Messages.get("ClusterTopologyMonitorImpl.errorProcessingQueryResults", new Object[]{e.getMessage()})); - return null; - } - } - - final List hosts = new ArrayList<>(); - final List writers = new ArrayList<>(); - - for (final HostSpec host : hostMap.values()) { - if (host.getRole() != HostRole.WRITER) { - hosts.add(host); - } else { - writers.add(host); - } - } - - int writerCount = writers.size(); - - if (writerCount == 0) { - LOGGER.warning(() -> Messages.get("ClusterTopologyMonitorImpl.invalidTopology")); - hosts.clear(); - } else if (writerCount == 1) { - hosts.add(writers.get(0)); - } else { - // Take the latest updated writer node as the current writer. All others will be ignored. - List sortedWriters = writers.stream() - .sorted(Comparator.comparing(HostSpec::getLastUpdateTime, Comparator.nullsLast(Comparator.reverseOrder()))) - .collect(Collectors.toList()); - hosts.add(sortedWriters.get(0)); - } - - return hosts; - } - - protected HostSpec createHost( - final ResultSet resultSet, - final String suggestedWriterNodeId) throws SQLException { - - // suggestedWriterNodeId is not used for Aurora clusters. Topology query can detect a writer for itself. - - // According to the topology query the result set - // should contain 4 columns: node ID, 1/0 (writer/reader), CPU utilization, node lag in time. - String hostName = resultSet.getString(1); - final boolean isWriter = resultSet.getBoolean(2); - final float cpuUtilization = resultSet.getFloat(3); - final float nodeLag = resultSet.getFloat(4); - Timestamp lastUpdateTime; - try { - lastUpdateTime = resultSet.getTimestamp(5); - } catch (Exception e) { - lastUpdateTime = Timestamp.from(Instant.now()); - } - - // Calculate weight based on node lag in time and CPU utilization. - final long weight = Math.round(nodeLag) * 100L + Math.round(cpuUtilization); - - return createHost(hostName, isWriter, weight, lastUpdateTime); - } - - protected HostSpec createHost( - String nodeName, - final boolean isWriter, - final long weight, - final Timestamp lastUpdateTime) { - - nodeName = nodeName == null ? "?" : nodeName; - final String endpoint = getHostEndpoint(nodeName); - final int port = this.clusterInstanceTemplate.isPortSpecified() - ? this.clusterInstanceTemplate.getPort() - : this.initialHostSpec.getPort(); - - final HostSpec hostSpec = this.servicesContainer.getHostListProviderService().getHostSpecBuilder() - .host(endpoint) - .port(port) - .role(isWriter ? 
HostRole.WRITER : HostRole.READER) - .availability(HostAvailability.AVAILABLE) - .weight(weight) - .lastUpdateTime(lastUpdateTime) - .build(); - hostSpec.addAlias(nodeName); - hostSpec.setHostId(nodeName); - return hostSpec; - } - - protected String getHostEndpoint(final String nodeName) { - final String host = this.clusterInstanceTemplate.getHost(); - return host.replace("?", nodeName); - } - private static class NodeMonitoringWorker implements Runnable { private static final Logger LOGGER = Logger.getLogger(NodeMonitoringWorker.class.getName()); @@ -913,26 +737,24 @@ public void run() { } if (connection != null) { - - String writerId = null; + boolean isWriter = false; try { - writerId = this.monitor.getWriterNodeId(connection); - + isWriter = this.monitor.topologyUtils.isWriterInstance(connection); } catch (SQLSyntaxErrorException ex) { - LOGGER.severe(() -> Messages.get("NodeMonitoringThread.invalidWriterQuery", + LOGGER.severe(() -> Messages.get( + "NodeMonitoringThread.invalidWriterQuery", new Object[] {ex.getMessage()})); throw new RuntimeException(ex); - } catch (SQLException ex) { this.monitor.closeConnection(connection); connection = null; } - if (!StringUtils.isNullOrEmpty(writerId)) { + if (isWriter) { try { if (this.servicesContainer.getPluginService().getHostRole(connection) != HostRole.WRITER) { // The first connection after failover may be stale. - writerId = null; + isWriter = false; } } catch (SQLException e) { // Invalid connection, retry. @@ -940,40 +762,39 @@ public void run() { } } - if (!StringUtils.isNullOrEmpty(writerId)) { - // this prevents closing connection in finally block + if (isWriter) { + // This prevents us from closing the connection in the finally block. if (!this.monitor.nodeThreadsWriterConnection.compareAndSet(null, connection)) { - // writer connection is already setup + // The writer connection is already set up, probably by another node monitor. this.monitor.closeConnection(connection); - } else { - // writer connection is successfully set to writerConnection - LOGGER.fine(Messages.get("NodeMonitoringThread.detectedWriter", new Object[]{writerId})); + // Successfully updated the node monitor writer connection. + LOGGER.fine(() -> + Messages.get("NodeMonitoringThread.detectedWriter", new Object[] {hostSpec.getUrl()})); // When nodeThreadsWriterConnection and nodeThreadsWriterHostSpec are both set, the topology monitor may // set ignoreNewTopologyRequestsEndTimeNano, in which case other threads will use the cached topology // for the ignore duration, so we need to update the topology before setting nodeThreadsWriterHostSpec. this.monitor.fetchTopologyAndUpdateCache(connection); this.monitor.nodeThreadsWriterHostSpec.set(hostSpec); this.monitor.nodeThreadsStop.set(true); - LOGGER.fine(Utils.logTopology(this.monitor.getStoredHosts())); + LOGGER.fine(() -> LogUtils.logTopology(this.monitor.getStoredHosts())); } - // Setting the connection to null here prevents the final block - // from closing nodeThreadsWriterConnection. + // We set the connection to null to prevent the finally block from closing nodeThreadsWriterConnection. connection = null; return; - } else if (connection != null) { - // this connection is a reader connection + // This connection is a reader connection. if (this.monitor.nodeThreadsWriterConnection.get() == null) { - // while writer connection isn't yet established this reader connection may update topology + // We can use this reader connection to update the topology while we wait for the writer connection to + // be established. 
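+            // Only one reader connection is promoted to the topology-updating role via the
+            // compareAndSet below; the remaining reader connections are closed when their
+            // workers complete.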
if (updateTopology) { - this.readerThreadFetchTopology(connection, writerHostSpec); + this.readerThreadFetchTopology(connection, this.writerHostSpec); } else if (this.monitor.nodeThreadsReaderConnection.get() == null) { if (this.monitor.nodeThreadsReaderConnection.compareAndSet(null, connection)) { - // let's use this connection to update topology + // Use this connection to update the topology. updateTopology = true; - this.readerThreadFetchTopology(connection, writerHostSpec); + this.readerThreadFetchTopology(connection, this.writerHostSpec); } } } @@ -989,7 +810,8 @@ public void run() { } finally { this.monitor.closeConnection(connection); final long end = System.nanoTime(); - LOGGER.finest(() -> Messages.get("NodeMonitoringThread.threadCompleted", + LOGGER.finest(() -> Messages.get( + "NodeMonitoringThread.threadCompleted", new Object[] {TimeUnit.NANOSECONDS.toMillis(end - start)})); } } @@ -1001,7 +823,8 @@ private void readerThreadFetchTopology(final Connection connection, final @Nulla List hosts; try { - hosts = this.monitor.queryForTopology(connection); + hosts = this.monitor.topologyUtils.queryForTopology( + connection, this.monitor.initialHostSpec, this.monitor.instanceTemplate); if (hosts == null) { return; } @@ -1009,12 +832,12 @@ private void readerThreadFetchTopology(final Connection connection, final @Nulla return; } - // share this topology so the main monitoring thread be able to adjust node monitoring threads + // Share this topology so that the main monitoring thread can adjust the node monitoring threads. this.monitor.nodeThreadsLatestTopology.set(hosts); if (this.writerChanged) { this.monitor.updateTopologyCache(hosts); - LOGGER.finest(Utils.logTopology(hosts)); + LOGGER.finest(() -> LogUtils.logTopology(hosts)); return; } @@ -1025,16 +848,14 @@ private void readerThreadFetchTopology(final Connection connection, final @Nulla if (latestWriterHostSpec != null && writerHostSpec != null && !latestWriterHostSpec.getHostAndPort().equals(writerHostSpec.getHostAndPort())) { - - // writer node has changed this.writerChanged = true; - - LOGGER.fine(() -> Messages.get("NodeMonitoringThread.writerNodeChanged", + LOGGER.fine(() -> Messages.get( + "NodeMonitoringThread.writerNodeChanged", new Object[] {writerHostSpec.getHost(), latestWriterHostSpec.getHost()})); - // we can update topology cache and notify all waiting threads + // Update the topology cache and notify all waiting threads. this.monitor.updateTopologyCache(hosts); - LOGGER.fine(Utils.logTopology(hosts)); + LOGGER.fine(() -> LogUtils.logTopology(hosts)); } } } diff --git a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/GlobalAuroraTopologyMonitor.java b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/GlobalAuroraTopologyMonitor.java new file mode 100644 index 000000000..c280035d3 --- /dev/null +++ b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/GlobalAuroraTopologyMonitor.java @@ -0,0 +1,78 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package software.amazon.jdbc.hostlistprovider.monitoring; + +import java.sql.Connection; +import java.sql.SQLException; +import java.util.List; +import java.util.Map; +import java.util.Properties; +import software.amazon.jdbc.HostSpec; +import software.amazon.jdbc.hostlistprovider.GlobalAuroraTopologyUtils; +import software.amazon.jdbc.util.FullServicesContainer; +import software.amazon.jdbc.util.Messages; +import software.amazon.jdbc.util.StringUtils; + + +public class GlobalAuroraTopologyMonitor extends ClusterTopologyMonitorImpl { + protected final Map instanceTemplatesByRegion; + protected final GlobalAuroraTopologyUtils topologyUtils; + + public GlobalAuroraTopologyMonitor( + final FullServicesContainer servicesContainer, + final GlobalAuroraTopologyUtils topologyUtils, + final String clusterId, + final HostSpec initialHostSpec, + final Properties properties, + final HostSpec instanceTemplate, + final long refreshRateNano, + final long highRefreshRateNano, + final Map instanceTemplatesByRegion) { + super(servicesContainer, + topologyUtils, + clusterId, + initialHostSpec, + properties, + instanceTemplate, + refreshRateNano, + highRefreshRateNano); + + this.instanceTemplatesByRegion = instanceTemplatesByRegion; + this.topologyUtils = topologyUtils; + } + + @Override + protected HostSpec getInstanceTemplate(String instanceId, Connection connection) throws SQLException { + String region = this.topologyUtils.getRegion(instanceId, connection); + if (!StringUtils.isNullOrEmpty(region)) { + final HostSpec instanceTemplate = this.instanceTemplatesByRegion.get(region); + if (instanceTemplate == null) { + throw new SQLException( + Messages.get("GlobalAuroraTopologyMonitor.cannotFindRegionTemplate", new Object[] {region})); + } + + return instanceTemplate; + } + + return this.instanceTemplate; + } + + @Override + protected List queryForTopology(Connection connection) throws SQLException { + return this.topologyUtils.queryForTopology(connection, this.initialHostSpec, this.instanceTemplatesByRegion); + } +} diff --git a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/MonitoringGlobalAuroraHostListProvider.java b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/MonitoringGlobalAuroraHostListProvider.java new file mode 100644 index 000000000..b258c223d --- /dev/null +++ b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/MonitoringGlobalAuroraHostListProvider.java @@ -0,0 +1,91 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package software.amazon.jdbc.hostlistprovider.monitoring; + +import java.sql.Connection; +import java.sql.SQLException; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Properties; +import java.util.logging.Logger; +import software.amazon.jdbc.HostSpec; +import software.amazon.jdbc.PropertyDefinition; +import software.amazon.jdbc.hostlistprovider.GlobalAuroraHostListProvider; +import software.amazon.jdbc.hostlistprovider.GlobalAuroraTopologyUtils; +import software.amazon.jdbc.util.FullServicesContainer; +import software.amazon.jdbc.util.LogUtils; +import software.amazon.jdbc.util.Messages; +import software.amazon.jdbc.util.RdsUtils; +import software.amazon.jdbc.util.StringUtils; + +public class MonitoringGlobalAuroraHostListProvider extends MonitoringRdsHostListProvider { + + static final Logger LOGGER = Logger.getLogger(MonitoringGlobalAuroraHostListProvider.class.getName()); + + protected Map instanceTemplatesByRegion = new HashMap<>(); + + protected final RdsUtils rdsUtils = new RdsUtils(); + protected final GlobalAuroraTopologyUtils topologyUtils; + + static { + // Intentionally register property definition using the GlobalAuroraHostListProvider class. + PropertyDefinition.registerPluginProperties(GlobalAuroraHostListProvider.class); + } + + public MonitoringGlobalAuroraHostListProvider( + GlobalAuroraTopologyUtils topologyUtils, + Properties properties, + String originalUrl, + FullServicesContainer servicesContainer) { + super(topologyUtils, properties, originalUrl, servicesContainer); + this.topologyUtils = topologyUtils; + } + + @Override + protected void initSettings() throws SQLException { + super.initSettings(); + + String instanceTemplates = GlobalAuroraHostListProvider.GLOBAL_CLUSTER_INSTANCE_HOST_PATTERNS.getString(properties); + this.instanceTemplatesByRegion = + this.topologyUtils.parseInstanceTemplates(instanceTemplates, this::validateHostPatternSetting); + } + + protected ClusterTopologyMonitor initMonitor() throws SQLException { + return this.servicesContainer.getMonitorService().runIfAbsent( + ClusterTopologyMonitorImpl.class, + this.clusterId, + this.servicesContainer, + this.properties, + (servicesContainer) -> + new GlobalAuroraTopologyMonitor( + servicesContainer, + this.topologyUtils, + this.clusterId, + this.initialHostSpec, + this.properties, + this.instanceTemplate, + this.refreshRateNano, + this.highRefreshRateNano, + this.instanceTemplatesByRegion)); + } + + @Override + protected List queryForTopology(Connection connection) throws SQLException { + return this.topologyUtils.queryForTopology(connection, this.initialHostSpec, this.instanceTemplatesByRegion); + } +} diff --git a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/MonitoringRdsHostListProvider.java b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/MonitoringRdsHostListProvider.java index 3a6bc50e6..c3fa52442 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/MonitoringRdsHostListProvider.java +++ b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/MonitoringRdsHostListProvider.java @@ -22,7 +22,6 @@ import java.util.Properties; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; -import java.util.logging.Logger; import software.amazon.jdbc.AwsWrapperProperty; import software.amazon.jdbc.BlockingHostListProvider; import software.amazon.jdbc.HostSpec; @@ -30,15 +29,11 @@ import software.amazon.jdbc.PropertyDefinition; import 
software.amazon.jdbc.cleanup.CanReleaseResources; import software.amazon.jdbc.hostlistprovider.RdsHostListProvider; -import software.amazon.jdbc.hostlistprovider.Topology; +import software.amazon.jdbc.hostlistprovider.TopologyUtils; import software.amazon.jdbc.util.FullServicesContainer; -import software.amazon.jdbc.util.monitoring.MonitorService; -import software.amazon.jdbc.util.storage.StorageService; -public class MonitoringRdsHostListProvider extends RdsHostListProvider - implements BlockingHostListProvider, CanReleaseResources { - - private static final Logger LOGGER = Logger.getLogger(MonitoringRdsHostListProvider.class.getName()); +public class MonitoringRdsHostListProvider + extends RdsHostListProvider implements BlockingHostListProvider, CanReleaseResources { public static final AwsWrapperProperty CLUSTER_TOPOLOGY_HIGH_REFRESH_RATE_MS = new AwsWrapperProperty( @@ -53,33 +48,19 @@ public class MonitoringRdsHostListProvider extends RdsHostListProvider protected final FullServicesContainer servicesContainer; protected final PluginService pluginService; protected final long highRefreshRateNano; - protected final String writerTopologyQuery; public MonitoringRdsHostListProvider( + final TopologyUtils topologyUtils, final Properties properties, final String originalUrl, - final FullServicesContainer servicesContainer, - final String topologyQuery, - final String nodeIdQuery, - final String isReaderQuery, - final String writerTopologyQuery) { - super(properties, originalUrl, servicesContainer, topologyQuery, nodeIdQuery, isReaderQuery); + final FullServicesContainer servicesContainer) { + super(topologyUtils, properties, originalUrl, servicesContainer); this.servicesContainer = servicesContainer; this.pluginService = servicesContainer.getPluginService(); - this.writerTopologyQuery = writerTopologyQuery; this.highRefreshRateNano = TimeUnit.MILLISECONDS.toNanos( CLUSTER_TOPOLOGY_HIGH_REFRESH_RATE_MS.getLong(this.properties)); } - public static void clearCache() { - clearAll(); - } - - @Override - protected void init() throws SQLException { - super.init(); - } - protected ClusterTopologyMonitor initMonitor() throws SQLException { return this.servicesContainer.getMonitorService().runIfAbsent( ClusterTopologyMonitorImpl.class, @@ -88,15 +69,13 @@ protected ClusterTopologyMonitor initMonitor() throws SQLException { this.properties, (servicesContainer) -> new ClusterTopologyMonitorImpl( this.servicesContainer, + this.topologyUtils, this.clusterId, this.initialHostSpec, this.properties, - this.clusterInstanceTemplate, + this.instanceTemplate, this.refreshRateNano, - this.highRefreshRateNano, - this.topologyQuery, - this.writerTopologyQuery, - this.nodeIdQuery)); + this.highRefreshRateNano)); } @Override @@ -110,31 +89,6 @@ protected List queryForTopology(final Connection conn) throws SQLExcep } } - @Override - protected void clusterIdChanged(final String oldClusterId) throws SQLException { - MonitorService monitorService = this.servicesContainer.getMonitorService(); - final ClusterTopologyMonitorImpl existingMonitor = - monitorService.get(ClusterTopologyMonitorImpl.class, oldClusterId); - if (existingMonitor != null) { - this.servicesContainer.getMonitorService().runIfAbsent( - ClusterTopologyMonitorImpl.class, - this.clusterId, - this.servicesContainer, - this.properties, - (servicesContainer) -> existingMonitor); - assert monitorService.get(ClusterTopologyMonitorImpl.class, this.clusterId) == existingMonitor; - existingMonitor.setClusterId(this.clusterId); - 
monitorService.remove(ClusterTopologyMonitorImpl.class, oldClusterId); - } - - final StorageService storageService = this.servicesContainer.getStorageService(); - final Topology existingTopology = storageService.get(Topology.class, oldClusterId); - final List existingHosts = existingTopology == null ? null : existingTopology.getHosts(); - if (existingHosts != null) { - storageService.set(this.clusterId, new Topology(existingHosts)); - } - } - @Override public List forceRefresh(final boolean shouldVerifyWriter, final long timeoutMs) throws SQLException, TimeoutException { @@ -150,6 +104,6 @@ public List forceRefresh(final boolean shouldVerifyWriter, final long @Override public void releaseResources() { - // do nothing + // Do nothing. } } diff --git a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/MonitoringRdsMultiAzHostListProvider.java b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/MonitoringRdsMultiAzHostListProvider.java deleted file mode 100644 index c11da2be9..000000000 --- a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/MonitoringRdsMultiAzHostListProvider.java +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package software.amazon.jdbc.hostlistprovider.monitoring; - -import java.sql.SQLException; -import java.util.Properties; -import java.util.logging.Logger; -import software.amazon.jdbc.util.FullServicesContainer; - -public class MonitoringRdsMultiAzHostListProvider extends MonitoringRdsHostListProvider { - - private static final Logger LOGGER = Logger.getLogger(MonitoringRdsMultiAzHostListProvider.class.getName()); - - protected final String fetchWriterNodeQuery; - protected final String fetchWriterNodeColumnName; - - public MonitoringRdsMultiAzHostListProvider( - final Properties properties, - final String originalUrl, - final FullServicesContainer servicesContainer, - final String topologyQuery, - final String nodeIdQuery, - final String isReaderQuery, - final String fetchWriterNodeQuery, - final String fetchWriterNodeColumnName) { - super( - properties, - originalUrl, - servicesContainer, - topologyQuery, - nodeIdQuery, - isReaderQuery, - ""); - this.fetchWriterNodeQuery = fetchWriterNodeQuery; - this.fetchWriterNodeColumnName = fetchWriterNodeColumnName; - } - - @Override - protected ClusterTopologyMonitor initMonitor() throws SQLException { - return this.servicesContainer.getMonitorService().runIfAbsent(MultiAzClusterTopologyMonitorImpl.class, - this.clusterId, - this.servicesContainer, - this.properties, - (servicesContainer) -> new MultiAzClusterTopologyMonitorImpl( - servicesContainer, - this.clusterId, - this.initialHostSpec, - this.properties, - this.hostListProviderService, - this.clusterInstanceTemplate, - this.refreshRateNano, - this.highRefreshRateNano, - this.topologyQuery, - this.writerTopologyQuery, - this.nodeIdQuery, - this.fetchWriterNodeQuery, - this.fetchWriterNodeColumnName)); - } - -} diff --git a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/MultiAzClusterTopologyMonitorImpl.java b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/MultiAzClusterTopologyMonitorImpl.java deleted file mode 100644 index 36bab8f90..000000000 --- a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/MultiAzClusterTopologyMonitorImpl.java +++ /dev/null @@ -1,128 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package software.amazon.jdbc.hostlistprovider.monitoring; - -import java.sql.Connection; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Statement; -import java.sql.Timestamp; -import java.time.Instant; -import java.util.Properties; -import java.util.logging.Logger; -import software.amazon.jdbc.HostListProviderService; -import software.amazon.jdbc.HostSpec; -import software.amazon.jdbc.util.FullServicesContainer; -import software.amazon.jdbc.util.StringUtils; - -public class MultiAzClusterTopologyMonitorImpl extends ClusterTopologyMonitorImpl { - - private static final Logger LOGGER = Logger.getLogger(MultiAzClusterTopologyMonitorImpl.class.getName()); - - protected final String fetchWriterNodeQuery; - protected final String fetchWriterNodeColumnName; - - public MultiAzClusterTopologyMonitorImpl( - final FullServicesContainer servicesContainer, - final String clusterId, - final HostSpec initialHostSpec, - final Properties properties, - final HostListProviderService hostListProviderService, - final HostSpec clusterInstanceTemplate, - final long refreshRateNano, - final long highRefreshRateNano, - final String topologyQuery, - final String writerTopologyQuery, - final String nodeIdQuery, - final String fetchWriterNodeQuery, - final String fetchWriterNodeColumnName) { - super( - servicesContainer, - clusterId, - initialHostSpec, - properties, - clusterInstanceTemplate, - refreshRateNano, - highRefreshRateNano, - topologyQuery, - writerTopologyQuery, - nodeIdQuery); - this.fetchWriterNodeQuery = fetchWriterNodeQuery; - this.fetchWriterNodeColumnName = fetchWriterNodeColumnName; - } - - // Returns a writer node ID if connected to a writer node. Returns null otherwise. - @Override - protected String getWriterNodeId(final Connection connection) throws SQLException { - try (final Statement stmt = connection.createStatement()) { - try (final ResultSet resultSet = stmt.executeQuery(this.fetchWriterNodeQuery)) { - if (resultSet.next()) { - String nodeId = resultSet.getString(this.fetchWriterNodeColumnName); - if (!StringUtils.isNullOrEmpty(nodeId)) { - // Replica status exists and shows a writer node ID. - // That means that this node (this connection) is a reader - return null; - } - } - } - // Replica status doesn't exist. That means that this node is a writer. - try (final ResultSet resultSet = stmt.executeQuery(this.nodeIdQuery)) { - if (resultSet.next()) { - return resultSet.getString(1); - } - } - } - return null; - } - - @Override - protected String getSuggestedWriterNodeId(final Connection connection) throws SQLException { - try (final Statement stmt = connection.createStatement()) { - try (final ResultSet resultSet = stmt.executeQuery(this.fetchWriterNodeQuery)) { - if (resultSet.next()) { - String nodeId = resultSet.getString(this.fetchWriterNodeColumnName); - if (!StringUtils.isNullOrEmpty(nodeId)) { - // Replica status exists and shows a writer node ID. - // That means that this node (this connection) is a reader. - // But we now what replication source is and that is a writer node. - return nodeId; - } - } - } - // Replica status doesn't exist. That means that this node is a writer. 
- try (final ResultSet resultSet = stmt.executeQuery(this.nodeIdQuery)) { - if (resultSet.next()) { - return resultSet.getString(1); - } - } - } - return null; - } - - @Override - protected HostSpec createHost( - final ResultSet resultSet, - final String suggestedWriterNodeId) throws SQLException { - - String endpoint = resultSet.getString("endpoint"); // "instance-name.XYZ.us-west-2.rds.amazonaws.com" - String instanceName = endpoint.substring(0, endpoint.indexOf(".")); // "instance-name" - String hostId = resultSet.getString("id"); // "1034958454" - final boolean isWriter = hostId.equals(suggestedWriterNodeId); - - return createHost(instanceName, isWriter, 0, Timestamp.from(Instant.now())); - } -} diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/AbstractConnectionPlugin.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/AbstractConnectionPlugin.java index 035e4ecf9..9cfb8de24 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/AbstractConnectionPlugin.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/AbstractConnectionPlugin.java @@ -24,12 +24,12 @@ import java.util.Properties; import java.util.Set; import software.amazon.jdbc.ConnectionPlugin; -import software.amazon.jdbc.HostListProviderService; import software.amazon.jdbc.HostRole; import software.amazon.jdbc.HostSpec; import software.amazon.jdbc.JdbcCallable; import software.amazon.jdbc.NodeChangeOptions; import software.amazon.jdbc.OldConnectionSuggestedAction; +import software.amazon.jdbc.hostlistprovider.HostListProviderService; public abstract class AbstractConnectionPlugin implements ConnectionPlugin { diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/AuroraInitialConnectionStrategyPlugin.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/AuroraInitialConnectionStrategyPlugin.java index aef063a63..d467e8b22 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/AuroraInitialConnectionStrategyPlugin.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/AuroraInitialConnectionStrategyPlugin.java @@ -19,24 +19,26 @@ import java.sql.Connection; import java.sql.SQLException; import java.util.Collections; -import java.util.HashMap; import java.util.HashSet; -import java.util.Map; +import java.util.List; import java.util.Properties; import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.logging.Logger; +import java.util.stream.Collectors; +import org.jetbrains.annotations.Nullable; import software.amazon.jdbc.AwsWrapperProperty; -import software.amazon.jdbc.HostListProviderService; import software.amazon.jdbc.HostRole; import software.amazon.jdbc.HostSpec; import software.amazon.jdbc.JdbcCallable; import software.amazon.jdbc.PluginService; import software.amazon.jdbc.PropertyDefinition; import software.amazon.jdbc.hostavailability.HostAvailability; +import software.amazon.jdbc.hostlistprovider.HostListProviderService; import software.amazon.jdbc.util.Messages; import software.amazon.jdbc.util.RdsUrlType; import software.amazon.jdbc.util.RdsUtils; +import software.amazon.jdbc.util.StringUtils; import software.amazon.jdbc.util.Utils; import software.amazon.jdbc.util.WrapperUtils; @@ -76,31 +78,11 @@ public class AuroraInitialConnectionStrategyPlugin extends AbstractConnectionPlu null, "Force to verify an opened connection to be either a writer or a reader."); - private enum VerifyOpenedConnectionType { - WRITER, - READER; - - private static final Map nameToValue = - new HashMap() { - { - put("writer", WRITER); - put("reader", READER); - } - 
}; - - public static VerifyOpenedConnectionType fromValue(String value) { - if (value == null) { - return null; - } - return nameToValue.get(value.toLowerCase()); - } - } - private final PluginService pluginService; private HostListProviderService hostListProviderService; private final RdsUtils rdsUtils = new RdsUtils(); - private VerifyOpenedConnectionType verifyOpenedConnectionType = null; + private final HostRole verifyOpenedConnectionType; static { PropertyDefinition.registerPluginProperties(AuroraInitialConnectionStrategyPlugin.class); @@ -109,7 +91,7 @@ public static VerifyOpenedConnectionType fromValue(String value) { public AuroraInitialConnectionStrategyPlugin(final PluginService pluginService, final Properties properties) { this.pluginService = pluginService; this.verifyOpenedConnectionType = - VerifyOpenedConnectionType.fromValue(VERIFY_OPENED_CONNECTION_TYPE.getString(properties)); + HostRole.verifyConnectionTypeFromValue(VERIFY_OPENED_CONNECTION_TYPE.getString(properties)); } @Override @@ -126,9 +108,6 @@ public void initHostProvider( final JdbcCallable initHostProviderFunc) throws SQLException { this.hostListProviderService = hostListProviderService; - if (hostListProviderService.isStaticHostListProvider()) { - throw new SQLException(Messages.get("AuroraInitialConnectionStrategyPlugin.requireDynamicProvider")); - } initHostProviderFunc.call(); } @@ -143,8 +122,14 @@ public Connection connect( final RdsUrlType type = this.rdsUtils.identifyRdsType(hostSpec.getHost()); + if (!type.isRdsCluster()) { + // It's not a cluster endpoint. Continue with a normal workflow. + return connectFunc.call(); + } + if (type == RdsUrlType.RDS_WRITER_CLUSTER - || isInitialConnection && this.verifyOpenedConnectionType == VerifyOpenedConnectionType.WRITER) { + || type == RdsUrlType.RDS_GLOBAL_WRITER_CLUSTER + || isInitialConnection && this.verifyOpenedConnectionType == HostRole.WRITER) { Connection writerCandidateConn = this.getVerifiedWriterConnection(props, isInitialConnection, connectFunc); if (writerCandidateConn == null) { // Can't get writer connection. Continue with a normal workflow. @@ -154,8 +139,9 @@ public Connection connect( } if (type == RdsUrlType.RDS_READER_CLUSTER - || isInitialConnection && this.verifyOpenedConnectionType == VerifyOpenedConnectionType.READER) { - Connection readerCandidateConn = this.getVerifiedReaderConnection(props, isInitialConnection, connectFunc); + || isInitialConnection && this.verifyOpenedConnectionType == HostRole.READER) { + Connection readerCandidateConn = + this.getVerifiedReaderConnection(type, hostSpec, props, isInitialConnection, connectFunc); if (readerCandidateConn == null) { // Can't get a reader connection. Continue with a normal workflow. LOGGER.finest("Continue with normal workflow."); @@ -190,7 +176,9 @@ private Connection getVerifiedWriterConnection( try { writerCandidate = Utils.getWriter(this.pluginService.getAllHosts()); - if (writerCandidate == null || this.rdsUtils.isRdsClusterDns(writerCandidate.getHost())) { + if (writerCandidate == null + || this.rdsUtils.isRdsClusterDns(writerCandidate.getHost()) + || this.rdsUtils.isGlobalDbWriterClusterDns(writerCandidate.getHost())) { // Writer is not found. It seems that topology is outdated. 
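The writer verification above is controlled by the verifyOpenedConnectionType property, whose values are now parsed by HostRole.verifyConnectionTypeFromValue rather than the removed enum. A minimal sketch of opting in, assuming the initialConnection and failover2 plugin code names from the wrapper's documentation; the URL is a placeholder:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.util.Properties;

    public class VerifyConnectionTypeExample {
      public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.setProperty("wrapperPlugins", "initialConnection,failover2");
        // "writer" or "reader"; parsed by HostRole.verifyConnectionTypeFromValue(...)
        props.setProperty("verifyOpenedConnectionType", "writer");
        try (Connection conn = DriverManager.getConnection(
            "jdbc:aws-wrapper:postgresql://mydb.cluster-XYZ.us-east-1.rds.amazonaws.com:5432/db", props)) {
          // the opened connection has been verified to be the writer before it is returned
        }
      }
    }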
writerCandidateConn = connectFunc.call(); @@ -247,6 +235,8 @@ private Connection getVerifiedWriterConnection( } private Connection getVerifiedReaderConnection( + final RdsUrlType rdsUrlType, + final HostSpec hostSpec, final Properties props, final boolean isInitialConnection, final JdbcCallable connectFunc) @@ -259,6 +249,9 @@ private Connection getVerifiedReaderConnection( Connection readerCandidateConn; HostSpec readerCandidate; + final String awsRegion = rdsUrlType == RdsUrlType.RDS_READER_CLUSTER + ? this.rdsUtils.getRdsRegion(hostSpec.getHost()) + : null; while (this.getTime() < endTimeNano) { @@ -266,7 +259,7 @@ private Connection getVerifiedReaderConnection( readerCandidate = null; try { - readerCandidate = this.getReader(props); + readerCandidate = this.getReader(props, awsRegion); if (readerCandidate == null || this.rdsUtils.isRdsClusterDns(readerCandidate.getHost())) { @@ -364,14 +357,20 @@ private void delay(final long delayMs) { } } - private HostSpec getReader(final Properties props) throws SQLException { + private HostSpec getReader(final Properties props, final @Nullable String awsRegion) throws SQLException { final String strategy = READER_HOST_SELECTOR_STRATEGY.getString(props); if (this.pluginService.acceptsStrategy(HostRole.READER, strategy)) { try { - return this.pluginService.getHostSpecByStrategy(HostRole.READER, strategy); - } catch (UnsupportedOperationException ex) { - throw ex; + if (!StringUtils.isNullOrEmpty(awsRegion)) { + final List hostsInRegion = this.pluginService.getHosts() + .stream() + .filter(x -> awsRegion.equalsIgnoreCase(this.rdsUtils.getRdsRegion(x.getHost()))) + .collect(Collectors.toList()); + return this.pluginService.getHostSpecByStrategy(hostsInRegion, HostRole.READER, strategy); + } else { + return this.pluginService.getHostSpecByStrategy(HostRole.READER, strategy); + } } catch (SQLException ex) { // host isn't found return null; diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/DefaultConnectionPlugin.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/DefaultConnectionPlugin.java index 66277275b..f6f0f391d 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/DefaultConnectionPlugin.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/DefaultConnectionPlugin.java @@ -31,10 +31,10 @@ import java.util.stream.Collectors; import org.checkerframework.checker.nullness.qual.NonNull; import org.checkerframework.checker.nullness.qual.Nullable; +import software.amazon.jdbc.ConnectionInfo; import software.amazon.jdbc.ConnectionPlugin; import software.amazon.jdbc.ConnectionProvider; import software.amazon.jdbc.ConnectionProviderManager; -import software.amazon.jdbc.HostListProviderService; import software.amazon.jdbc.HostRole; import software.amazon.jdbc.HostSpec; import software.amazon.jdbc.JdbcCallable; @@ -43,6 +43,7 @@ import software.amazon.jdbc.PluginManagerService; import software.amazon.jdbc.PluginService; import software.amazon.jdbc.hostavailability.HostAvailability; +import software.amazon.jdbc.hostlistprovider.HostListProviderService; import software.amazon.jdbc.util.Messages; import software.amazon.jdbc.util.SqlMethodAnalyzer; import software.amazon.jdbc.util.WrapperUtils; @@ -121,6 +122,10 @@ public T execute( TelemetryContext telemetryContext = telemetryFactory.openTelemetryContext( this.pluginService.getTargetName(), TelemetryTraceLevel.NESTED); + // Check previous autocommit value before calling jdbcMethodFunc. 
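Capturing the flag first matters because jdbcMethodFunc itself may flip the autocommit state: once setAutoCommit(true) has run, the false-to-true transition can no longer be observed on the connection. The JDBC rule being relied on, as a standalone sketch (placeholder URL and a hypothetical table t):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.Statement;

    public class AutoCommitSwitchExample {
      public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:postgresql://localhost:5432/db", "user", "pass");
            Statement stmt = conn.createStatement()) {
          conn.setAutoCommit(false);
          stmt.executeUpdate("UPDATE t SET c = 1");
          conn.setAutoCommit(true); // per the JDBC spec, this commits the open transaction
        }
      }
    }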
+ final boolean doesSwitchAutoCommitFalseTrue = sqlMethodAnalyzer.doesSwitchAutoCommitFalseTrue( + this.pluginService.getCurrentConnection(), methodName, jdbcMethodArgs); + T result; try { result = jdbcMethodFunc.call(); @@ -143,8 +148,7 @@ public T execute( } else if ( sqlMethodAnalyzer.doesCloseTransaction(currentConn, methodName, jdbcMethodArgs) // According to the JDBC spec, transactions are committed if autocommit is switched from false to true. - || sqlMethodAnalyzer.doesSwitchAutoCommitFalseTrue(currentConn, methodName, - jdbcMethodArgs)) { + || doesSwitchAutoCommitFalseTrue) { this.pluginManagerService.setInTransaction(false); } @@ -190,9 +194,9 @@ private Connection connectInternal( TelemetryContext telemetryContext = telemetryFactory.openTelemetryContext( connProvider.getTargetName(), TelemetryTraceLevel.NESTED); - Connection conn; + ConnectionInfo connectionInfo; try { - conn = connProvider.connect( + connectionInfo = connProvider.connect( driverProtocol, this.pluginService.getDialect(), this.pluginService.getTargetDriverDialect(), @@ -204,14 +208,17 @@ private Connection connectInternal( } } - this.connProviderManager.initConnection(conn, driverProtocol, hostSpec, props); + this.pluginManagerService.setIsPooledConnection(connectionInfo.isPooled()); + this.connProviderManager.initConnection(connectionInfo.getConnection(), driverProtocol, hostSpec, props); - this.pluginService.setAvailability(hostSpec.asAliases(), HostAvailability.AVAILABLE); + if (connectionInfo.getConnection() != null) { + this.pluginService.setAvailability(hostSpec.asAliases(), HostAvailability.AVAILABLE); + } if (isInitialConnection) { - this.pluginService.updateDialect(conn); + this.pluginService.updateDialect(connectionInfo.getConnection()); } - return conn; + return connectionInfo.getConnection(); } @Override diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/BlueGreenInterimStatus.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/BlueGreenInterimStatus.java index 63a2cef2a..4a49d57a6 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/BlueGreenInterimStatus.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/BlueGreenInterimStatus.java @@ -23,8 +23,8 @@ import java.util.Set; import java.util.stream.Collectors; import software.amazon.jdbc.HostSpec; +import software.amazon.jdbc.util.LogUtils; import software.amazon.jdbc.util.StringUtils; -import software.amazon.jdbc.util.Utils; public class BlueGreenInterimStatus { public BlueGreenPhase blueGreenPhase; @@ -74,8 +74,8 @@ public String toString() { .map(x -> String.format("%s -> %s", x.getKey(), x.getValue())) .collect(Collectors.joining("\n ")); String allHostNamesStr = String.join("\n ", this.hostNames); - String startTopologyStr = Utils.logTopology(this.startTopology); - String currentTopologyStr = Utils.logTopology(this.currentTopology); + String startTopologyStr = LogUtils.logTopology(this.startTopology); + String currentTopologyStr = LogUtils.logTopology(this.currentTopology); return String.format("%s [\n" + " phase %s, \n" + " version '%s', \n" diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/BlueGreenStatusMonitor.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/BlueGreenStatusMonitor.java index 33fb2fd4d..19cd1ef6d 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/BlueGreenStatusMonitor.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/BlueGreenStatusMonitor.java @@ -43,12 +43,12 @@ 
import java.util.stream.Collectors; import org.checkerframework.checker.nullness.qual.NonNull; import org.checkerframework.checker.nullness.qual.Nullable; -import software.amazon.jdbc.HostListProvider; import software.amazon.jdbc.HostSpec; import software.amazon.jdbc.HostSpecBuilder; import software.amazon.jdbc.PluginService; import software.amazon.jdbc.dialect.BlueGreenDialect; import software.amazon.jdbc.hostavailability.SimpleHostAvailabilityStrategy; +import software.amazon.jdbc.hostlistprovider.HostListProvider; import software.amazon.jdbc.hostlistprovider.RdsHostListProvider; import software.amazon.jdbc.plugin.iam.IamAuthConnectionPlugin; import software.amazon.jdbc.util.ConnectionUrlParser; @@ -548,16 +548,15 @@ protected void openConnection() { this.openConnectionFuture = openConnectionExecutorService.submit(() -> { - HostSpec connectionHostSpecCopy = this.connectionHostSpec.get(); - String connectedIpAddressCopy = this.connectedIpAddress.get(); - - if (connectionHostSpecCopy == null) { + if (this.connectionHostSpec.get() == null) { this.connectionHostSpec.set(this.initialHostSpec); - connectionHostSpecCopy = this.initialHostSpec; this.connectedIpAddress.set(null); - connectedIpAddressCopy = null; this.connectionHostSpecCorrect.set(false); } + + HostSpec connectionHostSpecCopy = this.connectionHostSpec.get(); + String connectedIpAddressCopy = this.connectedIpAddress.get(); + try { if (this.useIpAddress.get() && connectedIpAddressCopy != null) { @@ -628,7 +627,7 @@ protected void initHostListProvider() { if (connectionHostSpecCopy != null) { String hostListProviderUrl = String.format("%s%s/", protocol, connectionHostSpecCopy.getHostAndPort()); this.hostListProvider = this.pluginService.getDialect() - .getHostListProvider() + .getHostListProviderSupplier() .getProvider( hostListProperties, hostListProviderUrl, diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/BlueGreenStatusProvider.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/BlueGreenStatusProvider.java index 1cb3eb6ed..76405657c 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/BlueGreenStatusProvider.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/BlueGreenStatusProvider.java @@ -967,11 +967,7 @@ protected void registerIamHost(String connectHost, String iamHost) { boolean differentNodeNames = connectHost != null && !connectHost.equals(iamHost); if (differentNodeNames) { - boolean alreadyChangedName = this.iamHostSuccessfulConnects - .computeIfAbsent(connectHost, (key) -> ConcurrentHashMap.newKeySet()) - .contains(iamHost); - - if (!alreadyChangedName) { + if (!isAlreadySuccessfullyConnected(connectHost, iamHost)) { this.greenNodeChangeNameTimes.computeIfAbsent(connectHost, (key) -> Instant.now()); LOGGER.finest(() -> Messages.get("bgd.greenNodeChangedName", new Object[] {connectHost, iamHost})); } diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/customendpoint/CustomEndpointPlugin.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/customendpoint/CustomEndpointPlugin.java index adfb408e9..49a9dbc9a 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/customendpoint/CustomEndpointPlugin.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/customendpoint/CustomEndpointPlugin.java @@ -19,6 +19,7 @@ import java.sql.Connection; import java.sql.SQLException; import java.util.Collections; +import java.util.EnumSet; import java.util.HashSet; import java.util.Properties; import java.util.Set; @@ -53,8 +54,8 @@ 
public class CustomEndpointPlugin extends AbstractConnectionPlugin { private static final Logger LOGGER = Logger.getLogger(CustomEndpointPlugin.class.getName()); protected static final String TELEMETRY_WAIT_FOR_INFO_COUNTER = "customEndpoint.waitForInfo.counter"; protected static final RegionUtils regionUtils = new RegionUtils(); - protected static final Set monitorErrorResponses = - new HashSet<>(Collections.singletonList(MonitorErrorResponse.RECREATE)); + protected static final EnumSet monitorErrorResponses = + EnumSet.of(MonitorErrorResponse.RECREATE); public static final AwsWrapperProperty CUSTOM_ENDPOINT_INFO_REFRESH_RATE_MS = new AwsWrapperProperty( "customEndpointInfoRefreshRateMs", "30000", diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/failover/ClusterAwareReaderFailoverHandler.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/failover/ClusterAwareReaderFailoverHandler.java index 6b8661e02..1b5078d26 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/failover/ClusterAwareReaderFailoverHandler.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/failover/ClusterAwareReaderFailoverHandler.java @@ -317,7 +317,7 @@ private ReaderFailoverResult getConnectionFromHostGroup(final List hos } } - return new ReaderFailoverResult(null, null, false); + return FAILED_READER_FAILOVER_RESULT; } finally { executor.shutdownNow(); } @@ -364,7 +364,7 @@ private ReaderFailoverResult getResultFromNextTaskBatch( return result; } } - return new ReaderFailoverResult(null, null, false); + return FAILED_READER_FAILOVER_RESULT; } private ReaderFailoverResult getNextResult(final CompletionService service) diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/failover/ClusterAwareWriterFailoverHandler.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/failover/ClusterAwareWriterFailoverHandler.java index 8504842c6..a6eafd1d3 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/failover/ClusterAwareWriterFailoverHandler.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/failover/ClusterAwareWriterFailoverHandler.java @@ -37,6 +37,7 @@ import software.amazon.jdbc.hostavailability.HostAvailability; import software.amazon.jdbc.util.ExecutorFactory; import software.amazon.jdbc.util.FullServicesContainer; +import software.amazon.jdbc.util.LogUtils; import software.amazon.jdbc.util.Messages; import software.amazon.jdbc.util.PropertyUtils; import software.amazon.jdbc.util.ServiceUtility; @@ -465,7 +466,7 @@ private boolean refreshTopologyAndConnectToNewWriter() throws InterruptedExcepti if (allowOldWriter || !isSame(writerCandidate, this.originalWriterHost)) { // new writer is available, and it's different from the previous writer - LOGGER.finest(() -> Utils.logTopology(this.currentTopology, "[TaskB] Topology:")); + LOGGER.finest(() -> LogUtils.logTopology(this.currentTopology, "[TaskB] Topology:")); if (connectToWriter(writerCandidate)) { return true; } diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/failover/FailoverConnectionPlugin.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/failover/FailoverConnectionPlugin.java index ef2f95550..7928912fe 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/failover/FailoverConnectionPlugin.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/failover/FailoverConnectionPlugin.java @@ -33,7 +33,6 @@ import java.util.logging.Logger; import org.checkerframework.checker.nullness.qual.Nullable; import software.amazon.jdbc.AwsWrapperProperty; -import 
software.amazon.jdbc.HostListProviderService; import software.amazon.jdbc.HostRole; import software.amazon.jdbc.HostSpec; import software.amazon.jdbc.JdbcCallable; @@ -43,10 +42,12 @@ import software.amazon.jdbc.PluginService; import software.amazon.jdbc.PropertyDefinition; import software.amazon.jdbc.hostavailability.HostAvailability; +import software.amazon.jdbc.hostlistprovider.HostListProviderService; import software.amazon.jdbc.plugin.AbstractConnectionPlugin; import software.amazon.jdbc.plugin.staledns.AuroraStaleDnsHelper; import software.amazon.jdbc.targetdriverdialect.TargetDriverDialect; import software.amazon.jdbc.util.FullServicesContainer; +import software.amazon.jdbc.util.LogUtils; import software.amazon.jdbc.util.Messages; import software.amazon.jdbc.util.RdsUrlType; import software.amazon.jdbc.util.RdsUtils; @@ -351,30 +352,34 @@ public void notifyNodeListChanged(final Map> return; } - if (LOGGER.isLoggable(Level.FINEST)) { - final StringBuilder sb = new StringBuilder("Changes:"); - for (final Map.Entry> change : changes.entrySet()) { - if (sb.length() > 0) { - sb.append("\n"); + try { + if (LOGGER.isLoggable(Level.FINEST)) { + final StringBuilder sb = new StringBuilder("Changes:"); + for (final Map.Entry> change : changes.entrySet()) { + if (sb.length() > 0) { + sb.append("\n"); + } + sb.append(String.format("\tHost '%s': %s", change.getKey(), change.getValue())); } - sb.append(String.format("\tHost '%s': %s", change.getKey(), change.getValue())); + LOGGER.finest(sb.toString()); } - LOGGER.finest(sb.toString()); - } - final HostSpec currentHost = this.pluginService.getCurrentHostSpec(); - final String url = currentHost.getUrl(); - if (isNodeStillValid(url, changes)) { - return; - } - - for (final String alias : currentHost.getAliases()) { - if (isNodeStillValid(alias + "/", changes)) { + final HostSpec currentHost = this.pluginService.getCurrentHostSpec(); + final String url = currentHost.getUrl(); + if (isNodeStillValid(url, changes)) { return; } - } - LOGGER.fine(() -> Messages.get("Failover.invalidNode", new Object[]{currentHost})); + for (final String alias : currentHost.getAliases()) { + if (isNodeStillValid(alias + "/", changes)) { + return; + } + } + + LOGGER.fine(() -> Messages.get("Failover.invalidNode", new Object[]{currentHost})); + } finally { + this.staleDnsHelper.notifyNodeListChanged(changes); + } } private boolean isNodeStillValid(final String node, final Map> changes) { @@ -388,6 +393,7 @@ private boolean isNodeStillValid(final String node, final Map(remainingReaders), Messages.get("Failover.errorSelectingReaderHost", new Object[]{ex.getMessage()}))); break; @@ -437,7 +443,7 @@ protected ReaderFailoverResult getReaderFailoverConnection(long failoverEndTimeN if (readerCandidate == null) { LOGGER.finest( - Utils.logTopology(new ArrayList<>(remainingReaders), Messages.get("Failover.readerCandidateNull"))); + LogUtils.logTopology(new ArrayList<>(remainingReaders), Messages.get("Failover.readerCandidateNull"))); break; } @@ -558,7 +564,7 @@ protected void failoverWriter() throws SQLException { if (this.failoverWriterFailedCounter != null) { this.failoverWriterFailedCounter.inc(); } - String message = Utils.logTopology(updatedHosts, Messages.get("Failover.noWriterHost")); + String message = LogUtils.logTopology(updatedHosts, Messages.get("Failover.noWriterHost")); LOGGER.severe(message); throw new FailoverFailedSQLException(message); } @@ -568,7 +574,7 @@ protected void failoverWriter() throws SQLException { if (this.failoverWriterFailedCounter != null) { 
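Further down, the IamAuthConnectionPlugin hunk introduces iamAccessTokenPropertyName, which redirects the generated IAM token away from the standard password property. A configuration sketch; the target property name accessToken is hypothetical and depends on what the underlying driver actually expects:

    import java.util.Properties;

    public class IamTokenPropertyExample {
      public static void main(String[] args) {
        Properties props = new Properties();
        props.setProperty("wrapperPlugins", "iam");
        props.setProperty("user", "iam_db_user");
        // Defaults to the standard password property; "accessToken" here is illustrative only.
        props.setProperty("iamAccessTokenPropertyName", "accessToken");
      }
    }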
        this.failoverWriterFailedCounter.inc();
      }
-      String topologyString = Utils.logTopology(allowedHosts, "");
+      String topologyString = LogUtils.logTopology(allowedHosts, "");
       LOGGER.severe(Messages.get("Failover.newWriterNotAllowed",
           new Object[] {writerCandidate.getUrl(), topologyString}));
       throw new FailoverFailedSQLException(
@@ -696,7 +702,14 @@ protected boolean shouldExceptionTriggerConnectionSwitch(final Throwable t) {
       return false;
     }
 
-    return this.pluginService.isNetworkException(t, this.pluginService.getTargetDriverDialect());
+    if (this.pluginService.isNetworkException(t, this.pluginService.getTargetDriverDialect())) {
+      return true;
+    }
+
+    // For STRICT_WRITER failover mode, when a connection exception indicates that the connection is
+    // in read-only mode, initiate a failover by returning true.
+    return this.failoverMode == FailoverMode.STRICT_WRITER
+        && this.pluginService.isReadOnlyConnectionException(t, this.pluginService.getTargetDriverDialect());
   }
 
   /**
@@ -792,4 +805,9 @@ public Connection connect(
 
     return conn;
   }
+
+  @Override
+  public void notifyNodeListChanged(final Map<String, EnumSet<NodeChangeOptions>> changes) {
+    this.staleDnsHelper.notifyNodeListChanged(changes);
+  }
 }
diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/iam/IamAuthConnectionPlugin.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/iam/IamAuthConnectionPlugin.java
index 5541ac917..f8a2be272 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/plugin/iam/IamAuthConnectionPlugin.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/iam/IamAuthConnectionPlugin.java
@@ -72,6 +72,10 @@ public class IamAuthConnectionPlugin extends AbstractConnectionPlugin {
       "iamExpiration", String.valueOf(DEFAULT_TOKEN_EXPIRATION_SEC),
       "IAM token cache expiration in seconds");
 
+  public static final AwsWrapperProperty IAM_TOKEN_PROPERTY_NAME = new AwsWrapperProperty(
+      "iamAccessTokenPropertyName", PropertyDefinition.PASSWORD.name,
+      "Overrides the default IAM access token property name");
+
   protected static final RegionUtils regionUtils = new RegionUtils();
   protected final PluginService pluginService;
   protected final RdsUtils rdsUtils = new RdsUtils();
@@ -121,6 +125,10 @@ private Connection connectInternal(String driverProtocol, HostSpec hostSpec, Pro
       throw new SQLException(PropertyDefinition.USER.name + " is null or empty.");
     }
 
+    if (StringUtils.isNullOrEmpty(IAM_TOKEN_PROPERTY_NAME.getString(props))) {
+      throw new SQLException(IAM_TOKEN_PROPERTY_NAME.name + " is null or empty.");
+    }
+
     String host = IamAuthUtils.getIamHost(IAM_HOST.getString(props), hostSpec);
 
     int port = IamAuthUtils.getIamPort(
@@ -149,7 +157,7 @@ private Connection connectInternal(String driverProtocol, HostSpec hostSpec, Pro
           () -> Messages.get(
               "AuthenticationToken.useCachedToken",
               new Object[] {tokenInfo.getToken()}));
-      PropertyDefinition.PASSWORD.set(props, tokenInfo.getToken());
+      props.setProperty(IAM_TOKEN_PROPERTY_NAME.getString(props), tokenInfo.getToken());
     } else {
       final Instant tokenExpiry = Instant.now().plus(tokenExpirationSec, ChronoUnit.SECONDS);
       if (this.fetchTokenCounter != null) {
@@ -167,7 +175,8 @@ private Connection connectInternal(String driverProtocol, HostSpec hostSpec, Pro
           () -> Messages.get(
               "AuthenticationToken.generatedNewToken",
               new Object[] {token}));
-      PropertyDefinition.PASSWORD.set(props, token);
+
+      props.setProperty(IAM_TOKEN_PROPERTY_NAME.getString(props), token);
       IamAuthCacheHolder.tokenCache.put(
           cacheKey,
           new TokenInfo(token, tokenExpiry));
@@ -206,7 +215,7 @@ private Connection connectInternal(String driverProtocol, HostSpec hostSpec, Pro
           () -> Messages.get(
               "AuthenticationToken.generatedNewToken",
               new Object[] {token}));
-      PropertyDefinition.PASSWORD.set(props, token);
+      props.setProperty(IAM_TOKEN_PROPERTY_NAME.getString(props), token);
       IamAuthCacheHolder.tokenCache.put(
           cacheKey,
           new TokenInfo(token, tokenExpiry));
diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/limitless/LimitlessRouterMonitor.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/limitless/LimitlessRouterMonitor.java
index f4075a285..9264e1603 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/plugin/limitless/LimitlessRouterMonitor.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/limitless/LimitlessRouterMonitor.java
@@ -26,9 +26,9 @@
 import org.checkerframework.checker.nullness.qual.NonNull;
 import software.amazon.jdbc.HostSpec;
 import software.amazon.jdbc.util.FullServicesContainer;
+import software.amazon.jdbc.util.LogUtils;
 import software.amazon.jdbc.util.Messages;
 import software.amazon.jdbc.util.PropertyUtils;
-import software.amazon.jdbc.util.Utils;
 import software.amazon.jdbc.util.monitoring.AbstractMonitor;
 import software.amazon.jdbc.util.storage.StorageService;
 import software.amazon.jdbc.util.telemetry.TelemetryContext;
@@ -116,7 +116,7 @@ public void monitor() {
         List<HostSpec> newLimitlessRouters = queryHelper.queryForLimitlessRouters(this.monitoringConn, this.hostSpec.getPort());
         this.storageService.set(this.limitlessRouterCacheKey, new LimitlessRouters(newLimitlessRouters));
-        LOGGER.finest(Utils.logTopology(newLimitlessRouters, "[limitlessRouterMonitor] Topology:"));
+        LOGGER.finest(LogUtils.logTopology(newLimitlessRouters, "[limitlessRouterMonitor] Topology:"));
         TimeUnit.MILLISECONDS.sleep(this.intervalMs); // do not include this in the telemetry
       } catch (final Exception ex) {
         if (telemetryContext != null) {
diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/limitless/LimitlessRouterServiceImpl.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/limitless/LimitlessRouterServiceImpl.java
index ae4e7b026..5bfecd9dc 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/plugin/limitless/LimitlessRouterServiceImpl.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/limitless/LimitlessRouterServiceImpl.java
@@ -17,12 +17,10 @@
 package software.amazon.jdbc.plugin.limitless;
 
 import java.sql.SQLException;
-import java.util.Collections;
-import java.util.HashSet;
+import java.util.EnumSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Properties;
-import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.locks.ReentrantLock;
@@ -51,8 +49,8 @@ public class LimitlessRouterServiceImpl implements LimitlessRouterService {
       "600000", // 10min
       "Interval in milliseconds for a Limitless router monitor to be considered inactive and to be disposed.");
   protected static final Map<String, ReentrantLock> forceGetLimitlessRoutersLockMap = new ConcurrentHashMap<>();
-  protected static final Set<MonitorErrorResponse> monitorErrorResponses =
-      new HashSet<>(Collections.singletonList(MonitorErrorResponse.RECREATE));
+  protected static final EnumSet<MonitorErrorResponse> monitorErrorResponses =
+      EnumSet.of(MonitorErrorResponse.RECREATE);
   protected final FullServicesContainer servicesContainer;
   protected final PluginService pluginService;
   protected final LimitlessQueryHelper queryHelper;
diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/readwritesplitting/ReadWriteSplittingPlugin.java
b/wrapper/src/main/java/software/amazon/jdbc/plugin/readwritesplitting/ReadWriteSplittingPlugin.java index 2d25675e2..899f6d97a 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/readwritesplitting/ReadWriteSplittingPlugin.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/readwritesplitting/ReadWriteSplittingPlugin.java @@ -24,11 +24,11 @@ import java.util.List; import java.util.Properties; import java.util.Set; +import java.util.concurrent.TimeUnit; import java.util.logging.Level; import java.util.logging.Logger; import org.checkerframework.checker.nullness.qual.NonNull; import software.amazon.jdbc.AwsWrapperProperty; -import software.amazon.jdbc.HostListProviderService; import software.amazon.jdbc.HostRole; import software.amazon.jdbc.HostSpec; import software.amazon.jdbc.JdbcCallable; @@ -38,8 +38,11 @@ import software.amazon.jdbc.PluginService; import software.amazon.jdbc.PropertyDefinition; import software.amazon.jdbc.cleanup.CanReleaseResources; +import software.amazon.jdbc.hostlistprovider.HostListProviderService; import software.amazon.jdbc.plugin.AbstractConnectionPlugin; import software.amazon.jdbc.plugin.failover.FailoverSQLException; +import software.amazon.jdbc.util.CacheItem; +import software.amazon.jdbc.util.LogUtils; import software.amazon.jdbc.util.Messages; import software.amazon.jdbc.util.SqlState; import software.amazon.jdbc.util.Utils; @@ -81,10 +84,10 @@ public class ReadWriteSplittingPlugin extends AbstractConnectionPlugin private volatile boolean inReadWriteSplit = false; private HostListProviderService hostListProviderService; private Connection writerConnection; - private Connection readerConnection; private HostSpec readerHostSpec; private boolean isReaderConnFromInternalPool; private boolean isWriterConnFromInternalPool; + private CacheItem readerConnection; public static final AwsWrapperProperty READER_HOST_SELECTOR_STRATEGY = new AwsWrapperProperty( @@ -92,6 +95,13 @@ public class ReadWriteSplittingPlugin extends AbstractConnectionPlugin "random", "The strategy that should be used to select a new reader host."); + public static final AwsWrapperProperty CACHED_READER_KEEP_ALIVE_TIMEOUT = + new AwsWrapperProperty( + "cachedReaderKeepAliveTimeoutMs", + "0", + "The time in milliseconds to keep a reader connection alive in the cache. 
" + + "Default value 0 means the Wrapper will keep reusing the same cached reader connection."); + static { PropertyDefinition.registerPluginProperties(ReadWriteSplittingPlugin.class); } @@ -114,7 +124,7 @@ public class ReadWriteSplittingPlugin extends AbstractConnectionPlugin this(pluginService, properties); this.hostListProviderService = hostListProviderService; this.writerConnection = writerConnection; - this.readerConnection = readerConnection; + this.readerConnection = new CacheItem<>(readerConnection, CACHED_READER_KEEP_ALIVE_TIMEOUT.getLong(properties)); } @Override @@ -147,7 +157,7 @@ public Connection connect( if (!pluginService.acceptsStrategy(hostSpec.getRole(), this.readerSelectorStrategy)) { throw new UnsupportedOperationException( Messages.get("ReadWriteSplittingPlugin.unsupportedHostSpecSelectorStrategy", - new Object[] { this.readerSelectorStrategy })); + new Object[] {this.readerSelectorStrategy})); } final Connection currentConnection = connectFunc.call(); @@ -209,8 +219,8 @@ public T execute( if (this.writerConnection != null && !this.writerConnection.isClosed()) { this.writerConnection.clearWarnings(); } - if (this.readerConnection != null && !this.readerConnection.isClosed()) { - this.readerConnection.clearWarnings(); + if (this.readerConnection != null && isConnectionUsable(this.readerConnection.get())) { + this.readerConnection.get().clearWarnings(); } } catch (final SQLException e) { throw WrapperUtils.wrapExceptionIfNeeded(exceptionClass, e); @@ -266,9 +276,9 @@ private boolean isReader(final @NonNull HostSpec hostSpec) { return HostRole.READER.equals(hostSpec.getRole()); } - private void getNewWriterConnection(final HostSpec writerHostSpec) throws SQLException { + private void initializeWriterConnection(final HostSpec writerHostSpec) throws SQLException { final Connection conn = this.pluginService.connect(writerHostSpec, this.properties, this); - this.isWriterConnFromInternalPool = this.pluginService.isPooledConnectionProvider(writerHostSpec, this.properties); + this.isWriterConnFromInternalPool = Boolean.TRUE.equals(this.pluginService.isPooledConnection()); setWriterConnection(conn, writerHostSpec); switchCurrentConnectionTo(this.writerConnection, writerHostSpec); } @@ -280,17 +290,18 @@ private void setWriterConnection(final Connection writerConnection, () -> Messages.get( "ReadWriteSplittingPlugin.setWriterConnection", new Object[] { - writerHostSpec.getUrl()})); + writerHostSpec.getHostAndPort()})); } private void setReaderConnection(final Connection conn, final HostSpec host) { - this.readerConnection = conn; + closeReaderConnectionIfIdle(this.readerConnection); + this.readerConnection = new CacheItem<>(conn, this.getKeepAliveTimeout(host)); this.readerHostSpec = host; LOGGER.finest( () -> Messages.get( "ReadWriteSplittingPlugin.setReaderConnection", new Object[] { - host.getUrl()})); + host.getHostAndPort()})); } void switchConnectionIfRequired(final boolean readOnly) throws SQLException { @@ -321,7 +332,7 @@ void switchConnectionIfRequired(final boolean readOnly) throws SQLException { } catch (final SQLException e) { if (!isConnectionUsable(currentConnection)) { logAndThrowException( - Messages.get("ReadWriteSplittingPlugin.errorSwitchingToReader", new Object[] { e.getMessage() }), + Messages.get("ReadWriteSplittingPlugin.errorSwitchingToReader", new Object[] {e.getMessage()}), SqlState.CONNECTION_UNABLE_TO_CONNECT, e); return; @@ -332,7 +343,7 @@ void switchConnectionIfRequired(final boolean readOnly) throws SQLException { 
"ReadWriteSplittingPlugin.fallbackToWriter", new Object[] { e.getMessage(), - this.pluginService.getCurrentHostSpec().getUrl()})); + this.pluginService.getCurrentHostSpec().getHostAndPort()})); } } } else { @@ -383,17 +394,17 @@ private void switchToWriterConnection( final HostSpec writerHost = getWriter(hosts); this.inReadWriteSplit = true; if (!isConnectionUsable(this.writerConnection)) { - getNewWriterConnection(writerHost); + initializeWriterConnection(writerHost); } else { switchCurrentConnectionTo(this.writerConnection, writerHost); } if (this.isReaderConnFromInternalPool) { - this.closeConnectionIfIdle(this.readerConnection); + this.closeReaderConnectionIfIdle(this.readerConnection); } LOGGER.finer(() -> Messages.get("ReadWriteSplittingPlugin.switchedFromReaderToWriter", - new Object[] {writerHost.getUrl()})); + new Object[] {writerHost.getHostAndPort()})); } private void switchCurrentConnectionTo( @@ -409,7 +420,7 @@ private void switchCurrentConnectionTo( LOGGER.finest(() -> Messages.get( "ReadWriteSplittingPlugin.settingCurrentConnection", new Object[] { - newConnectionHost.getUrl()})); + newConnectionHost.getHostAndPort()})); } private void switchToReaderConnection(final List hosts) @@ -425,37 +436,35 @@ private void switchToReaderConnection(final List hosts) LOGGER.finest( Messages.get( "ReadWriteSplittingPlugin.previousReaderNotAllowed", - new Object[] {this.readerHostSpec, Utils.logTopology(hosts, "")})); - closeConnectionIfIdle(this.readerConnection); + new Object[] {this.readerHostSpec, LogUtils.logTopology(hosts, "")})); + closeReaderConnectionIfIdle(this.readerConnection); } this.inReadWriteSplit = true; - if (!isConnectionUsable(this.readerConnection)) { + if (this.readerConnection == null || !isConnectionUsable(this.readerConnection.get())) { initializeReaderConnection(hosts); } else { try { - switchCurrentConnectionTo(this.readerConnection, this.readerHostSpec); + switchCurrentConnectionTo(this.readerConnection.get(), this.readerHostSpec); LOGGER.finer(() -> Messages.get("ReadWriteSplittingPlugin.switchedFromWriterToReader", - new Object[] {this.readerHostSpec.getUrl()})); + new Object[] {this.readerHostSpec.getHostAndPort()})); } catch (SQLException e) { if (e.getMessage() != null) { LOGGER.warning( () -> Messages.get("ReadWriteSplittingPlugin.errorSwitchingToCachedReaderWithCause", - new Object[] {this.readerHostSpec.getUrl(), e.getMessage()})); + new Object[] {this.readerHostSpec.getHostAndPort(), e.getMessage()})); } else { LOGGER.warning(() -> Messages.get("ReadWriteSplittingPlugin.errorSwitchingToCachedReader", - new Object[] {this.readerHostSpec.getUrl()})); + new Object[] {this.readerHostSpec.getHostAndPort()})); } - this.readerConnection.close(); - this.readerConnection = null; - this.readerHostSpec = null; + closeReaderConnectionIfIdle(this.readerConnection); initializeReaderConnection(hosts); } } if (this.isWriterConnFromInternalPool) { - this.closeConnectionIfIdle(this.writerConnection); + this.closeWriterConnectionIfIdle(this.writerConnection); } } @@ -463,14 +472,14 @@ private void initializeReaderConnection(final @NonNull List hosts) thr if (hosts.size() == 1) { final HostSpec writerHost = getWriter(hosts); if (!isConnectionUsable(this.writerConnection)) { - getNewWriterConnection(writerHost); + initializeWriterConnection(writerHost); } LOGGER.warning(() -> Messages.get("ReadWriteSplittingPlugin.noReadersFound", - new Object[] {writerHost.getUrl()})); + new Object[] {writerHost.getHostAndPort()})); } else { - getNewReaderConnection(); + 
openNewReaderConnection(); LOGGER.finer(() -> Messages.get("ReadWriteSplittingPlugin.switchedFromWriterToReader", - new Object[] {this.readerHostSpec.getUrl()})); + new Object[] {this.readerHostSpec.getHostAndPort()})); } } @@ -483,7 +492,7 @@ private HostSpec getWriter(final @NonNull List hosts) throws SQLExcept return writerHost; } - private void getNewReaderConnection() throws SQLException { + private void openNewReaderConnection() throws SQLException { Connection conn = null; HostSpec readerHost = null; @@ -492,7 +501,7 @@ private void getNewReaderConnection() throws SQLException { HostSpec hostSpec = this.pluginService.getHostSpecByStrategy(HostRole.READER, this.readerSelectorStrategy); try { conn = this.pluginService.connect(hostSpec, this.properties, this); - this.isReaderConnFromInternalPool = this.pluginService.isPooledConnectionProvider(hostSpec, this.properties); + this.isReaderConnFromInternalPool = Boolean.TRUE.equals(this.pluginService.isPooledConnection()); readerHost = hostSpec; break; } catch (final SQLException e) { @@ -501,7 +510,7 @@ private void getNewReaderConnection() throws SQLException { Messages.get( "ReadWriteSplittingPlugin.failedToConnectToReader", new Object[]{ - hostSpec.getUrl()}), + hostSpec.getHostAndPort()}), e); } } @@ -516,15 +525,24 @@ private void getNewReaderConnection() throws SQLException { final HostSpec finalReaderHost = readerHost; LOGGER.finest( () -> Messages.get("ReadWriteSplittingPlugin.successfullyConnectedToReader", - new Object[] {finalReaderHost.getUrl()})); + new Object[] {finalReaderHost.getHostAndPort()})); setReaderConnection(conn, readerHost); - switchCurrentConnectionTo(this.readerConnection, this.readerHostSpec); + switchCurrentConnectionTo(this.readerConnection.get(), this.readerHostSpec); } private boolean isConnectionUsable(final Connection connection) throws SQLException { return connection != null && !connection.isClosed(); } + private long getKeepAliveTimeout(final HostSpec host) { + if (this.pluginService.isPooledConnectionProvider(host, properties)) { + // Let the connection pool handle the lifetime of the reader connection. + return 0; + } + final long keepAliveMs = CACHED_READER_KEEP_ALIVE_TIMEOUT.getLong(properties); + return keepAliveMs > 0 ? System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(keepAliveMs) : 0; + } + @Override public void releaseResources() { closeIdleConnections(); @@ -532,25 +550,37 @@ public void releaseResources() { private void closeIdleConnections() { LOGGER.finest(() -> Messages.get("ReadWriteSplittingPlugin.closingInternalConnections")); - closeConnectionIfIdle(this.readerConnection); - closeConnectionIfIdle(this.writerConnection); + closeReaderConnectionIfIdle(this.readerConnection); + closeWriterConnectionIfIdle(this.writerConnection); } - void closeConnectionIfIdle(final Connection internalConnection) { + void closeReaderConnectionIfIdle(CacheItem readerConnection) { + if (readerConnection == null) { + return; + } + final Connection currentConnection = this.pluginService.getCurrentConnection(); + final Connection readerConnectionCache = readerConnection.get(true); + try { - if (internalConnection != null - && internalConnection != currentConnection - && !internalConnection.isClosed()) { - internalConnection.close(); - if (internalConnection == writerConnection) { - writerConnection = null; - } + if (isConnectionUsable(readerConnectionCache) && readerConnectionCache != currentConnection) { + readerConnectionCache.close(); + } + } catch (SQLException e) { + // Do nothing. 
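getKeepAliveTimeout above turns the cachedReaderKeepAliveTimeoutMs setting into an absolute System.nanoTime() deadline, with 0 meaning no expiry (pooled readers are likewise left to the pool's own lifetime management). Opting in from an application is a single property; a minimal sketch using the property definition above and the readWriteSplitting plugin code name:

    import java.util.Properties;

    public class CachedReaderKeepAliveExample {
      public static void main(String[] args) {
        Properties props = new Properties();
        props.setProperty("wrapperPlugins", "readWriteSplitting");
        // Re-resolve the cached reader after 5 minutes; the default of 0 reuses it indefinitely.
        props.setProperty("cachedReaderKeepAliveTimeoutMs", "300000");
      }
    }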
+ } - if (internalConnection == readerConnection) { - readerConnection = null; - readerHostSpec = null; - } + this.readerConnection = null; + this.readerHostSpec = null; + } + + void closeWriterConnectionIfIdle(final Connection internalConnection) { + final Connection currentConnection = this.pluginService.getCurrentConnection(); + try { + if (isConnectionUsable(internalConnection) + && internalConnection != currentConnection) { + internalConnection.close(); + writerConnection = null; } } catch (final SQLException e) { // ignore @@ -565,6 +595,6 @@ Connection getWriterConnection() { } Connection getReaderConnection() { - return this.readerConnection; + return this.readerConnection == null ? null : this.readerConnection.get(); } } diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/srw/SimpleReadWriteSplittingPlugin.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/srw/SimpleReadWriteSplittingPlugin.java new file mode 100644 index 000000000..c129c17c4 --- /dev/null +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/srw/SimpleReadWriteSplittingPlugin.java @@ -0,0 +1,709 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package software.amazon.jdbc.plugin.srw; + +import java.sql.Connection; +import java.sql.SQLException; +import java.util.Collections; +import java.util.EnumSet; +import java.util.HashSet; +import java.util.Properties; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.logging.Logger; +import org.checkerframework.checker.nullness.qual.NonNull; +import software.amazon.jdbc.AwsWrapperProperty; +import software.amazon.jdbc.HostRole; +import software.amazon.jdbc.HostSpec; +import software.amazon.jdbc.HostSpecBuilder; +import software.amazon.jdbc.JdbcCallable; +import software.amazon.jdbc.JdbcMethod; +import software.amazon.jdbc.NodeChangeOptions; +import software.amazon.jdbc.OldConnectionSuggestedAction; +import software.amazon.jdbc.PluginService; +import software.amazon.jdbc.PropertyDefinition; +import software.amazon.jdbc.cleanup.CanReleaseResources; +import software.amazon.jdbc.hostavailability.HostAvailability; +import software.amazon.jdbc.hostlistprovider.HostListProviderService; +import software.amazon.jdbc.plugin.AbstractConnectionPlugin; +import software.amazon.jdbc.plugin.failover.FailoverSQLException; +import software.amazon.jdbc.plugin.readwritesplitting.ReadWriteSplittingSQLException; +import software.amazon.jdbc.util.FullServicesContainer; +import software.amazon.jdbc.util.Messages; +import software.amazon.jdbc.util.RdsUrlType; +import software.amazon.jdbc.util.RdsUtils; +import software.amazon.jdbc.util.SqlState; +import software.amazon.jdbc.util.StringUtils; +import software.amazon.jdbc.util.WrapperUtils; + +public class SimpleReadWriteSplittingPlugin extends AbstractConnectionPlugin + implements CanReleaseResources { + + private static final Logger LOGGER = Logger.getLogger(SimpleReadWriteSplittingPlugin.class.getName()); + 
private static final Set<String> subscribedMethods =
+      Collections.unmodifiableSet(new HashSet<String>() {
+        {
+          add(JdbcMethod.CONNECT.methodName);
+          add(JdbcMethod.INITHOSTPROVIDER.methodName);
+          add(JdbcMethod.NOTIFYCONNECTIONCHANGED.methodName);
+          add(JdbcMethod.CONNECTION_SETREADONLY.methodName);
+          add(JdbcMethod.CONNECTION_CLEARWARNINGS.methodName);
+          add(JdbcMethod.STATEMENT_EXECUTE.methodName);
+          add(JdbcMethod.STATEMENT_EXECUTEQUERY.methodName);
+          add(JdbcMethod.STATEMENT_EXECUTEBATCH.methodName);
+          add(JdbcMethod.STATEMENT_EXECUTEUPDATE.methodName);
+          add(JdbcMethod.PREPAREDSTATEMENT_EXECUTE.methodName);
+          add(JdbcMethod.PREPAREDSTATEMENT_EXECUTEUPDATE.methodName);
+          add(JdbcMethod.PREPAREDSTATEMENT_EXECUTELARGEUPDATE.methodName);
+          add(JdbcMethod.PREPAREDSTATEMENT_EXECUTEQUERY.methodName);
+          add(JdbcMethod.PREPAREDSTATEMENT_EXECUTEBATCH.methodName);
+          add(JdbcMethod.CALLABLESTATEMENT_EXECUTE.methodName);
+          add(JdbcMethod.CALLABLESTATEMENT_EXECUTEQUERY.methodName);
+          add(JdbcMethod.CALLABLESTATEMENT_EXECUTELARGEUPDATE.methodName);
+          add(JdbcMethod.CALLABLESTATEMENT_EXECUTEBATCH.methodName);
+          add(JdbcMethod.CALLABLESTATEMENT_EXECUTEUPDATE.methodName);
+          add(JdbcMethod.CONNECTION_SETAUTOCOMMIT.methodName);
+        }
+      });
+
+  private final PluginService pluginService;
+  private final Properties properties;
+  private final RdsUtils rdsUtils = new RdsUtils();
+  private final boolean verifyNewConnections;
+  private volatile boolean inReadWriteSplit = false;
+  private HostListProviderService hostListProviderService;
+  private Connection writerConnection;
+  private Connection readerConnection;
+  private final String writeEndpoint;
+  private final String readEndpoint;
+  private HostSpec readEndpointHostSpec;
+  private HostSpec writeEndpointHostSpec;
+  private final HostRole verifyOpenedConnectionType;
+  private final int connectRetryIntervalMs;
+  private final long connectRetryTimeoutMs;
+  private boolean isReaderConnFromInternalPool;
+  private boolean isWriterConnFromInternalPool;
+
+  public static final AwsWrapperProperty SRW_READ_ENDPOINT =
+      new AwsWrapperProperty(
+          "srwReadEndpoint",
+          null,
+          "The read-only endpoint that should be used to connect to a reader.");
+
+  public static final AwsWrapperProperty SRW_WRITE_ENDPOINT =
+      new AwsWrapperProperty(
+          "srwWriteEndpoint",
+          null,
+          "The read-write/cluster endpoint that should be used to connect to the writer.");
+
+  public static final AwsWrapperProperty VERIFY_NEW_SRW_CONNECTIONS =
+      new AwsWrapperProperty(
+          "verifyNewSrwConnections",
+          "true",
+          "Enables role verification for new connections made by the Simple Read/Write Splitting Plugin.",
+          false,
+          new String[] {
+              "true", "false"
+          });
+
+  public static final AwsWrapperProperty SRW_CONNECT_RETRY_TIMEOUT_MS =
+      new AwsWrapperProperty(
+          "srwConnectRetryTimeoutMs",
+          "60000",
+          "Maximum allowed time for retries when opening a connection.");
+
+  public static final AwsWrapperProperty SRW_CONNECT_RETRY_INTERVAL_MS =
+      new AwsWrapperProperty(
+          "srwConnectRetryIntervalMs",
+          "1000",
+          "Time between each retry when opening a connection.");
+
+  public static final AwsWrapperProperty VERIFY_INITIAL_CONNECTION_TYPE =
+      new AwsWrapperProperty(
+          "verifyInitialConnectionType",
+          null,
+          "Force to verify the initial connection to be either a writer or a reader.");
+
+  static {
+    PropertyDefinition.registerPluginProperties(SimpleReadWriteSplittingPlugin.class);
+  }
+
+  SimpleReadWriteSplittingPlugin(final FullServicesContainer servicesContainer, final Properties properties) {
+
this(servicesContainer.getPluginService(), properties); + } + + SimpleReadWriteSplittingPlugin(final PluginService pluginService, final Properties properties) { + this.writeEndpoint = SRW_WRITE_ENDPOINT.getString(properties); + if (StringUtils.isNullOrEmpty(writeEndpoint)) { + throw new + RuntimeException( + Messages.get( + "SimpleReadWriteSplittingPlugin.missingRequiredConfigParameter", + new Object[] {SRW_WRITE_ENDPOINT.name})); + } + this.readEndpoint = SRW_READ_ENDPOINT.getString(properties); + if (StringUtils.isNullOrEmpty(readEndpoint)) { + throw new + RuntimeException( + Messages.get( + "SimpleReadWriteSplittingPlugin.missingRequiredConfigParameter", + new Object[] {SRW_READ_ENDPOINT.name})); + } + this.pluginService = pluginService; + this.properties = properties; + this.verifyNewConnections = VERIFY_NEW_SRW_CONNECTIONS.getBoolean(properties); + this.verifyOpenedConnectionType = + HostRole.verifyConnectionTypeFromValue( + VERIFY_INITIAL_CONNECTION_TYPE.getString(properties)); + this.connectRetryIntervalMs = SRW_CONNECT_RETRY_INTERVAL_MS.getInteger(properties); + this.connectRetryTimeoutMs = SRW_CONNECT_RETRY_TIMEOUT_MS.getInteger(properties); + } + + /** + * For testing purposes only. + */ + SimpleReadWriteSplittingPlugin( + final PluginService pluginService, + final Properties properties, + final HostListProviderService hostListProviderService, + final Connection writerConnection, + final Connection readerConnection, + final HostSpec writeEndpointHostSpec, + final HostSpec readEndpointHostSpec) { + this(pluginService, properties); + this.hostListProviderService = hostListProviderService; + this.writerConnection = writerConnection; + this.readerConnection = readerConnection; + this.writeEndpointHostSpec = writeEndpointHostSpec; + this.readEndpointHostSpec = readEndpointHostSpec; + } + + @Override + public Set getSubscribedMethods() { + return subscribedMethods; + } + + @Override + public void initHostProvider( + final String driverProtocol, + final String initialUrl, + final Properties props, + final HostListProviderService hostListProviderService, + final JdbcCallable initHostProviderFunc) + throws SQLException { + + this.hostListProviderService = hostListProviderService; + initHostProviderFunc.call(); + } + + @Override + public OldConnectionSuggestedAction notifyConnectionChanged( + final EnumSet changes) { + try { + updateInternalConnectionInfo(); + } catch (final SQLException e) { + // ignore + } + + if (this.inReadWriteSplit) { + return OldConnectionSuggestedAction.PRESERVE; + } + return OldConnectionSuggestedAction.NO_OPINION; + } + + @Override + public Connection connect( + final String driverProtocol, + final HostSpec hostSpec, + final Properties props, + final boolean isInitialConnection, + final JdbcCallable connectFunc) + throws SQLException { + + if (!isInitialConnection || !this.verifyNewConnections) { + // No verification required. Continue with a normal workflow. 
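As the constructor above shows, both srwWriteEndpoint and srwReadEndpoint are mandatory, and a missing value fails fast with a RuntimeException rather than at first use. A configuration sketch; the srw plugin code name is an assumption, since plugin registration is not part of this diff:

    import java.util.Properties;

    public class SrwPluginConfigExample {
      public static void main(String[] args) {
        Properties props = new Properties();
        props.setProperty("wrapperPlugins", "srw"); // assumed plugin code name
        // Both endpoints are required; the plugin throws at construction if either is missing.
        props.setProperty("srwWriteEndpoint", "mydb.cluster-XYZ.us-east-1.rds.amazonaws.com");
        props.setProperty("srwReadEndpoint", "mydb.cluster-ro-XYZ.us-east-1.rds.amazonaws.com");
        props.setProperty("verifyNewSrwConnections", "true");
      }
    }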
+ return connectFunc.call(); + } + + final RdsUrlType type = this.rdsUtils.identifyRdsType(hostSpec.getHost()); + + Connection conn = null; + if (type == RdsUrlType.RDS_WRITER_CLUSTER + || type == RdsUrlType.RDS_GLOBAL_WRITER_CLUSTER + || this.verifyOpenedConnectionType == HostRole.WRITER) { + conn = this.getVerifiedConnection(props, hostSpec, HostRole.WRITER, connectFunc); + } else if (type == RdsUrlType.RDS_READER_CLUSTER + || this.verifyOpenedConnectionType == HostRole.READER) { + conn = this.getVerifiedConnection(props, hostSpec, HostRole.READER, connectFunc); + } + + if (conn == null) { + // Continue with a normal workflow. + conn = connectFunc.call(); + } + this.setInitialConnectionHostSpec(conn, hostSpec); + return conn; + } + + private Connection getVerifiedConnection( + final Properties props, + final HostSpec hostSpec, + final HostRole hostRole, + final JdbcCallable connectFunc) + throws SQLException { + + final long endTimeNano = System.nanoTime() + + TimeUnit.MILLISECONDS.toNanos(this.connectRetryTimeoutMs); + + Connection candidateConn; + + while (System.nanoTime() < endTimeNano) { + + candidateConn = null; + + try { + if (connectFunc != null) { + candidateConn = connectFunc.call(); + } else if (hostSpec != null) { + candidateConn = this.pluginService.connect(hostSpec, props, this); + } else { + // Unable to verify. + break; + } + + if (candidateConn == null || this.pluginService.getHostRole(candidateConn) != hostRole) { + // The connection does not have the desired role. Retry. + this.closeConnection(candidateConn); + this.delay(); + continue; + } + + // Connection is valid and verified. + return candidateConn; + } catch (SQLException ex) { + this.closeConnection(candidateConn); + if (this.pluginService.isLoginException(ex, this.pluginService.getTargetDriverDialect())) { + throw WrapperUtils.wrapExceptionIfNeeded(SQLException.class, ex); + } + this.delay(); + } catch (Throwable ex) { + this.closeConnection(candidateConn); + throw ex; + } + } + + LOGGER.fine( + () -> Messages.get("SimpleReadWriteSplittingPlugin.verificationFailed", + new Object[] {hostRole, this.connectRetryTimeoutMs})); + return null; + } + + private void setInitialConnectionHostSpec(Connection conn, HostSpec hostSpec) { + if (hostSpec == null) { + try { + hostSpec = this.pluginService.identifyConnection(conn); + } catch (Exception e) { + // Ignore error + } + } + + if (hostSpec != null && hostListProviderService != null) { + hostListProviderService.setInitialConnectionHostSpec(hostSpec); + } + } + + @Override + public T execute( + final Class resultClass, + final Class exceptionClass, + final Object methodInvokeOn, + final String methodName, + final JdbcCallable jdbcMethodFunc, + final Object[] args) + throws E { + final Connection conn = WrapperUtils.getConnectionFromSqlObject(methodInvokeOn); + if (conn != null && conn != this.pluginService.getCurrentConnection()) { + LOGGER.fine( + () -> Messages.get("ReadWriteSplittingPlugin.executingAgainstOldConnection", + new Object[] {methodInvokeOn})); + return jdbcMethodFunc.call(); + } + + if (JdbcMethod.CONNECTION_CLEARWARNINGS.methodName.equals(methodName)) { + try { + if (this.writerConnection != null && !this.writerConnection.isClosed()) { + this.writerConnection.clearWarnings(); + } + if (this.readerConnection != null && !this.readerConnection.isClosed()) { + this.readerConnection.clearWarnings(); + } + } catch (final SQLException e) { + throw WrapperUtils.wrapExceptionIfNeeded(exceptionClass, e); + } + } + + if 
(JdbcMethod.CONNECTION_SETREADONLY.methodName.equals(methodName) + && args != null + && args.length > 0) { + try { + switchConnectionIfRequired((Boolean) args[0]); + } catch (final SQLException e) { + throw WrapperUtils.wrapExceptionIfNeeded(exceptionClass, e); + } + } + + try { + return jdbcMethodFunc.call(); + } catch (final Exception e) { + if (e instanceof FailoverSQLException) { + LOGGER.finer( + () -> Messages.get("ReadWriteSplittingPlugin.failoverExceptionWhileExecutingCommand", + new Object[] {methodName})); + closeIdleConnections(); + } else { + LOGGER.finest( + () -> Messages.get("ReadWriteSplittingPlugin.exceptionWhileExecutingCommand", + new Object[] {methodName})); + } + throw e; + } + } + + private void updateInternalConnectionInfo() throws SQLException { + final Connection currentConnection = this.pluginService.getCurrentConnection(); + final HostSpec currentHost = this.pluginService.getCurrentHostSpec(); + if (currentConnection == null || currentHost == null) { + return; + } + + // Only update internal connection info if connection is to the endpoint and different from internal connection. + if (isWriteEndpoint(currentHost) && !currentConnection.equals(this.writerConnection) + && (!this.verifyNewConnections || this.pluginService.getHostRole(currentConnection) == HostRole.WRITER)) { + setWriterConnection(currentConnection, currentHost); + } else if (isReadEndpoint(currentHost) && !currentConnection.equals(this.readerConnection) + && (!this.verifyNewConnections || this.pluginService.getHostRole(currentConnection) == HostRole.READER)) { + setReaderConnection(currentConnection, currentHost); + } + } + + private boolean isWriteEndpoint(final @NonNull HostSpec hostSpec) { + return this.writeEndpoint.equalsIgnoreCase(hostSpec.getHost()) + || this.writeEndpoint.equalsIgnoreCase(hostSpec.getHostAndPort()); + } + + private boolean isReadEndpoint(final @NonNull HostSpec hostSpec) { + return this.readEndpoint.equalsIgnoreCase(hostSpec.getHost()) + || this.readEndpoint.equalsIgnoreCase(hostSpec.getHostAndPort()); + } + + private void setWriterConnection(final Connection conn, final HostSpec host) { + this.writerConnection = conn; + this.writeEndpointHostSpec = host; + LOGGER.finest( + () -> Messages.get( + "ReadWriteSplittingPlugin.setWriterConnection", + new Object[] { + host.getHostAndPort()})); + } + + private void setReaderConnection(final Connection conn, final HostSpec host) { + this.readerConnection = conn; + this.readEndpointHostSpec = host; + LOGGER.finest( + () -> Messages.get( + "ReadWriteSplittingPlugin.setReaderConnection", + new Object[] { + host.getHostAndPort()})); + } + + void switchConnectionIfRequired(final boolean readOnly) throws SQLException { + final Connection currentConnection = this.pluginService.getCurrentConnection(); + if (currentConnection != null && currentConnection.isClosed()) { + logAndThrowException(Messages.get("ReadWriteSplittingPlugin.setReadOnlyOnClosedConnection"), + SqlState.CONNECTION_NOT_OPEN); + } + + final HostSpec currentHost = this.pluginService.getCurrentHostSpec(); + if (readOnly) { + if (!pluginService.isInTransaction() && !isReadEndpoint(currentHost)) { + try { + switchToReaderConnection(); + } catch (final SQLException e) { + if (!isConnectionUsable(currentConnection)) { + logAndThrowException( + Messages.get("ReadWriteSplittingPlugin.errorSwitchingToReader", new Object[]{e.getMessage()}), + e); + } + // Failed to switch to the reader endpoint. The current connection will be used as a fallback. 
+ LOGGER.fine(() -> Messages.get( + "SimpleReadWriteSplittingPlugin.fallbackToCurrentConnection", + new Object[] { + this.pluginService.getCurrentHostSpec().getHostAndPort(), + e.getMessage()})); + } + } + } else { + if (!isWriteEndpoint(currentHost) && pluginService.isInTransaction()) { + logAndThrowException( + Messages.get("ReadWriteSplittingPlugin.setReadOnlyFalseInTransaction"), + SqlState.ACTIVE_SQL_TRANSACTION); + } + + if (!isWriteEndpoint(currentHost)) { + try { + switchToWriterConnection(); + LOGGER.finer(() -> Messages.get("ReadWriteSplittingPlugin.switchedFromReaderToWriter", + new Object[] {writeEndpointHostSpec.getHostAndPort()})); + } catch (final SQLException e) { + logAndThrowException(Messages.get("ReadWriteSplittingPlugin.errorSwitchingToWriter"), + e); + } + } + } + } + + private void logAndThrowException(final String logMessage) throws SQLException { + LOGGER.severe(logMessage); + throw new ReadWriteSplittingSQLException(logMessage); + } + + private void logAndThrowException(final String logMessage, final SqlState sqlState) + throws SQLException { + LOGGER.severe(logMessage); + throw new ReadWriteSplittingSQLException(logMessage, sqlState.getState()); + } + + private void logAndThrowException( + final String logMessage, final Throwable cause) + throws SQLException { + LOGGER.fine(logMessage); + throw new ReadWriteSplittingSQLException(logMessage, SqlState.CONNECTION_UNABLE_TO_CONNECT.getState(), cause); + } + + private void switchToReaderConnection() throws SQLException { + final Connection currentConnection = this.pluginService.getCurrentConnection(); + final HostSpec currentHost = this.pluginService.getCurrentHostSpec(); + if (isReadEndpoint(currentHost) && isConnectionUsable(currentConnection)) { + // Already connected to the read-only endpoint. + return; + } + + this.inReadWriteSplit = true; + if (!isConnectionUsable(this.readerConnection)) { + initializeReaderConnection(); + } else { + try { + switchCurrentConnectionTo(this.readerConnection, this.readEndpointHostSpec); + LOGGER.finer(() -> Messages.get("ReadWriteSplittingPlugin.switchedFromWriterToReader", + new Object[] {this.readEndpointHostSpec.getHostAndPort()})); + } catch (SQLException e) { + if (e.getMessage() != null) { + LOGGER.warning( + () -> Messages.get("ReadWriteSplittingPlugin.errorSwitchingToCachedReaderWithCause", + new Object[] {this.readEndpointHostSpec.getHostAndPort(), e.getMessage()})); + } else { + LOGGER.warning(() -> Messages.get("ReadWriteSplittingPlugin.errorSwitchingToCachedReader", + new Object[] {this.readEndpointHostSpec.getHostAndPort()})); + } + + this.readerConnection.close(); + this.readerConnection = null; + initializeReaderConnection(); + } + } + + if (this.isWriterConnFromInternalPool) { + this.closeConnectionIfIdle(this.writerConnection); + } + } + + private void switchToWriterConnection() throws SQLException { + final Connection currentConnection = this.pluginService.getCurrentConnection(); + final HostSpec currentHost = this.pluginService.getCurrentHostSpec(); + if (isWriteEndpoint(currentHost) && isConnectionUsable(currentConnection)) { + // Already connected to the cluster/read-write endpoint. 
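+ // No switch is needed in this case; the current connection already satisfies the requested mode.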
+ return; + } + + this.inReadWriteSplit = true; + if (!isConnectionUsable(this.writerConnection)) { + initializeWriterConnection(); + } else { + switchCurrentConnectionTo(this.writerConnection, this.writeEndpointHostSpec); + } + + if (this.isReaderConnFromInternalPool) { + this.closeConnectionIfIdle(this.readerConnection); + } + } + + private void initializeWriterConnection() throws SQLException { + if (this.writeEndpointHostSpec == null) { + this.writeEndpointHostSpec = createHostSpec(this.writeEndpoint, HostRole.WRITER); + } + final Connection conn; + if (this.verifyNewConnections) { + conn = this.getVerifiedConnection( + this.properties, this.writeEndpointHostSpec, HostRole.WRITER, null); + } else { + conn = this.pluginService.connect(this.writeEndpointHostSpec, this.properties, this); + } + + if (conn == null) { + logAndThrowException( + Messages.get("SimpleReadWriteSplittingPlugin.failedToConnectToWriter", + new Object[]{this.writeEndpoint})); + } + + setWriterConnection(conn, writeEndpointHostSpec); + switchCurrentConnectionTo(this.writerConnection, writeEndpointHostSpec); + this.isWriterConnFromInternalPool = Boolean.TRUE.equals(this.pluginService.isPooledConnection()); + } + + private void initializeReaderConnection() throws SQLException { + if (this.readEndpointHostSpec == null) { + this.readEndpointHostSpec = createHostSpec(this.readEndpoint, HostRole.READER); + } + final Connection conn; + + if (this.verifyNewConnections) { + conn = this.getVerifiedConnection( + this.properties, this.readEndpointHostSpec, HostRole.READER, null); + } else { + conn = this.pluginService.connect(this.readEndpointHostSpec, this.properties, this); + } + + if (conn == null) { + logAndThrowException(Messages.get("ReadWriteSplittingPlugin.failedToConnectToReader", + new Object[]{this.readEndpoint}), + SqlState.CONNECTION_UNABLE_TO_CONNECT); + } + + LOGGER.finest( + () -> Messages.get("ReadWriteSplittingPlugin.successfullyConnectedToReader", + new Object[]{readEndpointHostSpec.getHostAndPort()})); + + // Store reader connection for reuse. 
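+ // Caching the reader lets later setReadOnly(true) calls switch back without opening a new + // connection, unless the cached reader has been closed or returned to an internal connection pool.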
+ setReaderConnection(conn, readEndpointHostSpec); + switchCurrentConnectionTo(conn, this.readEndpointHostSpec); + this.isReaderConnFromInternalPool = Boolean.TRUE.equals(this.pluginService.isPooledConnection()); + LOGGER.finer(() -> Messages.get("ReadWriteSplittingPlugin.switchedFromWriterToReader", + new Object[] {readEndpoint})); + } + + private void switchCurrentConnectionTo( + final Connection newConnection, + final HostSpec newConnectionHost) + throws SQLException { + + final Connection currentConnection = this.pluginService.getCurrentConnection(); + if (currentConnection == newConnection) { + return; + } + this.pluginService.setCurrentConnection(newConnection, newConnectionHost); + LOGGER.finest(() -> Messages.get( + "ReadWriteSplittingPlugin.settingCurrentConnection", + new Object[] { + newConnectionHost.getHostAndPort()})); + } + + private HostSpec createHostSpec(String endpoint, HostRole role) { + endpoint = endpoint.trim(); + + String host = endpoint; + int port = this.hostListProviderService.getCurrentHostSpec().getPort(); + int colonIndex = endpoint.lastIndexOf(":"); + if (colonIndex != -1 && endpoint.substring(colonIndex + 1).matches("\\d+")) { + host = endpoint.substring(0, colonIndex); + port = Integer.parseInt(endpoint.substring(colonIndex + 1)); + } + + return new HostSpecBuilder(this.hostListProviderService.getHostSpecBuilder()) + .host(host) + .port(port) + .role(role) + .availability(HostAvailability.AVAILABLE) + .build(); + } + + private boolean isConnectionUsable(final Connection connection) throws SQLException { + return connection != null && !connection.isClosed(); + } + + @Override + public void releaseResources() { + closeIdleConnections(); + } + + private void closeIdleConnections() { + LOGGER.finest(() -> Messages.get("ReadWriteSplittingPlugin.closingInternalConnections")); + closeConnectionIfIdle(this.readerConnection); + closeConnectionIfIdle(this.writerConnection); + this.readerConnection = null; + this.writerConnection = null; + } + + void closeConnectionIfIdle(final Connection internalConnection) { + final Connection currentConnection = this.pluginService.getCurrentConnection(); + try { + if (internalConnection != null + && internalConnection != currentConnection + && !internalConnection.isClosed()) { + internalConnection.close(); + } + } catch (final SQLException e) { + // ignore + } + } + + private void closeConnection(final Connection connection) { + if (connection != null) { + try { + connection.close(); + } catch (final SQLException ex) { + // ignore + } + } + } + + private void delay() { + try { + TimeUnit.MILLISECONDS.sleep(this.connectRetryIntervalMs); + } catch (InterruptedException ex) { + Thread.currentThread().interrupt(); + } + } + + /** + * Methods for testing purposes only. + */ + Connection getWriterConnection() { + return this.writerConnection; + } + + Connection getReaderConnection() { + return this.readerConnection; + } + + HostSpec getReaderHostSpec() { + return this.readEndpointHostSpec; + } + + HostSpec getWriterHostSpec() { + return this.writeEndpointHostSpec; + } +} diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/srw/SimpleReadWriteSplittingPluginFactory.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/srw/SimpleReadWriteSplittingPluginFactory.java new file mode 100644 index 000000000..4f0cdba58 --- /dev/null +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/srw/SimpleReadWriteSplittingPluginFactory.java @@ -0,0 +1,40 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package software.amazon.jdbc.plugin.srw; + +import java.util.Properties; +import software.amazon.jdbc.ConnectionPlugin; +import software.amazon.jdbc.ConnectionPluginFactory; +import software.amazon.jdbc.PluginService; +import software.amazon.jdbc.ServicesContainerPluginFactory; +import software.amazon.jdbc.plugin.failover.FailoverConnectionPlugin; +import software.amazon.jdbc.util.FullServicesContainer; +import software.amazon.jdbc.util.Messages; + +public class SimpleReadWriteSplittingPluginFactory implements ServicesContainerPluginFactory { + @Override + public ConnectionPlugin getInstance(final PluginService pluginService, final Properties props) { + throw new UnsupportedOperationException( + Messages.get( + "ServicesContainerPluginFactory.servicesContainerRequired", new Object[] {"SimpleReadWriteSplittingPlugin"})); + } + + @Override + public ConnectionPlugin getInstance(final FullServicesContainer servicesContainer, final Properties props) { + return new SimpleReadWriteSplittingPlugin(servicesContainer, props); + } +} diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/staledns/AuroraStaleDnsHelper.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/staledns/AuroraStaleDnsHelper.java index 682c3080f..9af641402 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/staledns/AuroraStaleDnsHelper.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/staledns/AuroraStaleDnsHelper.java @@ -25,13 +25,15 @@ import java.util.Map; import java.util.Properties; import java.util.logging.Logger; -import software.amazon.jdbc.HostListProviderService; import software.amazon.jdbc.HostRole; import software.amazon.jdbc.HostSpec; import software.amazon.jdbc.JdbcCallable; import software.amazon.jdbc.NodeChangeOptions; import software.amazon.jdbc.PluginService; +import software.amazon.jdbc.hostlistprovider.HostListProviderService; +import software.amazon.jdbc.util.LogUtils; import software.amazon.jdbc.util.Messages; +import software.amazon.jdbc.util.RdsUrlType; import software.amazon.jdbc.util.RdsUtils; import software.amazon.jdbc.util.Utils; import software.amazon.jdbc.util.telemetry.TelemetryCounter; @@ -66,7 +68,11 @@ public Connection getVerifiedConnection( final Properties props, final JdbcCallable<Connection, SQLException> connectFunc) throws SQLException { - if (!this.rdsUtils.isWriterClusterDns(hostSpec.getHost())) { + final RdsUrlType type = this.rdsUtils.identifyRdsType(hostSpec.getHost()); + + if (type != RdsUrlType.RDS_WRITER_CLUSTER + && type != RdsUrlType.RDS_GLOBAL_WRITER_CLUSTER) { + // It's not a writer cluster endpoint. Continue with a normal workflow.
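+ // Only writer cluster endpoints (now including the Aurora Global Database writer endpoint) + // can resolve to a stale writer after failover; all other URL types skip verification.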
return connectFunc.call(); } @@ -96,7 +102,7 @@ public Connection getVerifiedConnection( this.pluginService.refreshHostList(conn); } - LOGGER.finest(() -> Utils.logTopology(this.pluginService.getAllHosts())); + LOGGER.finest(() -> LogUtils.logTopology(this.pluginService.getAllHosts())); if (this.writerHostSpec == null) { final HostSpec writerCandidate = Utils.getWriter(this.pluginService.getAllHosts()); @@ -144,7 +150,7 @@ public Connection getVerifiedConnection( Messages.get("AuroraStaleDnsHelper.currentWriterNotAllowed", new Object[] { this.writerHostSpec == null ? "" : this.writerHostSpec.getHostAndPort(), - Utils.logTopology(allowedHosts, "")}) + LogUtils.logTopology(allowedHosts, "")}) ); } @@ -178,6 +184,7 @@ public void notifyNodeListChanged(final Map<String, EnumSet<NodeChangeOptions>> LOGGER.finest(() -> Messages.get("AuroraStaleDnsHelper.reset")); this.writerHostSpec = null; this.writerHostAddress = null; + return; } } } diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/staledns/AuroraStaleDnsPlugin.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/staledns/AuroraStaleDnsPlugin.java index a5babfc3c..080d19990 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/staledns/AuroraStaleDnsPlugin.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/staledns/AuroraStaleDnsPlugin.java @@ -25,16 +25,18 @@ import java.util.Properties; import java.util.Set; import java.util.logging.Logger; -import software.amazon.jdbc.HostListProviderService; import software.amazon.jdbc.HostSpec; import software.amazon.jdbc.JdbcCallable; import software.amazon.jdbc.JdbcMethod; import software.amazon.jdbc.NodeChangeOptions; import software.amazon.jdbc.PluginService; +import software.amazon.jdbc.hostlistprovider.HostListProviderService; import software.amazon.jdbc.plugin.AbstractConnectionPlugin; /** - * After Aurora DB cluster fail over is completed and a cluster has elected a new writer node, the corresponding + * Deprecated. Use 'initialConnection' plugin instead. + * + *
<p>
After Aurora DB cluster fail over is completed and a cluster has elected a new writer node, the corresponding * cluster (writer) endpoint contains stale data and points to an old writer node. That old writer node plays * a reader role after fail over and connecting with the cluster endpoint connects to it. In such case a user * application expects a writer connection but practically gets connected to a reader. Any DML statements fail diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/strategy/fastestresponse/HostResponseTimeServiceImpl.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/strategy/fastestresponse/HostResponseTimeServiceImpl.java index 9915391ac..4d90f6d5d 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/strategy/fastestresponse/HostResponseTimeServiceImpl.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/strategy/fastestresponse/HostResponseTimeServiceImpl.java @@ -28,7 +28,6 @@ import software.amazon.jdbc.PluginService; import software.amazon.jdbc.util.FullServicesContainer; import software.amazon.jdbc.util.Messages; -import software.amazon.jdbc.util.storage.SlidingExpirationCacheWithCleanupThread; public class HostResponseTimeServiceImpl implements HostResponseTimeService { diff --git a/wrapper/src/main/java/software/amazon/jdbc/targetdriverdialect/TargetDriverDialectManager.java b/wrapper/src/main/java/software/amazon/jdbc/targetdriverdialect/TargetDriverDialectManager.java index 865dbfe7e..47040059c 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/targetdriverdialect/TargetDriverDialectManager.java +++ b/wrapper/src/main/java/software/amazon/jdbc/targetdriverdialect/TargetDriverDialectManager.java @@ -70,28 +70,6 @@ public class TargetDriverDialectManager implements TargetDriverDialectProvider { PropertyDefinition.registerPluginProperties(TargetDriverDialectManager.class); } - /** - * Sets a custom target driver dialect handler. - * - * @param targetDriverDialect A custom driver dialect to use. - * - * @deprecated Use software.amazon.jdbc.Driver instead - */ - @Deprecated - public static void setCustomDialect(final @NonNull TargetDriverDialect targetDriverDialect) { - software.amazon.jdbc.Driver.setCustomTargetDriverDialect(targetDriverDialect); - } - - /** - * Resets a custom target driver dialect. - * - * @deprecated Use {@link software.amazon.jdbc.Driver#resetCustomTargetDriverDialect()} instead - */ - @Deprecated - public static void resetCustomDialect() { - software.amazon.jdbc.Driver.resetCustomTargetDriverDialect(); - } - @Override public TargetDriverDialect getDialect( final @NonNull Driver driver, diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/CacheItem.java b/wrapper/src/main/java/software/amazon/jdbc/util/CacheItem.java new file mode 100644 index 000000000..31379014b --- /dev/null +++ b/wrapper/src/main/java/software/amazon/jdbc/util/CacheItem.java @@ -0,0 +1,68 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package software.amazon.jdbc.util; + +import java.util.Objects; + +public class CacheItem<V> { + + final V item; + final long expirationTime; + + public CacheItem(final V item, final long expirationTime) { + this.item = item; + this.expirationTime = expirationTime; + } + + public boolean isExpired() { + if (expirationTime <= 0) { + // No expiration time. + return false; + } + return System.nanoTime() > expirationTime; + } + + public V get() { + return get(false); + } + + public V get(final boolean returnExpired) { + return (this.isExpired() && !returnExpired) ? null : item; + } + + @Override + public int hashCode() { + return Objects.hashCode(item); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (!(obj instanceof CacheItem)) { + return false; + } + CacheItem<?> other = (CacheItem<?>) obj; + return Objects.equals(this.item, other.item); + } + + @Override + public String toString() { + return "CacheItem [item=" + item + ", expirationTime=" + expirationTime + "]"; + } +} diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/ConnectionUrlParser.java b/wrapper/src/main/java/software/amazon/jdbc/util/ConnectionUrlParser.java index 435907141..b66f631e4 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/util/ConnectionUrlParser.java +++ b/wrapper/src/main/java/software/amazon/jdbc/util/ConnectionUrlParser.java @@ -43,6 +43,11 @@ public class ConnectionUrlParser { // follows by any char except "/", "?" or "#" + "(?:[/?#].*)?"); // Anything starting with either "/", "?" or "#" + private static final Pattern URL_WITH_REGION_PATTERN = + Pattern.compile( + "^(\\[(?<region>.+)\\])?(?<domain>[a-zA-Z0-9\\?\\.\\-]+)(:(?<port>[0-9]+))?$", + Pattern.CASE_INSENSITIVE); + static final Pattern EMPTY_STRING_IN_QUOTATIONS = Pattern.compile("\"(\\s*)\""); private static final RdsUtils rdsUtils = new RdsUtils(); @@ -90,6 +95,59 @@ public static HostSpec parseHostPortPair(final String url, final HostRole role, return getHostSpec(hostPortPair, role, hostSpecBuilderSupplier.get()); } + /** + * Parse strings in the following formats: + * "url", for example: "instance-1.XYZ.us-east-2.rds.amazonaws.com" + * "url:port", for example: "instance-1.XYZ.us-east-2.rds.amazonaws.com:9999" + * "[region_name]url", for example: "[us-east-2]instance-1.any-domain.com" + * "[region_name]url:port", for example: "[us-east-2]instance-1.any-domain.com:9999" + * + * @param urlWithRegionPrefix URL with region prefix + * @param hostSpecBuilderSupplier A host builder supplier + * @return A pair of region and HostSpec + */ + public static Pair<String, HostSpec> parseHostPortPairWithRegionPrefix( + final String urlWithRegionPrefix, + final Supplier<HostSpecBuilder> hostSpecBuilderSupplier) { + + final Matcher matcher = URL_WITH_REGION_PATTERN.matcher(urlWithRegionPrefix); + if (!matcher.find()) { + throw new IllegalArgumentException( + Messages.get( + "ConnectionUrlParser.cantParseUrl", + new Object[] {urlWithRegionPrefix})); + } + String awsRegion = matcher.group("region"); + final String host = matcher.group("domain"); + final String port = matcher.group("port"); + + if (StringUtils.isNullOrEmpty(host)) { + throw new IllegalArgumentException( + Messages.get( + "ConnectionUrlParser.cantParseHost", + new Object[] {urlWithRegionPrefix})); + } + + if (StringUtils.isNullOrEmpty(awsRegion)) { + awsRegion = rdsUtils.getRdsRegion(host); + if (StringUtils.isNullOrEmpty(awsRegion)) { + throw new IllegalArgumentException( + Messages.get( + "ConnectionUrlParser.cantParseAwsRegion", + new Object[] {urlWithRegionPrefix})); + } + } + + final
RdsUrlType urlType = rdsUtils.identifyRdsType(host); + + // Assign HostRole of READER if using the reader cluster URL, otherwise assume a HostRole of WRITER + final HostRole hostRole = RdsUrlType.RDS_READER_CLUSTER.equals(urlType) ? HostRole.READER : HostRole.WRITER; + final String[] hostPortPair = StringUtils.isNullOrEmpty(port) + ? new String[] { host } + : new String[] { host, port }; + return Pair.create(awsRegion, getHostSpec(hostPortPair, hostRole, hostSpecBuilderSupplier.get())); + } + private static HostSpec getHostSpec(final String[] hostPortPair, final HostRole hostRole, final HostSpecBuilder hostSpecBuilder) { String hostId = rdsUtils.getRdsInstanceId(hostPortPair[0]); diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/FullServicesContainer.java b/wrapper/src/main/java/software/amazon/jdbc/util/FullServicesContainer.java index 373b011cc..f406486e3 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/util/FullServicesContainer.java +++ b/wrapper/src/main/java/software/amazon/jdbc/util/FullServicesContainer.java @@ -18,9 +18,9 @@ import software.amazon.jdbc.ConnectionPluginManager; import software.amazon.jdbc.ConnectionProvider; -import software.amazon.jdbc.HostListProviderService; import software.amazon.jdbc.PluginManagerService; import software.amazon.jdbc.PluginService; +import software.amazon.jdbc.hostlistprovider.HostListProviderService; import software.amazon.jdbc.util.events.EventPublisher; import software.amazon.jdbc.util.monitoring.MonitorService; import software.amazon.jdbc.util.storage.StorageService; diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/FullServicesContainerImpl.java b/wrapper/src/main/java/software/amazon/jdbc/util/FullServicesContainerImpl.java index 9f9cb73fc..7bd479caf 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/util/FullServicesContainerImpl.java +++ b/wrapper/src/main/java/software/amazon/jdbc/util/FullServicesContainerImpl.java @@ -18,9 +18,9 @@ import software.amazon.jdbc.ConnectionPluginManager; import software.amazon.jdbc.ConnectionProvider; -import software.amazon.jdbc.HostListProviderService; import software.amazon.jdbc.PluginManagerService; import software.amazon.jdbc.PluginService; +import software.amazon.jdbc.hostlistprovider.HostListProviderService; import software.amazon.jdbc.util.events.EventPublisher; import software.amazon.jdbc.util.monitoring.MonitorService; import software.amazon.jdbc.util.storage.StorageService; diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/LogUtils.java b/wrapper/src/main/java/software/amazon/jdbc/util/LogUtils.java new file mode 100644 index 000000000..932e4a21e --- /dev/null +++ b/wrapper/src/main/java/software/amazon/jdbc/util/LogUtils.java @@ -0,0 +1,55 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package software.amazon.jdbc.util; + +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; +import org.checkerframework.checker.nullness.qual.Nullable; +import software.amazon.jdbc.HostSpec; + +public class LogUtils { + public static String logTopology(final @Nullable List<HostSpec> hosts) { + return logTopology(hosts, null); + } + + public static String logTopology( + final @Nullable List<HostSpec> hosts, + final @Nullable String messagePrefix) { + + final StringBuilder msg = new StringBuilder(); + if (hosts == null) { + msg.append("<null>"); + } else { + for (final HostSpec host : hosts) { + if (msg.length() > 0) { + msg.append("\n"); + } + msg.append(" ").append(host == null ? "<null>" : host); + } + } + + return Messages.get("Utils.topology", + new Object[] {messagePrefix == null ? "Topology:" : messagePrefix, msg.toString()}); + } + + public static String toLogString(Map<String, HostSpec> map) { + return map.entrySet().stream() + .map(x -> String.format("\t[%s] -> %s", x.getKey(), x.getValue().getHostAndPort())) + .collect(Collectors.joining("\n")); + } +} diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/RdsUrlType.java b/wrapper/src/main/java/software/amazon/jdbc/util/RdsUrlType.java index dff1e663a..8c3084192 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/util/RdsUrlType.java +++ b/wrapper/src/main/java/software/amazon/jdbc/util/RdsUrlType.java @@ -22,8 +22,10 @@ public enum RdsUrlType { RDS_READER_CLUSTER(true, true), RDS_CUSTOM_CLUSTER(true, true), RDS_PROXY(true, false), + RDS_PROXY_ENDPOINT(true, false), RDS_INSTANCE(true, false), RDS_AURORA_LIMITLESS_DB_SHARD_GROUP(true, false), + RDS_GLOBAL_WRITER_CLUSTER(true, true), OTHER(false, false); private final boolean isRds; diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/RdsUtils.java b/wrapper/src/main/java/software/amazon/jdbc/util/RdsUtils.java index 2da967a9b..c1eec6545 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/util/RdsUtils.java +++ b/wrapper/src/main/java/software/amazon/jdbc/util/RdsUtils.java @@ -73,6 +73,20 @@ public class RdsUtils { // Governmental endpoints // https://aws.amazon.com/compliance/fips/#FIPS_Endpoints_by_Service // https://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/s3/model/Region.html + // + // + // Aurora Global Database + // https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Concepts.Aurora_Fea_Regions_DB-eng.Feature.GlobalDatabase.html + // Global Database Endpoint: <global-db-name>.global-<unique-id>.global.rds.amazonaws.com + // Example: test-global-db-name.global-123456789012.global.rds.amazonaws.com + // + // + // RDS Proxy + // RDS Proxy Endpoint: <rds-proxy-name>.proxy-<unique-id>.<aws-region>.rds.amazonaws.com + // Example: test-rds-proxy-name.proxy-123456789012.us-east-2.rds.amazonaws.com + // + // RDS Proxy Custom Endpoint: <custom-endpoint-name>.endpoint.proxy-<unique-id>.<aws-region>.rds.amazonaws.com + // Example: test-custom-endpoint-name.endpoint.proxy-123456789012.us-east-2.rds.amazonaws.com private static final Pattern AURORA_DNS_PATTERN = Pattern.compile( @@ -177,6 +191,38 @@ ".*(?-old1)\\..*", Pattern.CASE_INSENSITIVE); + // https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Concepts.Aurora_Fea_Regions_DB-eng.Feature.GlobalDatabase.html + private static final Pattern AURORA_GLOBAL_WRITER_DNS_PATTERN = + Pattern.compile( + "^(?<instance>.+)\\." + + "(?<dns>global-)?" + + "(?<domain>[a-zA-Z0-9]+\\.global\\.rds\\.amazonaws\\.com\\.?)$", + Pattern.CASE_INSENSITIVE); + + private static final Pattern RDS_PROXY_ENDPOINT_DNS_PATTERN = + Pattern.compile( + "^(?<instance>.+)\\.endpoint\\." + + "(?<dns>proxy-)?"
+ + "(?[a-zA-Z0-9]+\\.(?[a-zA-Z0-9\\-]+)" + + "\\.rds\\.amazonaws\\.com\\.?)$", + Pattern.CASE_INSENSITIVE); + + private static final Pattern RDS_PROXY_ENDPOINT_CHINA_DNS_PATTERN = + Pattern.compile( + "^(?.+)\\.endpoint\\." + + "(?proxy-)+" + + "(?[a-zA-Z0-9]+\\.rds\\.(?[a-zA-Z0-9\\-]+)" + + "\\.amazonaws\\.com\\.cn\\.?)$", + Pattern.CASE_INSENSITIVE); + + private static final Pattern RDS_PROXY_ENDPOINT_OLD_CHINA_DNS_PATTERN = + Pattern.compile( + "^(?.+)\\.endpoint\\." + + "(?proxy-)?" + + "(?[a-zA-Z0-9]+\\.(?[a-zA-Z0-9\\-]+)" + + "\\.rds\\.amazonaws\\.com\\.cn\\.?)$", + Pattern.CASE_INSENSITIVE); + private static final Map cachedPatterns = new ConcurrentHashMap<>(); private static final Map cachedDnsPatterns = new ConcurrentHashMap<>(); @@ -218,6 +264,21 @@ public boolean isRdsProxyDns(final String host) { return dnsGroup != null && dnsGroup.startsWith("proxy-"); } + public boolean isRdsProxyEndpointDns(final String host) { + final String preparedHost = getPreparedHost(host); + if (StringUtils.isNullOrEmpty(preparedHost)) { + return false; + } + + final Matcher matcher = cacheMatcher(preparedHost, + RDS_PROXY_ENDPOINT_DNS_PATTERN, RDS_PROXY_ENDPOINT_CHINA_DNS_PATTERN, RDS_PROXY_ENDPOINT_OLD_CHINA_DNS_PATTERN); + if (getRegexGroup(matcher, DNS_GROUP) != null) { + return getRegexGroup(matcher, INSTANCE_GROUP) != null; + } + + return false; + } + public @Nullable String getRdsClusterId(final String host) { final String preparedHost = getPreparedHost(host); if (StringUtils.isNullOrEmpty(preparedHost)) { @@ -324,6 +385,11 @@ public String getRdsClusterHostUrl(final String host) { return null; } + public boolean isGlobalDbWriterClusterDns(final String host) { + final String dnsGroup = getDnsGroup(getPreparedHost(host)); + return dnsGroup != null && dnsGroup.equalsIgnoreCase("global-"); + } + public boolean isIP(final String ip) { return isIPv4(ip) || isIPv6(ip); } @@ -348,6 +414,8 @@ public RdsUrlType identifyRdsType(final String host) { if (isIP(host)) { return RdsUrlType.IP_ADDRESS; + } else if (isGlobalDbWriterClusterDns(host)) { + return RdsUrlType.RDS_GLOBAL_WRITER_CLUSTER; } else if (isWriterClusterDns(host)) { return RdsUrlType.RDS_WRITER_CLUSTER; } else if (isReaderClusterDns(host)) { @@ -358,6 +426,8 @@ public RdsUrlType identifyRdsType(final String host) { return RdsUrlType.RDS_AURORA_LIMITLESS_DB_SHARD_GROUP; } else if (isRdsProxyDns(host)) { return RdsUrlType.RDS_PROXY; + } else if (isRdsProxyEndpointDns(host)) { + return RdsUrlType.RDS_PROXY_ENDPOINT; } else if (isRdsDns(host)) { return RdsUrlType.RDS_INSTANCE; } else { @@ -453,7 +523,8 @@ private String getDnsGroup(final String host) { } return cachedDnsPatterns.computeIfAbsent(host, (k) -> { final Matcher matcher = cacheMatcher(k, - AURORA_DNS_PATTERN, AURORA_CHINA_DNS_PATTERN, AURORA_OLD_CHINA_DNS_PATTERN, AURORA_GOV_DNS_PATTERN); + AURORA_DNS_PATTERN, AURORA_CHINA_DNS_PATTERN, AURORA_OLD_CHINA_DNS_PATTERN, + AURORA_GOV_DNS_PATTERN, AURORA_GLOBAL_WRITER_DNS_PATTERN); return getRegexGroup(matcher, DNS_GROUP); }); } diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/ServiceUtility.java b/wrapper/src/main/java/software/amazon/jdbc/util/ServiceUtility.java index c9bfe06df..976d3d917 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/util/ServiceUtility.java +++ b/wrapper/src/main/java/software/amazon/jdbc/util/ServiceUtility.java @@ -21,11 +21,11 @@ import org.checkerframework.checker.nullness.qual.Nullable; import software.amazon.jdbc.ConnectionPluginManager; import software.amazon.jdbc.ConnectionProvider; -import 
 software.amazon.jdbc.HostListProvider; import software.amazon.jdbc.PartialPluginService; import software.amazon.jdbc.PluginServiceImpl; import software.amazon.jdbc.dialect.Dialect; import software.amazon.jdbc.dialect.HostListProviderSupplier; +import software.amazon.jdbc.hostlistprovider.HostListProvider; import software.amazon.jdbc.profile.ConfigurationProfile; import software.amazon.jdbc.targetdriverdialect.TargetDriverDialect; import software.amazon.jdbc.util.events.EventPublisher; @@ -76,7 +76,7 @@ public FullServicesContainer createStandardServiceContainer( servicesContainer.setPluginManagerService(pluginService); pluginManager.initPlugins(servicesContainer, configurationProfile); - final HostListProviderSupplier supplier = pluginService.getDialect().getHostListProvider(); + final HostListProviderSupplier supplier = pluginService.getDialect().getHostListProviderSupplier(); if (supplier != null) { final HostListProvider provider = supplier.getProvider(props, originalUrl, servicesContainer); pluginService.setHostListProvider(provider); } diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/Utils.java b/wrapper/src/main/java/software/amazon/jdbc/util/Utils.java index 4d02e9224..8bfe5ca1b 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/util/Utils.java +++ b/wrapper/src/main/java/software/amazon/jdbc/util/Utils.java @@ -53,28 +53,4 @@ public static boolean containsHostAndPort(final Collection<HostSpec> hosts, Stri } return null; } - - public static String logTopology(final @Nullable List<HostSpec> hosts) { - return logTopology(hosts, null); - } - - public static String logTopology( - final @Nullable List<HostSpec> hosts, - final @Nullable String messagePrefix) { - - final StringBuilder msg = new StringBuilder(); - if (hosts == null) { - msg.append("<null>"); - } else { - for (final HostSpec host : hosts) { - if (msg.length() > 0) { - msg.append("\n"); - } - msg.append(" ").append(host == null ? "<null>" : host); - } - } - - return Messages.get("Utils.topology", - new Object[] {messagePrefix == null ? "Topology:" : messagePrefix, msg.toString()}); - } } diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/WrapperUtils.java b/wrapper/src/main/java/software/amazon/jdbc/util/WrapperUtils.java index 0037bdbd9..f4b0babb7 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/util/WrapperUtils.java +++ b/wrapper/src/main/java/software/amazon/jdbc/util/WrapperUtils.java @@ -217,6 +217,8 @@ public static <T> T executeWithPlugins( context.setAttribute("jdbcCall", jdbcMethod.methodName); } + connectionWrapper.getServicesContainer().getPluginManagerService().resetCallContext(); + // The target driver may block on Statement.getConnection(). if (jdbcMethod.shouldLockConnection && jdbcMethod.checkBoundedConnection) { final Connection conn = WrapperUtils.getConnectionFromSqlObject(methodInvokeOn); @@ -286,6 +288,8 @@ public static <T> T executeWithPlugins( context.setAttribute("jdbcCall", jdbcMethod.methodName); } + connectionWrapper.getServicesContainer().getPluginManagerService().resetCallContext(); + // The target driver may block on Statement.getConnection().
if (jdbcMethod.shouldLockConnection && jdbcMethod.checkBoundedConnection) { final Connection conn = WrapperUtils.getConnectionFromSqlObject(methodInvokeOn); diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/connection/ConnectionService.java b/wrapper/src/main/java/software/amazon/jdbc/util/connection/ConnectionService.java deleted file mode 100644 index 1c18a9f28..000000000 --- a/wrapper/src/main/java/software/amazon/jdbc/util/connection/ConnectionService.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package software.amazon.jdbc.util.connection; - -import java.sql.Connection; -import java.sql.SQLException; -import java.util.Properties; -import software.amazon.jdbc.HostSpec; -import software.amazon.jdbc.PluginService; -import software.amazon.jdbc.util.FullServicesContainer; - -/** - * A service used to open new connections for internal driver use. - * - * @deprecated This interface is deprecated and will be removed in a future version. Use - * {@link software.amazon.jdbc.util.ServiceUtility#createMinimalServiceContainer} followed by - * {@link PluginService#forceConnect} instead. - */ -@Deprecated -public interface ConnectionService { - /** - * Creates an auxiliary connection. Auxiliary connections are driver-internal connections that accomplish various - * specific tasks such as monitoring a host's availability, checking the topology information for a cluster, etc. - * - * @param hostSpec the hostSpec containing the host information for the auxiliary connection. - * @param props the properties for the auxiliary connection. - * @return a new connection to the given host using the given props. - * @throws SQLException if an error occurs while opening the connection. - * @deprecated Use {@link software.amazon.jdbc.util.ServiceUtility#createMinimalServiceContainer} followed by - * {@link PluginService#forceConnect} instead. - */ - @Deprecated - Connection open(HostSpec hostSpec, Properties props) throws SQLException; - - /** - * Get the {@link PluginService} associated with this {@link ConnectionService}. - * - * @return the {@link PluginService} associated with this {@link ConnectionService} - * @deprecated Use {@link software.amazon.jdbc.util.ServiceUtility#createMinimalServiceContainer} followed by - * {@link FullServicesContainer#getPluginService()} instead. - */ - @Deprecated - PluginService getPluginService(); -} diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/connection/ConnectionServiceImpl.java b/wrapper/src/main/java/software/amazon/jdbc/util/connection/ConnectionServiceImpl.java deleted file mode 100644 index ef280e738..000000000 --- a/wrapper/src/main/java/software/amazon/jdbc/util/connection/ConnectionServiceImpl.java +++ /dev/null @@ -1,105 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). 
- * You may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package software.amazon.jdbc.util.connection; - -import java.sql.Connection; -import java.sql.SQLException; -import java.util.Properties; -import software.amazon.jdbc.ConnectionPluginManager; -import software.amazon.jdbc.ConnectionProvider; -import software.amazon.jdbc.HostSpec; -import software.amazon.jdbc.PartialPluginService; -import software.amazon.jdbc.PluginService; -import software.amazon.jdbc.dialect.Dialect; -import software.amazon.jdbc.targetdriverdialect.TargetDriverDialect; -import software.amazon.jdbc.util.FullServicesContainer; -import software.amazon.jdbc.util.FullServicesContainerImpl; -import software.amazon.jdbc.util.PropertyUtils; -import software.amazon.jdbc.util.events.EventPublisher; -import software.amazon.jdbc.util.monitoring.MonitorService; -import software.amazon.jdbc.util.storage.StorageService; -import software.amazon.jdbc.util.telemetry.TelemetryFactory; - -/** - * A service used to open new connections for internal driver use. - * - * @deprecated This class is deprecated and will be removed in a future version. Use - * {@link software.amazon.jdbc.util.ServiceUtility#createMinimalServiceContainer} followed by - * {@link PluginService#forceConnect} instead. - */ -@Deprecated -public class ConnectionServiceImpl implements ConnectionService { - protected final String targetDriverProtocol; - protected final ConnectionPluginManager pluginManager; - protected final PluginService pluginService; - - /** - * Constructs a {@link ConnectionServiceImpl} instance. - * - * @deprecated Use {@link software.amazon.jdbc.util.ServiceUtility#createMinimalServiceContainer} instead. 
- */ - @Deprecated - public ConnectionServiceImpl( - StorageService storageService, - MonitorService monitorService, - EventPublisher eventPublisher, - TelemetryFactory telemetryFactory, - ConnectionProvider connectionProvider, - String originalUrl, - String targetDriverProtocol, - TargetDriverDialect driverDialect, - Dialect dbDialect, - Properties props) throws SQLException { - this.targetDriverProtocol = targetDriverProtocol; - - FullServicesContainer servicesContainer = - new FullServicesContainerImpl( - storageService, monitorService, eventPublisher, connectionProvider, telemetryFactory); - this.pluginManager = new ConnectionPluginManager( - props, telemetryFactory, connectionProvider, null); - servicesContainer.setConnectionPluginManager(this.pluginManager); - - Properties propsCopy = PropertyUtils.copyProperties(props); - PartialPluginService partialPluginService = new PartialPluginService( - servicesContainer, - propsCopy, - originalUrl, - this.targetDriverProtocol, - driverDialect, - dbDialect - ); - - servicesContainer.setHostListProviderService(partialPluginService); - servicesContainer.setPluginService(partialPluginService); - servicesContainer.setPluginManagerService(partialPluginService); - - this.pluginService = partialPluginService; - this.pluginManager.initPlugins(servicesContainer, null); - } - - @Override - @Deprecated - public Connection open(HostSpec hostSpec, Properties props) throws SQLException { - return this.pluginManager.forceConnect(this.targetDriverProtocol, hostSpec, props, true, null); - } - - @Override - @Deprecated - public PluginService getPluginService() { - return this.pluginService; - } -} diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/events/DataAccessEvent.java b/wrapper/src/main/java/software/amazon/jdbc/util/events/DataAccessEvent.java index b94aadb40..81fe37147 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/util/events/DataAccessEvent.java +++ b/wrapper/src/main/java/software/amazon/jdbc/util/events/DataAccessEvent.java @@ -24,8 +24,8 @@ * data. */ public class DataAccessEvent implements Event { - protected @NonNull Class<?> dataClass; - protected @NonNull Object key; + protected final @NonNull Class<?> dataClass; + protected final @NonNull Object key; /** * Constructor for a DataAccessEvent. diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/events/Event.java b/wrapper/src/main/java/software/amazon/jdbc/util/events/Event.java index 67c751cb4..170f989f2 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/util/events/Event.java +++ b/wrapper/src/main/java/software/amazon/jdbc/util/events/Event.java @@ -18,6 +18,10 @@ /** * An interface for events that need to be communicated between different components. + * + *
<p>
All implementations of this interface MUST be immutable or use both the default {@link Object#equals} and + * {@link Object#hashCode} implementations, as instances will be used as keys in hash-based collections. Mutable + * implementations may cause undefined behavior when used as Map keys or Set elements. */ public interface Event { boolean isImmediateDelivery(); diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/events/EventSubscriber.java b/wrapper/src/main/java/software/amazon/jdbc/util/events/EventSubscriber.java index 34e63b452..877c7ef21 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/util/events/EventSubscriber.java +++ b/wrapper/src/main/java/software/amazon/jdbc/util/events/EventSubscriber.java @@ -24,6 +24,10 @@ * {@link java.util.HashSet} to prevent duplicate subscriptions, so classes implementing this interface should consider * whether they need to override {@link Object#equals(Object)} and {@link Object#hashCode()}. * + *
<p>
All implementations of this interface MUST be immutable or use both the default {@link Object#equals} and + * {@link Object#hashCode} implementations, as instances will be used as keys in hash-based collections. Mutable + * implementations may cause undefined behavior when used as Map keys or Set elements. + * * @see EventPublisher */ public interface EventSubscriber { diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/monitoring/AbstractMonitor.java b/wrapper/src/main/java/software/amazon/jdbc/util/monitoring/AbstractMonitor.java index e7a92ffbd..269b76499 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/util/monitoring/AbstractMonitor.java +++ b/wrapper/src/main/java/software/amazon/jdbc/util/monitoring/AbstractMonitor.java @@ -91,7 +91,6 @@ public void stop() { Thread.currentThread().interrupt(); this.monitorExecutor.shutdownNow(); } finally { - // TODO: Should this be removed? close() should be called in the run() method finally block close(); this.state.set(MonitorState.STOPPED); } diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/monitoring/Monitor.java b/wrapper/src/main/java/software/amazon/jdbc/util/monitoring/Monitor.java index fbdd55063..78e111d88 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/util/monitoring/Monitor.java +++ b/wrapper/src/main/java/software/amazon/jdbc/util/monitoring/Monitor.java @@ -26,6 +26,8 @@ public interface Monitor { * Executes the monitoring loop for this monitor. This method should be called in the run() method of the thread * submitted during the call to {@link #start()}. Additionally, the monitoring loop should regularly update the last * activity timestamp so that the {@link MonitorService} can detect whether the monitor is stuck or not. + * + * @throws Exception if there's an error executing the monitoring logic. */ void monitor() throws Exception; diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/monitoring/MonitorService.java b/wrapper/src/main/java/software/amazon/jdbc/util/monitoring/MonitorService.java index 18b950400..8b7ac7fe1 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/util/monitoring/MonitorService.java +++ b/wrapper/src/main/java/software/amazon/jdbc/util/monitoring/MonitorService.java @@ -17,8 +17,8 @@ package software.amazon.jdbc.util.monitoring; import java.sql.SQLException; +import java.util.EnumSet; import java.util.Properties; -import java.util.Set; import org.checkerframework.checker.nullness.qual.Nullable; import software.amazon.jdbc.ConnectionProvider; import software.amazon.jdbc.dialect.Dialect; @@ -41,7 +41,8 @@ public interface MonitorService { * @param heartbeatTimeoutNanos a duration in nanoseconds defining the maximum amount of time that a monitor should * take between updating its last-updated timestamp. If a monitor has not updated its * last-updated timestamp within this duration it will be considered stuck. - * @param errorResponses a {@link Set} defining actions to take if the monitor is stuck or in an error state. + * @param errorResponses an {@link EnumSet} defining actions to take if the monitor is stuck or in an error + * state. * @param producedDataClass the class of data produced by the monitor. * @param <T> the type of the monitor.
*/ @@ -49,7 +50,7 @@ void registerMonitorTypeIfAbsent( Class<T> monitorClass, long expirationTimeoutNanos, long heartbeatTimeoutNanos, - Set<MonitorErrorResponse> errorResponses, + EnumSet<MonitorErrorResponse> errorResponses, @Nullable Class<?> producedDataClass); /** diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/monitoring/MonitorServiceImpl.java b/wrapper/src/main/java/software/amazon/jdbc/util/monitoring/MonitorServiceImpl.java index 95090b193..5a3446af3 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/util/monitoring/MonitorServiceImpl.java +++ b/wrapper/src/main/java/software/amazon/jdbc/util/monitoring/MonitorServiceImpl.java @@ -19,12 +19,12 @@ import java.sql.SQLException; import java.util.ArrayList; import java.util.Collections; +import java.util.EnumSet; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Properties; -import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; @@ -36,7 +36,6 @@ import software.amazon.jdbc.dialect.Dialect; import software.amazon.jdbc.hostlistprovider.Topology; import software.amazon.jdbc.hostlistprovider.monitoring.ClusterTopologyMonitorImpl; -import software.amazon.jdbc.hostlistprovider.monitoring.MultiAzClusterTopologyMonitorImpl; import software.amazon.jdbc.plugin.strategy.fastestresponse.NodeResponseTimeMonitor; import software.amazon.jdbc.targetdriverdialect.TargetDriverDialect; import software.amazon.jdbc.util.ExecutorFactory; @@ -59,13 +58,11 @@ public class MonitorServiceImpl implements MonitorService, EventSubscriber { static { Map<Class<? extends Monitor>, Supplier<CacheContainer>> suppliers = new HashMap<>(); - Set<MonitorErrorResponse> recreateOnError = - new HashSet<>(Collections.singletonList(MonitorErrorResponse.RECREATE)); + EnumSet<MonitorErrorResponse> recreateOnError = EnumSet.of(MonitorErrorResponse.RECREATE); MonitorSettings defaultSettings = new MonitorSettings( TimeUnit.MINUTES.toNanos(15), TimeUnit.MINUTES.toNanos(3), recreateOnError); suppliers.put(ClusterTopologyMonitorImpl.class, () -> new CacheContainer(defaultSettings, Topology.class)); - suppliers.put(MultiAzClusterTopologyMonitorImpl.class, () -> new CacheContainer(defaultSettings, Topology.class)); suppliers.put(NodeResponseTimeMonitor.class, () -> new CacheContainer(defaultSettings, null)); defaultSuppliers = Collections.unmodifiableMap(suppliers); } @@ -150,7 +147,7 @@ protected void handleMonitorError( Monitor monitor = errorMonitorItem.getMonitor(); monitor.stop(); - Set<MonitorErrorResponse> errorResponses = cacheContainer.getSettings().getErrorResponses(); + EnumSet<MonitorErrorResponse> errorResponses = cacheContainer.getSettings().getErrorResponses(); if (errorResponses != null && errorResponses.contains(MonitorErrorResponse.RECREATE)) { cacheContainer.getCache().computeIfAbsent(key, k -> { LOGGER.fine(Messages.get("MonitorServiceImpl.recreatingMonitor", new Object[] {monitor})); @@ -166,7 +163,7 @@ public void registerMonitorTypeIfAbsent( Class<T> monitorClass, long expirationTimeoutNanos, long heartbeatTimeoutNanos, - Set<MonitorErrorResponse> errorResponses, + EnumSet<MonitorErrorResponse> errorResponses, @Nullable Class<?> producedDataClass) { monitorCaches.computeIfAbsent( monitorClass, diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/monitoring/MonitorSettings.java b/wrapper/src/main/java/software/amazon/jdbc/util/monitoring/MonitorSettings.java index 6774058e8..4e181e058 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/util/monitoring/MonitorSettings.java +++ b/wrapper/src/main/java/software/amazon/jdbc/util/monitoring/MonitorSettings.java @@ -16,6 +16,7 @@ package
 software.amazon.jdbc.util.monitoring; +import java.util.EnumSet; import java.util.Set; import org.checkerframework.checker.nullness.qual.NonNull; import org.checkerframework.checker.nullness.qual.Nullable; @@ -26,7 +27,7 @@ public class MonitorSettings { private final long expirationTimeoutNanos; private final long inactiveTimeoutNanos; - private @Nullable final Set<MonitorErrorResponse> errorResponses; + private @Nullable final EnumSet<MonitorErrorResponse> errorResponses; /** * Constructs a MonitorSettings instance. @@ -40,7 +41,7 @@ public class MonitorSettings { * no action will be performed. */ public MonitorSettings( - long expirationTimeoutNanos, long inactiveTimeoutNanos, @NonNull Set<MonitorErrorResponse> errorResponses) { + long expirationTimeoutNanos, long inactiveTimeoutNanos, @NonNull EnumSet<MonitorErrorResponse> errorResponses) { this.expirationTimeoutNanos = expirationTimeoutNanos; this.inactiveTimeoutNanos = inactiveTimeoutNanos; this.errorResponses = errorResponses; @@ -54,7 +55,7 @@ public long getInactiveTimeoutNanos() { return inactiveTimeoutNanos; } - public @Nullable Set<MonitorErrorResponse> getErrorResponses() { + public @Nullable EnumSet<MonitorErrorResponse> getErrorResponses() { return errorResponses; } } diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/storage/StorageService.java b/wrapper/src/main/java/software/amazon/jdbc/util/storage/StorageService.java index 35770f691..2a49ead0d 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/util/storage/StorageService.java +++ b/wrapper/src/main/java/software/amazon/jdbc/util/storage/StorageService.java @@ -16,7 +16,6 @@ package software.amazon.jdbc.util.storage; -import java.util.Map; import org.checkerframework.checker.nullness.qual.Nullable; public interface StorageService { @@ -91,9 +90,5 @@ void registerItemClassIfAbsent( */ void clearAll(); - // TODO: this is only called by the suggestedClusterId logic in RdsHostListProvider, which will be removed. This - // method should potentially be removed at that point as well.
- <V> @Nullable Map<Object, V> getEntries(Class<V> itemClass); - int size(Class<?> itemClass); } diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/storage/StorageServiceImpl.java b/wrapper/src/main/java/software/amazon/jdbc/util/storage/StorageServiceImpl.java index e59e8d72e..418885ede 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/util/storage/StorageServiceImpl.java +++ b/wrapper/src/main/java/software/amazon/jdbc/util/storage/StorageServiceImpl.java @@ -170,17 +170,6 @@ public void clearAll() { } } - @Override - public <V> @Nullable Map<Object, V> getEntries(Class<V> itemClass) { - final ExpirationCache<Object, ?> cache = caches.get(itemClass); - if (cache == null) { - return null; - } - - // TODO: remove this method after removing the suggestedClusterId logic - return (Map<Object, V>) cache.getEntries(); - } - @Override public int size(Class<?> itemClass) { final ExpirationCache<Object, ?> cache = caches.get(itemClass); diff --git a/wrapper/src/main/java/software/amazon/jdbc/wrapper/ConnectionWrapper.java b/wrapper/src/main/java/software/amazon/jdbc/wrapper/ConnectionWrapper.java index c29f11c2a..312dd8c26 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/wrapper/ConnectionWrapper.java +++ b/wrapper/src/main/java/software/amazon/jdbc/wrapper/ConnectionWrapper.java @@ -39,12 +39,12 @@ import org.checkerframework.checker.nullness.qual.NonNull; import org.checkerframework.checker.nullness.qual.Nullable; import software.amazon.jdbc.ConnectionPluginManager; -import software.amazon.jdbc.HostListProviderService; import software.amazon.jdbc.JdbcMethod; import software.amazon.jdbc.PluginManagerService; import software.amazon.jdbc.PluginService; import software.amazon.jdbc.PropertyDefinition; import software.amazon.jdbc.cleanup.CanReleaseResources; +import software.amazon.jdbc.hostlistprovider.HostListProviderService; import software.amazon.jdbc.profile.ConfigurationProfile; import software.amazon.jdbc.util.FullServicesContainer; import software.amazon.jdbc.util.Messages; @@ -63,6 +63,7 @@ public class ConnectionWrapper implements Connection, CanReleaseResources { protected final String originalUrl; protected @Nullable ConfigurationProfile configurationProfile; protected @Nullable Throwable openConnectionStacktrace; + protected FullServicesContainer servicesContainer; public ConnectionWrapper( @NonNull final FullServicesContainer servicesContainer, @@ -71,6 +72,7 @@ public ConnectionWrapper( @NonNull final String targetDriverProtocol, @Nullable final ConfigurationProfile configurationProfile) throws SQLException { + this.servicesContainer = servicesContainer; this.pluginManager = servicesContainer.getConnectionPluginManager(); this.pluginService = servicesContainer.getPluginService(); this.hostListProviderService = servicesContainer.getHostListProviderService(); @@ -105,6 +107,10 @@ protected ConnectionWrapper( init(props); } + public FullServicesContainer getServicesContainer() { + return this.servicesContainer; + } + protected void init(final Properties props) throws SQLException { if (this.pluginService.getCurrentConnection() == null) { final Connection conn = diff --git a/wrapper/src/main/resources/aws_advanced_jdbc_wrapper_messages.properties b/wrapper/src/main/resources/aws_advanced_jdbc_wrapper_messages.properties index 6d0b0fa90..f821daa2f 100644 --- a/wrapper/src/main/resources/aws_advanced_jdbc_wrapper_messages.properties +++ b/wrapper/src/main/resources/aws_advanced_jdbc_wrapper_messages.properties @@ -27,6 +27,8 @@ AdfsCredentialsProviderFactory.signOnPagePostActionRequestFailed=ADFS SignOn Pag
AdfsCredentialsProviderFactory.signOnPageRequestFailed=ADFS SignOn Page Request Failed with HTTP status ''{0}'', reason phrase ''{1}'', and response ''{2}'' AdfsCredentialsProviderFactory.signOnPageUrl=ADFS SignOn URL: ''{0}'' +AuroraPgDialect.auroraUtils=auroraUtils: {0} + AuthenticationToken.useCachedToken=Use cached authentication token = ''{0}'' AuthenticationToken.generatedNewToken=Generated new authentication token = ''{0}'' AuthenticationToken.javaSdkNotInClasspath=Required dependency 'AWS Java SDK RDS v2.x' is not on the classpath. @@ -34,13 +36,10 @@ AuthenticationToken.javaSdkNotInClasspath=Required dependency 'AWS Java SDK RDS RdsHostListProvider.clusterInstanceHostPatternNotSupportedForRDSProxy=An RDS Proxy url can''t be used as the 'clusterInstanceHostPattern' configuration setting. RdsHostListProvider.clusterInstanceHostPatternNotSupportedForRdsCustom=A custom RDS url can''t be used as the 'clusterInstanceHostPattern' configuration setting. RdsHostListProvider.invalidPattern=Invalid value for the 'clusterInstanceHostPattern' configuration setting - the host pattern must contain a '?' character as a placeholder for the DB instance identifiers of the instances in the cluster. -RdsHostListProvider.invalidTopology=The topology query returned an invalid topology - no writer instance detected. -RdsHostListProvider.suggestedClusterId=ClusterId ''{0}'' is suggested for url ''{1}''. RdsHostListProvider.parsedListEmpty=Can''t parse connection string: ''{0}'' -RdsHostListProvider.invalidQuery=Error obtaining host list. Provided database might not be an Aurora Db cluster -RdsHostListProvider.errorGettingHostRole=An error occurred while obtaining the connected host's role. This could occur if the connection is broken or if you are not connected to an Aurora database. RdsHostListProvider.errorIdentifyConnection=An error occurred while obtaining the connection's host ID. -RdsHostListProvider.errorGettingNetworkTimeout=An error occurred while getting the connection network timeout: {0} + +RdsPgDialect.rdsToolsAuroraUtils=rdsTools: {0}, auroraUtils: {1} AwsSdk.unsupportedRegion=Unsupported AWS region ''{0}''. For supported regions please read https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html @@ -97,6 +96,9 @@ ConnectionProvider.unsupportedHostSpecSelectorStrategy=Unsupported host selectio ConnectionUrlBuilder.missingJdbcProtocol=Missing JDBC protocol and/or host name. Could not construct URL. ConnectionUrlParser.protocolNotFound=Url should contain a driver protocol. Protocol is not found in url: ''{0}'' +ConnectionUrlParser.cantParseUrl=Can''t parse URL from ''{0}''. +ConnectionUrlParser.cantParseHost=Can''t parse host from ''{0}''. +ConnectionUrlParser.cantParseAwsRegion=Can''t parse AWS region from ''{0}''. ConnectTimeConnectionPlugin.connectTime=Connected in {0} nanos. @@ -132,6 +134,8 @@ DefaultConnectionPlugin.executingMethod=Executing method: ''{0}'' DefaultConnectionPlugin.noHostsAvailable=The default connection plugin received an empty host list from the plugin service. DefaultConnectionPlugin.unknownRoleRequested=A HostSpec with a role of HostRole.UNKNOWN was requested via getHostSpecByStrategy. The requested role must be either HostRole.WRITER or HostRole.READER +DialectManager.currentDialect=Current dialect: {0}, {1}, canUpdate: {2} + Driver.nullUrl=Url is null. Driver.alreadyRegistered=Driver is already registered. It can only be registered once. Driver.missingDriver=Can''t find the target driver for ''{0}''. 
Please ensure the target driver is in the classpath and is registered. Here is the list of registered drivers in the classpath: {1} @@ -181,6 +185,11 @@ Failover.skipFailoverOnInterruptedThread=Do not start failover since the current FederatedAuthPlugin.unableToDetermineRegion=Unable to determine connection region. If you are using a non-standard RDS URL, please set the ''{0}'' property. +GlobalAuroraTopologyMonitor.cannotFindRegionTemplate=Cannot find cluster template for region {0}. + +GlobalAuroraTopologyUtils.globalClusterInstanceHostPatternsRequired=Parameter 'globalClusterInstanceHostPatterns' is required for Aurora Global Database. +GlobalAuroraTopologyUtils.detectedGdbPatterns=Detected GDB instance template patterns:\n{0} + HostAvailabilityStrategy.invalidMaxRetries=Invalid value of {0} for configuration parameter `hostAvailabilityStrategyMaxRetries`. It must be an integer greater than 1. HostAvailabilityStrategy.invalidInitialBackoffTime=Invalid value of {0} for configuration parameter `hostAvailabilityStrategyInitialBackoffTime`. It must be an integer greater than 1. @@ -247,7 +256,6 @@ HostMonitorImpl.interruptedExceptionDuringMonitoring=Monitoring thread for node HostMonitorImpl.exceptionDuringMonitoringContinue=Continuing monitoring after unhandled exception was thrown in monitoring thread for node {0}. HostMonitorImpl.exceptionDuringMonitoringStop=Stopping monitoring after unhandled exception was thrown in monitoring thread for node {0}. HostMonitorImpl.monitorIsStopped=Monitoring was already stopped for node {0}. -HostMonitorImpl.stopped=Stopped monitoring thread for node ''{0}''. HostMonitorImpl.startMonitoringThreadNewContext=Start monitoring thread for checking new contexts for {0}. HostMonitorImpl.stopMonitoringThreadNewContext=Stop monitoring thread for checking new contexts for {0}. HostMonitorImpl.startMonitoringThread=Start monitoring thread for {0}. @@ -257,6 +265,8 @@ HostMonitorServiceImpl.emptyAliasSet=Empty alias set passed for ''{0}''. Set sho HostResponseTimeServiceImpl.errorStartingMonitor=An error occurred while starting a response time monitor for ''{0}'': {1} +MonitoringGlobalAuroraHostListProvider.globalHostPatternsRequired=Parameter 'globalClusterInstanceHostPatterns' is required for Aurora Global Database. + MonitorServiceImpl.checkingMonitors=Checking monitors for errors... MonitorServiceImpl.monitorClassMismatch=The monitor stored at ''{0}'' did not have the expected type. The expected type was ''{1}'', but the monitor ''{2}'' had a type of ''{3}''. MonitorServiceImpl.monitorStuck=Monitor ''{0}'' has not been updated within the inactive timeout of {1} milliseconds. The monitor will be stopped. @@ -314,12 +324,17 @@ ReadWriteSplittingPlugin.exceptionWhileExecutingCommand=Detected an exception wh ReadWriteSplittingPlugin.failoverExceptionWhileExecutingCommand=Detected a failover exception while executing a command: ''{0}'' ReadWriteSplittingPlugin.executingAgainstOldConnection=Executing method against old connection: ''{0}'' ReadWriteSplittingPlugin.noReadersAvailable=The plugin was unable to establish a reader connection to any reader instance. 
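#
# The two GlobalAuroraTopologyUtils messages above guard the new mandatory
# 'globalClusterInstanceHostPatterns' connection parameter for Aurora Global Database.
# A minimal sketch of supplying it; the comma-separated "?"-template value format is an
# assumption modeled on the existing 'clusterInstanceHostPattern' parameter, and the
# domains and regions are illustrative only:
#
#   globalClusterInstanceHostPatterns=?.xyz.us-east-1.rds.amazonaws.com,?.xyz.eu-west-1.rds.amazonaws.com
#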
-ReadWriteSplittingPlugin.successfullyConnectedToReader=Successfully connected to a new reader host: ''{0}'' +ReadWriteSplittingPlugin.successfullyConnectedToReader=Successfully connected to reader host: ''{0}'' ReadWriteSplittingPlugin.failedToConnectToReader=Failed to connect to reader host: ''{0}'' ReadWriteSplittingPlugin.unsupportedHostSpecSelectorStrategy=Unsupported host selection strategy ''{0}'' specified in plugin configuration parameter ''readerHostSelectorStrategy''. Please visit the Read/Write Splitting Plugin documentation for all supported strategies. ReadWriteSplittingPlugin.errorVerifyingInitialHostSpecRole=An error occurred while obtaining the connected host's role. This could occur if the connection is broken or if you are not connected to an Aurora database. ReadWriteSplittingPlugin.previousReaderNotAllowed=The previous reader connection cannot be used because it is no longer in the list of allowed hosts. Previous reader: {0}. Allowed hosts: {1} +SimpleReadWriteSplittingPlugin.verificationFailed=The plugin was unable to establish a {0} connection within {1} ms. +SimpleReadWriteSplittingPlugin.failedToConnectToWriter=A writer connection was requested via setReadOnly, but the plugin was unable to establish a writer connection with the writer endpoint ''{0}''. +SimpleReadWriteSplittingPlugin.missingRequiredConfigParameter=Configuration parameter ''{0}'' is required. +SimpleReadWriteSplittingPlugin.fallbackToCurrentConnection=Failed to switch to reader host. The current connection will be used as a fallback: ''{0}''. Error: {1} + SAMLCredentialsProviderFactory.getSamlAssertionFailed=Failed to get SAML Assertion due to exception: ''{0}'' SamlAuthPlugin.javaStsSdkNotInClasspath=Required dependency 'AWS Java SDK for AWS Secret Token Service' is not on the classpath. SamlAuthPlugin.unhandledException=Unhandled exception: ''{0}'' @@ -355,12 +370,18 @@ TargetDriverDialectManager.useDialect=Target driver dialect set to: ''{0}'', {1} TargetDriverDialectManager.unexpectedClass=Unexpected DataSource class. Expected class(es): {0}, actual class: {1}. TargetDriverDialect.unsupported=This target driver dialect does not support this operation. MysqlConnectorJDriverHelper.canNotRegister=Can''t register driver com.mysql.cj.jdbc.Driver. + +TopologyUtils.errorGettingHostRole=An error occurred while obtaining the connected host's role. This could occur if the connection is broken or if you are not connected to an Aurora database. +TopologyUtils.errorGettingNetworkTimeout=An error occurred while getting the connection network timeout: {0} +TopologyUtils.errorProcessingQueryResults=An error occurred while processing the results from the topology query: {0} +TopologyUtils.invalidQuery=An error occurred while attempting to obtain the topology because the topology query was invalid. Please ensure you are connecting to an Aurora or RDS cluster. +TopologyUtils.invalidTopology=The topology query returned an invalid topology - no writer instance detected. +TopologyUtils.unexpectedTopologyQueryColumnCount=The topology query returned a result with 0 columns. This may occur if the topology query is executed when the server is failing over. + MariadbDriverHelper.canNotRegister=Can''t register driver org.mariadb.jdbc.Driver. AuroraInitialConnectionStrategyPlugin.unsupportedStrategy=Unsupported host selection strategy ''{0}''. -AuroraInitialConnectionStrategyPlugin.requireDynamicProvider=Dynamic host list provider is required. -NodeResponseTimeMonitor.stopped=Stopped Response time thread for node ''{0}''. 
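#
# The new SimpleReadWriteSplittingPlugin messages above belong to the 'srw' plugin that
# SimpleReadWriteSplittingTest exercises later in this change. A minimal configuration
# sketch: the tests set the endpoint keys via SimpleReadWriteSplittingPlugin
# .SRW_WRITE_ENDPOINT.name and .SRW_READ_ENDPOINT.name, so the literal key strings and
# endpoints below are assumptions, shown for illustration only:
#
#   wrapperPlugins=srw
#   srwWriteEndpoint=my-cluster.cluster-xyz.us-east-1.rds.amazonaws.com
#   srwReadEndpoint=my-cluster.cluster-ro-xyz.us-east-1.rds.amazonaws.com
#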
NodeResponseTimeMonitor.responseTime=Response time for ''{0}'': {1} ms NodeResponseTimeMonitor.interruptedExceptionDuringMonitoring=Response time thread for node {0} was interrupted. NodeResponseTimeMonitor.exceptionDuringMonitoringStop=Stopping thread after unhandled exception was thrown in Response time thread for node {0}. @@ -370,10 +391,7 @@ NodeResponseTimeMonitor.openedConnection=Opened Response time connection: {0}. ClusterTopologyMonitorImpl.startMonitoringThread=[clusterId: ''{0}''] Start cluster topology monitoring thread for ''{1}''. ClusterTopologyMonitorImpl.stopMonitoringThread=Stop cluster topology monitoring thread for ''{0}''. ClusterTopologyMonitorImpl.exceptionDuringMonitoringStop=Stopping cluster topology monitoring after unhandled exception was thrown in monitoring thread for node ''{0}''. -ClusterTopologyMonitorImpl.invalidQuery=An error occurred while attempting to obtain the topology because the topology query was invalid. Please ensure you are connecting to an Aurora or RDS Db cluster. -ClusterTopologyMonitorImpl.errorGettingNetworkTimeout=An error occurred while getting the connection network timeout: {0} -ClusterTopologyMonitorImpl.invalidTopology=The topology query returned an invalid topology - no writer instance detected. -ClusterTopologyMonitorImpl.topologyNotUpdated=Topology hasn''t been updated after {0} ms. +ClusterTopologyMonitorImpl.topologyNotUpdated=Topology has not been updated after {0} ms. ClusterTopologyMonitorImpl.openedMonitoringConnection=Opened monitoring connection to node ''{0}''. ClusterTopologyMonitorImpl.ignoringTopologyRequest=A topology refresh was requested, but the topology was already updated recently. Returning cached hosts: ClusterTopologyMonitorImpl.timeoutSetToZero=A topology refresh was requested, but the given timeout for the request was 0ms. Returning cached hosts: @@ -382,8 +400,6 @@ ClusterTopologyMonitorImpl.startingNodeMonitoringThreads=Starting node monitorin ClusterTopologyMonitorImpl.writerPickedUpFromNodeMonitors=The writer host detected by the node monitors was picked up by the topology monitor: ''{0}''. ClusterTopologyMonitorImpl.writerMonitoringConnection=The monitoring connection is connected to a writer: ''{0}''. ClusterTopologyMonitorImpl.errorFetchingTopology=An error occurred while querying for topology: {0} -ClusterTopologyMonitorImpl.errorProcessingQueryResults=An error occurred while processing the results from the topology query: {0} -ClusterTopologyMonitorImpl.unexpectedTopologyQueryColumnCount=The topology query returned a result with 0 columns. This may occur if the topology query is executed when the server is failing over. ClusterTopologyMonitorImpl.reset=Reset: clusterId={0}, host={1} ClusterTopologyMonitorImpl.resetEventReceived=MonitorResetEvent received. @@ -406,7 +422,7 @@ bgd.interrupted=[{0}] Interrupted. bgd.monitoringUnhandledException=[{0}] Unhandled exception while monitoring blue/green status. bgd.threadCompleted=[{0}] Blue/green status monitoring thread is completed. bgd.statusNotAvailable=[{0}] (status not available) currentPhase: {1} -bgd.usesVersion=[{0}] Blue/Green deployment uses version ''{1}'' which the driver doesn''t support. Version ''{2}'' will be used instead. +bgd.usesVersion=[{0}] Blue/Green deployment uses version ''{1}'' which the driver does not support. Version ''{2}'' will be used instead. bgd.noEntriesInStatusTable=[{0}] No entries in status table. bgd.exception=[{0}] currentPhase: {1}, exception while querying for blue/green status. 
bgd.unhandledSqlException=[{0}] Unhandled SQLException. diff --git a/wrapper/src/test/java/integration/TestEnvironmentRequest.java b/wrapper/src/test/java/integration/TestEnvironmentRequest.java index 7e4b64daf..91ff6f6d0 100644 --- a/wrapper/src/test/java/integration/TestEnvironmentRequest.java +++ b/wrapper/src/test/java/integration/TestEnvironmentRequest.java @@ -19,8 +19,7 @@ import com.fasterxml.jackson.annotation.JsonIgnore; import com.fasterxml.jackson.annotation.JsonIgnoreProperties; import com.fasterxml.jackson.annotation.JsonProperty; -import java.util.HashSet; -import java.util.Set; +import java.util.EnumSet; @JsonIgnoreProperties(ignoreUnknown = true) public class TestEnvironmentRequest { @@ -41,7 +40,7 @@ public class TestEnvironmentRequest { private TargetJvm targetJvm; @JsonProperty("features") - private final Set features = new HashSet<>(); + private final EnumSet features = EnumSet.noneOf(TestEnvironmentFeatures.class); @JsonProperty("numOfInstances") private int numOfInstances = 1; @@ -93,7 +92,7 @@ public TargetJvm getTargetJvm() { } @JsonIgnore - public Set getFeatures() { + public EnumSet getFeatures() { return this.features; } diff --git a/wrapper/src/test/java/integration/container/ConnectionStringHelper.java b/wrapper/src/test/java/integration/container/ConnectionStringHelper.java index ba3ab227f..84d176959 100644 --- a/wrapper/src/test/java/integration/container/ConnectionStringHelper.java +++ b/wrapper/src/test/java/integration/container/ConnectionStringHelper.java @@ -21,8 +21,8 @@ import integration.TestEnvironmentFeatures; import integration.TestEnvironmentInfo; import integration.TestInstanceInfo; +import java.util.EnumSet; import java.util.Properties; -import java.util.Set; import software.amazon.jdbc.PropertyDefinition; import software.amazon.jdbc.util.StringUtils; @@ -106,6 +106,19 @@ public static String getWrapperUrl(TestInstanceInfo instance) { TestEnvironment.getCurrent().getInfo().getDatabaseInfo().getDefaultDbName()); } + public static String getWrapperUrl(String host) { + return getWrapperUrl( + TestEnvironment.getCurrent().getCurrentDriver(), + host, + TestEnvironment.getCurrent() + .getInfo() + .getDatabaseInfo() + .getInstances() + .get(0) + .getPort(), + TestEnvironment.getCurrent().getInfo().getDatabaseInfo().getDefaultDbName()); + } + public static String getWrapperUrl(String host, int port, String databaseName) { return getWrapperUrl(TestEnvironment.getCurrent().getCurrentDriver(), host, port, databaseName); } @@ -196,7 +209,7 @@ public static Properties getDefaultProperties() { props.setProperty(PropertyDefinition.USER.name, envInfo.getDatabaseInfo().getUsername()); props.setProperty(PropertyDefinition.PASSWORD.name, envInfo.getDatabaseInfo().getPassword()); - final Set features = envInfo.getRequest().getFeatures(); + final EnumSet features = envInfo.getRequest().getFeatures(); props.setProperty(PropertyDefinition.ENABLE_TELEMETRY.name, "true"); props.setProperty(PropertyDefinition.TELEMETRY_SUBMIT_TOPLEVEL.name, "true"); props.setProperty( diff --git a/wrapper/src/test/java/integration/container/TestDriverProvider.java b/wrapper/src/test/java/integration/container/TestDriverProvider.java index 817d0a1b0..7a7d0df1d 100644 --- a/wrapper/src/test/java/integration/container/TestDriverProvider.java +++ b/wrapper/src/test/java/integration/container/TestDriverProvider.java @@ -39,8 +39,8 @@ import java.sql.SQLException; import java.util.ArrayList; import java.util.Arrays; +import java.util.EnumSet; import java.util.List; -import java.util.Set; 
import java.util.concurrent.TimeUnit; import java.util.logging.Logger; import java.util.stream.Collectors; @@ -187,7 +187,7 @@ public void beforeEach(ExtensionContext context) throws Exception { @Override public void afterEach(ExtensionContext context) throws Exception { - Set features = TestEnvironment.getCurrent() + EnumSet features = TestEnvironment.getCurrent() .getInfo() .getRequest() .getFeatures(); diff --git a/wrapper/src/test/java/integration/container/TestEnvironment.java b/wrapper/src/test/java/integration/container/TestEnvironment.java index a31eed239..51ba07fdb 100644 --- a/wrapper/src/test/java/integration/container/TestEnvironment.java +++ b/wrapper/src/test/java/integration/container/TestEnvironment.java @@ -37,9 +37,9 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Collections; +import java.util.EnumSet; import java.util.HashMap; import java.util.List; -import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.logging.Logger; import software.amazon.jdbc.Driver; @@ -249,7 +249,7 @@ public boolean isTestDriverAllowed(TestDriver testDriver) { boolean disabledByFeature; boolean driverCompatibleToDatabaseEngine; - final Set features = this.info.getRequest().getFeatures(); + final EnumSet features = this.info.getRequest().getFeatures(); final DatabaseEngine databaseEngine = this.info.getRequest().getDatabaseEngine(); switch (testDriver) { diff --git a/wrapper/src/test/java/integration/container/condition/DisableOnTestFeatureCondition.java b/wrapper/src/test/java/integration/container/condition/DisableOnTestFeatureCondition.java index 841383cae..c38ecde02 100644 --- a/wrapper/src/test/java/integration/container/condition/DisableOnTestFeatureCondition.java +++ b/wrapper/src/test/java/integration/container/condition/DisableOnTestFeatureCondition.java @@ -21,7 +21,7 @@ import integration.TestEnvironmentFeatures; import integration.container.TestEnvironment; import java.util.Arrays; -import java.util.Set; +import java.util.EnumSet; import org.junit.jupiter.api.extension.ConditionEvaluationResult; import org.junit.jupiter.api.extension.ExecutionCondition; import org.junit.jupiter.api.extension.ExtensionContext; @@ -33,7 +33,7 @@ public DisableOnTestFeatureCondition() {} @Override public ConditionEvaluationResult evaluateExecutionCondition(ExtensionContext context) { - Set features = + EnumSet features = TestEnvironment.getCurrent().getInfo().getRequest().getFeatures(); boolean disabled = diff --git a/wrapper/src/test/java/integration/container/condition/EnableOnTestFeatureCondition.java b/wrapper/src/test/java/integration/container/condition/EnableOnTestFeatureCondition.java index 673a3379e..6c88c60ca 100644 --- a/wrapper/src/test/java/integration/container/condition/EnableOnTestFeatureCondition.java +++ b/wrapper/src/test/java/integration/container/condition/EnableOnTestFeatureCondition.java @@ -21,7 +21,7 @@ import integration.TestEnvironmentFeatures; import integration.container.TestEnvironment; import java.util.Arrays; -import java.util.Set; +import java.util.EnumSet; import java.util.logging.Logger; import org.junit.jupiter.api.extension.ConditionEvaluationResult; import org.junit.jupiter.api.extension.ExecutionCondition; @@ -37,7 +37,7 @@ public EnableOnTestFeatureCondition() {} @Override public ConditionEvaluationResult evaluateExecutionCondition(ExtensionContext context) { - final Set features = + final EnumSet features = TestEnvironment.getCurrent().getInfo().getRequest().getFeatures(); boolean enabled = diff --git 
a/wrapper/src/test/java/integration/container/tests/AdvancedPerformanceTest.java b/wrapper/src/test/java/integration/container/tests/AdvancedPerformanceTest.java index 38087f2dc..5339a177e 100644 --- a/wrapper/src/test/java/integration/container/tests/AdvancedPerformanceTest.java +++ b/wrapper/src/test/java/integration/container/tests/AdvancedPerformanceTest.java @@ -29,7 +29,6 @@ import integration.container.ConnectionStringHelper; import integration.container.TestDriverProvider; import integration.container.TestEnvironment; -import integration.container.aurora.TestAuroraHostListProvider; import integration.container.aurora.TestPluginServiceImpl; import integration.container.condition.DisableOnTestFeature; import integration.container.condition.EnableOnTestFeature; @@ -66,9 +65,11 @@ import org.junit.jupiter.api.extension.ExtendWith; import org.junit.jupiter.params.provider.Arguments; import software.amazon.jdbc.PropertyDefinition; +import software.amazon.jdbc.hostlistprovider.RdsHostListProvider; import software.amazon.jdbc.plugin.efm.HostMonitorThreadContainer; import software.amazon.jdbc.plugin.efm2.HostMonitorServiceImpl; import software.amazon.jdbc.plugin.failover.FailoverSuccessSQLException; +import software.amazon.jdbc.util.CoreServicesContainer; import software.amazon.jdbc.util.StringUtils; @TestMethodOrder(MethodOrderer.MethodName.class) @@ -686,7 +687,7 @@ private void ensureClusterHealthy() throws InterruptedException { auroraUtil.makeSureInstancesUp(TimeUnit.MINUTES.toSeconds(5)); - TestAuroraHostListProvider.clearCache(); + CoreServicesContainer.getInstance().getStorageService().clearAll(); TestPluginServiceImpl.clearHostAvailabilityCache(); HostMonitorThreadContainer.releaseInstance(); HostMonitorServiceImpl.closeAllMonitors(); diff --git a/wrapper/src/test/java/integration/container/tests/AutoscalingTests.java b/wrapper/src/test/java/integration/container/tests/AutoscalingTests.java index 307e00bb9..28a6c63e4 100644 --- a/wrapper/src/test/java/integration/container/tests/AutoscalingTests.java +++ b/wrapper/src/test/java/integration/container/tests/AutoscalingTests.java @@ -52,7 +52,7 @@ import software.amazon.jdbc.HikariPoolConfigurator; import software.amazon.jdbc.HikariPooledConnectionProvider; import software.amazon.jdbc.PropertyDefinition; -import software.amazon.jdbc.hostlistprovider.AuroraHostListProvider; +import software.amazon.jdbc.hostlistprovider.RdsHostListProvider; import software.amazon.jdbc.plugin.failover.FailoverSuccessSQLException; import software.amazon.jdbc.plugin.readwritesplitting.ReadWriteSplittingPlugin; @@ -104,7 +104,7 @@ public void test_pooledConnectionAutoScaling_setReadOnlyOnOldConnection() final Properties props = getProps(); final long topologyRefreshRateMs = 5000; ReadWriteSplittingPlugin.READER_HOST_SELECTOR_STRATEGY.set(props, "leastConnections"); - AuroraHostListProvider.CLUSTER_TOPOLOGY_REFRESH_RATE_MS.set(props, + RdsHostListProvider.CLUSTER_TOPOLOGY_REFRESH_RATE_MS.set(props, Long.toString(topologyRefreshRateMs)); final TestEnvironmentInfo testInfo = TestEnvironment.getCurrent().getInfo(); @@ -186,7 +186,7 @@ public void test_pooledConnectionAutoScaling_failoverFromDeletedReader() final Properties props = getPropsWithFailover(); final long topologyRefreshRateMs = 5000; ReadWriteSplittingPlugin.READER_HOST_SELECTOR_STRATEGY.set(props, "leastConnections"); - AuroraHostListProvider.CLUSTER_TOPOLOGY_REFRESH_RATE_MS.set(props, + RdsHostListProvider.CLUSTER_TOPOLOGY_REFRESH_RATE_MS.set(props, Long.toString(topologyRefreshRateMs)); final 
TestEnvironmentInfo testInfo = TestEnvironment.getCurrent().getInfo(); diff --git a/wrapper/src/test/java/integration/container/tests/FailoverTest.java b/wrapper/src/test/java/integration/container/tests/FailoverTest.java index 347264561..241499f63 100644 --- a/wrapper/src/test/java/integration/container/tests/FailoverTest.java +++ b/wrapper/src/test/java/integration/container/tests/FailoverTest.java @@ -62,7 +62,7 @@ import org.junit.jupiter.api.extension.ExtendWith; import software.amazon.jdbc.PropertyDefinition; import software.amazon.jdbc.ds.AwsWrapperDataSource; -import software.amazon.jdbc.hostlistprovider.AuroraHostListProvider; +import software.amazon.jdbc.hostlistprovider.RdsHostListProvider; import software.amazon.jdbc.plugin.failover.FailoverSuccessSQLException; import software.amazon.jdbc.plugin.failover.TransactionStateUnknownSQLException; import software.amazon.jdbc.util.SqlState; @@ -505,11 +505,11 @@ public void test_takeOverConnectionProperties() throws SQLException { /** * Current writer dies, a reader instance is nominated to be a new writer, failover to the new - * writer. Autocommit is set to false and the keepSessionStateOnFailover property is set to true. + * writer. Autocommit is set to false. */ @TestTemplate @EnableOnNumOfInstances(min = 2) - public void test_failFromWriterWhereKeepSessionStateOnFailoverIsTrue() throws SQLException { + public void test_failFromWriter() throws SQLException { final String initialWriterId = this.currentWriter; TestInstanceInfo initialWriterInstanceInfo = @@ -688,7 +688,7 @@ protected Properties initDefaultProxiedProps() { // Some tests temporarily disable connectivity for 5 seconds. The socket timeout needs to be less than this to // trigger driver failover. PropertyDefinition.SOCKET_TIMEOUT.set(props, "2000"); - AuroraHostListProvider.CLUSTER_INSTANCE_HOST_PATTERN.set( + RdsHostListProvider.CLUSTER_INSTANCE_HOST_PATTERN.set( props, "?." 
+ TestEnvironment.getCurrent().getInfo().getProxyDatabaseInfo().getInstanceEndpointSuffix() + ":" + TestEnvironment.getCurrent().getInfo().getProxyDatabaseInfo().getInstanceEndpointPort()); diff --git a/wrapper/src/test/java/integration/container/tests/PerformanceTest.java b/wrapper/src/test/java/integration/container/tests/PerformanceTest.java index 73e0b338c..5993a716b 100644 --- a/wrapper/src/test/java/integration/container/tests/PerformanceTest.java +++ b/wrapper/src/test/java/integration/container/tests/PerformanceTest.java @@ -61,9 +61,7 @@ import org.junit.jupiter.api.extension.ExtendWith; import org.junit.jupiter.params.provider.Arguments; import software.amazon.jdbc.PropertyDefinition; -import software.amazon.jdbc.hostlistprovider.AuroraHostListProvider; import software.amazon.jdbc.hostlistprovider.RdsHostListProvider; -import software.amazon.jdbc.hostlistprovider.monitoring.MonitoringRdsHostListProvider; import software.amazon.jdbc.plugin.OpenedConnectionTracker; import software.amazon.jdbc.plugin.efm.HostMonitorThreadContainer; import software.amazon.jdbc.plugin.efm2.HostMonitorServiceImpl; @@ -149,8 +147,6 @@ public void test_FailureDetectionTime_EnhancedMonitoringEnabled(final String efm OpenedConnectionTracker.clearCache(); HostMonitorThreadContainer.releaseInstance(); HostMonitorServiceImpl.closeAllMonitors(); - AuroraHostListProvider.clearAll(); - MonitoringRdsHostListProvider.clearCache(); enhancedFailureMonitoringPerfDataList.clear(); @@ -231,8 +227,6 @@ public void test_FailureDetectionTime_FailoverAndEnhancedMonitoringEnabled(final OpenedConnectionTracker.clearCache(); HostMonitorThreadContainer.releaseInstance(); HostMonitorServiceImpl.closeAllMonitors(); - AuroraHostListProvider.clearAll(); - MonitoringRdsHostListProvider.clearCache(); failoverWithEfmPerfDataList.clear(); @@ -319,8 +313,6 @@ private void test_FailoverTime_SocketTimeout(final String plugins) throws IOExce OpenedConnectionTracker.clearCache(); HostMonitorThreadContainer.releaseInstance(); HostMonitorServiceImpl.closeAllMonitors(); - AuroraHostListProvider.clearAll(); - MonitoringRdsHostListProvider.clearCache(); failoverWithSocketTimeoutPerfDataList.clear(); diff --git a/wrapper/src/test/java/integration/container/tests/ReadWriteSplittingTests.java b/wrapper/src/test/java/integration/container/tests/ReadWriteSplittingTests.java index b8c96edbe..466b6dca4 100644 --- a/wrapper/src/test/java/integration/container/tests/ReadWriteSplittingTests.java +++ b/wrapper/src/test/java/integration/container/tests/ReadWriteSplittingTests.java @@ -73,7 +73,7 @@ import software.amazon.jdbc.HikariPooledConnectionProvider; import software.amazon.jdbc.PluginService; import software.amazon.jdbc.PropertyDefinition; -import software.amazon.jdbc.hostlistprovider.AuroraHostListProvider; +import software.amazon.jdbc.hostlistprovider.RdsHostListProvider; import software.amazon.jdbc.plugin.failover.FailoverConnectionPlugin; import software.amazon.jdbc.plugin.failover.FailoverFailedSQLException; import software.amazon.jdbc.plugin.failover.FailoverSuccessSQLException; @@ -120,17 +120,17 @@ public void tearDownEach() { } - protected static Properties getProxiedPropsWithFailover() { + protected Properties getProxiedPropsWithFailover() { final Properties props = getPropsWithFailover(); - AuroraHostListProvider.CLUSTER_INSTANCE_HOST_PATTERN.set(props, + RdsHostListProvider.CLUSTER_INSTANCE_HOST_PATTERN.set(props, "?." 
+ TestEnvironment.getCurrent().getInfo().getProxyDatabaseInfo().getInstanceEndpointSuffix() + ":" + TestEnvironment.getCurrent().getInfo().getProxyDatabaseInfo().getInstanceEndpointPort()); return props; } - protected static Properties getProxiedProps() { + protected Properties getProxiedProps() { final Properties props = getProps(); - AuroraHostListProvider.CLUSTER_INSTANCE_HOST_PATTERN.set(props, + RdsHostListProvider.CLUSTER_INSTANCE_HOST_PATTERN.set(props, "?." + TestEnvironment.getCurrent().getInfo().getProxyDatabaseInfo().getInstanceEndpointSuffix() + ":" + TestEnvironment.getCurrent().getInfo().getProxyDatabaseInfo().getInstanceEndpointPort()); return props; @@ -145,18 +145,34 @@ protected static Properties getDefaultPropsNoPlugins() { return props; } - protected static Properties getProps() { + protected Properties getProps() { final Properties props = getDefaultPropsNoPlugins(); PropertyDefinition.PLUGINS.set(props, "readWriteSplitting"); return props; } - protected static Properties getPropsWithFailover() { + protected Properties getPropsWithFailover() { final Properties props = getDefaultPropsNoPlugins(); PropertyDefinition.PLUGINS.set(props, "failover,efm2,readWriteSplitting"); return props; } + protected String getWriterEndpoint() { + return TestEnvironment.getCurrent() + .getInfo() + .getDatabaseInfo() + .getInstances() + .get(0) + .getHost(); + } + + protected String getReaderClusterEndpoint() { + return TestEnvironment.getCurrent() + .getInfo() + .getDatabaseInfo() + .getClusterReadOnlyEndpoint(); + } + @TestTemplate public void test_connectToWriter_switchSetReadOnly() throws SQLException { final String url = ConnectionStringHelper.getWrapperUrl(); @@ -217,7 +233,7 @@ protected String getWrapperReaderInstanceUrl() { @TestTemplate public void test_connectToReaderCluster_setReadOnlyTrueFalse() throws SQLException { - final String url = ConnectionStringHelper.getWrapperReaderClusterUrl(); + final String url = ConnectionStringHelper.getWrapperUrl(getReaderClusterEndpoint()); LOGGER.finest("Connecting to url " + url); try (final Connection conn = DriverManager.getConnection(url, getProps())) { final String readerConnectionId = auroraUtil.queryInstanceId(conn); @@ -294,7 +310,8 @@ public void test_setReadOnlyFalseInTransaction_setAutocommitFalse() throws SQLEx @TestTemplate @EnableOnDatabaseEngine({DatabaseEngine.MYSQL}) public void test_setReadOnlyTrueInTransaction() throws SQLException { - try (final Connection conn = DriverManager.getConnection(ConnectionStringHelper.getWrapperUrl(), getProps())) { + try (final Connection conn = DriverManager.getConnection( + ConnectionStringHelper.getWrapperUrl(getWriterEndpoint()), getProps())) { final String writerConnectionId = auroraUtil.queryInstanceId(conn); @@ -336,7 +353,9 @@ public void test_setReadOnlyTrue_allReadersDown() throws SQLException { final String writerConnectionId = auroraUtil.queryInstanceId(conn); - // Kill all reader instances + // Kill reader endpoint and all reader instances + ProxyHelper.disableConnectivity(TestEnvironment.getCurrent().getInfo().getProxyDatabaseInfo() + .getClusterReadOnlyEndpoint()); final List instanceIDs = TestEnvironment.getCurrent().getInfo().getDatabaseInfo().getInstances().stream() .map(TestInstanceInfo::getInstanceId).collect(Collectors.toList()); @@ -448,8 +467,11 @@ public void test_writerFailover_setReadOnlyTrueFalse() throws SQLException { final String originalWriterId = auroraUtil.queryInstanceId(conn); - // Kill all reader instances - List instances = 
TestEnvironment.getCurrent().getInfo().getProxyDatabaseInfo().getInstances(); + // Kill reader endpoint and all reader instances + ProxyHelper.disableConnectivity(TestEnvironment.getCurrent().getInfo().getProxyDatabaseInfo() + .getClusterReadOnlyEndpoint()); + List instances = TestEnvironment.getCurrent().getInfo().getProxyDatabaseInfo() + .getInstances(); for (int i = 1; i < instances.size(); i++) { ProxyHelper.disableConnectivity(instances.get(i).getInstanceId()); } @@ -559,7 +581,7 @@ public void test_failoverReaderToWriter_setReadOnlyTrueFalse() final List instances = TestEnvironment.getCurrent().getInfo().getProxyDatabaseInfo() .getInstances(); - // Kill all instances except the writer + // Kill all instances except the writer and cluster endpoint for (final TestInstanceInfo instance : instances) { final String instanceId = instance.getInstanceId(); if (writerConnectionId.equals(instanceId)) { @@ -567,6 +589,8 @@ public void test_failoverReaderToWriter_setReadOnlyTrueFalse() } ProxyHelper.disableConnectivity(instanceId); } + ProxyHelper.disableConnectivity(TestEnvironment.getCurrent().getInfo().getProxyDatabaseInfo() + .getClusterReadOnlyEndpoint()); auroraUtil.assertFirstQueryThrows(conn, FailoverSuccessSQLException.class); assertFalse(conn.isClosed()); diff --git a/wrapper/src/test/java/integration/container/tests/SimpleReadWriteSplittingTest.java b/wrapper/src/test/java/integration/container/tests/SimpleReadWriteSplittingTest.java new file mode 100644 index 000000000..3b7381c4a --- /dev/null +++ b/wrapper/src/test/java/integration/container/tests/SimpleReadWriteSplittingTest.java @@ -0,0 +1,211 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package integration.container.tests; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import integration.TestEnvironmentFeatures; +import integration.container.ConnectionStringHelper; +import integration.container.TestDriverProvider; +import integration.container.TestEnvironment; +import integration.container.condition.DisableOnTestFeature; +import integration.container.condition.EnableOnNumOfInstances; +import integration.container.condition.EnableOnTestFeature; +import integration.container.condition.MakeSureFirstInstanceWriter; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.SQLException; +import java.util.Properties; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.MethodOrderer; +import org.junit.jupiter.api.Order; +import org.junit.jupiter.api.TestMethodOrder; +import org.junit.jupiter.api.TestTemplate; +import org.junit.jupiter.api.extension.ExtendWith; +import software.amazon.jdbc.PropertyDefinition; +import software.amazon.jdbc.hostlistprovider.RdsHostListProvider; +import software.amazon.jdbc.plugin.srw.SimpleReadWriteSplittingPlugin; + +@TestMethodOrder(MethodOrderer.MethodName.class) +@ExtendWith(TestDriverProvider.class) +@EnableOnTestFeature(TestEnvironmentFeatures.FAILOVER_SUPPORTED) +@DisableOnTestFeature({ + TestEnvironmentFeatures.PERFORMANCE, + TestEnvironmentFeatures.RUN_HIBERNATE_TESTS_ONLY, + TestEnvironmentFeatures.RUN_AUTOSCALING_TESTS_ONLY, + TestEnvironmentFeatures.BLUE_GREEN_DEPLOYMENT, + TestEnvironmentFeatures.RUN_DB_METRICS_ONLY}) +@EnableOnNumOfInstances(min = 2) +@MakeSureFirstInstanceWriter +@Order(23) + + +public class SimpleReadWriteSplittingTest extends ReadWriteSplittingTests { + String pluginCode = "srw"; + String pluginCodesWithFailover = "failover2,efm2,srw"; + + protected Properties getSrwProps(boolean proxied, String plugins) { + final Properties props = getDefaultPropsNoPlugins(); + PropertyDefinition.PLUGINS.set(props, plugins); + if (proxied) { + props.setProperty(SimpleReadWriteSplittingPlugin.SRW_WRITE_ENDPOINT.name, + TestEnvironment.getCurrent().getInfo().getProxyDatabaseInfo().getClusterEndpoint()); + props.setProperty(SimpleReadWriteSplittingPlugin.SRW_READ_ENDPOINT.name, + TestEnvironment.getCurrent().getInfo().getProxyDatabaseInfo().getClusterReadOnlyEndpoint()); + } else { + props.setProperty(SimpleReadWriteSplittingPlugin.VERIFY_NEW_SRW_CONNECTIONS.name, "false"); + props.setProperty(SimpleReadWriteSplittingPlugin.SRW_WRITE_ENDPOINT.name, getWriterEndpoint()); + props.setProperty(SimpleReadWriteSplittingPlugin.SRW_READ_ENDPOINT.name, getReaderClusterEndpoint()); + } + return props; + } + + @TestTemplate + public void test_IncorrectReaderEndpoint() throws SQLException { + final Properties props = getDefaultPropsNoPlugins(); + PropertyDefinition.PLUGINS.set(props, pluginCode); + int port = TestEnvironment.getCurrent() + .getInfo() + .getDatabaseInfo() + .getClusterEndpointPort(); + props.setProperty(SimpleReadWriteSplittingPlugin.SRW_WRITE_ENDPOINT.name, getWriterEndpoint() + ":" + port); + props.setProperty(SimpleReadWriteSplittingPlugin.SRW_READ_ENDPOINT.name, getWriterEndpoint() + ":" + port); + + try (final Connection conn = DriverManager.getConnection( + ConnectionStringHelper.getWrapperClusterEndpointUrl(), props)) { + final String writerConnectionId = 
auroraUtil.queryInstanceId(conn);
+
+      // Switch to reader successfully
+      conn.setReadOnly(true);
+      final String readerConnectionId = auroraUtil.queryInstanceId(conn);
+      // Should stay on writer as fallback since reader endpoint points to a writer.
+      assertEquals(writerConnectionId, readerConnectionId);
+
+      // Switching back to the write endpoint should yield the same writer connection again.
+      conn.setReadOnly(false);
+      final String finalConnectionId = auroraUtil.queryInstanceId(conn);
+      assertEquals(writerConnectionId, finalConnectionId);
+    }
+  }
+
+  @TestTemplate
+  public void test_autoCommitStatePreserved_acrossConnectionSwitches() throws SQLException {
+    try (final Connection conn = DriverManager.getConnection(ConnectionStringHelper.getWrapperUrl(), getProps())) {
+
+      // Set autoCommit to false on writer
+      conn.setAutoCommit(false);
+      assertFalse(conn.getAutoCommit());
+      final String writerConnectionId = auroraUtil.queryInstanceId(conn);
+      conn.commit();
+
+      // Switch to reader - autoCommit should remain false
+      conn.setReadOnly(true);
+      assertFalse(conn.getAutoCommit());
+      final String readerConnectionId = auroraUtil.queryInstanceId(conn);
+      assertNotEquals(writerConnectionId, readerConnectionId);
+
+      // Change autoCommit on reader
+      conn.setAutoCommit(true);
+      assertTrue(conn.getAutoCommit());
+
+      // Switch back to writer - autoCommit should be true
+      conn.setReadOnly(false);
+      assertTrue(conn.getAutoCommit());
+      final String finalWriterConnectionId = auroraUtil.queryInstanceId(conn);
+      assertEquals(writerConnectionId, finalWriterConnectionId);
+    }
+  }
+
+  @Override
+  protected Properties getProps() {
+    return getSrwProps(false, pluginCode);
+  }
+
+  @Override
+  protected Properties getProxiedPropsWithFailover() {
+    final Properties props = getSrwProps(true, pluginCodesWithFailover);
+    RdsHostListProvider.CLUSTER_INSTANCE_HOST_PATTERN.set(props,
+        "?." + TestEnvironment.getCurrent().getInfo().getProxyDatabaseInfo().getInstanceEndpointSuffix()
+            + ":" + TestEnvironment.getCurrent().getInfo().getProxyDatabaseInfo().getInstanceEndpointPort());
+    return props;
+  }
+
+  @Override
+  protected Properties getProxiedProps() {
+    final Properties props = getSrwProps(true, pluginCode);
+    RdsHostListProvider.CLUSTER_INSTANCE_HOST_PATTERN.set(props,
+        "?." + TestEnvironment.getCurrent().getInfo().getProxyDatabaseInfo().getInstanceEndpointSuffix()
+            + ":" + TestEnvironment.getCurrent().getInfo().getProxyDatabaseInfo().getInstanceEndpointPort());
+    return props;
+  }
+
+  @Override
+  protected Properties getPropsWithFailover() {
+    return getSrwProps(false, pluginCodesWithFailover);
+  }
+
+  @Override
+  protected String getWriterEndpoint() {
+    return TestEnvironment.getCurrent()
+        .getInfo()
+        .getDatabaseInfo()
+        .getClusterEndpoint();
+  }
+
+  @TestTemplate
+  @Disabled("Skipping because it's not applicable to SimpleReadWriteSplitting.")
+  @Override
+  public void test_connectToReader_setReadOnlyTrueFalse() throws SQLException {
+    // This test checks that the connection does not change when setReadOnly(true)
+    // is called on a connection initially made with a reader instance endpoint.
+    // Not applicable: srw changes the connection to the srwReadEndpoint.
+  }
+
+  @TestTemplate
+  @Disabled("Skipping because it's not applicable to SimpleReadWriteSplitting.")
+  @Override
+  public void test_pooledConnectionFailoverWithClusterURL() {
+    // Skip this test for simple read write splitting as it relies on there NOT being a stored read/write splitting
+    // connection to the cluster endpoint.
+ } + + @TestTemplate + @Disabled("Skipping because it's not applicable to SimpleReadWriteSplitting.") + @Override + public void test_failoverToNewReader_setReadOnlyFalseTrue() { + // Skip this test for simple read write splitting as disabling connectivity to a reader cluster endpoint does not + // trigger reader to reader failover but rather forces defaulting to the writer. + } + + @TestTemplate + @Disabled("Skipping because it's not applicable to SimpleReadWriteSplitting.") + @Override + public void test_pooledConnection_leastConnectionsStrategy() { + // Skip this test for simple read write splitting as there is no reader selection strategy. + } + + @TestTemplate + @Disabled("Skipping because it's not applicable to SimpleReadWriteSplitting.") + @Override + public void test_pooledConnection_leastConnectionsWithPoolMapping() { + // Skip this test for simple read write splitting as there is no reader selection strategy. + } +} + diff --git a/wrapper/src/test/java/software/amazon/jdbc/ConnectionPluginManagerTests.java b/wrapper/src/test/java/software/amazon/jdbc/ConnectionPluginManagerTests.java index 8a880a39d..735e83eac 100644 --- a/wrapper/src/test/java/software/amazon/jdbc/ConnectionPluginManagerTests.java +++ b/wrapper/src/test/java/software/amazon/jdbc/ConnectionPluginManagerTests.java @@ -53,6 +53,7 @@ import software.amazon.jdbc.mock.TestPluginThrowException; import software.amazon.jdbc.mock.TestPluginTwo; import software.amazon.jdbc.plugin.AuroraConnectionTrackerPlugin; +import software.amazon.jdbc.plugin.AuroraInitialConnectionStrategyPlugin; import software.amazon.jdbc.plugin.DefaultConnectionPlugin; import software.amazon.jdbc.plugin.LogQueryConnectionPlugin; import software.amazon.jdbc.plugin.efm2.HostMonitoringConnectionPlugin; @@ -565,12 +566,13 @@ public void testDefaultPlugins() throws SQLException { testProperties, mockTelemetryFactory, mockConnectionProvider, null)); target.initPlugins(mockServicesContainer, configurationProfile); - assertEquals(4, target.plugins.size()); - assertEquals(AuroraConnectionTrackerPlugin.class, target.plugins.get(0).getClass()); + assertEquals(5, target.plugins.size()); + assertEquals(AuroraInitialConnectionStrategyPlugin.class, target.plugins.get(0).getClass()); + assertEquals(AuroraConnectionTrackerPlugin.class, target.plugins.get(1).getClass()); assertEquals(software.amazon.jdbc.plugin.failover2.FailoverConnectionPlugin.class, - target.plugins.get(1).getClass()); - assertEquals(HostMonitoringConnectionPlugin.class, target.plugins.get(2).getClass()); - assertEquals(DefaultConnectionPlugin.class, target.plugins.get(3).getClass()); + target.plugins.get(2).getClass()); + assertEquals(HostMonitoringConnectionPlugin.class, target.plugins.get(3).getClass()); + assertEquals(DefaultConnectionPlugin.class, target.plugins.get(4).getClass()); } @Test @@ -609,6 +611,11 @@ public void testTwoConnectionsDoNotBlockOneAnother() { final ConnectionProvider mockConnectionProvider1 = Mockito.mock(ConnectionProvider.class); final ConnectionWrapper mockConnectionWrapper1 = Mockito.mock(ConnectionWrapper.class); final PluginService mockPluginService1 = Mockito.mock(PluginService.class); + final PluginManagerService mockPluginManagerService1 = Mockito.mock(PluginManagerService.class); + final FullServicesContainer mockServicesContainer1 = Mockito.mock(FullServicesContainer.class); + when(mockConnectionWrapper1.getServicesContainer()).thenReturn(mockServicesContainer1); + when(mockServicesContainer1.getPluginService()).thenReturn(mockPluginService1); + 
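// ConnectionWrapper now exposes getServicesContainer() (added earlier in this change),
// and the code under test reaches the plugin service and plugin manager service through
// that FullServicesContainer, so each mocked wrapper stubs the whole container chain.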
when(mockServicesContainer1.getPluginManagerService()).thenReturn(mockPluginManagerService1); final TelemetryFactory mockTelemetryFactory1 = Mockito.mock(TelemetryFactory.class); final Object object1 = new Object(); when(mockPluginService1.getTelemetryFactory()).thenReturn(mockTelemetryFactory1); @@ -617,10 +624,14 @@ public void testTwoConnectionsDoNotBlockOneAnother() { final ConnectionPluginManager pluginManager1 = new ConnectionPluginManager( mockConnectionProvider1, null, testProperties, testPlugins, mockTelemetryFactory1); - final ConnectionProvider mockConnectionProvider2 = Mockito.mock(ConnectionProvider.class); final ConnectionWrapper mockConnectionWrapper2 = Mockito.mock(ConnectionWrapper.class); final PluginService mockPluginService2 = Mockito.mock(PluginService.class); + final PluginManagerService mockPluginManagerService2 = Mockito.mock(PluginManagerService.class); + final FullServicesContainer mockServicesContainer2 = Mockito.mock(FullServicesContainer.class); + when(mockConnectionWrapper2.getServicesContainer()).thenReturn(mockServicesContainer2); + when(mockServicesContainer2.getPluginService()).thenReturn(mockPluginService2); + when(mockServicesContainer2.getPluginManagerService()).thenReturn(mockPluginManagerService2); final TelemetryFactory mockTelemetryFactory2 = Mockito.mock(TelemetryFactory.class); final Object object2 = new Object(); when(mockPluginService2.getTelemetryFactory()).thenReturn(mockTelemetryFactory2); diff --git a/wrapper/src/test/java/software/amazon/jdbc/DialectDetectionTests.java b/wrapper/src/test/java/software/amazon/jdbc/DialectDetectionTests.java index 3b47f12bb..1a5517559 100644 --- a/wrapper/src/test/java/software/amazon/jdbc/DialectDetectionTests.java +++ b/wrapper/src/test/java/software/amazon/jdbc/DialectDetectionTests.java @@ -43,13 +43,14 @@ import software.amazon.jdbc.dialect.Dialect; import software.amazon.jdbc.dialect.DialectManager; import software.amazon.jdbc.dialect.MariaDbDialect; +import software.amazon.jdbc.dialect.MultiAzClusterMysqlDialect; +import software.amazon.jdbc.dialect.MultiAzClusterPgDialect; import software.amazon.jdbc.dialect.MysqlDialect; import software.amazon.jdbc.dialect.PgDialect; -import software.amazon.jdbc.dialect.RdsMultiAzDbClusterMysqlDialect; -import software.amazon.jdbc.dialect.RdsMultiAzDbClusterPgDialect; import software.amazon.jdbc.dialect.RdsMysqlDialect; import software.amazon.jdbc.dialect.RdsPgDialect; import software.amazon.jdbc.exceptions.ExceptionManager; +import software.amazon.jdbc.hostlistprovider.HostListProviderService; import software.amazon.jdbc.targetdriverdialect.TargetDriverDialect; import software.amazon.jdbc.util.FullServicesContainer; import software.amazon.jdbc.util.storage.StorageService; @@ -70,10 +71,10 @@ public class DialectDetectionTests { @Mock private Statement mockStatement; @Mock private ResultSet mockSuccessResultSet; @Mock private ResultSet mockFailResultSet; + @Mock private ResultSetMetaData mockResultSetMetaData; @Mock private HostSpec mockHost; @Mock private ConnectionPluginManager mockPluginManager; @Mock private TargetDriverDialect mockTargetDriverDialect; - @Mock private ResultSetMetaData mockResultSetMetaData; @BeforeEach void setUp() throws SQLException { @@ -83,6 +84,8 @@ void setUp() throws SQLException { when(this.mockServicesContainer.getStorageService()).thenReturn(mockStorageService); when(this.mockConnection.createStatement()).thenReturn(this.mockStatement); when(this.mockHost.getUrl()).thenReturn("url"); + 
when(this.mockFailResultSet.getMetaData()).thenReturn(mockResultSetMetaData); + when(this.mockResultSetMetaData.getColumnCount()).thenReturn(4); when(this.mockFailResultSet.next()).thenReturn(false); mockPluginManager.plugins = new ArrayList<>(); } @@ -219,7 +222,7 @@ void testUpdateDialectPgToTaz() throws SQLException { final PluginServiceImpl target = getPluginService(LOCALHOST, PG_PROTOCOL); target.setInitialConnectionHostSpec(mockHost); target.updateDialect(mockConnection); - assertEquals(RdsMultiAzDbClusterPgDialect.class, target.dialect.getClass()); + assertEquals(MultiAzClusterPgDialect.class, target.dialect.getClass()); } @Test @@ -272,7 +275,7 @@ void testUpdateDialectMariaToMysqlTaz() throws SQLException { final PluginServiceImpl target = getPluginService(LOCALHOST, MARIA_PROTOCOL); target.setInitialConnectionHostSpec(mockHost); target.updateDialect(mockConnection); - assertEquals(RdsMultiAzDbClusterMysqlDialect.class, target.dialect.getClass()); + assertEquals(MultiAzClusterMysqlDialect.class, target.dialect.getClass()); } @Test diff --git a/wrapper/src/test/java/software/amazon/jdbc/DialectTests.java b/wrapper/src/test/java/software/amazon/jdbc/DialectTests.java index 4170f8556..e3c2df258 100644 --- a/wrapper/src/test/java/software/amazon/jdbc/DialectTests.java +++ b/wrapper/src/test/java/software/amazon/jdbc/DialectTests.java @@ -36,10 +36,10 @@ import software.amazon.jdbc.dialect.AuroraMysqlDialect; import software.amazon.jdbc.dialect.AuroraPgDialect; import software.amazon.jdbc.dialect.MariaDbDialect; +import software.amazon.jdbc.dialect.MultiAzClusterMysqlDialect; +import software.amazon.jdbc.dialect.MultiAzClusterPgDialect; import software.amazon.jdbc.dialect.MysqlDialect; import software.amazon.jdbc.dialect.PgDialect; -import software.amazon.jdbc.dialect.RdsMultiAzDbClusterMysqlDialect; -import software.amazon.jdbc.dialect.RdsMultiAzDbClusterPgDialect; import software.amazon.jdbc.dialect.RdsMysqlDialect; import software.amazon.jdbc.dialect.RdsPgDialect; @@ -51,11 +51,11 @@ public class DialectTests { @Mock private ResultSetMetaData mockResultSetMetaData; private final MysqlDialect mysqlDialect = new MysqlDialect(); private final RdsMysqlDialect rdsMysqlDialect = new RdsMysqlDialect(); - private final RdsMultiAzDbClusterMysqlDialect rdsTazMysqlDialect = new RdsMultiAzDbClusterMysqlDialect(); + private final MultiAzClusterMysqlDialect rdsTazMysqlDialect = new MultiAzClusterMysqlDialect(); private final AuroraMysqlDialect auroraMysqlDialect = new AuroraMysqlDialect(); private final PgDialect pgDialect = new PgDialect(); private final RdsPgDialect rdsPgDialect = new RdsPgDialect(); - private final RdsMultiAzDbClusterPgDialect rdsTazPgDialect = new RdsMultiAzDbClusterPgDialect(); + private final MultiAzClusterPgDialect rdsTazPgDialect = new MultiAzClusterPgDialect(); private final AuroraPgDialect auroraPgDialect = new AuroraPgDialect(); private final MariaDbDialect mariaDbDialect = new MariaDbDialect(); private AutoCloseable closeable; diff --git a/wrapper/src/test/java/software/amazon/jdbc/HikariPooledConnectionProviderTest.java b/wrapper/src/test/java/software/amazon/jdbc/HikariPooledConnectionProviderTest.java index 6e5844ccf..89f29acdd 100644 --- a/wrapper/src/test/java/software/amazon/jdbc/HikariPooledConnectionProviderTest.java +++ b/wrapper/src/test/java/software/amazon/jdbc/HikariPooledConnectionProviderTest.java @@ -141,13 +141,26 @@ void testConnectWithDefaultMapping() throws SQLException { Properties props = new Properties(); 
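// provider.connect(...) now returns a ConnectionInfo wrapper rather than a raw
// Connection, so the assertions below unwrap it with getConnection() and close the
// underlying connection in a finally block instead of using try-with-resources.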
props.setProperty(PropertyDefinition.USER.name, user1); props.setProperty(PropertyDefinition.PASSWORD.name, password); - try (Connection conn = provider.connect(protocol, mockDialect, mockTargetDriverDialect, mockHostSpec, props)) { + + ConnectionInfo connectionInfo = null; + try { + connectionInfo = + provider.connect(protocol, mockDialect, mockTargetDriverDialect, mockHostSpec, props); + Connection conn = connectionInfo.getConnection(); assertEquals(mockConnection, conn); assertEquals(1, provider.getHostCount()); final Set hosts = provider.getHosts(); assertEquals(expectedUrls, hosts); final Set keys = provider.getKeys(); assertEquals(expectedKeys, keys); + } finally { + if (connectionInfo != null && connectionInfo.getConnection() != null) { + try { + connectionInfo.getConnection().close(); + } catch (Exception ex) { + // ignore + } + } } } @@ -166,11 +179,24 @@ void testConnectWithCustomMapping() throws SQLException { Properties props = new Properties(); props.setProperty(PropertyDefinition.USER.name, user1); props.setProperty(PropertyDefinition.PASSWORD.name, password); - try (Connection conn = provider.connect(protocol, mockDialect, mockTargetDriverDialect, mockHostSpec, props)) { + + ConnectionInfo connectionInfo = null; + try { + connectionInfo = + provider.connect(protocol, mockDialect, mockTargetDriverDialect, mockHostSpec, props); + Connection conn = connectionInfo.getConnection(); assertEquals(mockConnection, conn); assertEquals(1, provider.getHostCount()); final Set keys = provider.getKeys(); assertEquals(expectedKeys, keys); + } finally { + if (connectionInfo != null && connectionInfo.getConnection() != null) { + try { + connectionInfo.getConnection().close(); + } catch (Exception ex) { + // ignore + } + } } } diff --git a/wrapper/src/test/java/software/amazon/jdbc/PluginServiceImplTests.java b/wrapper/src/test/java/software/amazon/jdbc/PluginServiceImplTests.java index db9a072a1..5ef1235ad 100644 --- a/wrapper/src/test/java/software/amazon/jdbc/PluginServiceImplTests.java +++ b/wrapper/src/test/java/software/amazon/jdbc/PluginServiceImplTests.java @@ -62,6 +62,7 @@ import software.amazon.jdbc.exceptions.ExceptionManager; import software.amazon.jdbc.hostavailability.HostAvailability; import software.amazon.jdbc.hostavailability.SimpleHostAvailabilityStrategy; +import software.amazon.jdbc.hostlistprovider.HostListProvider; import software.amazon.jdbc.profile.ConfigurationProfile; import software.amazon.jdbc.profile.ConfigurationProfileBuilder; import software.amazon.jdbc.states.SessionStateService; diff --git a/wrapper/src/test/java/software/amazon/jdbc/RoundRobinHostSelectorTest.java b/wrapper/src/test/java/software/amazon/jdbc/RoundRobinHostSelectorTest.java index 7a523a3e7..3f61a1ec8 100644 --- a/wrapper/src/test/java/software/amazon/jdbc/RoundRobinHostSelectorTest.java +++ b/wrapper/src/test/java/software/amazon/jdbc/RoundRobinHostSelectorTest.java @@ -27,12 +27,7 @@ import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import software.amazon.jdbc.HostRole; -import software.amazon.jdbc.HostSpec; -import software.amazon.jdbc.HostSpecBuilder; -import software.amazon.jdbc.RoundRobinHostSelector; import software.amazon.jdbc.hostavailability.SimpleHostAvailabilityStrategy; -import software.amazon.jdbc.util.HostSelectorUtils; public class RoundRobinHostSelectorTest { private static final int TEST_PORT = 5432; diff --git a/wrapper/src/test/java/software/amazon/jdbc/hostlistprovider/RdsHostListProviderTest.java 
b/wrapper/src/test/java/software/amazon/jdbc/hostlistprovider/RdsHostListProviderTest.java index 797d151be..991c0734b 100644 --- a/wrapper/src/test/java/software/amazon/jdbc/hostlistprovider/RdsHostListProviderTest.java +++ b/wrapper/src/test/java/software/amazon/jdbc/hostlistprovider/RdsHostListProviderTest.java @@ -17,31 +17,19 @@ package software.amazon.jdbc.hostlistprovider; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.atMostOnce; -import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doReturn; -import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; -import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; -import com.mysql.cj.exceptions.WrongArgumentException; import java.sql.Connection; -import java.sql.ResultSet; import java.sql.SQLException; -import java.sql.SQLSyntaxErrorException; -import java.sql.Statement; -import java.sql.Timestamp; -import java.time.Instant; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -50,21 +38,19 @@ import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import org.mockito.ArgumentCaptor; -import org.mockito.Captor; import org.mockito.Mock; import org.mockito.Mockito; import org.mockito.MockitoAnnotations; -import software.amazon.jdbc.HostListProviderService; import software.amazon.jdbc.HostRole; import software.amazon.jdbc.HostSpec; import software.amazon.jdbc.HostSpecBuilder; import software.amazon.jdbc.PluginService; -import software.amazon.jdbc.dialect.Dialect; +import software.amazon.jdbc.dialect.TopologyDialect; import software.amazon.jdbc.hostavailability.HostAvailability; import software.amazon.jdbc.hostavailability.SimpleHostAvailabilityStrategy; import software.amazon.jdbc.hostlistprovider.RdsHostListProvider.FetchTopologyResult; import software.amazon.jdbc.util.FullServicesContainer; +import software.amazon.jdbc.util.Pair; import software.amazon.jdbc.util.events.EventPublisher; import software.amazon.jdbc.util.storage.StorageService; import software.amazon.jdbc.util.storage.TestStorageServiceImpl; @@ -74,14 +60,13 @@ class RdsHostListProviderTest { private RdsHostListProvider rdsHostListProvider; @Mock private Connection mockConnection; - @Mock private Statement mockStatement; - @Mock private ResultSet mockResultSet; @Mock private FullServicesContainer mockServicesContainer; @Mock private PluginService mockPluginService; @Mock private HostListProviderService mockHostListProviderService; + @Mock private HostSpecBuilder mockHostSpecBuilder; @Mock private EventPublisher mockEventPublisher; - @Mock Dialect mockTopologyAwareDialect; - @Captor private ArgumentCaptor queryCaptor; + @Mock private TopologyUtils mockTopologyUtils; + @Mock private TopologyDialect mockDialect; private AutoCloseable closeable; private final HostSpec currentHostSpec = new HostSpecBuilder(new SimpleHostAvailabilityStrategy()) @@ -96,12 +81,12 @@ void setUp() 
throws SQLException { storageService = new TestStorageServiceImpl(mockEventPublisher); when(mockServicesContainer.getHostListProviderService()).thenReturn(mockHostListProviderService); when(mockServicesContainer.getStorageService()).thenReturn(storageService); + when(mockServicesContainer.getPluginService()).thenReturn(mockPluginService); when(mockPluginService.getCurrentConnection()).thenReturn(mockConnection); when(mockPluginService.connect(any(HostSpec.class), any(Properties.class))).thenReturn(mockConnection); when(mockPluginService.getCurrentHostSpec()).thenReturn(currentHostSpec); - when(mockConnection.createStatement()).thenReturn(mockStatement); - when(mockStatement.executeQuery(queryCaptor.capture())).thenReturn(mockResultSet); - when(mockHostListProviderService.getDialect()).thenReturn(mockTopologyAwareDialect); + when(mockPluginService.getHostSpecBuilder()).thenReturn(mockHostSpecBuilder); + when(mockHostListProviderService.getDialect()).thenReturn(mockDialect); when(mockHostListProviderService.getHostSpecBuilder()) .thenReturn(new HostSpecBuilder(new SimpleHostAvailabilityStrategy())); when(mockHostListProviderService.getCurrentConnection()).thenReturn(mockConnection); @@ -109,17 +94,13 @@ void setUp() throws SQLException { @AfterEach void tearDown() throws Exception { - RdsHostListProvider.clearAll(); storageService.clearAll(); closeable.close(); } private RdsHostListProvider getRdsHostListProvider(String originalUrl) throws SQLException { RdsHostListProvider provider = new RdsHostListProvider( - new Properties(), - originalUrl, - mockServicesContainer, - "foo", "bar", "baz"); + mockTopologyUtils, new Properties(), originalUrl, mockServicesContainer); provider.init(); return provider; } @@ -146,7 +127,8 @@ void testGetTopology_withForceUpdate_returnsUpdatedTopology() throws SQLExceptio final List newHosts = Collections.singletonList( new HostSpecBuilder(new SimpleHostAvailabilityStrategy()).host("newHost").build()); - doReturn(newHosts).when(rdsHostListProvider).queryForTopology(mockConnection); + doReturn(newHosts).when(mockTopologyUtils).queryForTopology( + eq(mockConnection), any(HostSpec.class), any(HostSpec.class)); final FetchTopologyResult result = rdsHostListProvider.getTopology(mockConnection, true); verify(rdsHostListProvider, atMostOnce()).queryForTopology(mockConnection); @@ -194,9 +176,8 @@ void testQueryForTopology_withDifferentDriverProtocol() throws SQLException { final List expectedPostgres = Collections.singletonList( new HostSpecBuilder(new SimpleHostAvailabilityStrategy()).host("postgresql").port(HostSpec.NO_PORT) .role(HostRole.WRITER).availability(HostAvailability.AVAILABLE).weight(0).build()); - when(mockResultSet.next()).thenReturn(true, false); - when(mockResultSet.getBoolean(eq(2))).thenReturn(true); - when(mockResultSet.getString(eq(1))).thenReturn("mysql"); + when(mockTopologyUtils.queryForTopology(eq(mockConnection), any(HostSpec.class), any(HostSpec.class))) + .thenReturn(expectedMySQL).thenReturn(expectedPostgres); rdsHostListProvider = getRdsHostListProvider("mysql://url/"); @@ -204,24 +185,11 @@ void testQueryForTopology_withDifferentDriverProtocol() throws SQLException { List hosts = rdsHostListProvider.queryForTopology(mockConnection); assertEquals(expectedMySQL, hosts); - when(mockResultSet.next()).thenReturn(true, false); - when(mockResultSet.getString(eq(1))).thenReturn("postgresql"); - rdsHostListProvider = getRdsHostListProvider("postgresql://url/"); hosts = rdsHostListProvider.queryForTopology(mockConnection); 
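// Topology lookups are now delegated to TopologyUtils.queryForTopology(Connection,
// HostSpec, HostSpec), which is stubbed above, so no Statement/ResultSet mocks are needed.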
assertEquals(expectedPostgres, hosts); } - @Test - void testQueryForTopology_queryResultsInException() throws SQLException { - rdsHostListProvider = getRdsHostListProvider("protocol://url/"); - when(mockStatement.executeQuery(queryCaptor.capture())).thenThrow(new SQLSyntaxErrorException()); - - assertThrows( - SQLException.class, - () -> rdsHostListProvider.queryForTopology(mockConnection)); - } - @Test void testGetCachedTopology_returnStoredTopology() throws SQLException { rdsHostListProvider = getRdsHostListProvider("jdbc:someprotocol://url"); @@ -233,202 +201,10 @@ void testGetCachedTopology_returnStoredTopology() throws SQLException { assertEquals(expected, result); } - @Test - void testTopologyCache_NoSuggestedClusterId() throws SQLException { - RdsHostListProvider.clearAll(); - - RdsHostListProvider provider1 = Mockito.spy(getRdsHostListProvider("jdbc:something://cluster-a.domain.com/")); - provider1.init(); - final List topologyClusterA = Arrays.asList( - new HostSpecBuilder(new SimpleHostAvailabilityStrategy()) - .host("instance-a-1.domain.com").port(HostSpec.NO_PORT).role(HostRole.WRITER).build(), - new HostSpecBuilder(new SimpleHostAvailabilityStrategy()) - .host("instance-a-2.domain.com").port(HostSpec.NO_PORT).role(HostRole.READER).build(), - new HostSpecBuilder(new SimpleHostAvailabilityStrategy()) - .host("instance-a-3.domain.com").port(HostSpec.NO_PORT).role(HostRole.READER).build()); - - doReturn(topologyClusterA) - .when(provider1).queryForTopology(any(Connection.class)); - - assertEquals(0, storageService.size(Topology.class)); - - final List topologyProvider1 = provider1.refresh(mock(Connection.class)); - assertEquals(topologyClusterA, topologyProvider1); - - RdsHostListProvider provider2 = Mockito.spy(getRdsHostListProvider("jdbc:something://cluster-b.domain.com/")); - provider2.init(); - assertNull(provider2.getStoredTopology()); - - final List topologyClusterB = Arrays.asList( - new HostSpecBuilder(new SimpleHostAvailabilityStrategy()) - .host("instance-b-1.domain.com").port(HostSpec.NO_PORT).role(HostRole.WRITER).build(), - new HostSpecBuilder(new SimpleHostAvailabilityStrategy()) - .host("instance-b-2.domain.com").port(HostSpec.NO_PORT).role(HostRole.READER).build(), - new HostSpecBuilder(new SimpleHostAvailabilityStrategy()) - .host("instance-b-3.domain.com").port(HostSpec.NO_PORT).role(HostRole.READER).build()); - doReturn(topologyClusterB).when(provider2).queryForTopology(any(Connection.class)); - - final List topologyProvider2 = provider2.refresh(mock(Connection.class)); - assertEquals(topologyClusterB, topologyProvider2); - - assertEquals(2, storageService.size(Topology.class)); - } - - @Test - void testTopologyCache_SuggestedClusterIdForRds() throws SQLException { - RdsHostListProvider.clearAll(); - - RdsHostListProvider provider1 = - Mockito.spy(getRdsHostListProvider("jdbc:something://cluster-a.cluster-xyz.us-east-2.rds.amazonaws.com/")); - provider1.init(); - final List topologyClusterA = Arrays.asList( - new HostSpecBuilder(new SimpleHostAvailabilityStrategy()) - .host("instance-a-1.xyz.us-east-2.rds.amazonaws.com") - .port(HostSpec.NO_PORT) - .role(HostRole.WRITER) - .build(), - new HostSpecBuilder(new SimpleHostAvailabilityStrategy()) - .host("instance-a-2.xyz.us-east-2.rds.amazonaws.com") - .port(HostSpec.NO_PORT) - .role(HostRole.READER) - .build(), - new HostSpecBuilder(new SimpleHostAvailabilityStrategy()) - .host("instance-a-3.xyz.us-east-2.rds.amazonaws.com") - .port(HostSpec.NO_PORT) - .role(HostRole.READER) - .build()); - - 
doReturn(topologyClusterA).when(provider1).queryForTopology(any(Connection.class)); - - assertEquals(0, storageService.size(Topology.class)); - - final List topologyProvider1 = provider1.refresh(mock(Connection.class)); - assertEquals(topologyClusterA, topologyProvider1); - - RdsHostListProvider provider2 = - Mockito.spy(getRdsHostListProvider("jdbc:something://cluster-a.cluster-xyz.us-east-2.rds.amazonaws.com/")); - provider2.init(); - - assertEquals(provider1.clusterId, provider2.clusterId); - assertTrue(provider1.isPrimaryClusterId); - assertTrue(provider2.isPrimaryClusterId); - - final List topologyProvider2 = provider2.refresh(mock(Connection.class)); - assertEquals(topologyClusterA, topologyProvider2); - - assertEquals(1, storageService.size(Topology.class)); - } - - @Test - void testTopologyCache_SuggestedClusterIdForInstance() throws SQLException { - RdsHostListProvider.clearAll(); - - RdsHostListProvider provider1 = - Mockito.spy(getRdsHostListProvider("jdbc:something://cluster-a.cluster-xyz.us-east-2.rds.amazonaws.com/")); - provider1.init(); - final List topologyClusterA = Arrays.asList( - new HostSpecBuilder(new SimpleHostAvailabilityStrategy()) - .host("instance-a-1.xyz.us-east-2.rds.amazonaws.com") - .port(HostSpec.NO_PORT) - .role(HostRole.WRITER) - .build(), - new HostSpecBuilder(new SimpleHostAvailabilityStrategy()) - .host("instance-a-2.xyz.us-east-2.rds.amazonaws.com") - .port(HostSpec.NO_PORT) - .role(HostRole.READER) - .build(), - new HostSpecBuilder(new SimpleHostAvailabilityStrategy()) - .host("instance-a-3.xyz.us-east-2.rds.amazonaws.com") - .port(HostSpec.NO_PORT) - .role(HostRole.READER) - .build()); - - doReturn(topologyClusterA).when(provider1).queryForTopology(any(Connection.class)); - - assertEquals(0, storageService.size(Topology.class)); - - final List topologyProvider1 = provider1.refresh(mock(Connection.class)); - assertEquals(topologyClusterA, topologyProvider1); - - RdsHostListProvider provider2 = - Mockito.spy(getRdsHostListProvider("jdbc:something://instance-a-3.xyz.us-east-2.rds.amazonaws.com/")); - provider2.init(); - - assertEquals(provider1.clusterId, provider2.clusterId); - assertTrue(provider1.isPrimaryClusterId); - assertTrue(provider2.isPrimaryClusterId); - - final List topologyProvider2 = provider2.refresh(mock(Connection.class)); - assertEquals(topologyClusterA, topologyProvider2); - - assertEquals(1, storageService.size(Topology.class)); - } - - @Test - void testTopologyCache_AcceptSuggestion() throws SQLException { - RdsHostListProvider.clearAll(); - - RdsHostListProvider provider1 = - Mockito.spy(getRdsHostListProvider("jdbc:something://instance-a-2.xyz.us-east-2.rds.amazonaws.com/")); - provider1.init(); - final List topologyClusterA = Arrays.asList( - new HostSpecBuilder(new SimpleHostAvailabilityStrategy()) - .host("instance-a-1.xyz.us-east-2.rds.amazonaws.com") - .port(HostSpec.NO_PORT) - .role(HostRole.WRITER) - .build(), - new HostSpecBuilder(new SimpleHostAvailabilityStrategy()) - .host("instance-a-2.xyz.us-east-2.rds.amazonaws.com") - .port(HostSpec.NO_PORT) - .role(HostRole.READER) - .build(), - new HostSpecBuilder(new SimpleHostAvailabilityStrategy()) - .host("instance-a-3.xyz.us-east-2.rds.amazonaws.com") - .port(HostSpec.NO_PORT) - .role(HostRole.READER) - .build()); - - doAnswer(a -> topologyClusterA).when(provider1).queryForTopology(any(Connection.class)); - - assertEquals(0, storageService.size(Topology.class)); - - List topologyProvider1 = provider1.refresh(mock(Connection.class)); - assertEquals(topologyClusterA, 
topologyProvider1); - - // RdsHostListProvider.logCache(); - - RdsHostListProvider provider2 = - Mockito.spy(getRdsHostListProvider("jdbc:something://cluster-a.cluster-xyz.us-east-2.rds.amazonaws.com/")); - provider2.init(); - - doAnswer(a -> topologyClusterA).when(provider2).queryForTopology(any(Connection.class)); - - final List topologyProvider2 = provider2.refresh(mock(Connection.class)); - assertEquals(topologyClusterA, topologyProvider2); - - assertNotEquals(provider1.clusterId, provider2.clusterId); - assertFalse(provider1.isPrimaryClusterId); - assertTrue(provider2.isPrimaryClusterId); - assertEquals(2, storageService.size(Topology.class)); - assertEquals("cluster-a.cluster-xyz.us-east-2.rds.amazonaws.com", - RdsHostListProvider.suggestedPrimaryClusterIdCache.get(provider1.clusterId)); - - // RdsHostListProvider.logCache(); - - topologyProvider1 = provider1.forceRefresh(mock(Connection.class)); - assertEquals(topologyClusterA, topologyProvider1); - assertEquals(provider1.clusterId, provider2.clusterId); - assertTrue(provider1.isPrimaryClusterId); - assertTrue(provider2.isPrimaryClusterId); - - // RdsHostListProvider.logCache(); - } - @Test void testIdentifyConnectionWithInvalidNodeIdQuery() throws SQLException { rdsHostListProvider = Mockito.spy(getRdsHostListProvider("jdbc:someprotocol://url")); - when(mockResultSet.next()).thenReturn(false); assertThrows(SQLException.class, () -> rdsHostListProvider.identifyConnection(mockConnection)); when(mockConnection.createStatement()).thenThrow(new SQLException("exception")); @@ -438,11 +214,10 @@ void testIdentifyConnectionWithInvalidNodeIdQuery() throws SQLException { @Test void testIdentifyConnectionNullTopology() throws SQLException { rdsHostListProvider = Mockito.spy(getRdsHostListProvider("jdbc:someprotocol://url")); - rdsHostListProvider.clusterInstanceTemplate = new HostSpecBuilder(new SimpleHostAvailabilityStrategy()) - .host("?.pattern").build(); + rdsHostListProvider.instanceTemplate = + new HostSpecBuilder(new SimpleHostAvailabilityStrategy()).host("?.pattern").build(); - when(mockResultSet.next()).thenReturn(true); - when(mockResultSet.getString(eq(1))).thenReturn("instance-1"); + when(mockTopologyUtils.getInstanceId(mockConnection)).thenReturn(Pair.create("instance-1", "instance-1")); doReturn(null).when(rdsHostListProvider).refresh(mockConnection); doReturn(null).when(rdsHostListProvider).forceRefresh(mockConnection); @@ -459,8 +234,7 @@ void testIdentifyConnectionHostNotInTopology() throws SQLException { .build()); rdsHostListProvider = Mockito.spy(getRdsHostListProvider("jdbc:someprotocol://url")); - when(mockResultSet.next()).thenReturn(true); - when(mockResultSet.getString(eq(1))).thenReturn("instance-1"); + when(mockTopologyUtils.getInstanceId(mockConnection)).thenReturn(Pair.create("instance-1", "instance-1")); doReturn(cachedTopology).when(rdsHostListProvider).refresh(mockConnection); doReturn(cachedTopology).when(rdsHostListProvider).forceRefresh(mockConnection); @@ -478,8 +252,7 @@ void testIdentifyConnectionHostInTopology() throws SQLException { final List cachedTopology = Collections.singletonList(expectedHost); rdsHostListProvider = Mockito.spy(getRdsHostListProvider("jdbc:someprotocol://url")); - when(mockResultSet.next()).thenReturn(true); - when(mockResultSet.getString(eq(1))).thenReturn("instance-a-1"); + when(mockTopologyUtils.getInstanceId(mockConnection)).thenReturn(Pair.create("instance-a-1", "instance-a-1")); doReturn(cachedTopology).when(rdsHostListProvider).refresh(mockConnection); 
doReturn(cachedTopology).when(rdsHostListProvider).forceRefresh(mockConnection); @@ -487,143 +260,4 @@ void testIdentifyConnectionHostInTopology() throws SQLException { assertEquals("instance-a-1.xyz.us-east-2.rds.amazonaws.com", actual.getHost()); assertEquals("instance-a-1", actual.getHostId()); } - - @Test - void testGetTopology_StaleRecord() throws SQLException { - rdsHostListProvider = Mockito.spy(getRdsHostListProvider("jdbc:someprotocol://url")); - rdsHostListProvider.isInitialized = true; - - final String hostName1 = "hostName1"; - final String hostName2 = "hostName2"; - final Double cpuUtilization = 11.1D; - final Double nodeLag = 0.123D; - final Timestamp firstTimestamp = Timestamp.from(Instant.now()); - final Timestamp secondTimestamp = new Timestamp(firstTimestamp.getTime() + 100); - when(mockResultSet.next()).thenReturn(true, true, false); - when(mockResultSet.getString(1)).thenReturn(hostName1).thenReturn(hostName2); - when(mockResultSet.getBoolean(2)).thenReturn(true).thenReturn(true); - when(mockResultSet.getDouble(3)).thenReturn(cpuUtilization).thenReturn(cpuUtilization); - when(mockResultSet.getDouble(4)).thenReturn(nodeLag).thenReturn(nodeLag); - when(mockResultSet.getTimestamp(5)).thenReturn(firstTimestamp).thenReturn(secondTimestamp); - long weight = Math.round(nodeLag) * 100L + Math.round(cpuUtilization); - final HostSpec expectedWriter = new HostSpecBuilder(new SimpleHostAvailabilityStrategy()) - .host(hostName2) - .port(-1) - .role(HostRole.WRITER) - .availability(HostAvailability.AVAILABLE) - .weight(weight) - .lastUpdateTime(secondTimestamp) - .build(); - - final FetchTopologyResult result = rdsHostListProvider.getTopology(mockConnection, true); - verify(rdsHostListProvider, atMostOnce()).queryForTopology(mockConnection); - assertEquals(1, result.hosts.size()); - assertEquals(expectedWriter, result.hosts.get(0)); - } - - @Test - void testGetTopology_InvalidLastUpdatedTimestamp() throws SQLException { - rdsHostListProvider = Mockito.spy(getRdsHostListProvider("jdbc:someprotocol://url")); - rdsHostListProvider.isInitialized = true; - - final String hostName = "hostName"; - final Double cpuUtilization = 11.1D; - final Double nodeLag = 0.123D; - when(mockResultSet.next()).thenReturn(true, false); - when(mockResultSet.getString(1)).thenReturn(hostName); - when(mockResultSet.getBoolean(2)).thenReturn(true); - when(mockResultSet.getDouble(3)).thenReturn(cpuUtilization); - when(mockResultSet.getDouble(4)).thenReturn(nodeLag); - when(mockResultSet.getTimestamp(5)).thenThrow(WrongArgumentException.class); - - final FetchTopologyResult result = rdsHostListProvider.getTopology(mockConnection, true); - verify(rdsHostListProvider, atMostOnce()).queryForTopology(mockConnection); - - final String expectedLastUpdatedTimeStampRounded = Timestamp.from(Instant.now()).toString().substring(0, 16); - assertEquals(1, result.hosts.size()); - assertEquals( - expectedLastUpdatedTimeStampRounded, - result.hosts.get(0).getLastUpdateTime().toString().substring(0, 16)); - } - - @Test - void testGetTopology_returnsLatestWriter() throws SQLException { - rdsHostListProvider = Mockito.spy(getRdsHostListProvider("jdbc:someprotocol://url")); - rdsHostListProvider.isInitialized = true; - - HostSpec expectedWriterHost = new HostSpecBuilder(new SimpleHostAvailabilityStrategy()) - .host("expectedWriterHost") - .role(HostRole.WRITER) - .lastUpdateTime(Timestamp.valueOf("3000-01-01 00:00:00")) - .build(); - - HostSpec unexpectedWriterHost0 = new HostSpecBuilder(new SimpleHostAvailabilityStrategy()) - 
.host("unexpectedWriterHost0") - .role(HostRole.WRITER) - .lastUpdateTime(Timestamp.valueOf("1000-01-01 00:00:00")) - .build(); - - HostSpec unexpectedWriterHost1 = new HostSpecBuilder(new SimpleHostAvailabilityStrategy()) - .host("unexpectedWriterHost1") - .role(HostRole.WRITER) - .lastUpdateTime(Timestamp.valueOf("2000-01-01 00:00:00")) - .build(); - - HostSpec unexpectedWriterHostWithNullLastUpdateTime0 = new HostSpecBuilder(new SimpleHostAvailabilityStrategy()) - .host("unexpectedWriterHostWithNullLastUpdateTime0") - .role(HostRole.WRITER) - .lastUpdateTime(null) - .build(); - - HostSpec unexpectedWriterHostWithNullLastUpdateTime1 = new HostSpecBuilder(new SimpleHostAvailabilityStrategy()) - .host("unexpectedWriterHostWithNullLastUpdateTime1") - .role(HostRole.WRITER) - .lastUpdateTime(null) - .build(); - - when(mockResultSet.next()).thenReturn(true, true, true, true, true, false); - - when(mockResultSet.getString(1)).thenReturn( - unexpectedWriterHostWithNullLastUpdateTime0.getHost(), - unexpectedWriterHost0.getHost(), - expectedWriterHost.getHost(), - unexpectedWriterHost1.getHost(), - unexpectedWriterHostWithNullLastUpdateTime1.getHost()); - when(mockResultSet.getBoolean(2)).thenReturn(true, true, true, true, true); - when(mockResultSet.getFloat(3)).thenReturn((float) 0, (float) 0, (float) 0, (float) 0, (float) 0); - when(mockResultSet.getFloat(4)).thenReturn((float) 0, (float) 0, (float) 0, (float) 0, (float) 0); - when(mockResultSet.getTimestamp(5)).thenReturn( - unexpectedWriterHostWithNullLastUpdateTime0.getLastUpdateTime(), - unexpectedWriterHost0.getLastUpdateTime(), - expectedWriterHost.getLastUpdateTime(), - unexpectedWriterHost1.getLastUpdateTime(), - unexpectedWriterHostWithNullLastUpdateTime1.getLastUpdateTime() - ); - - final FetchTopologyResult result = rdsHostListProvider.getTopology(mockConnection, true); - verify(rdsHostListProvider, atMostOnce()).queryForTopology(mockConnection); - - assertEquals(expectedWriterHost.getHost(), result.hosts.get(0).getHost()); - } - - @Test - void testClusterUrlUsedAsDefaultClusterId() throws SQLException { - String readerClusterUrl = "mycluster.cluster-ro-XYZ.us-east-1.rds.amazonaws.com"; - String expectedClusterId = "mycluster.cluster-XYZ.us-east-1.rds.amazonaws.com:1234"; - String connectionString = "jdbc:someprotocol://" + readerClusterUrl + ":1234/test"; - RdsHostListProvider provider1 = Mockito.spy(getRdsHostListProvider(connectionString)); - assertEquals(expectedClusterId, provider1.getClusterId()); - - List mockTopology = - Collections.singletonList(new HostSpecBuilder(new SimpleHostAvailabilityStrategy()).host("host").build()); - doReturn(mockTopology).when(provider1).queryForTopology(any(Connection.class)); - provider1.refresh(); - assertEquals(mockTopology, provider1.getStoredTopology()); - verify(provider1, times(1)).queryForTopology(mockConnection); - - RdsHostListProvider provider2 = Mockito.spy(getRdsHostListProvider(connectionString)); - assertEquals(expectedClusterId, provider2.getClusterId()); - assertEquals(mockTopology, provider2.getStoredTopology()); - verify(provider2, never()).queryForTopology(mockConnection); - } } diff --git a/wrapper/src/test/java/software/amazon/jdbc/hostlistprovider/RdsMultiAzDbClusterListProviderTest.java b/wrapper/src/test/java/software/amazon/jdbc/hostlistprovider/RdsMultiAzDbClusterListProviderTest.java deleted file mode 100644 index df6d6ee50..000000000 --- a/wrapper/src/test/java/software/amazon/jdbc/hostlistprovider/RdsMultiAzDbClusterListProviderTest.java +++ /dev/null @@ -1,470 +0,0 
@@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package software.amazon.jdbc.hostlistprovider; - -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.junit.jupiter.api.Assertions.assertNotEquals; -import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.junit.jupiter.api.Assertions.assertNull; -import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.atMostOnce; -import static org.mockito.Mockito.doAnswer; -import static org.mockito.Mockito.doReturn; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import java.sql.Connection; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.SQLSyntaxErrorException; -import java.sql.Statement; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; -import java.util.Properties; -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; -import org.mockito.ArgumentCaptor; -import org.mockito.Captor; -import org.mockito.Mock; -import org.mockito.Mockito; -import org.mockito.MockitoAnnotations; -import software.amazon.jdbc.HostListProviderService; -import software.amazon.jdbc.HostRole; -import software.amazon.jdbc.HostSpec; -import software.amazon.jdbc.HostSpecBuilder; -import software.amazon.jdbc.PluginService; -import software.amazon.jdbc.dialect.Dialect; -import software.amazon.jdbc.hostavailability.SimpleHostAvailabilityStrategy; -import software.amazon.jdbc.hostlistprovider.RdsHostListProvider.FetchTopologyResult; -import software.amazon.jdbc.util.FullServicesContainer; -import software.amazon.jdbc.util.events.EventPublisher; -import software.amazon.jdbc.util.storage.StorageService; -import software.amazon.jdbc.util.storage.TestStorageServiceImpl; - -class RdsMultiAzDbClusterListProviderTest { - private StorageService storageService; - private RdsMultiAzDbClusterListProvider rdsMazDbClusterHostListProvider; - - @Mock private Connection mockConnection; - @Mock private Statement mockStatement; - @Mock private ResultSet mockResultSet; - @Mock private FullServicesContainer mockServicesContainer; - @Mock private PluginService mockPluginService; - @Mock private HostListProviderService mockHostListProviderService; - @Mock private EventPublisher mockEventPublisher; - @Mock Dialect mockTopologyAwareDialect; - @Captor private ArgumentCaptor queryCaptor; - - private AutoCloseable closeable; - private final HostSpec currentHostSpec = new HostSpecBuilder(new SimpleHostAvailabilityStrategy()) - .host("foo").port(1234).build(); - private final List hosts = 
Arrays.asList( - new HostSpecBuilder(new SimpleHostAvailabilityStrategy()).host("host1").build(), - new HostSpecBuilder(new SimpleHostAvailabilityStrategy()).host("host2").build()); - - @BeforeEach - void setUp() throws SQLException { - closeable = MockitoAnnotations.openMocks(this); - storageService = new TestStorageServiceImpl(mockEventPublisher); - when(mockServicesContainer.getHostListProviderService()).thenReturn(mockHostListProviderService); - when(mockServicesContainer.getStorageService()).thenReturn(storageService); - when(mockPluginService.getCurrentConnection()).thenReturn(mockConnection); - when(mockPluginService.connect(any(HostSpec.class), any(Properties.class))).thenReturn(mockConnection); - when(mockPluginService.getCurrentHostSpec()).thenReturn(currentHostSpec); - when(mockConnection.createStatement()).thenReturn(mockStatement); - when(mockStatement.executeQuery(queryCaptor.capture())).thenReturn(mockResultSet); - when(mockHostListProviderService.getDialect()).thenReturn(mockTopologyAwareDialect); - when(mockHostListProviderService.getHostSpecBuilder()) - .thenReturn(new HostSpecBuilder(new SimpleHostAvailabilityStrategy())); - } - - @AfterEach - void tearDown() throws Exception { - RdsMultiAzDbClusterListProvider.clearAll(); - storageService.clearAll(); - closeable.close(); - } - - private RdsMultiAzDbClusterListProvider getRdsMazDbClusterHostListProvider(String originalUrl) throws SQLException { - RdsMultiAzDbClusterListProvider provider = new RdsMultiAzDbClusterListProvider( - new Properties(), - originalUrl, - mockServicesContainer, - "foo", - "bar", - "baz", - "fang", - "li"); - provider.init(); - // provider.clusterId = "cluster-id"; - return provider; - } - - @Test - void testGetTopology_returnCachedTopology() throws SQLException { - rdsMazDbClusterHostListProvider = Mockito.spy(getRdsMazDbClusterHostListProvider("protocol://url/")); - final List expected = hosts; - storageService.set(rdsMazDbClusterHostListProvider.clusterId, new Topology(expected)); - - final FetchTopologyResult result = rdsMazDbClusterHostListProvider.getTopology(mockConnection, false); - assertEquals(expected, result.hosts); - assertEquals(2, result.hosts.size()); - verify(rdsMazDbClusterHostListProvider, never()).queryForTopology(mockConnection); - } - - @Test - void testGetTopology_withForceUpdate_returnsUpdatedTopology() throws SQLException { - rdsMazDbClusterHostListProvider = Mockito.spy(getRdsMazDbClusterHostListProvider("jdbc:someprotocol://url")); - rdsMazDbClusterHostListProvider.isInitialized = true; - - storageService.set(rdsMazDbClusterHostListProvider.clusterId, new Topology(hosts)); - - final List newHosts = Collections.singletonList( - new HostSpecBuilder(new SimpleHostAvailabilityStrategy()).host("newHost").build()); - doReturn(newHosts).when(rdsMazDbClusterHostListProvider).queryForTopology(mockConnection); - - final FetchTopologyResult result = rdsMazDbClusterHostListProvider.getTopology(mockConnection, true); - verify(rdsMazDbClusterHostListProvider, atMostOnce()).queryForTopology(mockConnection); - assertEquals(1, result.hosts.size()); - assertEquals(newHosts, result.hosts); - } - - @Test - void testGetTopology_noForceUpdate_queryReturnsEmptyHostList() throws SQLException { - rdsMazDbClusterHostListProvider = Mockito.spy(getRdsMazDbClusterHostListProvider("jdbc:someprotocol://url")); - rdsMazDbClusterHostListProvider.clusterId = "cluster-id"; - rdsMazDbClusterHostListProvider.isInitialized = true; - - final List expected = hosts; - 
storageService.set(rdsMazDbClusterHostListProvider.clusterId, new Topology(expected)); - - doReturn(new ArrayList<>()).when(rdsMazDbClusterHostListProvider).queryForTopology(mockConnection); - - final FetchTopologyResult result = rdsMazDbClusterHostListProvider.getTopology(mockConnection, false); - verify(rdsMazDbClusterHostListProvider, atMostOnce()).queryForTopology(mockConnection); - assertEquals(2, result.hosts.size()); - assertEquals(expected, result.hosts); - } - - @Test - void testGetTopology_withForceUpdate_returnsInitialHostList() throws SQLException { - rdsMazDbClusterHostListProvider = Mockito.spy(getRdsMazDbClusterHostListProvider("jdbc:someprotocol://url")); - rdsMazDbClusterHostListProvider.clear(); - - doReturn(new ArrayList<>()).when(rdsMazDbClusterHostListProvider).queryForTopology(mockConnection); - - final FetchTopologyResult result = rdsMazDbClusterHostListProvider.getTopology(mockConnection, true); - verify(rdsMazDbClusterHostListProvider, atMostOnce()).queryForTopology(mockConnection); - assertNotNull(result.hosts); - assertEquals( - Collections.singletonList(new HostSpecBuilder(new SimpleHostAvailabilityStrategy()).host("url").build()), - result.hosts); - } - - @Test - void testQueryForTopology_queryResultsInException() throws SQLException { - rdsMazDbClusterHostListProvider = getRdsMazDbClusterHostListProvider("protocol://url/"); - when(mockStatement.executeQuery(queryCaptor.capture())).thenThrow(new SQLSyntaxErrorException()); - - assertThrows( - SQLException.class, - () -> rdsMazDbClusterHostListProvider.queryForTopology(mockConnection)); - } - - @Test - void testGetCachedTopology_returnCachedTopology() throws SQLException { - rdsMazDbClusterHostListProvider = getRdsMazDbClusterHostListProvider("jdbc:someprotocol://url"); - - final List expected = hosts; - storageService.set(rdsMazDbClusterHostListProvider.clusterId, new Topology(expected)); - - final List result = rdsMazDbClusterHostListProvider.getStoredTopology(); - assertEquals(expected, result); - } - - @Test - void testTopologyCache_NoSuggestedClusterId() throws SQLException { - RdsMultiAzDbClusterListProvider.clearAll(); - - RdsMultiAzDbClusterListProvider provider1 = - Mockito.spy(getRdsMazDbClusterHostListProvider("jdbc:something://cluster-a.domain.com/")); - provider1.init(); - final List topologyClusterA = Arrays.asList( - new HostSpecBuilder(new SimpleHostAvailabilityStrategy()) - .host("instance-a-1.domain.com").port(HostSpec.NO_PORT).role(HostRole.WRITER).build(), - new HostSpecBuilder(new SimpleHostAvailabilityStrategy()) - .host("instance-a-2.domain.com").port(HostSpec.NO_PORT).role(HostRole.READER).build(), - new HostSpecBuilder(new SimpleHostAvailabilityStrategy()) - .host("instance-a-3.domain.com").port(HostSpec.NO_PORT).role(HostRole.READER).build()); - - doReturn(topologyClusterA) - .when(provider1).queryForTopology(any(Connection.class)); - - assertEquals(0, storageService.size(Topology.class)); - - final List topologyProvider1 = provider1.refresh(Mockito.mock(Connection.class)); - assertEquals(topologyClusterA, topologyProvider1); - - RdsMultiAzDbClusterListProvider provider2 = - Mockito.spy(getRdsMazDbClusterHostListProvider("jdbc:something://cluster-b.domain.com/")); - provider2.init(); - assertNull(provider2.getStoredTopology()); - - final List topologyClusterB = Arrays.asList( - new HostSpecBuilder(new SimpleHostAvailabilityStrategy()) - .host("instance-b-1.domain.com").port(HostSpec.NO_PORT).role(HostRole.WRITER).build(), - new HostSpecBuilder(new SimpleHostAvailabilityStrategy()) - 
.host("instance-b-2.domain.com").port(HostSpec.NO_PORT).role(HostRole.READER).build(), - new HostSpecBuilder(new SimpleHostAvailabilityStrategy()) - .host("instance-b-3.domain.com").port(HostSpec.NO_PORT).role(HostRole.READER).build()); - doReturn(topologyClusterB).when(provider2).queryForTopology(any(Connection.class)); - - final List topologyProvider2 = provider2.refresh(Mockito.mock(Connection.class)); - assertEquals(topologyClusterB, topologyProvider2); - - assertEquals(2, storageService.size(Topology.class)); - } - - @Test - void testTopologyCache_SuggestedClusterIdForRds() throws SQLException { - RdsMultiAzDbClusterListProvider.clearAll(); - - RdsMultiAzDbClusterListProvider provider1 = - Mockito.spy(getRdsMazDbClusterHostListProvider( - "jdbc:something://cluster-a.cluster-xyz.us-east-2.rds.amazonaws.com/")); - provider1.init(); - final List topologyClusterA = Arrays.asList( - new HostSpecBuilder(new SimpleHostAvailabilityStrategy()) - .host("instance-a-1.xyz.us-east-2.rds.amazonaws.com") - .port(HostSpec.NO_PORT) - .role(HostRole.WRITER) - .build(), - new HostSpecBuilder(new SimpleHostAvailabilityStrategy()) - .host("instance-a-2.xyz.us-east-2.rds.amazonaws.com") - .port(HostSpec.NO_PORT) - .role(HostRole.READER) - .build(), - new HostSpecBuilder(new SimpleHostAvailabilityStrategy()) - .host("instance-a-3.xyz.us-east-2.rds.amazonaws.com") - .port(HostSpec.NO_PORT) - .role(HostRole.READER) - .build()); - - doReturn(topologyClusterA).when(provider1).queryForTopology(any(Connection.class)); - - assertEquals(0, storageService.size(Topology.class)); - - final List topologyProvider1 = provider1.refresh(Mockito.mock(Connection.class)); - assertEquals(topologyClusterA, topologyProvider1); - - RdsMultiAzDbClusterListProvider provider2 = - Mockito.spy(getRdsMazDbClusterHostListProvider( - "jdbc:something://cluster-a.cluster-xyz.us-east-2.rds.amazonaws.com/")); - provider2.init(); - - assertEquals(provider1.clusterId, provider2.clusterId); - assertTrue(provider1.isPrimaryClusterId); - assertTrue(provider2.isPrimaryClusterId); - - final List topologyProvider2 = provider2.refresh(Mockito.mock(Connection.class)); - assertEquals(topologyClusterA, topologyProvider2); - - assertEquals(1, storageService.size(Topology.class)); - } - - @Test - void testTopologyCache_SuggestedClusterIdForInstance() throws SQLException { - RdsMultiAzDbClusterListProvider.clearAll(); - - RdsMultiAzDbClusterListProvider provider1 = - Mockito.spy(getRdsMazDbClusterHostListProvider( - "jdbc:something://cluster-a.cluster-xyz.us-east-2.rds.amazonaws.com/")); - provider1.init(); - final List topologyClusterA = Arrays.asList( - new HostSpecBuilder(new SimpleHostAvailabilityStrategy()) - .host("instance-a-1.xyz.us-east-2.rds.amazonaws.com") - .port(HostSpec.NO_PORT) - .role(HostRole.WRITER) - .build(), - new HostSpecBuilder(new SimpleHostAvailabilityStrategy()) - .host("instance-a-2.xyz.us-east-2.rds.amazonaws.com") - .port(HostSpec.NO_PORT) - .role(HostRole.READER) - .build(), - new HostSpecBuilder(new SimpleHostAvailabilityStrategy()) - .host("instance-a-3.xyz.us-east-2.rds.amazonaws.com") - .port(HostSpec.NO_PORT) - .role(HostRole.READER) - .build()); - - doReturn(topologyClusterA).when(provider1).queryForTopology(any(Connection.class)); - - assertEquals(0, storageService.size(Topology.class)); - - final List topologyProvider1 = provider1.refresh(Mockito.mock(Connection.class)); - assertEquals(topologyClusterA, topologyProvider1); - - RdsMultiAzDbClusterListProvider provider2 = - Mockito.spy(getRdsMazDbClusterHostListProvider( 
- "jdbc:something://instance-a-3.xyz.us-east-2.rds.amazonaws.com/")); - provider2.init(); - - assertEquals(provider1.clusterId, provider2.clusterId); - assertTrue(provider1.isPrimaryClusterId); - assertTrue(provider2.isPrimaryClusterId); - - final List topologyProvider2 = provider2.refresh(Mockito.mock(Connection.class)); - assertEquals(topologyClusterA, topologyProvider2); - - assertEquals(1, storageService.size(Topology.class)); - } - - @Test - void testTopologyCache_AcceptSuggestion() throws SQLException { - RdsMultiAzDbClusterListProvider.clearAll(); - - RdsMultiAzDbClusterListProvider provider1 = - Mockito.spy(getRdsMazDbClusterHostListProvider( - "jdbc:something://instance-a-2.xyz.us-east-2.rds.amazonaws.com/")); - provider1.init(); - final List topologyClusterA = Arrays.asList( - new HostSpecBuilder(new SimpleHostAvailabilityStrategy()) - .host("instance-a-1.xyz.us-east-2.rds.amazonaws.com") - .port(HostSpec.NO_PORT) - .role(HostRole.WRITER) - .build(), - new HostSpecBuilder(new SimpleHostAvailabilityStrategy()) - .host("instance-a-2.xyz.us-east-2.rds.amazonaws.com") - .port(HostSpec.NO_PORT) - .role(HostRole.READER) - .build(), - new HostSpecBuilder(new SimpleHostAvailabilityStrategy()) - .host("instance-a-3.xyz.us-east-2.rds.amazonaws.com") - .port(HostSpec.NO_PORT) - .role(HostRole.READER) - .build()); - - doAnswer(a -> topologyClusterA).when(provider1).queryForTopology(any(Connection.class)); - - assertEquals(0, storageService.size(Topology.class)); - - List topologyProvider1 = provider1.refresh(Mockito.mock(Connection.class)); - assertEquals(topologyClusterA, topologyProvider1); - - // RdsMultiAzDbClusterListProvider.logCache(); - - RdsMultiAzDbClusterListProvider provider2 = - Mockito.spy(getRdsMazDbClusterHostListProvider( - "jdbc:something://cluster-a.cluster-xyz.us-east-2.rds.amazonaws.com/")); - provider2.init(); - - doAnswer(a -> topologyClusterA).when(provider2).queryForTopology(any(Connection.class)); - - final List topologyProvider2 = provider2.refresh(Mockito.mock(Connection.class)); - assertEquals(topologyClusterA, topologyProvider2); - - assertNotEquals(provider1.clusterId, provider2.clusterId); - assertFalse(provider1.isPrimaryClusterId); - assertTrue(provider2.isPrimaryClusterId); - assertEquals(2, storageService.size(Topology.class)); - assertEquals("cluster-a.cluster-xyz.us-east-2.rds.amazonaws.com", - RdsMultiAzDbClusterListProvider.suggestedPrimaryClusterIdCache.get(provider1.clusterId)); - - // RdsMultiAzDbClusterListProvider.logCache(); - - topologyProvider1 = provider1.forceRefresh(Mockito.mock(Connection.class)); - assertEquals(topologyClusterA, topologyProvider1); - assertEquals(provider1.clusterId, provider2.clusterId); - assertTrue(provider1.isPrimaryClusterId); - assertTrue(provider2.isPrimaryClusterId); - - // RdsMultiAzDbClusterListProvider.logCache(); - } - - @Test - void testIdentifyConnectionWithInvalidNodeIdQuery() throws SQLException { - rdsMazDbClusterHostListProvider = Mockito.spy(getRdsMazDbClusterHostListProvider("jdbc:someprotocol://url")); - - when(mockResultSet.next()).thenReturn(false); - assertThrows(SQLException.class, () -> rdsMazDbClusterHostListProvider.identifyConnection(mockConnection)); - - when(mockConnection.createStatement()).thenThrow(new SQLException("exception")); - assertThrows(SQLException.class, () -> rdsMazDbClusterHostListProvider.identifyConnection(mockConnection)); - } - - @Test - void testIdentifyConnectionNullTopology() throws SQLException { - rdsMazDbClusterHostListProvider = 
Mockito.spy(getRdsMazDbClusterHostListProvider("jdbc:someprotocol://url")); - rdsMazDbClusterHostListProvider.clusterInstanceTemplate = new HostSpecBuilder(new SimpleHostAvailabilityStrategy()) - .host("?.pattern").build(); - - when(mockResultSet.next()).thenReturn(true); - when(mockResultSet.getString(eq(1))).thenReturn("instance-1"); - doReturn(null).when(rdsMazDbClusterHostListProvider).refresh(mockConnection); - doReturn(null).when(rdsMazDbClusterHostListProvider).forceRefresh(mockConnection); - - assertNull(rdsMazDbClusterHostListProvider.identifyConnection(mockConnection)); - } - - @Test - void testIdentifyConnectionHostNotInTopology() throws SQLException { - final List cachedTopology = Collections.singletonList( - new HostSpecBuilder(new SimpleHostAvailabilityStrategy()) - .host("instance-a-1.xyz.us-east-2.rds.amazonaws.com") - .port(HostSpec.NO_PORT) - .role(HostRole.WRITER) - .build()); - - rdsMazDbClusterHostListProvider = Mockito.spy(getRdsMazDbClusterHostListProvider("jdbc:someprotocol://url")); - when(mockResultSet.next()).thenReturn(true); - when(mockResultSet.getString(eq(1))).thenReturn("instance-1"); - doReturn(cachedTopology).when(rdsMazDbClusterHostListProvider).refresh(mockConnection); - doReturn(cachedTopology).when(rdsMazDbClusterHostListProvider).forceRefresh(mockConnection); - - assertNull(rdsMazDbClusterHostListProvider.identifyConnection(mockConnection)); - } - - @Test - void testIdentifyConnectionHostInTopology() throws SQLException { - final HostSpec expectedHost = new HostSpecBuilder(new SimpleHostAvailabilityStrategy()) - .host("instance-a-1.xyz.us-east-2.rds.amazonaws.com") - .hostId("instance-a-1") - .port(HostSpec.NO_PORT) - .role(HostRole.WRITER) - .build(); - final List cachedTopology = Collections.singletonList(expectedHost); - - rdsMazDbClusterHostListProvider = Mockito.spy(getRdsMazDbClusterHostListProvider("jdbc:someprotocol://url")); - when(mockResultSet.next()).thenReturn(true); - when(mockResultSet.getString(eq(1))).thenReturn("instance-a-1"); - doReturn(cachedTopology).when(rdsMazDbClusterHostListProvider).refresh(mockConnection); - doReturn(cachedTopology).when(rdsMazDbClusterHostListProvider).forceRefresh(mockConnection); - - final HostSpec actual = rdsMazDbClusterHostListProvider.identifyConnection(mockConnection); - assertEquals("instance-a-1.xyz.us-east-2.rds.amazonaws.com", actual.getHost()); - assertEquals("instance-a-1", actual.getHostId()); - } - -} diff --git a/wrapper/src/test/java/software/amazon/jdbc/mock/TestPluginOne.java b/wrapper/src/test/java/software/amazon/jdbc/mock/TestPluginOne.java index 9ca4c86dd..523d3be59 100644 --- a/wrapper/src/test/java/software/amazon/jdbc/mock/TestPluginOne.java +++ b/wrapper/src/test/java/software/amazon/jdbc/mock/TestPluginOne.java @@ -27,7 +27,6 @@ import java.util.Properties; import java.util.Set; import software.amazon.jdbc.ConnectionPlugin; -import software.amazon.jdbc.HostListProviderService; import software.amazon.jdbc.HostRole; import software.amazon.jdbc.HostSpec; import software.amazon.jdbc.HostSpecBuilder; @@ -35,6 +34,7 @@ import software.amazon.jdbc.NodeChangeOptions; import software.amazon.jdbc.OldConnectionSuggestedAction; import software.amazon.jdbc.hostavailability.SimpleHostAvailabilityStrategy; +import software.amazon.jdbc.hostlistprovider.HostListProviderService; public class TestPluginOne implements ConnectionPlugin { diff --git a/wrapper/src/test/java/software/amazon/jdbc/plugin/DefaultConnectionPluginTest.java 
b/wrapper/src/test/java/software/amazon/jdbc/plugin/DefaultConnectionPluginTest.java index 9893e7c95..eaf9d11f1 100644 --- a/wrapper/src/test/java/software/amazon/jdbc/plugin/DefaultConnectionPluginTest.java +++ b/wrapper/src/test/java/software/amazon/jdbc/plugin/DefaultConnectionPluginTest.java @@ -43,6 +43,7 @@ import org.junit.jupiter.params.provider.MethodSource; import org.mockito.Mock; import org.mockito.MockitoAnnotations; +import software.amazon.jdbc.ConnectionInfo; import software.amazon.jdbc.ConnectionProvider; import software.amazon.jdbc.ConnectionProviderManager; import software.amazon.jdbc.HostSpec; @@ -77,7 +78,7 @@ class DefaultConnectionPluginTest { private AutoCloseable closeable; @BeforeEach - void setUp() { + void setUp() throws SQLException { closeable = MockitoAnnotations.openMocks(this); when(pluginService.getTelemetryFactory()).thenReturn(mockTelemetryFactory); @@ -88,6 +89,8 @@ void setUp() { when(mockTelemetryFactory.createGauge(anyString(), any(GaugeCallable.class))).thenReturn(mockTelemetryGauge); when(mockConnectionProviderManager.getConnectionProvider(anyString(), any(), any())) .thenReturn(connectionProvider); + when(connectionProvider.connect(anyString(), any(), any(), any(), any())) + .thenReturn(new ConnectionInfo(conn, false)); plugin = new DefaultConnectionPlugin( pluginService, connectionProvider, pluginManagerService, mockConnectionProviderManager); diff --git a/wrapper/src/test/java/software/amazon/jdbc/plugin/customendpoint/CustomEndpointMonitorImplTest.java b/wrapper/src/test/java/software/amazon/jdbc/plugin/customendpoint/CustomEndpointMonitorImplTest.java index afa6570e0..8a4d5395b 100644 --- a/wrapper/src/test/java/software/amazon/jdbc/plugin/customendpoint/CustomEndpointMonitorImplTest.java +++ b/wrapper/src/test/java/software/amazon/jdbc/plugin/customendpoint/CustomEndpointMonitorImplTest.java @@ -39,6 +39,7 @@ import org.mockito.ArgumentCaptor; import org.mockito.Mock; import org.mockito.MockitoAnnotations; +import org.testcontainers.shaded.org.checkerframework.checker.nullness.qual.Nullable; import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.services.rds.RdsClient; import software.amazon.awssdk.services.rds.model.DBClusterEndpoint; @@ -54,7 +55,6 @@ import software.amazon.jdbc.util.telemetry.TelemetryFactory; public class CustomEndpointMonitorImplTest { - @Mock private MonitorService mockMonitorService; @Mock private StorageService mockStorageService; @Mock private BiFunction mockRdsClientFunc; @Mock private RdsClient mockRdsClient; @@ -113,20 +113,29 @@ void cleanUp() throws Exception { @Test public void testRun() throws InterruptedException { + int refreshRateMs = 50; CustomEndpointMonitorImpl monitor = new CustomEndpointMonitorImpl( mockStorageService, mockTelemetryFactory, host, endpointId, Region.US_EAST_1, - TimeUnit.MILLISECONDS.toNanos(50), + TimeUnit.MILLISECONDS.toNanos(refreshRateMs), mockRdsClientFunc); monitor.start(); - // Wait for 2 run cycles. The first will return an unexpected number of endpoints in the API response, the second - // will return the expected number of endpoints (one). - TimeUnit.MILLISECONDS.sleep(100); - assertEquals(expectedInfo, CustomEndpointMonitorImpl.customEndpointInfoCache.get(host.getUrl())); + // Wait for 2 run cycles. The first will return an unexpected number of endpoints in the API response, the + // second will return the expected number of endpoints (one). 
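+ // If the cache is still empty after this initial wait, the polling loop below retries for up to three more + // cycles (runCycles < 5) to absorb scheduler timing jitter.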
+ TimeUnit.MILLISECONDS.sleep(2 * refreshRateMs); + int runCycles = 2; + @Nullable CustomEndpointInfo customEndpointInfo = CustomEndpointMonitorImpl.customEndpointInfoCache + .get(host.getUrl()); + while (customEndpointInfo == null && runCycles < 5) { + TimeUnit.MILLISECONDS.sleep(refreshRateMs); + runCycles++; + customEndpointInfo = CustomEndpointMonitorImpl.customEndpointInfoCache.get(host.getUrl()); + } + assertEquals(expectedInfo, customEndpointInfo); monitor.stop(); ArgumentCaptor captor = ArgumentCaptor.forClass(AllowedAndBlockedHosts.class); @@ -135,7 +144,7 @@ public void testRun() throws InterruptedException { assertNull(captor.getValue().getBlockedHostIds()); // Wait for monitor to close - TimeUnit.MILLISECONDS.sleep(50); + TimeUnit.MILLISECONDS.sleep(refreshRateMs); verify(mockRdsClient, atLeastOnce()).close(); } } diff --git a/wrapper/src/test/java/software/amazon/jdbc/plugin/dev/DeveloperConnectionPluginTest.java b/wrapper/src/test/java/software/amazon/jdbc/plugin/dev/DeveloperConnectionPluginTest.java index 1f8c387a7..a10a986ce 100644 --- a/wrapper/src/test/java/software/amazon/jdbc/plugin/dev/DeveloperConnectionPluginTest.java +++ b/wrapper/src/test/java/software/amazon/jdbc/plugin/dev/DeveloperConnectionPluginTest.java @@ -37,6 +37,7 @@ import org.junit.jupiter.api.Test; import org.mockito.Mock; import org.mockito.MockitoAnnotations; +import software.amazon.jdbc.ConnectionInfo; import software.amazon.jdbc.ConnectionPluginManager; import software.amazon.jdbc.ConnectionProvider; import software.amazon.jdbc.HostSpec; @@ -91,7 +92,8 @@ void init() throws SQLException { mockPluginService, mockPluginService); - when(mockConnectionProvider.connect(any(), any(), any(), any(), any())).thenReturn(mockConnection); + when(mockConnectionProvider.connect(any(), any(), any(), any(), any())) + .thenReturn(new ConnectionInfo(mockConnection, false)); when(mockConnectCallback.getExceptionToRaise(any(), any(), any(), anyBoolean())).thenReturn(null); when(mockConnectionPluginManager.getTelemetryFactory()).thenReturn(mockTelemetryFactory); diff --git a/wrapper/src/test/java/software/amazon/jdbc/plugin/failover/FailoverConnectionPluginTest.java b/wrapper/src/test/java/software/amazon/jdbc/plugin/failover/FailoverConnectionPluginTest.java index 1fb98265e..d7654d70b 100644 --- a/wrapper/src/test/java/software/amazon/jdbc/plugin/failover/FailoverConnectionPluginTest.java +++ b/wrapper/src/test/java/software/amazon/jdbc/plugin/failover/FailoverConnectionPluginTest.java @@ -18,12 +18,15 @@ import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.atLeastOnce; import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.never; import static org.mockito.Mockito.spy; @@ -49,7 +52,6 @@ import org.junit.jupiter.params.provider.ValueSource; import org.mockito.Mock; import org.mockito.MockitoAnnotations; -import software.amazon.jdbc.HostListProviderService; import software.amazon.jdbc.HostRole; import software.amazon.jdbc.HostSpec; import software.amazon.jdbc.HostSpecBuilder; 
@@ -58,12 +60,12 @@ import software.amazon.jdbc.PluginService; import software.amazon.jdbc.hostavailability.HostAvailability; import software.amazon.jdbc.hostavailability.SimpleHostAvailabilityStrategy; -import software.amazon.jdbc.hostlistprovider.AuroraHostListProvider; +import software.amazon.jdbc.hostlistprovider.HostListProviderService; +import software.amazon.jdbc.hostlistprovider.RdsHostListProvider; import software.amazon.jdbc.targetdriverdialect.TargetDriverDialect; import software.amazon.jdbc.util.FullServicesContainer; import software.amazon.jdbc.util.RdsUrlType; import software.amazon.jdbc.util.SqlState; -import software.amazon.jdbc.util.connection.ConnectionService; import software.amazon.jdbc.util.telemetry.GaugeCallable; import software.amazon.jdbc.util.telemetry.TelemetryContext; import software.amazon.jdbc.util.telemetry.TelemetryCounter; @@ -82,12 +84,11 @@ class FailoverConnectionPluginTest { .host("reader1").port(1234).role(HostRole.READER).build()); @Mock FullServicesContainer mockContainer; - @Mock ConnectionService mockConnectionService; @Mock PluginService mockPluginService; @Mock Connection mockConnection; @Mock HostSpec mockHostSpec; @Mock HostListProviderService mockHostListProviderService; - @Mock AuroraHostListProvider mockHostListProvider; + @Mock RdsHostListProvider mockHostListProvider; @Mock JdbcCallable mockInitHostProviderFunc; @Mock ReaderFailoverHandler mockReaderFailoverHandler; @Mock WriterFailoverHandler mockWriterFailoverHandler; @@ -443,4 +444,17 @@ private void initializePlugin() throws SQLException { spyPlugin.setReaderFailoverHandler(mockReaderFailoverHandler); // doReturn(mockConnectionService).when(spyPlugin).getConnectionService(); } + + @Test + void test_failover_when_read_only_connection() throws SQLException { + initializePlugin(); + spyPlugin.failoverMode = FailoverMode.STRICT_WRITER; + + when(mockPluginService.isReadOnlyConnectionException(any(), any(TargetDriverDialect.class))).thenReturn(true); + assertTrue(spyPlugin.shouldExceptionTriggerConnectionSwitch(new SQLException("test", "any"))); + + when(mockPluginService.isReadOnlyConnectionException(any(), any(TargetDriverDialect.class))).thenReturn(false); + assertFalse(spyPlugin.shouldExceptionTriggerConnectionSwitch(new SQLException("test", "any"))); + } + } diff --git a/wrapper/src/test/java/software/amazon/jdbc/plugin/limitless/LimitlessConnectionPluginTest.java b/wrapper/src/test/java/software/amazon/jdbc/plugin/limitless/LimitlessConnectionPluginTest.java index 411233100..e6e498a41 100644 --- a/wrapper/src/test/java/software/amazon/jdbc/plugin/limitless/LimitlessConnectionPluginTest.java +++ b/wrapper/src/test/java/software/amazon/jdbc/plugin/limitless/LimitlessConnectionPluginTest.java @@ -35,7 +35,6 @@ import org.mockito.MockitoAnnotations; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; -import software.amazon.jdbc.HostListProvider; import software.amazon.jdbc.HostRole; import software.amazon.jdbc.HostSpec; import software.amazon.jdbc.HostSpecBuilder; @@ -45,6 +44,7 @@ import software.amazon.jdbc.dialect.Dialect; import software.amazon.jdbc.dialect.PgDialect; import software.amazon.jdbc.hostavailability.SimpleHostAvailabilityStrategy; +import software.amazon.jdbc.hostlistprovider.HostListProvider; public class LimitlessConnectionPluginTest { diff --git a/wrapper/src/test/java/software/amazon/jdbc/plugin/limitless/LimitlessRouterServiceImplTest.java b/wrapper/src/test/java/software/amazon/jdbc/plugin/limitless/LimitlessRouterServiceImplTest.java 
index e9d921815..1a9761f80 100644 --- a/wrapper/src/test/java/software/amazon/jdbc/plugin/limitless/LimitlessRouterServiceImplTest.java +++ b/wrapper/src/test/java/software/amazon/jdbc/plugin/limitless/LimitlessRouterServiceImplTest.java @@ -39,7 +39,6 @@ import org.mockito.MockitoAnnotations; import software.amazon.jdbc.ConnectionProvider; import software.amazon.jdbc.HighestWeightHostSelector; -import software.amazon.jdbc.HostListProvider; import software.amazon.jdbc.HostRole; import software.amazon.jdbc.HostSpec; import software.amazon.jdbc.HostSpecBuilder; @@ -48,6 +47,7 @@ import software.amazon.jdbc.WeightedRandomHostSelector; import software.amazon.jdbc.hostavailability.HostAvailability; import software.amazon.jdbc.hostavailability.SimpleHostAvailabilityStrategy; +import software.amazon.jdbc.hostlistprovider.HostListProvider; import software.amazon.jdbc.util.FullServicesContainer; import software.amazon.jdbc.util.FullServicesContainerImpl; import software.amazon.jdbc.util.events.EventPublisher; diff --git a/wrapper/src/test/java/software/amazon/jdbc/plugin/readwritesplitting/ReadWriteSplittingPluginTest.java b/wrapper/src/test/java/software/amazon/jdbc/plugin/readwritesplitting/ReadWriteSplittingPluginTest.java index c7c7bdc1b..77b3a7079 100644 --- a/wrapper/src/test/java/software/amazon/jdbc/plugin/readwritesplitting/ReadWriteSplittingPluginTest.java +++ b/wrapper/src/test/java/software/amazon/jdbc/plugin/readwritesplitting/ReadWriteSplittingPluginTest.java @@ -45,7 +45,6 @@ import org.junit.jupiter.api.Test; import org.mockito.Mock; import org.mockito.MockitoAnnotations; -import software.amazon.jdbc.HostListProviderService; import software.amazon.jdbc.HostRole; import software.amazon.jdbc.HostSpec; import software.amazon.jdbc.HostSpecBuilder; @@ -54,8 +53,8 @@ import software.amazon.jdbc.OldConnectionSuggestedAction; import software.amazon.jdbc.PluginService; import software.amazon.jdbc.PropertyDefinition; -import software.amazon.jdbc.dialect.Dialect; import software.amazon.jdbc.hostavailability.SimpleHostAvailabilityStrategy; +import software.amazon.jdbc.hostlistprovider.HostListProviderService; import software.amazon.jdbc.plugin.failover.FailoverSuccessSQLException; import software.amazon.jdbc.util.SqlState; @@ -95,7 +94,6 @@ public class ReadWriteSplittingPluginTest { @Mock private JdbcCallable mockConnectFunc; @Mock private JdbcCallable mockSqlFunction; @Mock private PluginService mockPluginService; - @Mock private Dialect mockDialect; @Mock private HostListProviderService mockHostListProviderService; @Mock private Connection mockWriterConn; @Mock private Connection mockNewWriterConn; @@ -355,6 +353,35 @@ public void testSetReadOnlyOnClosedConnection() throws SQLException { assertNull(plugin.getReaderConnection()); } + @Test + public void testSetReadOnly_readerExpires() throws SQLException, InterruptedException { + when(this.mockPluginService.connect(eq(readerHostSpec1), any(Properties.class), any())) + .thenReturn(mockReaderConn1) + .thenReturn(mockReaderConn2); + + final Properties propsWithExpirationTime = new Properties(); + propsWithExpirationTime.put("cachedReaderKeepAliveTimeoutMs", "5000"); + + final ReadWriteSplittingPlugin plugin = new ReadWriteSplittingPlugin( + mockPluginService, + propsWithExpirationTime); + + plugin.switchConnectionIfRequired(true); + assertEquals(mockReaderConn1, plugin.getReaderConnection()); + + Thread.sleep(1000); + + plugin.switchConnectionIfRequired(true); + // Ensure the cached reader connection hasn't changed yet since it hasn't expired. 
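+ // Only about 1 second has elapsed at this point, well under the 5000ms cachedReaderKeepAliveTimeoutMs + // configured above.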
+ assertEquals(mockReaderConn1, plugin.getReaderConnection()); + + Thread.sleep(6000); + plugin.switchConnectionIfRequired(true); + + // Ensure the cached reader connection has expired and updated. + assertEquals(mockReaderConn2, plugin.getReaderConnection()); + } + @Test public void testExecute_failoverToNewWriter() throws SQLException { when(mockSqlFunction.call()).thenThrow(FailoverSuccessSQLException.class); @@ -567,7 +594,7 @@ public void testClosePooledReaderConnectionAfterSetReadOnly() throws SQLExceptio .when(this.mockPluginService).getCurrentHostSpec(); doReturn(mockReaderConn1).when(mockPluginService).connect(readerHostSpec1, null); when(mockPluginService.getDriverProtocol()).thenReturn("jdbc:postgresql://"); - when(mockPluginService.isPooledConnectionProvider(any(), any())).thenReturn(true); + when(mockPluginService.isPooledConnection()).thenReturn(true); final ReadWriteSplittingPlugin plugin = new ReadWriteSplittingPlugin( mockPluginService, @@ -580,7 +607,7 @@ public void testClosePooledReaderConnectionAfterSetReadOnly() throws SQLExceptio spyPlugin.switchConnectionIfRequired(true); spyPlugin.switchConnectionIfRequired(false); - verify(spyPlugin, times(1)).closeConnectionIfIdle(eq(mockReaderConn1)); + verify(spyPlugin, times(2)).closeReaderConnectionIfIdle(any()); } @Test @@ -593,7 +620,7 @@ public void testClosePooledWriterConnectionAfterSetReadOnly() throws SQLExceptio .when(this.mockPluginService).getCurrentHostSpec(); doReturn(mockWriterConn).when(mockPluginService).connect(writerHostSpec, null); when(mockPluginService.getDriverProtocol()).thenReturn("jdbc:postgresql://"); - when(mockPluginService.isPooledConnectionProvider(any(), any())).thenReturn(true); + when(mockPluginService.isPooledConnection()).thenReturn(true); final ReadWriteSplittingPlugin plugin = new ReadWriteSplittingPlugin( mockPluginService, @@ -607,7 +634,7 @@ public void testClosePooledWriterConnectionAfterSetReadOnly() throws SQLExceptio spyPlugin.switchConnectionIfRequired(false); spyPlugin.switchConnectionIfRequired(true); - verify(spyPlugin, times(1)).closeConnectionIfIdle(eq(mockWriterConn)); + verify(spyPlugin, times(1)).closeWriterConnectionIfIdle(eq(mockWriterConn)); } private static HikariConfig getHikariConfig(HostSpec hostSpec, Properties props) { diff --git a/wrapper/src/test/java/software/amazon/jdbc/plugin/srw/SimpleReadWriteSplittingPluginTest.java b/wrapper/src/test/java/software/amazon/jdbc/plugin/srw/SimpleReadWriteSplittingPluginTest.java new file mode 100644 index 000000000..adf0a5df9 --- /dev/null +++ b/wrapper/src/test/java/software/amazon/jdbc/plugin/srw/SimpleReadWriteSplittingPluginTest.java @@ -0,0 +1,733 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package software.amazon.jdbc.plugin.srw; + +import static org.junit.Assert.assertThrows; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.EnumSet; +import java.util.Properties; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; +import software.amazon.jdbc.HostRole; +import software.amazon.jdbc.HostSpec; +import software.amazon.jdbc.HostSpecBuilder; +import software.amazon.jdbc.JdbcCallable; +import software.amazon.jdbc.NodeChangeOptions; +import software.amazon.jdbc.OldConnectionSuggestedAction; +import software.amazon.jdbc.PluginService; +import software.amazon.jdbc.hostavailability.SimpleHostAvailabilityStrategy; +import software.amazon.jdbc.hostlistprovider.HostListProviderService; +import software.amazon.jdbc.plugin.readwritesplitting.ReadWriteSplittingSQLException; + +public class SimpleReadWriteSplittingPluginTest { + private static final int TEST_PORT = 5432; + private static final String WRITE_ENDPOINT = "writer.cluster-xyz.us-east-1.rds.amazonaws.com"; + private static final String READ_ENDPOINT = "reader.cluster-xyz.us-east-1.rds.amazonaws.com"; + private static final Properties defaultProps = new Properties(); + + private final HostSpec writerHostSpec = new HostSpecBuilder(new SimpleHostAvailabilityStrategy()) + .host(WRITE_ENDPOINT).port(TEST_PORT).role(HostRole.WRITER).build(); + private final HostSpec readerHostSpec = new HostSpecBuilder(new SimpleHostAvailabilityStrategy()) + .host(READ_ENDPOINT).port(TEST_PORT).role(HostRole.READER).build(); + + private AutoCloseable closeable; + + @Mock private JdbcCallable mockConnectFunc; + @Mock private PluginService mockPluginService; + @Mock private HostListProviderService mockHostListProviderService; + @Mock private Connection mockWriterConn; + @Mock private Connection mockClosedWriterConn; + @Mock private Connection mockReaderConn; + @Mock private Statement mockStatement; + @Mock private ResultSet mockResultSet; + @Mock private EnumSet mockChanges; + @Mock private HostSpecBuilder mockHostSpecBuilder; + + @BeforeEach + public void init() throws SQLException { + closeable = MockitoAnnotations.openMocks(this); + mockDefaultBehavior(); + setupDefaultProperties(); + } + + @AfterEach + void cleanUp() throws Exception { + closeable.close(); + defaultProps.clear(); + } + + void setupDefaultProperties() { + SimpleReadWriteSplittingPlugin.SRW_WRITE_ENDPOINT.set(defaultProps, WRITE_ENDPOINT); + SimpleReadWriteSplittingPlugin.SRW_READ_ENDPOINT.set(defaultProps, READ_ENDPOINT); + } + + void mockDefaultBehavior() throws SQLException { + when(this.mockPluginService.getCurrentConnection()).thenReturn(mockWriterConn); + when(this.mockPluginService.getCurrentHostSpec()).thenReturn(writerHostSpec); + when(this.mockPluginService.connect(eq(writerHostSpec), any(Properties.class), any())) + .thenReturn(mockWriterConn); + 
when(this.mockPluginService.getHostRole(mockWriterConn)).thenReturn(HostRole.WRITER); + when(this.mockPluginService.getHostRole(mockReaderConn)).thenReturn(HostRole.READER); + when(this.mockPluginService.connect(eq(readerHostSpec), any(Properties.class), any())) + .thenReturn(mockReaderConn); + when(this.mockConnectFunc.call()).thenReturn(mockWriterConn); + when(mockWriterConn.createStatement()).thenReturn(mockStatement); + when(mockReaderConn.createStatement()).thenReturn(mockStatement); + when(mockStatement.executeQuery(any(String.class))).thenReturn(mockResultSet); + when(mockResultSet.next()).thenReturn(true); + when(mockClosedWriterConn.isClosed()).thenReturn(true); + when(mockHostListProviderService.getHostSpecBuilder()).thenReturn(mockHostSpecBuilder); + when(mockHostListProviderService.getCurrentHostSpec()).thenReturn(writerHostSpec); + when(mockHostSpecBuilder.host(any())).thenReturn(mockHostSpecBuilder); + when(mockHostSpecBuilder.port(any(Integer.class))).thenReturn(mockHostSpecBuilder); + when(mockHostSpecBuilder.role(any())).thenReturn(mockHostSpecBuilder); + when(mockHostSpecBuilder.availability(any())).thenReturn(mockHostSpecBuilder); + when(mockHostSpecBuilder.build()).thenReturn(writerHostSpec, readerHostSpec); + } + + @Test + public void testConstructor_missingWriteEndpoint() { + Properties props = new Properties(); + // No write endpoint set + SimpleReadWriteSplittingPlugin.SRW_READ_ENDPOINT.set(props, READ_ENDPOINT); + + assertThrows(RuntimeException.class, () -> + new SimpleReadWriteSplittingPlugin(mockPluginService, props, null, null, null, null, null)); + } + + @Test + public void testSwitchToReader_noReaderEndpoint() throws SQLException { + Properties props = new Properties(); + SimpleReadWriteSplittingPlugin.SRW_WRITE_ENDPOINT.set(props, WRITE_ENDPOINT); + // No read endpoint set + + assertThrows(RuntimeException.class, () -> + new SimpleReadWriteSplittingPlugin(mockPluginService, props, null, null, null, null, null)); + } + + @Test + public void testSetReadOnly_trueFalse() throws SQLException { + when(mockPluginService.getCurrentConnection()).thenReturn(mockWriterConn); + when(mockPluginService.getCurrentHostSpec()).thenReturn(writerHostSpec); + when(mockPluginService.isInTransaction()).thenReturn(false); + + final SimpleReadWriteSplittingPlugin plugin = new SimpleReadWriteSplittingPlugin( + mockPluginService, + defaultProps, + mockHostListProviderService, + mockWriterConn, + null, + writerHostSpec, + readerHostSpec); + + plugin.switchConnectionIfRequired(true); + + verify(mockPluginService, times(1)) + .setCurrentConnection(eq(mockReaderConn), eq(readerHostSpec)); + assertEquals(mockReaderConn, plugin.getReaderConnection()); + assertEquals(mockWriterConn, plugin.getWriterConnection()); + + when(mockPluginService.getCurrentConnection()).thenReturn(mockReaderConn); + when(mockPluginService.getCurrentHostSpec()).thenReturn(readerHostSpec); + + plugin.switchConnectionIfRequired(false); + + verify(mockPluginService, times(1)) + .setCurrentConnection(eq(mockWriterConn), eq(writerHostSpec)); + } + + @Test + public void testSetReadOnly_trueFalse_endpointsWithPort() throws SQLException { + int port = 1234; + Properties props = new Properties(); + SimpleReadWriteSplittingPlugin.SRW_WRITE_ENDPOINT.set(props, WRITE_ENDPOINT + ":" + port); + SimpleReadWriteSplittingPlugin.SRW_READ_ENDPOINT.set(props, READ_ENDPOINT + ":" + port); + + when(this.mockPluginService.connect(any(), eq(props), any())) + .thenReturn(mockReaderConn); + + 
when(mockPluginService.getCurrentConnection()).thenReturn(mockWriterConn); + when(mockPluginService.getCurrentHostSpec()).thenReturn(writerHostSpec); + when(mockPluginService.isInTransaction()).thenReturn(false); + + final SimpleReadWriteSplittingPlugin plugin = new SimpleReadWriteSplittingPlugin( + mockPluginService, + props, + mockHostListProviderService, + null, + null, + null, + null); + + plugin.switchConnectionIfRequired(true); + + verify(mockPluginService, times(1)) + .setCurrentConnection(eq(mockReaderConn), any()); + assertEquals(mockReaderConn, plugin.getReaderConnection()); + assertEquals(READ_ENDPOINT, plugin.getReaderHostSpec().getHost()); + assertEquals(port, plugin.getReaderHostSpec().getPort()); + + when(mockPluginService.getCurrentConnection()).thenReturn(mockReaderConn); + when(mockPluginService.getCurrentHostSpec()).thenReturn(readerHostSpec); + + when(this.mockPluginService.connect(any(), eq(props), any())) + .thenReturn(mockWriterConn); + + plugin.switchConnectionIfRequired(false); + + verify(mockPluginService, times(1)) + .setCurrentConnection(eq(mockWriterConn), any()); + assertEquals(mockWriterConn, plugin.getWriterConnection()); + assertEquals(WRITE_ENDPOINT, plugin.getWriterHostSpec().getHost()); + assertEquals(port, plugin.getWriterHostSpec().getPort()); + } + + @Test + public void testSetReadOnlyTrue_alreadyOnReader() throws SQLException { + when(mockPluginService.getCurrentConnection()).thenReturn(mockReaderConn); + when(mockPluginService.getCurrentHostSpec()).thenReturn(readerHostSpec); + when(mockPluginService.isInTransaction()).thenReturn(false); + + final SimpleReadWriteSplittingPlugin plugin = new SimpleReadWriteSplittingPlugin( + mockPluginService, + defaultProps, + mockHostListProviderService, + mockWriterConn, + mockReaderConn, + null, null); + + plugin.switchConnectionIfRequired(true); + + verify(mockPluginService, never()) + .setCurrentConnection(any(Connection.class), any(HostSpec.class)); + } + + @Test + public void testSetReadOnlyFalse_alreadyOnWriter() throws SQLException { + when(mockPluginService.getCurrentConnection()).thenReturn(mockWriterConn); + when(mockPluginService.getCurrentHostSpec()).thenReturn(writerHostSpec); + + final SimpleReadWriteSplittingPlugin plugin = new SimpleReadWriteSplittingPlugin( + mockPluginService, + defaultProps, + mockHostListProviderService, + mockWriterConn, + mockReaderConn, + null, null); + + plugin.switchConnectionIfRequired(false); + + verify(mockPluginService, never()) + .setCurrentConnection(any(Connection.class), any(HostSpec.class)); + } + + @Test + public void testSetReadOnlyFalse_inTransaction() { + when(mockPluginService.getCurrentConnection()).thenReturn(mockReaderConn); + when(mockPluginService.getCurrentHostSpec()).thenReturn(readerHostSpec); + when(mockPluginService.isInTransaction()).thenReturn(true); + + final SimpleReadWriteSplittingPlugin plugin = new SimpleReadWriteSplittingPlugin( + mockPluginService, + defaultProps, + mockHostListProviderService, + mockWriterConn, + mockReaderConn, + null, null); + + assertThrows(ReadWriteSplittingSQLException.class, () -> + plugin.switchConnectionIfRequired(false)); + } + + @Test + public void testSetReadOnly_closedConnection() throws SQLException { + when(mockPluginService.getCurrentConnection()).thenReturn(mockClosedWriterConn); + when(mockClosedWriterConn.isClosed()).thenReturn(true); + + final SimpleReadWriteSplittingPlugin plugin = new SimpleReadWriteSplittingPlugin( + mockPluginService, + defaultProps, + mockHostListProviderService, + mockWriterConn, + 
mockReaderConn, + null, null); + + assertThrows(ReadWriteSplittingSQLException.class, () -> + plugin.switchConnectionIfRequired(true)); + } + + @Test + public void testNotifyConnectionChanged_inReadWriteSplit() { + final SimpleReadWriteSplittingPlugin plugin = new SimpleReadWriteSplittingPlugin( + mockPluginService, + defaultProps, + mockHostListProviderService, + mockWriterConn, + mockReaderConn, + writerHostSpec, + readerHostSpec); + + // Simulate being in read-write split mode + try { + plugin.switchConnectionIfRequired(true); + } catch (SQLException e) { + // ignore for test setup + } + + OldConnectionSuggestedAction result = plugin.notifyConnectionChanged(mockChanges); + assertEquals(OldConnectionSuggestedAction.PRESERVE, result); + } + + @Test + public void testNotifyConnectionChanged_notInReadWriteSplit() { + final SimpleReadWriteSplittingPlugin plugin = new SimpleReadWriteSplittingPlugin( + mockPluginService, + defaultProps, + mockHostListProviderService, + mockWriterConn, + mockReaderConn, + null, null); + + OldConnectionSuggestedAction result = plugin.notifyConnectionChanged(mockChanges); + assertEquals(OldConnectionSuggestedAction.NO_OPINION, result); + } + + @Test + public void testReleaseResources() throws SQLException { + when(mockPluginService.getCurrentConnection()).thenReturn(mockWriterConn); + when(mockReaderConn.isClosed()).thenReturn(false); + + final SimpleReadWriteSplittingPlugin plugin = new SimpleReadWriteSplittingPlugin( + mockPluginService, + defaultProps, + mockHostListProviderService, + mockWriterConn, + mockReaderConn, + null, null); + + plugin.releaseResources(); + + verify(mockReaderConn, times(1)).close(); + } + + @Test + public void testWrongRoleConnection_writerEndpointToReader() throws SQLException { + when(mockPluginService.getCurrentConnection()).thenReturn(mockReaderConn); + when(mockPluginService.getCurrentHostSpec()).thenReturn(readerHostSpec); + when(mockPluginService.isInTransaction()).thenReturn(false); + when(mockPluginService.getHostRole(any())).thenReturn(HostRole.READER); // Wrong role + + final SimpleReadWriteSplittingPlugin plugin = new SimpleReadWriteSplittingPlugin( + mockPluginService, + defaultProps, + mockHostListProviderService, + null, + null, + null, + null); + + assertThrows(ReadWriteSplittingSQLException.class, () -> + plugin.switchConnectionIfRequired(false)); + } + + @Test + public void testWrongRoleConnection_readerEndpointToWriter() throws SQLException { + when(mockPluginService.getCurrentConnection()).thenReturn(mockWriterConn); + when(mockPluginService.getCurrentHostSpec()).thenReturn(writerHostSpec); + when(mockPluginService.isInTransaction()).thenReturn(false); + when(mockPluginService.getHostRole(any())).thenReturn(HostRole.WRITER); // Wrong role for reader + + final SimpleReadWriteSplittingPlugin plugin = new SimpleReadWriteSplittingPlugin( + mockPluginService, + defaultProps, + mockHostListProviderService, + null, + null, + null, null); + + plugin.switchConnectionIfRequired(true); + + // While it should use the current connection as fallback, it should not store it. 
+ assertNull(plugin.getReaderConnection()); + } + + @Test + public void testGetVerifiedConnection_wrongRoleRetryReader() throws SQLException { + when(mockPluginService.getCurrentConnection()).thenReturn(mockWriterConn); + when(mockPluginService.getCurrentHostSpec()).thenReturn(writerHostSpec); + when(mockPluginService.isInTransaction()).thenReturn(false); + when(mockPluginService.connect(eq(readerHostSpec), any(Properties.class), any())) + .thenReturn(mockWriterConn) // First call returns wrong role + .thenReturn(mockReaderConn); // Second call returns correct role + when(mockPluginService.getHostRole(mockWriterConn)).thenReturn(HostRole.WRITER); + when(mockPluginService.getHostRole(mockReaderConn)).thenReturn(HostRole.READER); + + final SimpleReadWriteSplittingPlugin plugin = new SimpleReadWriteSplittingPlugin( + mockPluginService, + defaultProps, + mockHostListProviderService, + null, + null, + null, + readerHostSpec); + + plugin.switchConnectionIfRequired(true); + + verify(mockPluginService, times(2)) + .connect(eq(readerHostSpec), any(Properties.class), any()); + verify(mockWriterConn, times(1)).close(); + assertEquals(mockReaderConn, plugin.getReaderConnection()); + } + + @Test + public void testGetVerifiedConnection_wrongRoleRetryWriter() throws SQLException { + when(mockPluginService.getCurrentConnection()).thenReturn(mockReaderConn); + when(mockPluginService.getCurrentHostSpec()).thenReturn(readerHostSpec); + when(mockPluginService.isInTransaction()).thenReturn(false); + when(mockPluginService.connect(eq(writerHostSpec), any(Properties.class), any())) + .thenReturn(mockReaderConn) // First call returns wrong role + .thenReturn(mockWriterConn); // Second call returns correct role + when(mockPluginService.getHostRole(mockWriterConn)).thenReturn(HostRole.WRITER); + when(mockPluginService.getHostRole(mockReaderConn)).thenReturn(HostRole.READER); + + final SimpleReadWriteSplittingPlugin plugin = new SimpleReadWriteSplittingPlugin( + mockPluginService, + defaultProps, + mockHostListProviderService, + null, + null, + writerHostSpec, + null); + + plugin.switchConnectionIfRequired(false); + + verify(mockPluginService, times(2)) + .connect(eq(writerHostSpec), any(Properties.class), any()); + verify(mockReaderConn, times(1)).close(); + assertEquals(mockWriterConn, plugin.getWriterConnection()); + } + + @Test + public void testGetVerifiedConnection_sqlExceptionRetry() throws SQLException { + when(mockPluginService.getCurrentConnection()).thenReturn(mockWriterConn); + when(mockPluginService.getCurrentHostSpec()).thenReturn(writerHostSpec); + when(mockPluginService.isInTransaction()).thenReturn(false); + when(mockPluginService.connect(eq(readerHostSpec), any(Properties.class), any())) + .thenThrow(new SQLException("Connection failed")) + .thenReturn(mockReaderConn); + when(mockPluginService.getHostRole(mockReaderConn)).thenReturn(HostRole.READER); + when(mockPluginService.isLoginException(any(SQLException.class), any())).thenReturn(false); + + final SimpleReadWriteSplittingPlugin plugin = new SimpleReadWriteSplittingPlugin( + mockPluginService, + defaultProps, + mockHostListProviderService, + null, + null, + null, + readerHostSpec); + + plugin.switchConnectionIfRequired(true); + + verify(mockPluginService, times(2)) + .connect(eq(readerHostSpec), any(Properties.class), any()); + assertEquals(mockReaderConn, plugin.getReaderConnection()); + } + + @Test + public void testGetVerifiedConnection_loginExceptionRetry() throws SQLException { + 
when(mockPluginService.getCurrentConnection()).thenReturn(mockWriterConn); + when(mockPluginService.getCurrentHostSpec()).thenReturn(writerHostSpec); + when(mockPluginService.isInTransaction()).thenReturn(false); + when(mockPluginService.connect(eq(readerHostSpec), any(Properties.class), any())) + .thenThrow(new SQLException("Login exception")) + .thenReturn(mockReaderConn); + when(mockPluginService.getHostRole(mockReaderConn)).thenReturn(HostRole.READER); + when(mockPluginService.isLoginException(any(SQLException.class), any())).thenReturn(true); + + final SimpleReadWriteSplittingPlugin plugin = new SimpleReadWriteSplittingPlugin( + mockPluginService, + defaultProps, + mockHostListProviderService, + null, + null, + null, + readerHostSpec); + + plugin.switchConnectionIfRequired(true); + + verify(mockPluginService, times(1)) + .connect(eq(readerHostSpec), any(Properties.class), any()); + // While it should use the current connection as fallback, it should not store it. + assertNull(plugin.getReaderConnection()); + } + + @Test + public void testConnect_nonInitialConnection() throws SQLException { + when(mockConnectFunc.call()).thenReturn(mockWriterConn); + + final SimpleReadWriteSplittingPlugin plugin = new SimpleReadWriteSplittingPlugin( + mockPluginService, defaultProps); + + Connection result = plugin.connect("jdbc:postgresql", writerHostSpec, defaultProps, false, mockConnectFunc); + + assertEquals(mockWriterConn, result); + verify(mockConnectFunc, times(1)).call(); + verify(mockPluginService, times(0)).getHostRole(mockWriterConn); + } + + @Test + public void testConnect_verificationDisabled() throws SQLException { + Properties props = new Properties(); + SimpleReadWriteSplittingPlugin.SRW_WRITE_ENDPOINT.set(props, WRITE_ENDPOINT); + SimpleReadWriteSplittingPlugin.SRW_READ_ENDPOINT.set(props, READ_ENDPOINT); + SimpleReadWriteSplittingPlugin.VERIFY_NEW_SRW_CONNECTIONS.set(props, "false"); + + when(mockConnectFunc.call()).thenReturn(mockWriterConn); + + final SimpleReadWriteSplittingPlugin plugin = new SimpleReadWriteSplittingPlugin( + mockPluginService, props); + + Connection result = plugin.connect("jdbc:postgresql", writerHostSpec, props, true, mockConnectFunc); + + assertEquals(mockWriterConn, result); + verify(mockConnectFunc, times(1)).call(); + verify(mockPluginService, times(0)).getHostRole(mockWriterConn); + } + + @Test + public void testConnect_writerClusterEndpoint() throws SQLException { + final HostSpec writerClusterHost = new HostSpecBuilder(new SimpleHostAvailabilityStrategy()) + .host("test-cluster.cluster-xyz.us-east-1.rds.amazonaws.com").port(TEST_PORT).role(HostRole.WRITER).build(); + + when(mockPluginService.connect(eq(writerClusterHost), any(Properties.class), any())) + .thenReturn(mockWriterConn); + when(mockPluginService.getHostRole(mockWriterConn)).thenReturn(HostRole.WRITER); + + final SimpleReadWriteSplittingPlugin plugin = new SimpleReadWriteSplittingPlugin( + mockPluginService, + defaultProps, + mockHostListProviderService, + null, + null, + null, + null); + + Connection result = plugin.connect("jdbc:postgresql", writerClusterHost, defaultProps, true, null); + + assertEquals(mockWriterConn, result); + verify(mockPluginService, times(1)).connect(eq(writerClusterHost), any(Properties.class), any()); + verify(mockPluginService, times(1)).getHostRole(mockWriterConn); + } + + @Test + public void testConnect_readerClusterEndpoint() throws SQLException { + HostSpec readerClusterHost = new HostSpecBuilder(new SimpleHostAvailabilityStrategy()) + 
.host("test-cluster.cluster-ro-xyz.us-east-1.rds.amazonaws.com").port(TEST_PORT).role(HostRole.READER).build(); + + when(mockConnectFunc.call()).thenReturn(mockReaderConn); + when(mockPluginService.getHostRole(mockReaderConn)).thenReturn(HostRole.READER); + + final SimpleReadWriteSplittingPlugin plugin = new SimpleReadWriteSplittingPlugin( + mockPluginService, + defaultProps, + mockHostListProviderService, + null, + null, + null, + null); + + Connection result = plugin.connect("jdbc:postgresql", readerClusterHost, defaultProps, true, mockConnectFunc); + + assertEquals(mockReaderConn, result); + verify(mockPluginService, times(0)).connect(eq(readerClusterHost), any(Properties.class), any()); + verify(mockPluginService, times(1)).getHostRole(mockReaderConn); + verify(mockConnectFunc, times(1)).call(); + } + + @Test + public void testConnect_verificationFailsFallback() throws SQLException { + final HostSpec writerClusterHost = new HostSpecBuilder(new SimpleHostAvailabilityStrategy()) + .host("test-cluster.cluster-xyz.us-east-1.rds.amazonaws.com").port(TEST_PORT).role(HostRole.WRITER).build(); + + Properties timeoutProps = new Properties(); + SimpleReadWriteSplittingPlugin.SRW_WRITE_ENDPOINT.set(timeoutProps, WRITE_ENDPOINT); + SimpleReadWriteSplittingPlugin.SRW_READ_ENDPOINT.set(timeoutProps, READ_ENDPOINT); + SimpleReadWriteSplittingPlugin.SRW_CONNECT_RETRY_TIMEOUT_MS.set(timeoutProps, "5"); + + when(mockConnectFunc.call()).thenReturn(mockReaderConn); + when(mockPluginService.getHostRole(mockReaderConn)).thenReturn(HostRole.READER); + + final SimpleReadWriteSplittingPlugin plugin = new SimpleReadWriteSplittingPlugin( + mockPluginService, timeoutProps); + + Connection result = plugin.connect("jdbc:postgresql", writerClusterHost, timeoutProps, true, mockConnectFunc); + + assertEquals(mockReaderConn, result); + verify(mockPluginService, times(1)).getHostRole(mockReaderConn); + verify(mockConnectFunc, times(2)).call(); + } + + @Test + public void testConnect_nonRdsClusterEndpoint() throws SQLException { + final HostSpec customHost = new HostSpecBuilder(new SimpleHostAvailabilityStrategy()) + .host("custom-db.example.com").port(TEST_PORT).role(HostRole.WRITER).build(); + + when(mockConnectFunc.call()).thenReturn(mockWriterConn); + + final SimpleReadWriteSplittingPlugin plugin = new SimpleReadWriteSplittingPlugin( + mockPluginService, defaultProps); + + Connection result = plugin.connect("jdbc:postgresql", customHost, defaultProps, true, mockConnectFunc); + + assertEquals(mockWriterConn, result); + verify(mockPluginService, times(0)).getHostRole(mockWriterConn); + verify(mockConnectFunc, times(1)).call(); + } + + @Test + public void testConnect_nonRdsClusterEndpointWriterVerify() throws SQLException { + final HostSpec customHost = new HostSpecBuilder(new SimpleHostAvailabilityStrategy()) + .host("custom-db.example.com").port(TEST_PORT).role(HostRole.WRITER).build(); + + Properties props = new Properties(); + SimpleReadWriteSplittingPlugin.SRW_WRITE_ENDPOINT.set(props, WRITE_ENDPOINT); + SimpleReadWriteSplittingPlugin.SRW_READ_ENDPOINT.set(props, READ_ENDPOINT); + SimpleReadWriteSplittingPlugin.VERIFY_INITIAL_CONNECTION_TYPE.set(props, "writer"); + + when(mockConnectFunc.call()).thenReturn(mockWriterConn); + when(mockPluginService.getHostRole(mockWriterConn)).thenReturn(HostRole.WRITER); + + final SimpleReadWriteSplittingPlugin plugin = new SimpleReadWriteSplittingPlugin( + mockPluginService, + props, + mockHostListProviderService, + null, + null, + null, + null); + + Connection result = 
plugin.connect("jdbc:postgresql", customHost, defaultProps, true, mockConnectFunc); + + assertEquals(mockWriterConn, result); + verify(mockPluginService, times(0)).connect(eq(customHost), any(Properties.class), any()); + verify(mockPluginService, times(1)).getHostRole(mockWriterConn); + verify(mockConnectFunc, times(1)).call(); + } + + @Test + public void testConnect_nonRdsClusterEndpointReaderVerify() throws SQLException { + final HostSpec customHost = new HostSpecBuilder(new SimpleHostAvailabilityStrategy()) + .host("custom-db.example.com").port(TEST_PORT).role(HostRole.READER).build(); + + Properties props = new Properties(); + SimpleReadWriteSplittingPlugin.SRW_WRITE_ENDPOINT.set(props, WRITE_ENDPOINT); + SimpleReadWriteSplittingPlugin.SRW_READ_ENDPOINT.set(props, READ_ENDPOINT); + SimpleReadWriteSplittingPlugin.VERIFY_INITIAL_CONNECTION_TYPE.set(props, "reader"); + + when(mockConnectFunc.call()).thenReturn(mockReaderConn); + when(mockPluginService.getHostRole(mockReaderConn)).thenReturn(HostRole.READER); + + final SimpleReadWriteSplittingPlugin plugin = new SimpleReadWriteSplittingPlugin( + mockPluginService, + props, + mockHostListProviderService, + null, + null, + null, + null); + + Connection result = plugin.connect("jdbc:postgresql", customHost, defaultProps, true, mockConnectFunc); + + assertEquals(mockReaderConn, result); + verify(mockPluginService, times(0)).connect(eq(customHost), any(Properties.class), any()); + verify(mockPluginService, times(1)).getHostRole(mockReaderConn); + verify(mockConnectFunc, times(1)).call(); + } + + @Test + public void testClosePooledReaderConnectionAfterSetReadOnly() throws SQLException { + doReturn(writerHostSpec) + .doReturn(writerHostSpec) + .doReturn(readerHostSpec) + .when(this.mockPluginService).getCurrentHostSpec(); + doReturn(mockReaderConn).when(mockPluginService).connect(readerHostSpec, null); + when(mockPluginService.getDriverProtocol()).thenReturn("jdbc:postgresql://"); + when(mockPluginService.isPooledConnection()).thenReturn(true); + + final SimpleReadWriteSplittingPlugin plugin = new SimpleReadWriteSplittingPlugin( + mockPluginService, + defaultProps, + mockHostListProviderService, + mockWriterConn, + null, + writerHostSpec, + readerHostSpec); + final SimpleReadWriteSplittingPlugin spyPlugin = spy(plugin); + + spyPlugin.switchConnectionIfRequired(true); + spyPlugin.switchConnectionIfRequired(false); + + verify(spyPlugin, times(1)).closeConnectionIfIdle(eq(mockReaderConn)); + } + + @Test + public void testClosePooledWriterConnectionAfterSetReadOnly() throws SQLException { + doReturn(writerHostSpec) + .doReturn(writerHostSpec) + .doReturn(readerHostSpec) + .doReturn(readerHostSpec) + .doReturn(writerHostSpec) + .when(this.mockPluginService).getCurrentHostSpec(); + doReturn(mockWriterConn).when(mockPluginService).connect(writerHostSpec, null); + when(mockPluginService.getDriverProtocol()).thenReturn("jdbc:postgresql://"); + when(mockPluginService.isPooledConnection()).thenReturn(true); + + final SimpleReadWriteSplittingPlugin plugin = new SimpleReadWriteSplittingPlugin( + mockPluginService, + defaultProps, + mockHostListProviderService, + null, + null, + writerHostSpec, + readerHostSpec); + final SimpleReadWriteSplittingPlugin spyPlugin = spy(plugin); + + spyPlugin.switchConnectionIfRequired(true); + spyPlugin.switchConnectionIfRequired(false); + spyPlugin.switchConnectionIfRequired(true); + + verify(spyPlugin, times(1)).closeConnectionIfIdle(eq(mockWriterConn)); + } +} diff --git 
a/wrapper/src/test/java/software/amazon/jdbc/util/ConnectionUrlParserTest.java b/wrapper/src/test/java/software/amazon/jdbc/util/ConnectionUrlParserTest.java
index 056fccd78..39e8ac98b 100644
--- a/wrapper/src/test/java/software/amazon/jdbc/util/ConnectionUrlParserTest.java
+++ b/wrapper/src/test/java/software/amazon/jdbc/util/ConnectionUrlParserTest.java
@@ -17,7 +17,10 @@ package software.amazon.jdbc.util;

 import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
 import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;

 import java.util.ArrayList;
 import java.util.Arrays;
@@ -36,6 +39,64 @@ class ConnectionUrlParserTest {

+  @Test
+  void testParseHostPortPairWithRegionPrefix() {
+    Pair<String, HostSpec> pair = ConnectionUrlParser.parseHostPortPairWithRegionPrefix(
+        "?.XYZ.us-east-2.rds.amazonaws.com",
+        () -> new HostSpecBuilder(new SimpleHostAvailabilityStrategy()));
+    assertEquals("us-east-2", pair.getValue1());
+    assertEquals("?.XYZ.us-east-2.rds.amazonaws.com", pair.getValue2().getHost());
+    assertFalse(pair.getValue2().isPortSpecified());
+
+    pair = ConnectionUrlParser.parseHostPortPairWithRegionPrefix(
+        "[test-region]?.XYZ.us-east-2.rds.amazonaws.com",
+        () -> new HostSpecBuilder(new SimpleHostAvailabilityStrategy()));
+    assertEquals("test-region", pair.getValue1());
+    assertEquals("?.XYZ.us-east-2.rds.amazonaws.com", pair.getValue2().getHost());
+    assertFalse(pair.getVal2().isPortSpecified());
+
+    pair = ConnectionUrlParser.parseHostPortPairWithRegionPrefix(
+        "?.XYZ.us-east-2.rds.amazonaws.com:9999",
+        () -> new HostSpecBuilder(new SimpleHostAvailabilityStrategy()));
+    assertEquals("us-east-2", pair.getValue1());
+    assertEquals("?.XYZ.us-east-2.rds.amazonaws.com", pair.getValue2().getHost());
+    assertTrue(pair.getValue2().isPortSpecified());
+    assertEquals(9999, pair.getValue2().getPort());
+
+    pair = ConnectionUrlParser.parseHostPortPairWithRegionPrefix(
+        "[test-region]?.XYZ.us-east-2.rds.amazonaws.com:9999",
+        () -> new HostSpecBuilder(new SimpleHostAvailabilityStrategy()));
+    assertEquals("test-region", pair.getValue1());
+    assertEquals("?.XYZ.us-east-2.rds.amazonaws.com", pair.getValue2().getHost());
+    assertTrue(pair.getValue2().isPortSpecified());
+    assertEquals(9999, pair.getValue2().getPort());
+
+    pair = ConnectionUrlParser.parseHostPortPairWithRegionPrefix(
+        "[test-region]?.custom-domain.com",
+        () -> new HostSpecBuilder(new SimpleHostAvailabilityStrategy()));
+    assertEquals("test-region", pair.getValue1());
+    assertEquals("?.custom-domain.com", pair.getValue2().getHost());
+    assertFalse(pair.getValue2().isPortSpecified());
+
+    pair = ConnectionUrlParser.parseHostPortPairWithRegionPrefix(
+        "[test-region]?.custom-domain.com:9999",
+        () -> new HostSpecBuilder(new SimpleHostAvailabilityStrategy()));
+    assertEquals("test-region", pair.getValue1());
+    assertEquals("?.custom-domain.com", pair.getValue2().getHost());
+    assertTrue(pair.getValue2().isPortSpecified());
+    assertEquals(9999, pair.getValue2().getPort());
+
+    assertThrows(IllegalArgumentException.class, () ->
+        ConnectionUrlParser.parseHostPortPairWithRegionPrefix(
+            "?.custom-domain.com",
+            () -> new HostSpecBuilder(new SimpleHostAvailabilityStrategy())));
+
+    assertThrows(IllegalArgumentException.class, () ->
+        ConnectionUrlParser.parseHostPortPairWithRegionPrefix(
+            "?.custom-domain.com:9999",
+            () -> new HostSpecBuilder(new 
SimpleHostAvailabilityStrategy())));
+  }
+
 @ParameterizedTest
 @MethodSource("testGetHostsFromConnectionUrlArguments")
 void testGetHostsFromConnectionUrl_returnCorrectHostList(String testUrl, List<HostSpec> expected) {
diff --git a/wrapper/src/test/java/software/amazon/jdbc/util/RdsUtilsTests.java b/wrapper/src/test/java/software/amazon/jdbc/util/RdsUtilsTests.java
index d2ef2e1eb..a0b5090ab 100644
--- a/wrapper/src/test/java/software/amazon/jdbc/util/RdsUtilsTests.java
+++ b/wrapper/src/test/java/software/amazon/jdbc/util/RdsUtilsTests.java
@@ -37,6 +37,8 @@ public class RdsUtilsTests {
       "instance-test-name.XYZ.us-east-2.rds.amazonaws.com";
   private static final String usEastRegionProxy =
       "proxy-test-name.proxy-XYZ.us-east-2.rds.amazonaws.com";
+  private static final String usEastRegionProxyEndpoint =
+      "endpoint-test-name.endpoint.proxy-XYZ.us-east-2.rds.amazonaws.com";
   private static final String usEastRegionCustomDomain =
       "custom-test-name.cluster-custom-XYZ.us-east-2.rds.amazonaws.com";
   private static final String usEastRegionLimitlessDbShardGroup =
@@ -119,6 +121,9 @@ public class RdsUtilsTests {
   private static final String usIsoEastRegionLimitlessDbShardGroup =
       "database-test-name.shardgrp-XYZ.rds.us-iso-east-1.c2s.ic.gov";

+  private static final String globalDbWriterCluster =
+      "global-cluster-test-name.global-XYZ.global.rds.amazonaws.com";
+
   @BeforeEach
   public void setupTests() {
     RdsUtils.clearCache();
@@ -132,6 +137,7 @@ public void testIsRdsDns() {
     assertTrue(target.isRdsDns(usEastRegionClusterReadOnly));
     assertTrue(target.isRdsDns(usEastRegionInstance));
     assertTrue(target.isRdsDns(usEastRegionProxy));
+    assertTrue(target.isRdsDns(usEastRegionProxyEndpoint));
     assertTrue(target.isRdsDns(usEastRegionCustomDomain));
     assertFalse(target.isRdsDns(usEastRegionElbUrl));
     assertFalse(target.isRdsDns(usEastRegionElbUrlTrailingDot));
@@ -225,6 +231,7 @@ public void testIsRdsClusterDns() {
     assertTrue(target.isRdsClusterDns(usEastRegionClusterReadOnly));
     assertFalse(target.isRdsClusterDns(usEastRegionInstance));
     assertFalse(target.isRdsClusterDns(usEastRegionProxy));
+    assertFalse(target.isRdsClusterDns(usEastRegionProxyEndpoint));
     assertFalse(target.isRdsClusterDns(usEastRegionCustomDomain));
     assertFalse(target.isRdsClusterDns(usEastRegionElbUrl));
     assertFalse(target.isRdsClusterDns(usEastRegionLimitlessDbShardGroup));
@@ -265,6 +272,7 @@ public void testIsWriterClusterDns() {
     assertFalse(target.isWriterClusterDns(usEastRegionClusterReadOnly));
     assertFalse(target.isWriterClusterDns(usEastRegionInstance));
     assertFalse(target.isWriterClusterDns(usEastRegionProxy));
+    assertFalse(target.isWriterClusterDns(usEastRegionProxyEndpoint));
     assertFalse(target.isWriterClusterDns(usEastRegionCustomDomain));
     assertFalse(target.isWriterClusterDns(usEastRegionElbUrl));
     assertFalse(target.isWriterClusterDns(usEastRegionLimitlessDbShardGroup));
@@ -305,6 +313,7 @@ public void testIsReaderClusterDns() {
     assertTrue(target.isReaderClusterDns(usEastRegionClusterReadOnly));
     assertFalse(target.isReaderClusterDns(usEastRegionInstance));
     assertFalse(target.isReaderClusterDns(usEastRegionProxy));
+    assertFalse(target.isReaderClusterDns(usEastRegionProxyEndpoint));
     assertFalse(target.isReaderClusterDns(usEastRegionCustomDomain));
     assertFalse(target.isReaderClusterDns(usEastRegionElbUrl));
     assertFalse(target.isReaderClusterDns(usEastRegionLimitlessDbShardGroup));
@@ -345,6 +354,7 @@ public void testIsLimitlessDbShardGroupDns() {
     assertFalse(target.isLimitlessDbShardGroupDns(usEastRegionClusterReadOnly));
     assertFalse(target.isLimitlessDbShardGroupDns(usEastRegionInstance));
     assertFalse(target.isLimitlessDbShardGroupDns(usEastRegionProxy));
+    assertFalse(target.isLimitlessDbShardGroupDns(usEastRegionProxyEndpoint));
     assertFalse(target.isLimitlessDbShardGroupDns(usEastRegionCustomDomain));
     assertFalse(target.isLimitlessDbShardGroupDns(usEastRegionElbUrl));
     assertTrue(target.isLimitlessDbShardGroupDns(usEastRegionLimitlessDbShardGroup));
@@ -386,6 +396,7 @@ public void testGetRdsRegion() {
     assertEquals(expectedHostPattern, target.getRdsRegion(usEastRegionClusterReadOnly));
     assertEquals(expectedHostPattern, target.getRdsRegion(usEastRegionInstance));
     assertEquals(expectedHostPattern, target.getRdsRegion(usEastRegionProxy));
+    assertEquals(expectedHostPattern, target.getRdsRegion(usEastRegionProxyEndpoint));
     assertEquals(expectedHostPattern, target.getRdsRegion(usEastRegionCustomDomain));
     assertEquals(expectedHostPattern, target.getRdsRegion(usEastRegionElbUrl));
     assertEquals(expectedHostPattern, target.getRdsRegion(usEastRegionLimitlessDbShardGroup));
@@ -428,6 +439,18 @@ public void testGetRdsRegion() {
     assertEquals(euRedshiftExpectedHostPattern, target.getRdsRegion(euRedshift));
   }

+  @Test
+  public void testIsGlobalDbWriterClusterDns() {
+    assertFalse(target.isGlobalDbWriterClusterDns(usEastRegionCluster));
+    assertTrue(target.isGlobalDbWriterClusterDns(globalDbWriterCluster));
+  }
+
+  @Test
+  public void testIsRdsProxyEndpointDns() {
+    assertFalse(target.isRdsProxyEndpointDns(usEastRegionProxy));
+    assertTrue(target.isRdsProxyEndpointDns(usEastRegionProxyEndpoint));
+  }
+
   @Test
   public void testBrokenPathsHostPattern() {
     final String incorrectChinaHostPattern = "?.rds.cn-northwest-1.rds.amazonaws.com.cn";
diff --git a/wrapper/src/test/java/software/amazon/jdbc/util/WrapperUtilsTest.java b/wrapper/src/test/java/software/amazon/jdbc/util/WrapperUtilsTest.java
index afcae7530..3a285939a 100644
--- a/wrapper/src/test/java/software/amazon/jdbc/util/WrapperUtilsTest.java
+++ b/wrapper/src/test/java/software/amazon/jdbc/util/WrapperUtilsTest.java
@@ -45,6 +45,8 @@
 import software.amazon.jdbc.ConnectionPluginManager;
 import software.amazon.jdbc.JdbcCallable;
 import software.amazon.jdbc.JdbcMethod;
+import software.amazon.jdbc.PluginManagerService;
+import software.amazon.jdbc.PluginService;
 import software.amazon.jdbc.util.telemetry.TelemetryContext;
 import software.amazon.jdbc.util.telemetry.TelemetryFactory;
 import software.amazon.jdbc.wrapper.CallableStatementWrapper;
@@ -55,7 +57,10 @@ public class WrapperUtilsTest {

   @Mock ConnectionWrapper mockConnectionWrapper;
+  @Mock FullServicesContainer mockServicesContainer;
   @Mock ConnectionPluginManager mockPluginManager;
+  @Mock PluginService mockPluginService;
+  @Mock PluginManagerService mockPluginManagerService;
   @Mock TelemetryFactory mockTelemetryFactory;
   @Mock TelemetryContext mockTelemetryContext;
   @Mock Object object;
@@ -73,6 +78,10 @@ void init() {
     when(mockPluginManager.getTelemetryFactory()).thenReturn(mockTelemetryFactory);
     when(mockTelemetryFactory.openTelemetryContext(anyString(), any())).thenReturn(mockTelemetryContext);
     when(mockTelemetryFactory.openTelemetryContext(eq(null), any())).thenReturn(mockTelemetryContext);
+    when(mockConnectionWrapper.getServicesContainer()).thenReturn(mockServicesContainer);
+    when(mockServicesContainer.getConnectionPluginManager()).thenReturn(mockPluginManager);
+    when(mockServicesContainer.getPluginService()).thenReturn(mockPluginService);
+    
when(mockServicesContainer.getPluginManagerService()).thenReturn(mockPluginManagerService); } private void mockExecuteReturnValue(Object returnValue) { diff --git a/wrapper/src/test/java/software/amazon/jdbc/util/monitoring/MonitorServiceImplTest.java b/wrapper/src/test/java/software/amazon/jdbc/util/monitoring/MonitorServiceImplTest.java index 895616a26..debb3f243 100644 --- a/wrapper/src/test/java/software/amazon/jdbc/util/monitoring/MonitorServiceImplTest.java +++ b/wrapper/src/test/java/software/amazon/jdbc/util/monitoring/MonitorServiceImplTest.java @@ -28,8 +28,7 @@ import static org.mockito.Mockito.spy; import java.sql.SQLException; -import java.util.Collections; -import java.util.HashSet; +import java.util.EnumSet; import java.util.Properties; import java.util.concurrent.TimeUnit; import org.junit.jupiter.api.AfterEach; @@ -86,7 +85,7 @@ public void testMonitorError_monitorReCreated() throws SQLException, Interrupted NoOpMonitor.class, TimeUnit.MINUTES.toNanos(1), TimeUnit.MINUTES.toNanos(1), - new HashSet<>(Collections.singletonList(MonitorErrorResponse.RECREATE)), + EnumSet.of(MonitorErrorResponse.RECREATE), null ); String key = "testMonitor"; @@ -131,7 +130,7 @@ public void testMonitorStuck_monitorReCreated() throws SQLException, Interrupted NoOpMonitor.class, TimeUnit.MINUTES.toNanos(1), 1, // heartbeat times out immediately - new HashSet<>(Collections.singletonList(MonitorErrorResponse.RECREATE)), + EnumSet.of(MonitorErrorResponse.RECREATE), null ); String key = "testMonitor"; @@ -178,7 +177,7 @@ public void testMonitorExpired() throws SQLException, InterruptedException { TimeUnit.MINUTES.toNanos(1), // even though we pass a re-create policy, we should not re-create it if the monitor is expired since this // indicates it is not being used. - new HashSet<>(Collections.singletonList(MonitorErrorResponse.RECREATE)), + EnumSet.of(MonitorErrorResponse.RECREATE), null ); String key = "testMonitor"; @@ -242,7 +241,7 @@ public void testRemove() throws SQLException, InterruptedException { TimeUnit.MINUTES.toNanos(1), // even though we pass a re-create policy, we should not re-create it if the monitor is expired since this // indicates it is not being used. - new HashSet<>(Collections.singletonList(MonitorErrorResponse.RECREATE)), + EnumSet.of(MonitorErrorResponse.RECREATE), null ); @@ -278,7 +277,7 @@ public void testStopAndRemove() throws SQLException, InterruptedException { TimeUnit.MINUTES.toNanos(1), // even though we pass a re-create policy, we should not re-create it if the monitor is expired since this // indicates it is not being used. 
- new HashSet<>(Collections.singletonList(MonitorErrorResponse.RECREATE)), + EnumSet.of(MonitorErrorResponse.RECREATE), null ); diff --git a/wrapper/src/test/resources/hibernate_files/DataSourceTest.java b/wrapper/src/test/resources/hibernate_files/DataSourceTest.java index 624114400..38a6b1c78 100644 --- a/wrapper/src/test/resources/hibernate_files/DataSourceTest.java +++ b/wrapper/src/test/resources/hibernate_files/DataSourceTest.java @@ -4,6 +4,9 @@ */ package org.hibernate.orm.test.datasource; +import static org.hibernate.internal.util.StringHelper.split; +import static org.junit.jupiter.api.Assertions.assertTrue; + import jakarta.persistence.Entity; import jakarta.persistence.Id; import org.hibernate.cfg.Environment; @@ -21,10 +24,6 @@ import org.junit.jupiter.api.Disabled; import org.junit.jupiter.api.Test; - -import static org.hibernate.internal.util.StringHelper.split; -import static org.junit.jupiter.api.Assertions.assertTrue; - @Jpa(annotatedClasses = DataSourceTest.TestEntity.class, integrationSettings = @Setting(name = JdbcSettings.CONNECTION_PROVIDER, value = "org.hibernate.orm.test.datasource.TestDataSourceConnectionProvider")) diff --git a/wrapper/src/test/resources/hibernate_files/PostgreSQLCastingIntervalSecondJdbcType.java b/wrapper/src/test/resources/hibernate_files/PostgreSQLCastingIntervalSecondJdbcType.java index 7dcd98188..55f5386bf 100644 --- a/wrapper/src/test/resources/hibernate_files/PostgreSQLCastingIntervalSecondJdbcType.java +++ b/wrapper/src/test/resources/hibernate_files/PostgreSQLCastingIntervalSecondJdbcType.java @@ -9,7 +9,6 @@ import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; - import org.hibernate.dialect.Dialect; import org.hibernate.engine.spi.SessionFactoryImplementor; import org.hibernate.metamodel.mapping.JdbcMappingContainer; diff --git a/wrapper/src/test/resources/hibernate_files/PostgresIntervalSecondTest.java b/wrapper/src/test/resources/hibernate_files/PostgresIntervalSecondTest.java index f07faccac..065b83fb3 100644 --- a/wrapper/src/test/resources/hibernate_files/PostgresIntervalSecondTest.java +++ b/wrapper/src/test/resources/hibernate_files/PostgresIntervalSecondTest.java @@ -6,33 +6,30 @@ import static org.assertj.core.api.Assertions.assertThat; +import jakarta.persistence.Column; +import jakarta.persistence.Entity; +import jakarta.persistence.Id; +import jakarta.persistence.Table; import java.time.Duration; - import org.hibernate.annotations.JdbcTypeCode; import org.hibernate.cfg.AvailableSettings; import org.hibernate.dialect.PostgreSQLDialect; import org.hibernate.dialect.type.PostgreSQLIntervalSecondJdbcType; import org.hibernate.metamodel.spi.MappingMetamodelImplementor; import org.hibernate.persister.entity.EntityPersister; -import org.hibernate.type.SqlTypes; -import org.hibernate.type.descriptor.jdbc.JdbcType; -import org.hibernate.type.descriptor.jdbc.NumericJdbcType; -import org.hibernate.type.descriptor.jdbc.spi.JdbcTypeRegistry; - import org.hibernate.testing.orm.junit.DomainModel; import org.hibernate.testing.orm.junit.RequiresDialect; import org.hibernate.testing.orm.junit.ServiceRegistry; import org.hibernate.testing.orm.junit.SessionFactory; import org.hibernate.testing.orm.junit.SessionFactoryScope; import org.hibernate.testing.orm.junit.Setting; +import org.hibernate.type.SqlTypes; +import org.hibernate.type.descriptor.jdbc.JdbcType; +import org.hibernate.type.descriptor.jdbc.NumericJdbcType; +import org.hibernate.type.descriptor.jdbc.spi.JdbcTypeRegistry; import 
org.junit.jupiter.api.Disabled; import org.junit.jupiter.api.Test; -import jakarta.persistence.Column; -import jakarta.persistence.Entity; -import jakarta.persistence.Id; -import jakarta.persistence.Table; - /** * Test to see if using `@org.hibernate.annotations.JdbcTypeCode` or `@org.hibernate.annotations.JdbcType` * will override a default JdbcType set by a {@link AvailableSettings#PREFERRED_DURATION_JDBC_TYPE config property}. diff --git a/wrapper/src/test/resources/hibernate_files/StructEmbeddableArrayTest.java b/wrapper/src/test/resources/hibernate_files/StructEmbeddableArrayTest.java index c7c7faa20..46cc08277 100644 --- a/wrapper/src/test/resources/hibernate_files/StructEmbeddableArrayTest.java +++ b/wrapper/src/test/resources/hibernate_files/StructEmbeddableArrayTest.java @@ -4,6 +4,14 @@ */ package org.hibernate.orm.test.mapping.embeddable; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertInstanceOf; +import static org.junit.jupiter.api.Assertions.assertNull; + +import jakarta.persistence.Entity; +import jakarta.persistence.Id; +import jakarta.persistence.ParameterMode; +import jakarta.persistence.Tuple; import java.net.URL; import java.sql.Time; import java.sql.Timestamp; @@ -18,7 +26,6 @@ import java.util.List; import java.util.Set; import java.util.UUID; - import org.hibernate.annotations.Struct; import org.hibernate.boot.ResourceStreamLocator; import org.hibernate.boot.model.naming.PhysicalNamingStrategyStandardImpl; @@ -35,7 +42,6 @@ import org.hibernate.dialect.PostgresPlusDialect; import org.hibernate.procedure.ProcedureCall; import org.hibernate.procedure.ProcedureParameter; - import org.hibernate.testing.jdbc.SharedDriverManagerTypeCacheClearingIntegrator; import org.hibernate.testing.orm.domain.gambit.EntityOfBasics; import org.hibernate.testing.orm.domain.gambit.MutableValue; @@ -54,16 +60,6 @@ import org.junit.jupiter.api.Disabled; import org.junit.jupiter.api.Test; -import jakarta.persistence.Entity; -import jakarta.persistence.Id; -import jakarta.persistence.ParameterMode; -import jakarta.persistence.Tuple; - -import static org.junit.jupiter.api.Assertions.assertArrayEquals; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertInstanceOf; -import static org.junit.jupiter.api.Assertions.assertNull; - @BootstrapServiceRegistry( javaServices = @BootstrapServiceRegistry.JavaService( role = AdditionalMappingContributor.class,