diff --git a/adapter-jdbc/pom.xml b/adapter-jdbc/pom.xml
index f2900f257..878396c1d 100644
--- a/adapter-jdbc/pom.xml
+++ b/adapter-jdbc/pom.xml
@@ -51,7 +51,7 @@
org.postgresqlpostgresql
- 42.5.2
+ 42.6.0
diff --git a/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/JDBCOpMapper.java b/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/JDBCOpMapper.java
index d4d793f21..e307c36f7 100644
--- a/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/JDBCOpMapper.java
+++ b/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/JDBCOpMapper.java
@@ -52,15 +52,6 @@ public class JDBCOpMapper implements OpMapper {
String spaceName = op.getStaticConfigOr("space", "default");
JDBCSpace jdbcSpace = spaceCache.get(spaceName);
- int nbThreadNum = NumberUtils.toInt(op.getStaticConfig("threads", String.class));
- int maxConnNum = jdbcSpace.getMaxNumConn();
- if (nbThreadNum > maxConnNum) {
- throw new JDBCAdapterInvalidParamException(
- "JDBC connection is NOT thread safe. The total NB thread number (" + nbThreadNum +
- ") can NOT be greater than the maximum connection number 'num_conn' (" + maxConnNum + ")"
- );
- }
-
/*
* If the user provides a body element, then they want to provide the JSON or
* a data structure that can be converted into JSON, bypassing any further
diff --git a/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/JDBCSpace.java b/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/JDBCSpace.java
index 058bea758..533626a3b 100644
--- a/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/JDBCSpace.java
+++ b/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/JDBCSpace.java
@@ -20,8 +20,6 @@ import com.zaxxer.hikari.HikariConfig;
import com.zaxxer.hikari.HikariDataSource;
import io.nosqlbench.adapter.jdbc.exceptions.JDBCAdapterInvalidParamException;
import io.nosqlbench.adapter.jdbc.exceptions.JDBCAdapterUnexpectedException;
-import io.nosqlbench.adapter.jdbc.utils.JDBCAdapterUtil;
-import io.nosqlbench.adapter.jdbc.utils.JDBCPgVector;
import io.nosqlbench.api.config.standard.ConfigModel;
import io.nosqlbench.api.config.standard.NBConfigModel;
import io.nosqlbench.api.config.standard.NBConfiguration;
@@ -33,12 +31,9 @@ import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import java.sql.Connection;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.Optional;
+import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.atomic.AtomicInteger;
+import java.util.function.Supplier;
public class JDBCSpace implements AutoCloseable {
private final static Logger logger = LogManager.getLogger(JDBCSpace.class);
@@ -48,59 +43,38 @@ public class JDBCSpace implements AutoCloseable {
// NOTE: Since JDBC connection is NOT thread-safe, the total NB threads MUST be less
// than or equal to this number. This is to make sure one thread per connection.
private final static int DEFAULT_CONN_NUM = 5;
- private final int maxNumConn;
+ private int maxNumConn = DEFAULT_CONN_NUM;
- // For DML statements, how many statements to put together in one batch
+ // For DML write statements, how many statements to put together in one batch
// - 1 : no batch (default)
// - positive number: using batch
private final static int DEFAULT_DML_BATCH_NUM = 1;
- private final int dmlBatchNum;
+ private int dmlBatchNum = DEFAULT_DML_BATCH_NUM;
- private final long totalCycleNum;
- private static boolean isShuttingDown = false;
+ private long totalCycleNum;
+ private int totalThreadNum;
+ private boolean autoCommitCLI;
- private HikariConfig hikariConfig;
+ private boolean useHikariCP;
+ private final HikariConfig connConfig = new HikariConfig();
private HikariDataSource hikariDataSource;
- ConcurrentHashMap connections = new ConcurrentHashMap<>();
+ // Maintain client-side pooling just to make sure the allocated connections can
+ // be reclaimed quickly, instead of waiting for Hikari pooling to reclaim it eventually
+ public record ConnectionCacheKey(String connName) {
+ }
+ private final ConcurrentHashMap connections = new ConcurrentHashMap<>();
public JDBCSpace(String spaceName, NBConfiguration cfg) {
this.spaceName = spaceName;
- this.totalCycleNum = NumberUtils.toLong(cfg.getOptional("cycles").orElse("1"));
- int totalThreads = NumberUtils.toInt(cfg.getOptional("threads").orElse("1"));
- int numConnInput = NumberUtils.toInt(cfg.getOptional("num_conn").orElse("10"));
- this.maxNumConn = Math.min(totalThreads, numConnInput);
- if (this.maxNumConn < 1) {
- throw new JDBCAdapterInvalidParamException(
- "'num_conn' NB CLI parameter must be a positive number!"
- );
- }
-
- // Must be after the 'maxNumConn' statements and before the rest of the remaining statements!
this.initializeSpace(cfg);
- this.dmlBatchNum = NumberUtils.toInt(cfg.getOptional("dml_batch").orElse("1"));
- if (this.dmlBatchNum < 1) {
- throw new JDBCAdapterInvalidParamException(
- "'dml_batch' NB CLI parameter must be a positive number!"
- );
- }
- // According to JDBC spec,
- // - The commit behavior of executeBatch is always implementation-defined
- // when an error occurs and auto-commit is true.
- //
// In this adapter, we treat it as an error if 'autoCommit' is ON and using batch at the same time.
- if ( (this.dmlBatchNum > 1) && (hikariConfig.isAutoCommit()) ) {
+ if ( (this.dmlBatchNum > 1) && isAutoCommit() ) {
throw new JDBCAdapterInvalidParamException(
"Using batch, 'dml_batch'(" + this.dmlBatchNum + ") > 1, along with 'autoCommit' ON is not supported!"
);
}
-
- if (logger.isDebugEnabled()) {
- logger.debug("{} JDBC connections will be created [max(threads/{}, num_conn/{}]; " +
- "dml_batch: {}, autoCommit: {}",
- maxNumConn, totalThreads, numConnInput, dmlBatchNum, hikariConfig.isAutoCommit());
- }
}
@Override
@@ -108,62 +82,67 @@ public class JDBCSpace implements AutoCloseable {
shutdownSpace();
}
- public int getMaxNumConn() { return this.maxNumConn; }
+ public int getMaxNumConn() { return maxNumConn; }
+ public void setMaxNumConn(int i) { maxNumConn = i; }
- public int getDmlBatchNum() { return this.dmlBatchNum; }
+ public int getDmlBatchNum() { return dmlBatchNum; }
- public long getTotalCycleNum() { return this.totalCycleNum; }
+ public long getTotalCycleNum() { return totalCycleNum; }
+ public int getTotalThreadNum() { return totalThreadNum; }
- public boolean isShuttingDown() { return isShuttingDown; }
- public void enterShutdownStage() { isShuttingDown = true; }
-
-
- public HikariDataSource getHikariDataSource() {
- return this.hikariDataSource;
+ public boolean isAutoCommit() {
+ if (useHikariCP)
+ return connConfig.isAutoCommit();
+ else
+ return this.autoCommitCLI;
}
+ public boolean useHikariCP() { return useHikariCP; }
+ public HikariConfig getConnConfig() { return connConfig; }
- public Connection getConnection(String connectionName) {
- Connection connection = connections.get(connectionName);
- if (connection == null) {
- try {
- connection = hikariDataSource.getConnection();
- if (logger.isDebugEnabled()) {
- logger.debug("JDBC connection ({}) is successfully created: {}",
- connectionName, connection);
- }
- // Register 'vector' type
- JDBCPgVector.addVectorType(connection);
+ public HikariDataSource getHikariDataSource() { return hikariDataSource; }
- connections.put(connectionName, connection);
- }
- catch (Exception ex) {
- String exp = "Exception occurred while attempting to create a connection using the HikariDataSource";
- logger.error(exp, ex);
- throw new JDBCAdapterUnexpectedException(exp);
- }
- }
-
- return connection;
+ public Connection getConnection(ConnectionCacheKey key, Supplier connectionSupplier) {
+ return connections.computeIfAbsent(key, __ -> connectionSupplier.get());
}
private void initializeSpace(NBConfiguration cfg) {
- hikariConfig = new HikariConfig();
+ //
+ // NOTE: Although it looks like a good idea to use Hikari Connection Pooling
+ // But in my testing, it shows some strange behaviors such as
+ // 1) failed to allocate connection while the target server is completely working fine
+ // e.g. it failed consistently on a m5d.4xlarge testing bed but not on my mac.
+ // 2) doesn't really respect the 'max_connections' setting
+ // 3) it also appears to me that Hikari connection is slow
+ //
+ // Therefore, use `use_hikaricp` option to control whether to use Hikari connection pooling. When
+ // setting to 'false', it uses JDBC adapter's own (simple) connection management, with
+ // JDBC driver's `DriverManager` to create connection directly.
+ //
+ this.useHikariCP = BooleanUtils.toBoolean(cfg.getOptional("use_hikaricp").orElse("true"));
+ this.autoCommitCLI = BooleanUtils.toBoolean(cfg.getOptional("autoCommit").orElse("true"));
+ this.dmlBatchNum = NumberUtils.toInt(cfg.getOptional("dml_batch").orElse("1"));
+ if (this.dmlBatchNum < 0) dmlBatchNum = 1;
+ logger.info("CLI input parameters -- useHikariCP:{}, autoCommitCLI:{}, dmlBatchNum:{}",
+ useHikariCP, autoCommitCLI, dmlBatchNum);
- hikariConfig.setJdbcUrl(cfg.get("url"));
- hikariConfig.addDataSourceProperty("serverName", cfg.get("serverName"));
+ this.totalCycleNum = NumberUtils.toLong(cfg.getOptional("cycles").orElse("1"));
+ this.totalThreadNum = NumberUtils.toInt(cfg.getOptional("threads").orElse("1"));
+
+ connConfig.setJdbcUrl(cfg.get("url"));
+ connConfig.addDataSourceProperty("serverName", cfg.get("serverName"));
Optional databaseName = cfg.getOptional("databaseName");
if (databaseName.isPresent()) {
- hikariConfig.addDataSourceProperty("databaseName", databaseName.get());
+ connConfig.addDataSourceProperty("databaseName", databaseName.get());
}
int portNumber = Integer.parseInt(cfg.get("portNumber"));
- hikariConfig.addDataSourceProperty("portNumber", portNumber);
+ connConfig.addDataSourceProperty("portNumber", portNumber);
Optional user = cfg.getOptional("user");
if (user.isPresent()) {
- hikariConfig.setUsername(user.get());
+ connConfig.setUsername(user.get());
}
Optional password = cfg.getOptional("password");
@@ -171,7 +150,7 @@ public class JDBCSpace implements AutoCloseable {
if (user.isEmpty()) {
throw new OpConfigError("Both user and password options are required. Only password is supplied in this case.");
}
- hikariConfig.setPassword(password.get());
+ connConfig.setPassword(password.get());
} else {
if (user.isPresent()) {
throw new OpConfigError("Both user and password options are required. Only user is supplied in this case.");
@@ -179,77 +158,56 @@ public class JDBCSpace implements AutoCloseable {
}
Optional ssl = cfg.getOptional(Boolean.class, "ssl");
- hikariConfig.addDataSourceProperty("ssl", ssl.orElse(false));
+ connConfig.addDataSourceProperty("ssl", ssl.orElse(false));
Optional sslMode = cfg.getOptional("sslmode");
if (sslMode.isPresent()) {
- hikariConfig.addDataSourceProperty("sslmode", sslMode.get());
+ connConfig.addDataSourceProperty("sslmode", sslMode.get());
} else {
- hikariConfig.addDataSourceProperty("sslmode", "prefer");
+ connConfig.addDataSourceProperty("sslmode", "prefer");
}
Optional sslCert = cfg.getOptional("sslcert");
if (sslCert.isPresent()) {
- hikariConfig.addDataSourceProperty("sslcert", sslCert.get());
+ connConfig.addDataSourceProperty("sslcert", sslCert.get());
} /*else if(sslMode.isPresent() && (!"disable".equalsIgnoreCase(sslMode.get()) || !"allow".equalsIgnoreCase(sslMode.get())) || !"prefer".equalsIgnoreCase(sslMode.get())) {
throw new OpConfigError("When sslmode is true, sslcert should be provided.");
}*/
Optional sslRootCert = cfg.getOptional("sslrootcert");
if (sslRootCert.isPresent()) {
- hikariConfig.addDataSourceProperty("sslrootcert", sslRootCert.get());
+ connConfig.addDataSourceProperty("sslrootcert", sslRootCert.get());
}
- hikariConfig.addDataSourceProperty("applicationName", cfg.get("applicationName"));
- hikariConfig.addDataSourceProperty("rewriteBatchedInserts", cfg.getOrDefault("rewriteBatchedInserts", true));
+ connConfig.addDataSourceProperty("applicationName", cfg.get("applicationName"));
+ connConfig.addDataSourceProperty("rewriteBatchedInserts", cfg.getOrDefault("rewriteBatchedInserts", true));
- // We're managing the auto-commit behavior of connections ourselves and hence disabling the auto-commit.
- Optional autoCommitOpt = cfg.getOptional("autoCommit");
- boolean autoCommit = false;
- if (autoCommitOpt.isPresent()) autoCommit = BooleanUtils.toBoolean(autoCommitOpt.get());
- hikariConfig.setAutoCommit(autoCommit);
-
- hikariConfig.setKeepaliveTime(Integer.parseInt(cfg.get("keepaliveTime")));
-
- // HikariCP "maximumPoolSize" parameter is ignored.
+ connConfig.setKeepaliveTime(Integer.parseInt(cfg.get("keepaliveTime")));
// Use the NB "num_conn" parameter instead, wth 20% extra capacity
- hikariConfig.setMaximumPoolSize((int)Math.ceil(1.2*maxNumConn));
+ connConfig.setMaximumPoolSize((int) Math.ceil(1.2 * maxNumConn));
- this.hikariDataSource = new HikariDataSource(hikariConfig);
+ if (useHikariCP) {
+ this.hikariDataSource = new HikariDataSource(connConfig);
+ logger.info("hikariDataSource is created : {}", hikariDataSource);
+ }
}
private void shutdownSpace() {
- isShuttingDown = true;
-
try {
- waitUntilAllOpFinished(System.currentTimeMillis());
-
+ logger.info("Shutting down JDBCSpace -- total {} of connections is being closed ...", connections.size());
for (Connection connection : connections.values()) {
+ if (logger.isDebugEnabled()) {
+ logger.debug("Close connection : {}", connection);
+ }
connection.close();
}
} catch (Exception e) {
throw new JDBCAdapterUnexpectedException("Unexpected error when trying to close the JDBC connection!");
}
- hikariDataSource.close();
- }
-
- private void waitUntilAllOpFinished(long shutdownStartTimeMills) {
- final int timeToWaitInSec = 5;
- long timeElapsedMills;
- boolean continueChk;
-
- do {
- JDBCAdapterUtil.pauseCurThreadExec(1);
-
- long curTimeMills = System.currentTimeMillis();
- timeElapsedMills = curTimeMills - shutdownStartTimeMills;
- continueChk = (timeElapsedMills <= (timeToWaitInSec*1000));
- } while (continueChk);
-
- logger.info(
- "shutdownSpace::waitUntilAllOpFinished -- " +
- "shutdown time elapsed: " + timeElapsedMills + "ms.");
+ if (hikariDataSource != null) {
+ hikariDataSource.close();
+ }
}
public static NBConfigModel getConfigModel() {
@@ -259,8 +217,8 @@ public class JDBCSpace implements AutoCloseable {
.add(Param.defaultTo("dml_batch", DEFAULT_DML_BATCH_NUM)
.setDescription("The number of DML write statements in a batch. Defaults to 1. Ignored by DML read statements!" +
DEFAULT_DML_BATCH_NUM + "' (no batch)"))
- .add(Param.defaultTo("url", "jdbc:postgresql:/")
- .setDescription("The connection URL used to connect to the DBMS. Defaults to 'jdbc:postgresql:/'"))
+ .add(Param.defaultTo("use_hikaricp", "true")
+ .setDescription("Whether to use Hikari connection pooling (default: true)!"))
.add(Param.defaultTo("url", "jdbc:postgresql:/")
.setDescription("The connection URL used to connect to the DBMS. Defaults to 'jdbc:postgresql:/'"))
.add(Param.defaultTo("serverName", "localhost")
diff --git a/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/opdispensers/JDBCBaseOpDispenser.java b/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/opdispensers/JDBCBaseOpDispenser.java
index e5c520bbc..9abfe45bc 100644
--- a/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/opdispensers/JDBCBaseOpDispenser.java
+++ b/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/opdispensers/JDBCBaseOpDispenser.java
@@ -17,10 +17,12 @@
package io.nosqlbench.adapter.jdbc.opdispensers;
import io.nosqlbench.adapter.jdbc.JDBCSpace;
+import io.nosqlbench.adapter.jdbc.exceptions.JDBCAdapterInvalidParamException;
import io.nosqlbench.adapter.jdbc.optypes.JDBCOp;
import io.nosqlbench.adapters.api.activityimpl.BaseOpDispenser;
import io.nosqlbench.adapters.api.activityimpl.uniform.DriverAdapter;
import io.nosqlbench.adapters.api.templating.ParsedOp;
+import org.apache.commons.lang3.math.NumberUtils;
public abstract class JDBCBaseOpDispenser extends BaseOpDispenser {
protected static final String ERROR_STATEMENT_CREATION =
@@ -39,9 +41,4 @@ public abstract class JDBCBaseOpDispenser extends BaseOpDispenser numConnInput) {
+ throw new JDBCAdapterInvalidParamException(
+ "JDBC connection is NOT thread safe. For write workload, the total NB thread number (" + threadNum +
+ ") can NOT be greater than the maximum connection number 'num_conn' (" + numConnInput + ")"
+ );
+ }
+ }
+ maxNumConnFinal = Math.min(threadNum, maxNumConnFinal);
+ if (maxNumConnFinal < 1) {
+ throw new JDBCAdapterInvalidParamException(
+ "'num_conn' NB CLI parameter must be a positive number!"
+ );
+ }
+ jdbcSpace.setMaxNumConn(maxNumConnFinal);
+
+ logger.info("Total {} JDBC connections will be created [isReadStmt:{}, threads/{}, num_conn/{}]; " +
+ "dml_batch: {}, autoCommit: {}",
+ maxNumConnFinal, isReadStmt, threadNum, numConnInput,
+ jdbcSpace.getDmlBatchNum(), jdbcSpace.isAutoCommit());
+
+ // TODO: this is a current limitation applied by this adapter
+ // improve this behavior by allowing the user to choose
if (!isPreparedStatement && !isReadStatement) {
throw new JDBCAdapterInvalidParamException("DML write statements MUST be prepared!");
}
@@ -69,8 +102,6 @@ public class JDBCDMLOpDispenser extends JDBCBaseOpDispenser {
@Override
public JDBCDMLOp apply(long cycle) {
- checkShutdownEntry(cycle);
-
if (isReadStatement) {
return new JDBCDMLReadOp(
jdbcSpace,
diff --git a/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/optypes/JDBCDDLOp.java b/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/optypes/JDBCDDLOp.java
index 2577ad91c..a5330e743 100644
--- a/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/optypes/JDBCDDLOp.java
+++ b/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/optypes/JDBCDDLOp.java
@@ -33,20 +33,11 @@ public class JDBCDDLOp extends JDBCOp {
this.ddlStmtStr = ddlStmtStr;
}
- private Statement createDDLStatement() {
- try {
- return jdbcConnection.createStatement();
- } catch (SQLException e) {
- throw new JDBCAdapterUnexpectedException(
- "Unable to create a regular (non-prepared) JDBC statement");
- }
- }
@Override
public Object apply(long value) {
try {
- Statement stmt = createDDLStatement();
+ Statement stmt = jdbcConnection.createStatement();
stmt.execute(ddlStmtStr);
- closeStatement(stmt);
return true;
} catch (SQLException sqlException) {
throw new JDBCAdapterUnexpectedException(
diff --git a/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/optypes/JDBCDMLOp.java b/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/optypes/JDBCDMLOp.java
index a44e0d1c7..a7fb0ce80 100644
--- a/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/optypes/JDBCDMLOp.java
+++ b/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/optypes/JDBCDMLOp.java
@@ -18,7 +18,6 @@ package io.nosqlbench.adapter.jdbc.optypes;
import io.nosqlbench.adapter.jdbc.JDBCSpace;
import io.nosqlbench.adapter.jdbc.exceptions.JDBCAdapterInvalidParamException;
import io.nosqlbench.adapter.jdbc.exceptions.JDBCAdapterUnexpectedException;
-import io.nosqlbench.adapter.jdbc.exceptions.JDBCPgVectorException;
import io.nosqlbench.adapter.jdbc.utils.JDBCPgVector;
import org.apache.commons.lang3.StringUtils;
import org.apache.logging.log4j.LogManager;
@@ -73,12 +72,12 @@ public abstract class JDBCDMLOp extends JDBCOp {
}
// Only applicable to a prepared statement
- protected PreparedStatement setPrepStmtValues(PreparedStatement stmt, List