diff --git a/adapter-cockroachdb/pom.xml b/adapter-cockroachdb/pom.xml
deleted file mode 100644
index a8fd6cda6..000000000
--- a/adapter-cockroachdb/pom.xml
+++ /dev/null
@@ -1,62 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-    <modelVersion>4.0.0</modelVersion>
-
-    <parent>
-        <groupId>io.nosqlbench</groupId>
-        <artifactId>mvn-defaults</artifactId>
-        <version>4.17.32-SNAPSHOT</version>
-        <relativePath>../mvn-defaults</relativePath>
-    </parent>
-
-    <artifactId>adapter-cockroachdb</artifactId>
-    <packaging>jar</packaging>
-    <name>${project.artifactId}</name>
-
-    <description>A DriverAdapter driver for CockroachDB</description>
-
-    <dependencies>
-
-        <dependency>
-            <groupId>io.nosqlbench</groupId>
-            <artifactId>adapters-api</artifactId>
-            <version>4.17.32-SNAPSHOT</version>
-        </dependency>
-
-        <dependency>
-            <groupId>org.postgresql</groupId>
-            <artifactId>postgresql</artifactId>
-            <version>42.5.1</version>
-        </dependency>
-
-        <dependency>
-            <groupId>com.zaxxer</groupId>
-            <artifactId>HikariCP</artifactId>
-            <version>5.0.1</version>
-        </dependency>
-    </dependencies>
-</project>
diff --git a/adapter-cockroachdb/src/main/java/io/nosqlbench/adapter/cockroachdb/CockroachDBCmdType.java b/adapter-cockroachdb/src/main/java/io/nosqlbench/adapter/cockroachdb/CockroachDBCmdType.java
deleted file mode 100644
index dc9c53cb8..000000000
--- a/adapter-cockroachdb/src/main/java/io/nosqlbench/adapter/cockroachdb/CockroachDBCmdType.java
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Copyright (c) 2023 nosqlbench
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package io.nosqlbench.adapter.cockroachdb;
-
-/**
- * Op templates which are supported by the NoSQLBench CockroachDB driver are
- * enumerated below. These command names should mirror those in the official
- * CockroachDB API exactly.
- */
-public class CockroachDBCmdType {
-
-}
diff --git a/adapter-cockroachdb/src/main/java/io/nosqlbench/adapter/cockroachdb/CockroachDBSpace.java b/adapter-cockroachdb/src/main/java/io/nosqlbench/adapter/cockroachdb/CockroachDBSpace.java
deleted file mode 100644
index cd6ba8c63..000000000
--- a/adapter-cockroachdb/src/main/java/io/nosqlbench/adapter/cockroachdb/CockroachDBSpace.java
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Copyright (c) 2023 nosqlbench
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package io.nosqlbench.adapter.cockroachdb;
-
-import io.nosqlbench.api.config.standard.NBConfiguration;
-import io.nosqlbench.api.errors.OpConfigError;
-import org.postgresql.ds.PGSimpleDataSource;
-
-import javax.sql.DataSource;
-import java.sql.Connection;
-import java.util.Optional;
-
-public class CockroachDBSpace {
-    private final String name;
-    private final DataSource ds = null;
-//    private final HikariConfig hikariConfig = null;
-//    private final HikariDataSource hikariDataSource = null;
-    private Connection connection;
-
-    public CockroachDBSpace(String name, NBConfiguration cfg) {
-        this.name = name;
-        PGSimpleDataSource client = createClient(cfg);
-//        dynamoDB = new DynamoDB(client);
-    }
-
-    private PGSimpleDataSource createClient(NBConfiguration cfg) {
-        PGSimpleDataSource ds = new PGSimpleDataSource();
-
-        Optional<String> url = cfg.getOptional("url");
-        if(url.isEmpty()) {
-            throw new OpConfigError("url option is required.");
-        } else {
-            ds.setURL(url.get());
-        }
-
-        Optional<String> serverNames = cfg.getOptional("serverName");
-        if(serverNames.isPresent()) {
-            ds.setServerNames(new String[]{serverNames.get()});
-        } else {
-            throw new OpConfigError("Server name option is required.");
-        }
-
-        Optional<String> databaseName = cfg.getOptional("databaseName");
-        if(databaseName.isPresent()) {
-            ds.setDatabaseName(databaseName.get());
-        } else {
-            throw new OpConfigError("Database name option is required.");
-        }
-
-        Optional<Integer> portNumber = cfg.getOptional(Integer.class, "portNumber");
-        ds.setPortNumbers(new int[] { portNumber.orElse(26257) });
-
-        Optional<String> user = cfg.getOptional("user");
-        if(user.isPresent()) {
-            ds.setUser(user.get());
-        }
-
-        Optional<String> password = cfg.getOptional("password");
-        if(password.isPresent()) {
-            if(user.isEmpty()) {
-                throw new OpConfigError("Both user and password options are required. Only password is supplied in this case.");
-            }
-            ds.setPassword(password.get());
-        } else {
-            if(user.isPresent()) {
-                throw new OpConfigError("Both user and password options are required. Only user is supplied in this case.");
-            }
-        }
-
-        Optional<String> sslMode = cfg.getOptional("sslMode");
-        if(sslMode.isPresent()) {
-            ds.setSslMode(sslMode.get());
-        } else {
-            ds.setSslMode("verify-full");
-        }
-
-        Optional<String> applicationName = cfg.getOptional("applicationName");
-        if(applicationName.isPresent()) {
-            ds.setApplicationName(applicationName.get());
-        } else {
-            ds.setApplicationName("NoSQLBench");
-        }
-        Optional<Boolean> rewriteBatchedInserts = cfg.getOptional(Boolean.class, "rewriteBatchedInserts");
-        ds.setReWriteBatchedInserts(rewriteBatchedInserts.orElse(false));
-
-        return ds;
-    }
-
-    public static NBConfigModel getConfigModel() {
-        return ConfigModel.of(CockroachDBSpace.class)
-            .add(Param.optional("url"))
-            .add(Param.optional("serverName"))
-            .add(Param.optional("databaseName"))
-            //TODO remove these below
-            .add(Param.optional("client_socket_timeout"))
-            .add(Param.optional("client_execution_timeout"))
-            .add(Param.optional("client_max_connections"))
-            .add(Param.optional("client_max_error_retry"))
-            .add(Param.optional("client_user_agent_prefix"))
-            .add(Param.optional("client_consecutive_retries_before_throttling"))
-            .add(Param.optional("client_gzip"))
-            .add(Param.optional("client_tcp_keepalive"))
-            .add(Param.optional("client_disable_socket_proxy"))
-            .add(Param.optional("client_so_send_size_hint"))
-            .add(Param.optional("client_so_recv_size_hint"))
-            .asReadOnly();
-    }
-}
diff --git a/adapter-cockroachdb/src/main/resources/cockroachdb.md b/adapter-cockroachdb/src/main/resources/cockroachdb.md
deleted file mode 100644
index 7e84eaab5..000000000
--- a/adapter-cockroachdb/src/main/resources/cockroachdb.md
+++ /dev/null
@@ -1 +0,0 @@
-# cockroachdb driver
diff --git a/adapter-jdbc/pom.xml b/adapter-jdbc/pom.xml
new file mode 100644
index 000000000..96a522cc8
--- /dev/null
+++ b/adapter-jdbc/pom.xml
@@ -0,0 +1,115 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+
+    <parent>
+        <groupId>io.nosqlbench</groupId>
+        <artifactId>mvn-defaults</artifactId>
+        <version>5.17.1-SNAPSHOT</version>
+        <relativePath>../mvn-defaults</relativePath>
+    </parent>
+
+    <artifactId>adapter-jdbc</artifactId>
+    <packaging>jar</packaging>
+    <name>${project.artifactId}</name>
+
+    <description>A DriverAdapter driver for JDBC via PostgreSQL with HikariCP.</description>
+
+    <dependencies>
+
+        <dependency>
+            <groupId>io.nosqlbench</groupId>
+            <artifactId>adapters-api</artifactId>
+            <version>5.17.1-SNAPSHOT</version>
+        </dependency>
+        <dependency>
+            <groupId>io.nosqlbench</groupId>
+            <artifactId>engine-api</artifactId>
+            <version>5.17.1-SNAPSHOT</version>
+        </dependency>
+        <dependency>
+            <groupId>io.nosqlbench</groupId>
+            <artifactId>nb-annotations</artifactId>
+            <version>5.17.1-SNAPSHOT</version>
+            <scope>compile</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>org.postgresql</groupId>
+            <artifactId>postgresql</artifactId>
+            <version>42.5.1</version>
+        </dependency>
+
+        <dependency>
+            <groupId>com.zaxxer</groupId>
+            <artifactId>HikariCP</artifactId>
+            <version>5.0.1</version>
+        </dependency>
+    </dependencies>
+
+    <build>
+        <plugins>
+            <plugin>
+                <groupId>org.jacoco</groupId>
+                <artifactId>jacoco-maven-plugin</artifactId>
+                <version>0.8.8</version>
+                <executions>
+                    <execution>
+                        <id>prepare-agent</id>
+                        <goals>
+                            <goal>prepare-agent</goal>
+                        </goals>
+                    </execution>
+                    <execution>
+                        <id>report</id>
+                        <phase>test</phase>
+                        <goals>
+                            <goal>report</goal>
+                        </goals>
+                    </execution>
+                    <execution>
+                        <id>jacoco-check</id>
+                        <phase>verify</phase>
+                        <goals>
+                            <goal>check</goal>
+                        </goals>
+                        <configuration>
+                            <rules>
+                                <rule>
+                                    <element>BUNDLE</element>
+                                    <limits>
+                                        <limit>
+                                            <counter>INSTRUCTION</counter>
+                                            <value>COVEREDRATIO</value>
+                                            <minimum>0.00</minimum>
+                                            <maximum>1.00</maximum>
+                                        </limit>
+                                    </limits>
+                                </rule>
+                            </rules>
+                        </configuration>
+                    </execution>
+                </executions>
+            </plugin>
+        </plugins>
+    </build>
+</project>
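A note for orientation before the adapter classes: the JDBCSpace added later in this patch drives all of its connections through the HikariCP dependency declared above. The following is a minimal, self-contained sketch of the core HikariCP pattern it builds on — the JDBC URL and credentials here are placeholders, and JDBCSpace derives all of these values from NBConfiguration instead:

```java
import com.zaxxer.hikari.HikariConfig;
import com.zaxxer.hikari.HikariDataSource;

import java.sql.Connection;
import java.sql.SQLException;
import java.sql.Statement;

public class HikariSketch {
    public static void main(String[] args) throws SQLException {
        HikariConfig config = new HikariConfig();
        // Placeholder URL/credentials; JDBCSpace reads these from its config model.
        config.setJdbcUrl("jdbc:postgresql://localhost:5432/defaultdb");
        config.setUsername("app_user");
        config.setPassword("app_password");
        config.setMaximumPoolSize(40); // same default JDBCSpace uses
        config.setAutoCommit(false);   // JDBCSpace also defaults autoCommit to false

        // Connections are borrowed from and returned to the pool via try-with-resources.
        try (HikariDataSource ds = new HikariDataSource(config);
             Connection conn = ds.getConnection();
             Statement stmt = conn.createStatement()) {
            stmt.execute("SELECT 1");
            conn.commit(); // required here because autoCommit is off
        }
    }
}
```

The design choice this illustrates: the pool (HikariDataSource) is the long-lived object, while connections are cheap to borrow per unit of work — which is why JDBCSpace holds the pool for the lifetime of the space.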
diff --git a/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/JDBCDriverAdapter.java b/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/JDBCDriverAdapter.java
new file mode 100644
index 000000000..2a1095cbd
--- /dev/null
+++ b/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/JDBCDriverAdapter.java
@@ -0,0 +1,36 @@
+package io.nosqlbench.adapter.jdbc;
+
+import io.nosqlbench.adapter.jdbc.optypes.JDBCOp;
+import io.nosqlbench.api.config.standard.NBConfigModel;
+import io.nosqlbench.api.config.standard.NBConfiguration;
+import io.nosqlbench.engine.api.activityimpl.OpMapper;
+import io.nosqlbench.engine.api.activityimpl.uniform.BaseDriverAdapter;
+import io.nosqlbench.engine.api.activityimpl.uniform.DriverAdapter;
+import io.nosqlbench.engine.api.activityimpl.uniform.DriverSpaceCache;
+import io.nosqlbench.nb.annotations.Service;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+
+import java.util.function.Function;
+
+@Service(value = DriverAdapter.class, selector = "jdbc")
+public class JDBCDriverAdapter extends BaseDriverAdapter<JDBCOp, JDBCSpace> {
+    private final static Logger logger = LogManager.getLogger(JDBCDriverAdapter.class);
+
+    @Override
+    public OpMapper<JDBCOp> getOpMapper() {
+        DriverSpaceCache<? extends JDBCSpace> spaceCache = getSpaceCache();
+        NBConfiguration adapterConfig = getConfiguration();
+        return new JDBCOpMapper(this, adapterConfig, spaceCache);
+    }
+
+    @Override
+    public Function<String, ? extends JDBCSpace> getSpaceInitializer(NBConfiguration cfg) {
+        return (s) -> new JDBCSpace(s, cfg);
+    }
+
+    @Override
+    public NBConfigModel getConfigModel() {
+        return super.getConfigModel().add(JDBCSpace.getConfigModel());
+    }
+}
diff --git a/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/JDBCOpMapper.java b/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/JDBCOpMapper.java
new file mode 100644
index 000000000..79ff2f113
--- /dev/null
+++ b/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/JDBCOpMapper.java
@@ -0,0 +1,70 @@
+package io.nosqlbench.adapter.jdbc;
+
+import io.nosqlbench.adapter.jdbc.opdispensers.JDBCDDLOpDispenser;
+import io.nosqlbench.adapter.jdbc.optypes.JDBCOp;
+import io.nosqlbench.api.config.standard.NBConfiguration;
+import io.nosqlbench.engine.api.activityimpl.OpDispenser;
+import io.nosqlbench.engine.api.activityimpl.OpMapper;
+import io.nosqlbench.engine.api.activityimpl.uniform.DriverAdapter;
+import io.nosqlbench.engine.api.activityimpl.uniform.DriverSpaceCache;
+import io.nosqlbench.engine.api.templating.ParsedOp;
+import io.nosqlbench.engine.api.templating.TypeAndTarget;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+
+import java.sql.Connection;
+import java.util.function.LongFunction;
+
+public class JDBCOpMapper implements OpMapper<JDBCOp> {
+    private final static Logger logger = LogManager.getLogger(JDBCOpMapper.class);
+
+    private final DriverAdapter adapter;
+    private final NBConfiguration cfg;
+    private final DriverSpaceCache<? extends JDBCSpace> spaceCache;
+
+    public JDBCOpMapper(DriverAdapter adapter, NBConfiguration cfg,
+                        DriverSpaceCache<? extends JDBCSpace> spaceCache) {
+        this.adapter = adapter;
+        this.cfg = cfg;
+        this.spaceCache = spaceCache;
+    }
+
+    @Override
+    public OpDispenser<? extends JDBCOp> apply(ParsedOp op) {
+        LongFunction<String> spaceNameF = op.getAsFunctionOr("space", "default");
+        LongFunction<JDBCSpace> spaceFunc = l -> spaceCache.get(spaceNameF.apply(l));
+        // Since the only needed thing in the JDBCSpace is the connection, we can short-circuit
+        // to it here instead of stepping down from the cycle to the space to the connection.
+        LongFunction<Connection> connectionLongFunc = l -> spaceCache.get(spaceNameF.apply(l)).getConnection();
+
+        // CREATE|DROP TABLE|VIEW uses execute (as opposed to executeQuery, which returns a ResultSet)
+        // https://jdbc.postgresql.org/documentation/query/#example54dropping-a-table-in-jdbc
+        //return new JDBCQueryOpDispenser(adapter, spaceFunc, op);
+
+        /*
+         * If the user provides a body element, then they want to provide the JSON or
+         * a data structure that can be converted into JSON, bypassing any further
+         * specialized type-checking or op-type specific features.
+         */
+        if (op.isDefined("body")) {
+            throw new RuntimeException("This mode is reserved for later. Do not use the 'body' op field.");
+        } else {
+            TypeAndTarget<JDBCOpType, String> opType = op.getTypeAndTarget(JDBCOpType.class, String.class, "type", "stmt");
+
+            logger.info(() -> "Using " + opType.enumId + " statement form for '" + op.getName() + "'");
+
+            //return new JDBCQueryOpDispenser(adapter, spaceFunc, op/*, opType.targetFunction*/);
+
+            return switch (opType.enumId) {
+                case select -> null; // TODO: implement a SELECT (executeQuery) dispenser
+                case update -> null; // TODO: implement an INSERT/UPDATE/DELETE (executeUpdate) dispenser
+                case create, drop, ddl -> new JDBCDDLOpDispenser(adapter, connectionLongFunc, op, opType.targetFunction);
+            };
+        }
+    }
+}
diff --git a/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/JDBCOpType.java b/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/JDBCOpType.java
new file mode 100644
index 000000000..6c82243aa
--- /dev/null
+++ b/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/JDBCOpType.java
@@ -0,0 +1,17 @@
+package io.nosqlbench.adapter.jdbc;
+
+/**
+ * Op templates which are supported by the NoSQLBench JDBC driver are
+ * enumerated below. Each op type maps to one of the statement-execution
+ * styles of the {@link java.sql.Statement} API.
+ * @see <a href="https://jdbc.postgresql.org/documentation/query/">PostgreSQL JDBC query documentation</a>
+ */
+public enum JDBCOpType {
+    select, // used for SELECT operations; maps to executeQuery
+    update, // used for INSERT/UPDATE/DELETE operations; maps to executeUpdate
+    ddl,    // used for creating/modifying database objects; maps to execute
+    //JdbcQuery, // generic placeholder TODO - implement this differently
+    create, // used for CREATE operations; maps to execute
+    drop,   // used for DROP operations; maps to execute
+}
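To make the dispatch above concrete, the sketch below shows how each JDBCOpType value is intended to land on the java.sql.Statement API. The select and update branches of JDBCOpMapper are still TODO, so this is illustrative of the target behavior rather than code from the patch:

```java
import io.nosqlbench.adapter.jdbc.JDBCOpType;

import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;

public class OpTypeDispatchSketch {
    // Illustrative only: mirrors the switch in JDBCOpMapper once all branches exist.
    static void execute(Connection conn, JDBCOpType type, String sql) throws SQLException {
        try (Statement stmt = conn.createStatement()) {
            switch (type) {
                case select -> {
                    ResultSet rs = stmt.executeQuery(sql); // returns a ResultSet
                    while (rs.next()) { /* consume rows */ }
                }
                case update -> {
                    int rows = stmt.executeUpdate(sql);    // returns an update count
                }
                case create, drop, ddl -> stmt.execute(sql); // DDL: no result set expected
            }
        }
    }
}
```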
diff --git a/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/JDBCSpace.java b/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/JDBCSpace.java
new file mode 100644
index 000000000..5db378f5c
--- /dev/null
+++ b/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/JDBCSpace.java
@@ -0,0 +1,225 @@
+/*
+ * Copyright (c) 2023 nosqlbench
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package io.nosqlbench.adapter.jdbc;
+
+import com.zaxxer.hikari.HikariConfig;
+import com.zaxxer.hikari.HikariDataSource;
+import io.nosqlbench.api.config.standard.ConfigModel;
+import io.nosqlbench.api.config.standard.NBConfigModel;
+import io.nosqlbench.api.config.standard.NBConfiguration;
+import io.nosqlbench.api.config.standard.Param;
+import io.nosqlbench.api.errors.OpConfigError;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.postgresql.ds.PGSimpleDataSource;
+
+import javax.sql.DataSource;
+import java.sql.Connection;
+import java.util.Optional;
+
+public class JDBCSpace implements AutoCloseable {
+    private final static Logger logger = LogManager.getLogger(JDBCSpace.class);
+    private final String spaceName;
+    private DataSource ds;
+    private HikariConfig hikariConfig;
+    private HikariDataSource hikariDataSource;
+    private Connection connection;
+
+    public JDBCSpace(String spaceName, NBConfiguration cfg) {
+        this.spaceName = spaceName;
+        this.hikariDataSource = createClient(cfg);
+    }
+
+    public Connection getConnection() {
+        return this.connection;
+    }
+
+    public HikariDataSource getHikariDataSource() {
+        return this.hikariDataSource;
+    }
+
+    private HikariDataSource createClient(NBConfiguration cfg) {
+        PGSimpleDataSource ds = new PGSimpleDataSource();
+        hikariConfig = new HikariConfig();
+
+        Optional<String> url = cfg.getOptional("url");
+        if(url.isEmpty()) {
+            throw new OpConfigError("url option is required.");
+        } else {
+            ds.setURL(url.get());
+            hikariConfig.setJdbcUrl(url.get());
+        }
+
+        Optional<String> serverNames = cfg.getOptional("serverName");
+        if(serverNames.isPresent()) {
+            ds.setServerNames(new String[]{serverNames.get()});
+            //hds.setServerNames(new String[] {serverNames.get()});
+            hikariConfig.addDataSourceProperty("serverName", serverNames.get());
+        } else {
+            throw new OpConfigError("Server name option is required.");
+        }
+
+        Optional<String> databaseName = cfg.getOptional("databaseName");
+        if(databaseName.isPresent()) {
+            ds.setDatabaseName(databaseName.get());
+            hikariConfig.addDataSourceProperty("databaseName", databaseName.get());
+        } else {
+            throw new OpConfigError("Database name option is required.");
+        }
+
+        Optional<Integer> portNumber = cfg.getOptional(Integer.class, "portNumber");
+        ds.setPortNumbers(new int[] { portNumber.orElse(26257) });
+        hikariConfig.addDataSourceProperty("portNumber", portNumber.orElse(26257));
+
+        Optional<String> user = cfg.getOptional("user");
+        if(user.isPresent()) {
+            ds.setUser(user.get());
+            hikariConfig.setUsername(user.get());
+        }
+
+        Optional<String> password = cfg.getOptional("password");
+        if(password.isPresent()) {
+            if(user.isEmpty()) {
+                throw new OpConfigError("Both user and password options are required. Only password is supplied in this case.");
+            }
+            ds.setPassword(password.get());
+            hikariConfig.setPassword(password.get());
+        } else {
+            if(user.isPresent()) {
+                throw new OpConfigError("Both user and password options are required. Only user is supplied in this case.");
+            }
+        }
+
+        Optional<Boolean> ssl = cfg.getOptional(Boolean.class, "ssl");
+        if(ssl.isPresent()) {
+            ds.setSsl(ssl.get());
+            hikariConfig.addDataSourceProperty("ssl", ssl.get());
+        } else {
+            ds.setSsl(false);
+            hikariConfig.addDataSourceProperty("ssl", false);
+        }
+
+        Optional<String> sslMode = cfg.getOptional("sslmode");
+        if(sslMode.isPresent()) {
+            ds.setSslMode(sslMode.get());
+            hikariConfig.addDataSourceProperty("sslmode", sslMode.get());
+        } else {
+            ds.setSslMode("verify-full");
+            hikariConfig.addDataSourceProperty("sslmode", "verify-full");
+        }
+
+        Optional<String> sslCert = cfg.getOptional("sslcert");
+        if(sslCert.isPresent()) {
+            ds.setSslcert(sslCert.get());
+            hikariConfig.addDataSourceProperty("sslcert", sslCert.get());
+        } /*else if(sslMode.isPresent()
+                && !("disable".equalsIgnoreCase(sslMode.get())
+                    || "allow".equalsIgnoreCase(sslMode.get())
+                    || "prefer".equalsIgnoreCase(sslMode.get()))) {
+            throw new OpConfigError("When sslmode requires certificate verification, sslcert should be provided.");
+        }*/
+
+        Optional<String> sslRootCert = cfg.getOptional("sslrootcert");
+        if(sslRootCert.isPresent()) {
+            ds.setSslRootCert(sslRootCert.get());
+            hikariConfig.addDataSourceProperty("sslrootcert", sslRootCert.get());
+        }
+
+        Optional<String> applicationName = cfg.getOptional("applicationName");
+        if(applicationName.isPresent()) {
+            ds.setApplicationName(applicationName.get());
+            hikariConfig.addDataSourceProperty("applicationName", applicationName.get());
+        } else {
+            ds.setApplicationName("NoSQLBench CRDB");
+            hikariConfig.addDataSourceProperty("applicationName", "NoSQLBench CRDB");
+        }
+
+        Optional<Boolean> rewriteBatchedInserts = cfg.getOptional(Boolean.class, "rewriteBatchedInserts");
+        ds.setReWriteBatchedInserts(rewriteBatchedInserts.orElse(true));
+        hikariConfig.addDataSourceProperty("rewriteBatchedInserts", rewriteBatchedInserts.orElse(true));
+
+        Optional<Boolean> autoCommit = cfg.getOptional(Boolean.class, "autoCommit");
+        hikariConfig.setAutoCommit(autoCommit.orElse(false));
+
+        Optional<Integer> maximumPoolSize = cfg.getOptional(Integer.class, "maximumPoolSize");
+        hikariConfig.setMaximumPoolSize(maximumPoolSize.orElse(40));
+
+        Optional<Integer> keepaliveTime = cfg.getOptional(Integer.class, "keepaliveTime");
+        hikariConfig.setKeepaliveTime(keepaliveTime.orElse(150000));
+
+        HikariDataSource hds = new HikariDataSource(hikariConfig);
+        try {
+            this.connection = hds.getConnection();
+        } catch (Exception ex) {
+            String exp = "Exception occurred while attempting to create a connection using the Hikari Data Source";
+            logger.error(exp, ex);
+            throw new RuntimeException(exp, ex);
+        }
+
+        return hds;
+    }
+
+    public static NBConfigModel getConfigModel() {
+        return ConfigModel.of(JDBCSpace.class)
+            .add(Param.defaultTo("url", "jdbc:postgresql:/")
+                .setDescription("The connection URL used to connect to the DBMS. Defaults to 'jdbc:postgresql:/'"))
+            .add(Param.defaultTo("serverName", "localhost")
+                .setDescription("The host name of the server. Defaults to 'localhost'"))
+            .add(Param.optional("databaseName")
+                .setDescription("The database name. The default is to connect to a database with the same name as the user name used to connect to the server."))
+            // See https://github.com/brettwooldridge/HikariCP/tree/dev#gear-configuration-knobs-baby and https://jdbc.postgresql.org/documentation/use/
+            .add(Param.defaultTo("portNumber", 5432)
+                .setDescription("The port number the server is listening on. Defaults to the PostgreSQL® standard port number (5432)"))
+            .add(Param.optional("user"))
+            .add(Param.optional("password"))
+            .add(Param.optional("ssl"))
+            .add(Param.optional("sslmode"))
+            .add(Param.optional("sslcert"))
+            .add(Param.optional("sslrootcert"))
+            .add(Param.optional("applicationName"))
+            .add(Param.optional("rewriteBatchedInserts"))
+            .add(Param.optional("autoCommit"))
+            .add(Param.optional("connectionTimeout"))
+            .add(Param.optional("idleTimeout"))
+            .add(Param.optional("keepaliveTime"))
+            .add(Param.optional("maxLifetime"))
+            .add(Param.optional("connectionTestQuery"))
+            .add(Param.optional("minimumIdle"))
+            .add(Param.optional("maximumPoolSize"))
+            .add(Param.optional("metricRegistry"))
+            .add(Param.optional("healthCheckRegistry"))
+            .add(Param.optional("poolName"))
+            .add(Param.optional("initializationFailTimeout"))
+            .add(Param.optional("isolateInternalQueries"))
+            .add(Param.optional("allowPoolSuspension"))
+            .add(Param.optional("readOnly"))
+            .add(Param.optional("registerMbeans"))
+            .add(Param.optional("catalog"))
+            .add(Param.optional("connectionInitSql"))
+            .add(Param.optional("driverClassName"))
+            .add(Param.optional("transactionIsolation"))
+            .add(Param.optional("validationTimeout"))
+            .add(Param.optional("leakDetectionThreshold"))
+            .add(Param.optional("dataSource"))
+            .add(Param.optional("schema"))
+            .add(Param.optional("threadFactory"))
+            .add(Param.optional("scheduledExecutor"))
+            .asReadOnly();
+    }
+
+    @Override
+    public void close() {
+        try {
+            this.getConnection().close();
+            this.getHikariDataSource().close(); // also shut down the pool, not just the borrowed connection
+        } catch (Exception e) {
+            logger.warn("auto-closeable jdbc connection threw exception in jdbc space(" + this.spaceName + "): " + e);
+            throw new RuntimeException(e);
+        }
+    }
+}
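Given the config model above, a hypothetical invocation that supplies the required options (url, serverName, databaseName) plus matched credentials against a local, insecure CockroachDB node might look like the following. All values are illustrative; note that sslmode falls back to verify-full when omitted:

```
run driver=jdbc workload=/path/to/workload.yaml \
    url=jdbc:postgresql:/ serverName=localhost portNumber=26257 \
    databaseName=defaultdb user=root password=secret sslmode=disable
```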
diff --git a/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/opdispensers/JDBCDDLOpDispenser.java b/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/opdispensers/JDBCDDLOpDispenser.java
new file mode 100644
index 000000000..6e4697033
--- /dev/null
+++ b/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/opdispensers/JDBCDDLOpDispenser.java
@@ -0,0 +1,51 @@
+package io.nosqlbench.adapter.jdbc.opdispensers;
+
+import io.nosqlbench.adapter.jdbc.JDBCSpace;
+import io.nosqlbench.adapter.jdbc.optypes.JDBCOp;
+import io.nosqlbench.engine.api.activityimpl.BaseOpDispenser;
+import io.nosqlbench.engine.api.activityimpl.uniform.DriverAdapter;
+import io.nosqlbench.engine.api.templating.ParsedOp;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+
+import java.sql.Connection;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.function.LongFunction;
+
+public class JDBCDDLOpDispenser extends BaseOpDispenser<JDBCOp> {
+    private static final Logger logger = LogManager.getLogger(JDBCDDLOpDispenser.class);
+    private final LongFunction<String> targetFunction;
+    private final LongFunction<Connection> connectionLongFunction;
+    private final LongFunction<Statement> statementLongFunction;
+
+    public JDBCDDLOpDispenser(DriverAdapter adapter, LongFunction<Connection> connectionLongFunc, ParsedOp op, LongFunction<String> targetFunction) {
+        super(adapter, op);
+
+        this.connectionLongFunction = connectionLongFunc;
+        this.targetFunction = targetFunction;
+        this.statementLongFunction = createStmtFunc(op);
+    }
+
+    protected LongFunction<Statement> createStmtFunc(ParsedOp cmd) {
+        try {
+            LongFunction<Statement> basefunc = l -> {
+                try {
+                    return this.connectionLongFunction.apply(l).createStatement();
+                } catch (SQLException e) {
+                    throw new RuntimeException(e);
+                }
+            };
+            return basefunc;
+        } catch (Exception ex) {
+            String err_msg = "Error while attempting to create the jdbc statement from the connection";
+            logger.error(err_msg, ex);
+            throw new RuntimeException(err_msg, ex);
+        }
+    }
+
+    @Override
+    public JDBCOp apply(long cycle) {
+        return new JDBCOp(this.connectionLongFunction.apply(cycle), this.statementLongFunction.apply(cycle), targetFunction.apply(cycle));
+    }
+}
diff --git a/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/opdispensers/JDBCQueryOpDispenser.java b/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/opdispensers/JDBCQueryOpDispenser.java
new file mode 100644
index 000000000..98ea715e9
--- /dev/null
+++ b/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/opdispensers/JDBCQueryOpDispenser.java
@@ -0,0 +1,130 @@
+package io.nosqlbench.adapter.jdbc.opdispensers;
+
+import io.nosqlbench.adapter.jdbc.JDBCSpace;
+import io.nosqlbench.adapter.jdbc.optypes.JDBCOp;
+import io.nosqlbench.engine.api.activityimpl.BaseOpDispenser;
+import io.nosqlbench.engine.api.activityimpl.uniform.DriverAdapter;
+import io.nosqlbench.engine.api.templating.ParsedOp;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+
+import javax.sql.DataSource;
+import java.sql.Statement;
+import java.util.function.LongFunction;
+
+public class JDBCQueryOpDispenser extends BaseOpDispenser<JDBCOp> {
+    private final static Logger logger = LogManager.getLogger(JDBCQueryOpDispenser.class);
+    private final DataSource dataSource;
+    private final LongFunction<JDBCOp> jdbcOpLongFunction;
+//    private final LongFunction<String> tableNameFunc;
+    //private final LongFunction<String> targetFunction;
+
+    public JDBCQueryOpDispenser(DriverAdapter adapter, LongFunction<JDBCSpace> jdbcSpaceLongFunction, ParsedOp op/*, LongFunction<String> targetFunction*/) {
+        super(adapter, op);
+        this.jdbcOpLongFunction = getOpFunc(jdbcSpaceLongFunction, op);
+        //this.targetFunction = targetFunction;
+        //TODO -- implement this
+        dataSource = null;
+    }
+
+    public JDBCQueryOpDispenser(DriverAdapter adapter, ParsedOp op) {
+        super(adapter, op);
+        //TODO -- implement this
+        this.jdbcOpLongFunction = null;
+        this.dataSource = null;
+        //this.targetFunction = null;
+    }
+
+    protected LongFunction<Statement> createStmtFunc(ParsedOp cmd) {
+        LongFunction<Statement> basefunc = l -> null; //targetFunction.apply(l));
+        return null;
+    }
+
+    private LongFunction<JDBCOp> getOpFunc(LongFunction<JDBCSpace> jdbcSpaceLongFunction, ParsedOp op) {
+/*
+        LongFunction<HttpRequest.Builder> builderF = l -> HttpRequest.newBuilder();
+        LongFunction<String> bodyF = op.getAsFunctionOr("body", null);
+        LongFunction<HttpRequest.BodyPublisher> bodyPublisherF =
+            l -> Optional.ofNullable(bodyF.apply(l)).map(HttpRequest.BodyPublishers::ofString).orElse(
+                HttpRequest.BodyPublishers.noBody()
+            );
+
+        LongFunction<String> methodF = op.getAsFunctionOr("method", "GET");
+        LongFunction<HttpRequest.Builder> initBuilderF =
+            l -> builderF.apply(l).method(methodF.apply(l), bodyPublisherF.apply(l));
+
+        initBuilderF = op.enhanceFuncOptionally(
+            initBuilderF, "version", String.class,
+            (b, v) -> b.version(HttpClient.Version.valueOf(
+                v.replaceAll("/1.1", "_1_1")
+                 .replaceAll("/2.0", "_2")
+                )
+            )
+        );
+
+        Optional<LongFunction<String>> optionalUriFunc = op.getAsOptionalFunction("uri", String.class);
+        LongFunction<String> urifunc;
+        // Add support for URLENCODE on the uri field if either it statically or dynamically contains the E or URLENCODE pattern,
+        // OR the enable_urlencode op field is set to true.
+        if (optionalUriFunc.isPresent()) {
+            String testUriValue = optionalUriFunc.get().apply(0L);
+            if (HttpFormatParser.URLENCODER_PATTERN.matcher(testUriValue).find()
+                || op.getStaticConfigOr("enable_urlencode", true)) {
+                initBuilderF =
+                    op.enhanceFuncOptionally(
+                        initBuilderF,
+                        "uri",
+                        String.class,
+                        (b, v) -> b.uri(URI.create(HttpFormatParser.rewriteExplicitSections(v)))
+                    );
+            }
+        } else {
+            initBuilderF = op.enhanceFuncOptionally(initBuilderF, "uri", String.class, (b, v) -> b.uri(URI.create(v)));
+        }
+
+        op.getOptionalStaticValue("follow_redirects", boolean.class);
+
+        List<String> headerNames = op.getDefinedNames().stream()
+            .filter(n -> n.charAt(0) >= 'A')
+            .filter(n -> n.charAt(0) <= 'Z')
+            .toList();
+        if (headerNames.size() > 0) {
+            for (String headerName : headerNames) {
+                initBuilderF = op.enhanceFunc(initBuilderF, headerName, String.class, (b, h) -> b.header(headerName, h));
+            }
+        }
+
+        initBuilderF = op.enhanceFuncOptionally(initBuilderF, "timeout", long.class, (b, v) -> b.timeout(Duration.ofMillis(v)));
+
+        LongFunction<HttpRequest.Builder> finalInitBuilderF = initBuilderF;
+        LongFunction<HttpRequest> reqF = l -> finalInitBuilderF.apply(l).build();
+
+        Pattern ok_status = op.getOptionalStaticValue("ok-status", String.class)
+            .map(Pattern::compile)
+            .orElse(Pattern.compile(DEFAULT_OK_STATUS));
+
+        Pattern ok_body = op.getOptionalStaticValue("ok-body", String.class)
+            .map(Pattern::compile)
+            .orElse(null);
+
+        LongFunction<HttpOp> opFunc = cycle -> new HttpOp(
+            jdbcSpaceLongFunction.apply(cycle).getClient(),
+            reqF.apply(cycle),
+            ok_status,
+            ok_body,
+            jdbcSpaceLongFunction.apply(cycle), cycle
+        );
+*/
+        //return null;
+        LongFunction<JDBCOp> jdbcOpLongFunction = cycle -> new JDBCOp(jdbcSpaceLongFunction.apply(cycle), "DUMMY_STRINGcycle");
+        return jdbcOpLongFunction;
+    }
+
+    @Override
+    public JDBCOp apply(long value) {
+        JDBCOp op = this.jdbcOpLongFunction.apply(value);
+        return op;
+    }
+}
diff --git a/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/optypes/JDBCOp.java b/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/optypes/JDBCOp.java
new file mode 100644
index 000000000..7a034bd73
--- /dev/null
+++ b/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/optypes/JDBCOp.java
@@ -0,0 +1,601 @@
+package io.nosqlbench.adapter.jdbc.optypes;
+
+import io.nosqlbench.adapter.jdbc.JDBCSpace;
+import io.nosqlbench.engine.api.activityimpl.uniform.flowtypes.RunnableOp;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+
+import javax.sql.DataSource;
+import java.sql.Connection;
+import java.sql.SQLException;
+import java.sql.Statement;
+
+/*
+ * @see HikariCP connection pooling for details.
+ */
+public /*abstract*/ class JDBCOp implements RunnableOp /*CycleOp*/ {
+    private final static Logger logger = LogManager.getLogger(JDBCOp.class);
+    protected DataSource dataSource;
+    private final JDBCSpace jdbcSpace;
+    private final String targetStatement;
+    private final Connection connection;
+    private final Statement statement;
+
+    public String getTargetStatement() {
+        return targetStatement;
+    }
+
+    public DataSource getDataSource() {
+        return dataSource;
+    }
+
+    public JDBCSpace getJdbcSpace() {
+        return jdbcSpace;
+    }
+
+    public Connection getConnection() {
+        return connection;
+    }
+
+    public Statement getStatement() {
+        return statement;
+    }
+
+    /**
+     * Unused.
+     * @param jdbcSpace the space holding the pooled connection
+     * @param targetStatement the SQL text to execute
+     */
+    public JDBCOp(JDBCSpace jdbcSpace, String targetStatement) {
+        //TODO - implement code
+        //this.dataSource = new HikariDataSource();
+        this.jdbcSpace = jdbcSpace;
+        this.targetStatement = targetStatement;
+        this.connection = null;
+        this.statement = null;
+    }
+
+    /**
+     * @param connection an open JDBC connection borrowed from the space
+     * @param statement the statement object used to execute the SQL text
+     * @param targetStatement the SQL text to execute
+     */
+    public JDBCOp(Connection connection, Statement statement, String targetStatement) {
+        this.connection = connection;
+        this.statement = statement;
+        this.targetStatement = targetStatement;
+        this.jdbcSpace = null;
+    }
+
+    @Override
+    public void run() {
+        logger.debug(() -> "Executing JDBCOp for the given cycle.");
+        try {
+            this.getConnection().setAutoCommit(false);
+            if (logger.isDebugEnabled()) {
+                logger.debug(() -> "JDBC Query is: " + this.getTargetStatement());
+            }
+            this.getStatement().execute(this.getTargetStatement());
+            this.getConnection().commit();
+            logger.debug(() -> "Executed the JDBC statement & committed the connection successfully");
+        } catch (SQLException sqlException) {
+            logger.error("JDBCOp ERROR: { state => {}, cause => {}, message => {} }",
+                sqlException.getSQLState(), sqlException.getCause(), sqlException.getMessage(), sqlException);
+            throw new RuntimeException(sqlException);
+        } catch (Exception ex) {
+            String exMsg = String.format("Exception while attempting to run the jdbc statement %s", getStatement());
+            logger.error(exMsg, ex);
+            throw new RuntimeException(exMsg, ex);
+        }
+
+        /*
+        //TODO - implement code
+        dataSource = jdbcSpace.getHikariDataSource();
+        if (null != dataSource) {
+            try (Connection connection = dataSource.getConnection()) {
+                connection.setAutoCommit(false);
+
+                Statement stmt = connection.createStatement();
+                stmt.execute("DROP TABLE IF EXISTS accounts");
+                if (null != stmt) {
+                    stmt.close();
+                }
+                connection.commit();
+                logger.info(() -> "Executed the JDBC statement & committed the connection successfully");
+            } catch (SQLException sqlException) {
+                logger.error("JDBCOp ERROR: { state => {}, cause => {}, message => {} }",
+                    sqlException.getSQLState(), sqlException.getCause(), sqlException.getMessage(), sqlException);
+                throw new RuntimeException(sqlException);
+            } catch (Exception exception) {
+                logger.error("Exception while executing JDBCOp", exception);
+                throw new RuntimeException(exception);
+            } finally {
+                dataSource = null;
+            }
+        } else {
+            logger.error(() -> "Datasource is found to be null. Exiting operation.");
+        }
+        */
+    }
+}
+
+/**
+ * References:
+ * https://docs.oracle.com/javase/tutorial/jdbc/basics/gettingstarted.html
+ * https://docs.oracle.com/javase/9/docs/api/java/sql/package-summary.html
+ * https://jdbc.postgresql.org/documentation/query/
+ * https://www.cockroachlabs.com/docs/v22.2/connection-pooling.html
+ * https://www.cockroachlabs.com/docs/v22.2/connection-parameters#supported-options-parameters
+ * https://www.cockroachlabs.com/docs/v22.2/sql-statements.html#query-management-statements
+ * https://docs.yugabyte.com/preview/drivers-orms/java/yugabyte-jdbc/
+ * @author Created by Madhavan Sridharan on Jan 21, 2023 6:26:09 PM.
+ +package com.jdbc.example.postgresql; + + import java.math.BigDecimal; + import java.sql.Connection; + import java.sql.PreparedStatement; + import java.sql.ResultSet; + import java.sql.ResultSetMetaData; + import java.sql.SQLException; + import java.sql.Statement; + import java.time.LocalTime; + import java.util.HashMap; + import java.util.Map; + import java.util.Random; + import java.util.UUID; + + import javax.sql.DataSource; + + import org.postgresql.ds.PGSimpleDataSource; + + + * @author madhavan.sridharan + * + +public class crdb { + public static void main(String... args) { + PGSimpleDataSource ds = new PGSimpleDataSource(); + //ds.setUrl("jdbc:postgresql://insectdb0-3153.g8z.cockroachlabs.cloud:26257/defaultdb?sslmode=verify-full"); + ds.setUrl("jdbc:postgresql://insectdb0-3153.g8z.cockroachlabs.cloud:26257/defaultdb?sslmode=verify-ca"); + ds.setUser(System.getProperty("CRDB_USER")); + ds.setPassword(System.getProperty("CRDB_PSWD")); + ds.setSslRootCert(System.getProperty("CRDB_SSLCERT")); + + try (Connection con = ds.getConnection()) { + + } catch (SQLException e) { + // TODO Auto-generated catch block + e.printStackTrace(); + } + + // Create DAO. + BasicExampleDAO dao = new BasicExampleDAO(ds); + + // Test our retry handling logic if FORCE_RETRY is true. This + // method is only used to test the retry logic. It is not + // necessary in production code. + dao.testRetryHandling(); + + dao.dropAccountTable(); + dao.createAccountTable(); + + // Insert a few accounts "by hand", using INSERTs on the backend. + Map balances = new HashMap<>(); + UUID id1 = UUID.randomUUID(); + UUID id2 = UUID.randomUUID(); + balances.put(id1.toString(), "1000"); + balances.put(id2.toString(), "250"); + int updatedAccounts = dao.updateAccounts(balances); + System.out.printf("BasicExampleDAO.updateAccounts:\n => %s total updated accounts\n", updatedAccounts); + + // How much money is in these accounts? + BigDecimal balance1 = dao.getAccountBalance(id1); + BigDecimal balance2 = dao.getAccountBalance(id2); + System.out.printf("main:\n => Account balances at time '%s':\n ID %s => $%s\n ID %s => $%s\n", + LocalTime.now(), 1, balance1, 2, balance2); + + // Transfer $100 from account 1 to account 2 + UUID fromAccount = UUID.randomUUID(); + UUID toAccount = UUID.randomUUID(); + BigDecimal transferAmount = BigDecimal.valueOf(100); + int transferredAccounts = dao.transferFunds(fromAccount, toAccount, transferAmount); + if (transferredAccounts != -1) { + System.out.printf( + "BasicExampleDAO.transferFunds:\n => $%s transferred between accounts %s and %s, %s rows updated\n", + transferAmount, fromAccount, toAccount, transferredAccounts); + } + + balance1 = dao.getAccountBalance(id1); + balance2 = dao.getAccountBalance(id2); + System.out.printf("main:\n => Account balances at time '%s':\n ID %s => $%s\n ID %s => $%s\n", + LocalTime.now(), 1, balance1, 2, balance2); + + // Bulk insertion example using JDBC's batching support. + int totalRowsInserted = dao.bulkInsertRandomAccountData(); + System.out.printf("\nBasicExampleDAO.bulkInsertRandomAccountData:\n => finished, %s total rows inserted\n", + totalRowsInserted); + + // Print out 10 account values. + int accountsRead = dao.readAccounts(10); + } +} + +* + * Data access object used by 'BasicExample'. 
Abstraction over some common + * CockroachDB operations, including: + * + * - Auto-handling transaction retries in the 'runSQL' method + * + * - Example of bulk inserts in the 'bulkInsertRandomAccountData' method + + +class BasicExampleDAO { + + private static final int MAX_RETRY_COUNT = 3; + private static final String RETRY_SQL_STATE = "40001"; + private static final boolean FORCE_RETRY = false; + + private final DataSource ds; + + private final Random rand = new Random(); + + BasicExampleDAO(DataSource ds) { + this.ds = ds; + } + + * + * Used to test the retry logic in 'runSQL'. It is not necessary in production + * code. + + void testRetryHandling() { + if (BasicExampleDAO.FORCE_RETRY) { + runSQL("SELECT crdb_internal.force_retry('1s':::INTERVAL)"); + } + } + + * + * Run SQL code in a way that automatically handles the transaction retry logic + * so we don't have to duplicate it in various places. + * + * @param sqlCode a String containing the SQL code you want to execute. Can have + * placeholders, e.g., "INSERT INTO accounts (id, balance) VALUES + * (?, ?)". + * + * @param args String Varargs to fill in the SQL code's placeholders. + * @return Integer Number of rows updated, or -1 if an error is thrown. + + public Integer runSQL(String sqlCode, String... args) { + + // This block is only used to emit class and method names in + // the program output. It is not necessary in production + // code. + StackTraceElement[] stacktrace = Thread.currentThread().getStackTrace(); + StackTraceElement elem = stacktrace[2]; + String callerClass = elem.getClassName(); + String callerMethod = elem.getMethodName(); + + int rv = 0; + + try (Connection connection = ds.getConnection()) { + + // We're managing the commit lifecycle ourselves so we can + // automatically issue transaction retries. + connection.setAutoCommit(false); + + int retryCount = 0; + + while (retryCount <= MAX_RETRY_COUNT) { + + if (retryCount == MAX_RETRY_COUNT) { + String err = String.format("hit max of %s retries, aborting", MAX_RETRY_COUNT); + throw new RuntimeException(err); + } + + // This block is only used to test the retry logic. + // It is not necessary in production code. See also + // the method 'testRetryHandling()'. + if (FORCE_RETRY) { + forceRetry(connection); // SELECT 1 + } + + try (PreparedStatement pstmt = connection.prepareStatement(sqlCode)) { + + // Loop over the args and insert them into the + // prepared statement based on their types. In + // this simple example we classify the argument + // types as "integers" and "everything else" + // (a.k.a. strings). + for (int i = 0; i < args.length; i++) { + int place = i + 1; + String arg = args[i]; + + try { + int val = Integer.parseInt(arg); + pstmt.setInt(place, val); + } catch (NumberFormatException e) { + pstmt.setString(place, arg); + } + } + + if (pstmt.execute()) { + // We know that `pstmt.getResultSet()` will + // not return `null` if `pstmt.execute()` was + // true + ResultSet rs = pstmt.getResultSet(); + ResultSetMetaData rsmeta = rs.getMetaData(); + int colCount = rsmeta.getColumnCount(); + + // This printed output is for debugging and/or demonstration + // purposes only. It would not be necessary in production code. 
+ System.out.printf("\n%s.%s:\n '%s'\n", callerClass, callerMethod, pstmt); + + while (rs.next()) { + for (int i = 1; i <= colCount; i++) { + String name = rsmeta.getColumnName(i); + String type = rsmeta.getColumnTypeName(i); + + // In this "bank account" example we know we are only handling + // integer values (technically 64-bit INT8s, the CockroachDB + // default). This code could be made into a switch statement + // to handle the various SQL types needed by the application. + if ("int8".equals(type)) { + int val = rs.getInt(name); + + // This printed output is for debugging and/or demonstration + // purposes only. It would not be necessary in production code. + System.out.printf(" %-8s => %10s\n", name, val); + } + } + } + } else { + int updateCount = pstmt.getUpdateCount(); + rv += updateCount; + + // This printed output is for debugging and/or demonstration + // purposes only. It would not be necessary in production code. + System.out.printf("\n%s.%s:\n '%s'\n", callerClass, callerMethod, pstmt); + } + + connection.commit(); + break; + + } catch (SQLException e) { + + if (RETRY_SQL_STATE.equals(e.getSQLState())) { + // Since this is a transaction retry error, we + // roll back the transaction and sleep a + // little before trying again. Each time + // through the loop we sleep for a little + // longer than the last time + // (A.K.A. exponential backoff). + System.out.printf( + "retryable exception occurred:\n sql state = [%s]\n message = [%s]\n retry counter = %s\n", + e.getSQLState(), e.getMessage(), retryCount); + connection.rollback(); + retryCount++; + int sleepMillis = (int) (Math.pow(2, retryCount) * 100) + rand.nextInt(100); + System.out.printf("Hit 40001 transaction retry error, sleeping %s milliseconds\n", sleepMillis); + try { + Thread.sleep(sleepMillis); + } catch (InterruptedException ignored) { + // Necessary to allow the Thread.sleep() + // above so the retry loop can continue. + } + + rv = -1; + } else { + rv = -1; + throw e; + } + } + } + } catch (SQLException e) { + System.out.printf("BasicExampleDAO.runSQL ERROR: { state => %s, cause => %s, message => %s }\n", + e.getSQLState(), e.getCause(), e.getMessage()); + rv = -1; + } + + return rv; + } + + * + * Helper method called by 'testRetryHandling'. It simply issues a "SELECT 1" + * inside the transaction to force a retry. This is necessary to take the + * connection's session out of the AutoRetry state, since otherwise the other + * statements in the session will be retried automatically, and the client (us) + * will not see a retry error. Note that this information is taken from the + * following test: + * https://github.com/cockroachdb/cockroach/blob/master/pkg/sql/logictest/testdata/logic_test/manual_retry + * + * @param connection Connection + + private void forceRetry(Connection connection) throws SQLException { + try (PreparedStatement statement = connection.prepareStatement("SELECT 1")) { + statement.executeQuery(); + } + } + + public void dropAccountTable() { + try (Connection connection = ds.getConnection()) { + connection.setAutoCommit(false); + + // Check the current balance. 
+ Statement stmt = connection.createStatement(); + boolean executed = stmt + .execute("DROP TABLE IF EXISTS accounts"); + if(null != stmt) {stmt.close();} + connection.commit(); + System.out.printf("DROPPED accounts table successfully\n"); + } catch (SQLException e) { + System.out.printf("BasicExampleDAO.deleteAccountTable ERROR: { state => %s, cause => %s, message => %s }\n", + e.getSQLState(), e.getCause(), e.getMessage()); + } + } + + public void createAccountTable() { + try (Connection connection = ds.getConnection()) { + // Check the current balance. + connection.createStatement() + .execute("CREATE TABLE IF NOT EXISTS accounts (id UUID PRIMARY KEY, balance int8)"); + + System.out.printf("Created accounts table successfully\n"); + } catch (SQLException e) { + System.out.printf("BasicExampleDAO.createAccountTable ERROR: { state => %s, cause => %s, message => %s }\n", + e.getSQLState(), e.getCause(), e.getMessage()); + } + + } + + * + * Update accounts by passing in a Map of (ID, Balance) pairs. + * + * @param accounts (Map) + * @return The number of updated accounts (int) + + public int updateAccounts(Map accounts) { + int rows = 0; + for (Map.Entry account : accounts.entrySet()) { + + String k = account.getKey(); + String v = account.getValue(); + + String[] args = { k, v }; + rows += runSQL("INSERT INTO accounts (id, balance) VALUES (?, ?)", args); + } + return rows; + } + + + * Transfer funds between one account and another. Handles transaction retries + * in case of conflict automatically on the backend. + * + * @param fromId (UUID) + * @param toId (UUID) + * @param amount (int) + * @return The number of updated accounts (int) + + public int transferFunds(UUID fromId, UUID toId, BigDecimal amount) { + String sFromId = fromId.toString(); + String sToId = toId.toString(); + String sAmount = amount.toPlainString(); + + // We have omitted explicit BEGIN/COMMIT statements for + // brevity. Individual statements are treated as implicit + // transactions by CockroachDB (see + // https://www.cockroachlabs.com/docs/stable/transactions.html#individual-statements). + + String sqlCode = "UPSERT INTO accounts (id, balance) VALUES" + + "(?, ((SELECT balance FROM accounts WHERE id = ?) - ?))," + + "(?, ((SELECT balance FROM accounts WHERE id = ?) + ?))"; + + return runSQL(sqlCode, sFromId, sFromId, sAmount, sToId, sToId, sAmount); + } + + + * Get the account balance for one account. + * + * We skip using the retry logic in 'runSQL()' here for the following reasons: + * + * 1. Since this is a single read ("SELECT"), we don't expect any transaction + * conflicts to handle + * + * 2. We need to return the balance as an integer + * + * @param id (UUID) + * @return balance (int) + + public BigDecimal getAccountBalance(UUID id) { + BigDecimal balance = BigDecimal.valueOf(0); + + try (Connection connection = ds.getConnection()) { + + // Check the current balance. + ResultSet res = connection.createStatement() + .executeQuery(String.format("SELECT balance FROM accounts WHERE id = '%s'", id.toString())); + if (!res.next()) { + System.out.printf("No users in the table with id %d", id); + } else { + balance = res.getBigDecimal("balance"); + } + } catch (SQLException e) { + System.out.printf("BasicExampleDAO.getAccountBalance ERROR: { state => %s, cause => %s, message => %s }\n", + e.getSQLState(), e.getCause(), e.getMessage()); + } + + return balance; + } + + + * Insert randomized account data (ID, balance) using the JDBC fast path for + * bulk inserts. 
The fastest way to get data into CockroachDB is the IMPORT + * statement. However, if you must bulk ingest from the application using INSERT + * statements, the best option is the method shown here. It will require the + * following: + * + * 1. Add `rewriteBatchedInserts=true` to your JDBC connection settings (see the + * connection info in 'BasicExample.main'). + * + * 2. Inserting in batches of 128 rows, as used inside this method (see + * BATCH_SIZE), since the PGJDBC driver's logic works best with powers of two, + * such that a batch of size 128 can be 6x faster than a batch of size 250. + * + * @return The number of new accounts inserted (int) + + public int bulkInsertRandomAccountData() { + + Random random = new Random(); + int BATCH_SIZE = 128; + int totalNewAccounts = 0; + + try (Connection connection = ds.getConnection()) { + + // We're managing the commit lifecycle ourselves so we can + // control the size of our batch inserts. + connection.setAutoCommit(false); + + // In this example we are adding 500 rows to the database, + // but it could be any number. What's important is that + // the batch size is 128. + try (PreparedStatement pstmt = connection + .prepareStatement("INSERT INTO accounts (id, balance) VALUES (?, ?)")) { + for (int i = 0; i <= (500 / BATCH_SIZE); i++) { + for (int j = 0; j < BATCH_SIZE; j++) { + String id = UUID.randomUUID().toString(); + BigDecimal balance = BigDecimal.valueOf(random.nextInt(1000000000)); + pstmt.setString(1, id); + pstmt.setBigDecimal(2, balance); + pstmt.addBatch(); + } + int[] count = pstmt.executeBatch(); + totalNewAccounts += count.length; + System.out.printf("\nBasicExampleDAO.bulkInsertRandomAccountData:\n '%s'\n", pstmt.toString()); + System.out.printf(" => %s row(s) updated in this batch\n", count.length); + } + connection.commit(); + } catch (SQLException e) { + System.out.printf( + "BasicExampleDAO.bulkInsertRandomAccountData ERROR: { state => %s, cause => %s, message => %s }\n", + e.getSQLState(), e.getCause(), e.getMessage()); + } + } catch (SQLException e) { + System.out.printf( + "BasicExampleDAO.bulkInsertRandomAccountData ERROR: { state => %s, cause => %s, message => %s }\n", + e.getSQLState(), e.getCause(), e.getMessage()); + } + return totalNewAccounts; + } + + + * Read out a subset of accounts from the data store. 
+     *
+     * @param limit (int)
+     * @return Number of accounts read (int)
+
+    public int readAccounts(int limit) {
+        return runSQL("SELECT id, balance FROM accounts LIMIT ?", Integer.toString(limit));
+    }
+}
+ */
diff --git a/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/optypes/SelectJDBCOp.java b/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/optypes/SelectJDBCOp.java
new file mode 100644
index 000000000..67e5b6a05
--- /dev/null
+++ b/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/optypes/SelectJDBCOp.java
@@ -0,0 +1,19 @@
+package io.nosqlbench.adapter.jdbc.optypes;
+
+import io.nosqlbench.adapter.jdbc.JDBCSpace;
+import io.nosqlbench.engine.api.activityimpl.uniform.flowtypes.RunnableOp;
+
+public class SelectJDBCOp implements RunnableOp {
+    public SelectJDBCOp(JDBCSpace jdbcSpace, long cycle) {
+        super();
+    }
+
+    public SelectJDBCOp() {
+
+    }
+
+    @Override
+    public void run() {
+        // TODO: implement the SELECT (executeQuery) op
+    }
+}
diff --git a/adapter-jdbc/src/main/resources/jdbc.md b/adapter-jdbc/src/main/resources/jdbc.md
new file mode 100644
index 000000000..655c6a5f0
--- /dev/null
+++ b/adapter-jdbc/src/main/resources/jdbc.md
@@ -0,0 +1,28 @@
+---
+weight: 0
+title: jdbc
+---
+
+# JDBC driver
+This driver uses JDBC with HikariCP connection pooling and works with PostgreSQL. It enables NoSQLBench-based workload generation and performance testing against any PostgreSQL-compatible database cluster, for example CockroachDB or YugabyteDB (YSQL API).
+
+# Executing a JDBC Workload
+The following is an example of invoking a JDBC workload.
+```
+run driver=jdbc workload=/path/to/workload.yaml cycles=1000 threads=100 ...
+```
+In the above NB command, the following are JDBC driver-specific parameters:
+* take1
+* take2
+
+Other NB engine parameters are straightforward:
+* `driver`: must be `jdbc`.
+* `threads`: depending on the workload type, the NB thread count determines how many clients are created. All clients share connections obtained from the HikariCP connection pool.
+* `*.yaml`: the NB jdbc scenario definition workload yaml file.
+
+# Configuration
+## Config Sources
+### Examples
+An example workload is sketched below, at the end of this patch.
+
diff --git a/nb5/pom.xml b/nb5/pom.xml
index 5a047a2fc..0f1bac48f 100644
--- a/nb5/pom.xml
+++ b/nb5/pom.xml
@@ -133,8 +133,8 @@
         <dependency>
             <groupId>io.nosqlbench</groupId>
-            <artifactId>adapter-cockroachdb</artifactId>
-            <version>4.17.32-SNAPSHOT</version>
+            <artifactId>adapter-jdbc</artifactId>
+            <version>5.17.1-SNAPSHOT</version>
         </dependency>
diff --git a/pom.xml b/pom.xml
index ef8be2c93..be0d79f04 100644
--- a/pom.xml
+++ b/pom.xml
@@ -61,6 +61,7 @@
         <module>adapter-pulsar</module>
         <module>adapter-s4j</module>
         <module>adapter-kafka</module>
+        <module>adapter-jdbc</module>

         <module>virtdata-api</module>
@@ -102,7 +103,7 @@
                 <module>adapter-pulsar</module>
                 <module>adapter-s4j</module>
                 <module>adapter-kafka</module>
-                <module>adapter-cockroachdb</module>
+                <module>adapter-jdbc</module>

                 <module>virtdata-api</module>
@@ -234,6 +235,9 @@
+
+
+
@@ -322,6 +326,9 @@
+
+
+
@@ -368,6 +375,7 @@
+
@@ -404,7 +412,7 @@
             <plugin>
                 <groupId>org.apache.maven.plugins</groupId>
                 <artifactId>maven-project-info-reports-plugin</artifactId>
-                <version>3.4.1</version>
+                <version>3.4.2</version>
@@ -422,6 +430,12 @@
             <organization>nosqlbench.io</organization>
             <organizationUrl>http://nosqlbench.io/</organizationUrl>
         </developer>
+        <developer>
+            <name>Madhavan S</name>
+            <email>madhavan_5k@yahoo.com</email>
+            <organization>nosqlbench.io</organization>
+            <organizationUrl>http://nosqlbench.io/</organizationUrl>
+        </developer>
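To fill in the shape that the empty Examples section of jdbc.md points at, here is a minimal workload sketch. The `type` and `stmt` op fields follow what JDBCOpMapper reads via getTypeAndTarget(JDBCOpType.class, String.class, "type", "stmt"); the scenario/block layout follows general NB5 workload conventions, and the table name and statements are hypothetical:

```yaml
# A sketch only: op fields per JDBCOpMapper; table and DDL are illustrative.
scenarios:
  default:
    schema: run driver=jdbc tags==block:schema threads==1 cycles==UNDEF
blocks:
  schema:
    ops:
      drop-table:
        type: ddl
        stmt: DROP TABLE IF EXISTS accounts
      create-table:
        type: ddl
        stmt: CREATE TABLE IF NOT EXISTS accounts (id UUID PRIMARY KEY, balance INT8)
```

Only the ddl/create/drop path is wired up in this patch (select and update dispensers are still TODO), so a sketch like this exercises exactly the code paths the JDBCDDLOpDispenser implements.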