diff --git a/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/JDBCOpMapper.java b/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/JDBCOpMapper.java index e6c05b6e4..1ea98ef96 100644 --- a/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/JDBCOpMapper.java +++ b/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/JDBCOpMapper.java @@ -17,8 +17,6 @@ package io.nosqlbench.adapter.jdbc; import io.nosqlbench.adapter.jdbc.opdispensers.JDBCExecuteOpDispenser; -import io.nosqlbench.adapter.jdbc.opdispensers.JDBCExecuteQueryOpDispenser; -import io.nosqlbench.adapter.jdbc.opdispensers.JDBCExecuteUpdateOpDispenser; import io.nosqlbench.adapter.jdbc.optypes.JDBCOp; import io.nosqlbench.api.config.standard.NBConfiguration; import io.nosqlbench.engine.api.activityimpl.OpDispenser; @@ -51,13 +49,10 @@ public class JDBCOpMapper implements OpMapper { LongFunction spaceNameF = op.getAsFunctionOr("space", "default"); LongFunction spaceFunc = l -> spaceCache.get(spaceNameF.apply(l)); - // Since the only needed thing in the JDBCSpace is the connection, we can short-circuit + // Since the only needed thing in the JDBCSpace is the Connection, we can short-circuit // to it here instead of stepping down from the cycle to the space to the connection. LongFunction connectionLongFunc = l -> spaceCache.get(spaceNameF.apply(l)).getConnection(); - - //return new JDBCExecuteQueryOpDispenser(adapter, spaceFunc, op);working - /* * If the user provides a body element, then they want to provide the JSON or * a data structure that can be converted into JSON, bypassing any further @@ -65,26 +60,22 @@ public class JDBCOpMapper implements OpMapper { */ if (op.isDefined("body")) { throw new RuntimeException("This mode is reserved for later. Do not use the 'body' op field."); - } - else { + } else { TypeAndTarget opType = op.getTypeAndTarget(JDBCOpType.class, String.class, "type", "stmt"); logger.info(() -> "Using " + opType.enumId + " statement form for '" + op.getName()); - //return new JDBCExecuteQueryOpDispenser(adapter, spaceFunc, op/*, opType.targetFunction*/); return switch (opType.enumId) { // SELECT uses 'executeQuery' and returns a 'ResultSet' // https://jdbc.postgresql.org/documentation/query/#example51processing-a-simple-query-in-jdbc - case executeQuery, select -> new JDBCExecuteQueryOpDispenser(adapter, connectionLongFunc, op, opType.targetFunction); // INSERT|UPDATE|DELETE uses 'executeUpdate' and returns an 'int' // https://jdbc.postgresql.org/documentation/query/#performing-updates - case dml, executeUpdate, update, insert, delete -> new JDBCExecuteUpdateOpDispenser(adapter, connectionLongFunc, op, opType.targetFunction); // CREATE|DROP TABLE|VIEW uses 'execute' (as opposed to 'executeQuery' which returns a 'ResultSet') // https://jdbc.postgresql.org/documentation/query/#example54dropping-a-table-in-jdbc - case ddl, create, drop, execute -> new JDBCExecuteOpDispenser(adapter, connectionLongFunc, op, opType.targetFunction)/*.apply(op)*/; + case jdbcQuery -> new JDBCExecuteOpDispenser(adapter, connectionLongFunc, op, opType.targetFunction); }; } } diff --git a/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/JDBCOpType.java b/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/JDBCOpType.java index b730f0224..99c361a63 100644 --- a/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/JDBCOpType.java +++ b/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/JDBCOpType.java @@ -17,22 +17,13 @@ package io.nosqlbench.adapter.jdbc; /** - * Op templates which are supported by the NoSQLBench 
CockroachDB driver are + * Op templates which are supported by the NoSQLBench JDBC driver are * enumerated below. These command names should mirror those in the official - * CockroachDB API exactly. See the official API for more details. + * CockroachDB API exactly, as an example. See the official API for more details. + * * @see CockroachDB API Reference */ public enum JDBCOpType { //See https://jdbc.postgresql.org/documentation/query/ - executeQuery, // used for SELECT operation matches executeQuery - executeUpdate, // used for performing updates such as INSERT/UPDATE/DELETE matches executeUpdate - execute, // used for creating/modifying database objects matches execute - create, // used for DDL - CREATE operation using 'execute' - drop, // used for DDL - DROP operation using 'execute' - insert, // used for DML - INSERT operation using 'executeUpdate' - update, // used for DML - UPDATE operation using 'executeUpdate' - delete, // used for DML - DELETE operation using 'executeUpdate' - select, // used for DML - SELECT operation using 'executeQuery' - dml, // used for DML operations like SELECT|INSERT|UPDATE|DELETE leveraging `executeUpdate` & `executeQuery` - ddl, // used for DDL operations like CREATE|DROP DATABASE|TABLE leveraging `execute` + jdbcQuery, } diff --git a/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/JDBCSpace.java b/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/JDBCSpace.java index 551ed75ef..242110469 100644 --- a/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/JDBCSpace.java +++ b/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/JDBCSpace.java @@ -25,16 +25,13 @@ import io.nosqlbench.api.config.standard.Param; import io.nosqlbench.api.errors.OpConfigError; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.postgresql.ds.PGSimpleDataSource; -import javax.sql.DataSource; import java.sql.Connection; import java.util.Optional; public class JDBCSpace implements AutoCloseable { private final static Logger logger = LogManager.getLogger(JDBCSpace.class); private final String spaceName; - private DataSource ds; private HikariConfig hikariConfig; private HikariDataSource hikariDataSource; private Connection connection; @@ -53,111 +50,90 @@ public class JDBCSpace implements AutoCloseable { } private HikariDataSource createClient(NBConfiguration cfg) { - PGSimpleDataSource ds = new PGSimpleDataSource(); hikariConfig = new HikariConfig(); Optional url = cfg.getOptional("url"); - if(url.isEmpty()) { + if (url.isEmpty()) { throw new OpConfigError("url option is required."); } else { - ds.setURL(url.get()); hikariConfig.setJdbcUrl(url.get()); } Optional serverNames = cfg.getOptional("serverName"); - if(serverNames.isPresent()) { - ds.setServerNames(new String[]{serverNames.get()}); - //hds.setServerNames(new String[] {serverNames.get()}); + if (serverNames.isPresent()) { hikariConfig.addDataSourceProperty("serverName", serverNames.get()); } else { throw new OpConfigError("Server name option is required."); } Optional databaseName = cfg.getOptional("databaseName"); - if(databaseName.isPresent()) { - ds.setDatabaseName(databaseName.get()); + if (databaseName.isPresent()) { hikariConfig.addDataSourceProperty("databaseName", databaseName.get()); } else { throw new OpConfigError("Database name option is required."); } - //Optional portNumber = cfg.getOptional(Integer.class, "portNumber"); int portNumber = Integer.parseInt(cfg.getOptional("portNumber").orElse("26257")); - //ds.setPortNumbers(new int[] { 
portNumber.orElse(26257) }); - ds.setPortNumbers(new int[] { portNumber }); hikariConfig.addDataSourceProperty("portNumber", portNumber); Optional user = cfg.getOptional("user"); - if(user.isPresent()) { - ds.setUser(user.get()); + if (user.isPresent()) { hikariConfig.setUsername(user.get()); } Optional password = cfg.getOptional("password"); - if(password.isPresent()) { - if(user.isEmpty()) { + if (password.isPresent()) { + if (user.isEmpty()) { throw new OpConfigError("Both user and password options are required. Only password is supplied in this case."); } - ds.setPassword(password.get()); hikariConfig.setPassword(password.get()); } else { - if(user.isPresent()) { + if (user.isPresent()) { throw new OpConfigError("Both user and password options are required. Only user is supplied in this case."); + // Maybe simply ignore user and move on as opposed to throwing an error? + //logger.warn(() -> "Both user and password options are required. Only user is supplied in this case and it will be ignored."); } } - Optional ssl = cfg.getOptional(Boolean.class,"ssl"); - if(ssl.isPresent()) { - ds.setSsl(ssl.get()); + Optional ssl = cfg.getOptional(Boolean.class, "ssl"); + if (ssl.isPresent()) { hikariConfig.addDataSourceProperty("ssl", ssl.get()); } else { - ds.setSsl(false); hikariConfig.addDataSourceProperty("ssl", false); } Optional sslMode = cfg.getOptional("sslmode"); - if(sslMode.isPresent()) { - ds.setSslMode(sslMode.get()); + if (sslMode.isPresent()) { hikariConfig.addDataSourceProperty("sslmode", sslMode.get()); } else { - ds.setSslMode("verify-full"); hikariConfig.addDataSourceProperty("sslmode", "verify-full"); } Optional sslCert = cfg.getOptional("sslcert"); - if(sslCert.isPresent()) { - ds.setSslcert(sslCert.get()); + if (sslCert.isPresent()) { hikariConfig.addDataSourceProperty("sslcert", sslCert.get()); } /*else if(sslMode.isPresent() && (!"disable".equalsIgnoreCase(sslMode.get()) || !"allow".equalsIgnoreCase(sslMode.get())) || !"prefer".equalsIgnoreCase(sslMode.get())) { throw new OpConfigError("When sslmode is true, sslcert should be provided."); }*/ Optional sslRootCert = cfg.getOptional("sslrootcert"); - if(sslRootCert.isPresent()) { - ds.setSslRootCert(sslRootCert.get()); + if (sslRootCert.isPresent()) { hikariConfig.addDataSourceProperty("sslrootcert", sslRootCert.get()); } Optional applicationName = cfg.getOptional("applicationName"); - if(applicationName.isPresent()) { - ds.setApplicationName(applicationName.get()); - hikariConfig.addDataSourceProperty("applicationName", applicationName.orElse("NoSQLBench CRDB")); - } else { - ds.setApplicationName("NoSQLBench CRDB"); - hikariConfig.addDataSourceProperty("applicationName", "NoSQLBench CRDB"); - } + hikariConfig.addDataSourceProperty("applicationName", applicationName.orElse("NoSQLBench")); + Optional rewriteBatchedInserts = cfg.getOptional(Boolean.class, "rewriteBatchedInserts"); - ds.setReWriteBatchedInserts(rewriteBatchedInserts.orElse(true)); hikariConfig.addDataSourceProperty("rewriteBatchedInserts", rewriteBatchedInserts.orElse(true)); + //Maybe always disable auto-commit since we manage ourselves? 
Optional autoCommit = cfg.getOptional(Boolean.class, "autoCommit"); hikariConfig.setAutoCommit(autoCommit.orElse(false)); - //Optional maximumPoolSize = cfg.getOptional(Integer.class,"maximumPoolSize"); int maximumPoolSize = Integer.parseInt(cfg.getOptional("maximumPoolSize").orElse("40")); hikariConfig.setMaximumPoolSize(maximumPoolSize); - //Optional keepaliveTime = cfg.getOptional(Integer.class,"keepaliveTime"); int keepaliveTime = Integer.parseInt(cfg.getOptional("keepaliveTime").orElse("150000")); hikariConfig.setKeepaliveTime(keepaliveTime); @@ -165,7 +141,7 @@ public class JDBCSpace implements AutoCloseable { try { this.connection = hds.getConnection(); } catch (Exception ex) { - String exp = "Exception occurred while attempting to create a connection using the Hikari Data Source"; + String exp = "Exception occurred while attempting to create a connection using the HikariDataSource"; logger.error(exp, ex); throw new RuntimeException(exp, ex); } diff --git a/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/opdispensers/JDBCExecuteOpDispenser.java b/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/opdispensers/JDBCExecuteOpDispenser.java index 6175c60a5..cc3873f9c 100644 --- a/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/opdispensers/JDBCExecuteOpDispenser.java +++ b/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/opdispensers/JDBCExecuteOpDispenser.java @@ -54,7 +54,7 @@ public class JDBCExecuteOpDispenser extends BaseOpDispenser { } }; return basefunc; - } catch(Exception ex) { + } catch (Exception ex) { String err_msg = "Error while attempting to create the jdbc statement from the connection"; logger.error(err_msg, ex); throw new RuntimeException(err_msg, ex); diff --git a/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/opdispensers/JDBCExecuteQueryOpDispenser.java b/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/opdispensers/JDBCExecuteQueryOpDispenser.java deleted file mode 100644 index 7ea043a42..000000000 --- a/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/opdispensers/JDBCExecuteQueryOpDispenser.java +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright (c) 2023 nosqlbench - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package io.nosqlbench.adapter.jdbc.opdispensers; - -import io.nosqlbench.adapter.jdbc.JDBCSpace; -import io.nosqlbench.adapter.jdbc.optypes.JDBCExecuteQueryOp; -import io.nosqlbench.engine.api.activityimpl.BaseOpDispenser; -import io.nosqlbench.engine.api.activityimpl.uniform.DriverAdapter; -import io.nosqlbench.engine.api.templating.ParsedOp; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; - -import java.sql.Connection; -import java.sql.SQLException; -import java.sql.Statement; -import java.util.function.LongFunction; - -public class JDBCExecuteQueryOpDispenser extends BaseOpDispenser { - private static final Logger logger = LogManager.getLogger(JDBCExecuteQueryOpDispenser.class); - private final LongFunction targetFunction; - private final LongFunction connectionLongFunction; - private final LongFunction statementLongFunction; - - public JDBCExecuteQueryOpDispenser(DriverAdapter adapter, LongFunction connectionLongFunc, ParsedOp op, LongFunction targetFunction) { - super(adapter, op); - this.connectionLongFunction = connectionLongFunc; - this.targetFunction = targetFunction; - this.statementLongFunction = createStmtFunc(op); - } - - protected LongFunction createStmtFunc(ParsedOp cmd) { - try { - LongFunction basefunc = l -> { - try { - return this.connectionLongFunction.apply(l).createStatement(); - } catch (SQLException e) { - String err_msg = "Exception occurred while attempting to construct the statement"; - logger.error(err_msg, e); - throw new RuntimeException(err_msg, e); - } - }; - return basefunc; - } catch (Exception ex) { - String err_msg = "Error while attempting to create the jdbc statement from the connection"; - logger.error(err_msg, ex); - throw new RuntimeException(err_msg, ex); - } - } - - @Override - public JDBCExecuteQueryOp apply(long cycle) { - return new JDBCExecuteQueryOp(this.connectionLongFunction.apply(cycle), this.statementLongFunction.apply(cycle), targetFunction.apply(cycle)); - } -} diff --git a/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/opdispensers/JDBCExecuteUpdateOpDispenser.java b/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/opdispensers/JDBCExecuteUpdateOpDispenser.java deleted file mode 100644 index 97491b4bf..000000000 --- a/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/opdispensers/JDBCExecuteUpdateOpDispenser.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Copyright (c) 2023 nosqlbench - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package io.nosqlbench.adapter.jdbc.opdispensers; - -import io.nosqlbench.adapter.jdbc.JDBCSpace; -import io.nosqlbench.adapter.jdbc.optypes.JDBCExecuteUpdateOp; -import io.nosqlbench.engine.api.activityimpl.BaseOpDispenser; -import io.nosqlbench.engine.api.activityimpl.uniform.DriverAdapter; -import io.nosqlbench.engine.api.templating.ParsedOp; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; - -import java.sql.Connection; -import java.sql.SQLException; -import java.sql.Statement; -import java.util.function.LongFunction; - -public class JDBCExecuteUpdateOpDispenser extends BaseOpDispenser { - private static final Logger logger = LogManager.getLogger(JDBCExecuteUpdateOpDispenser.class); - private final LongFunction targetFunction; - private final LongFunction connectionLongFunction; - private final LongFunction statementLongFunction; - - public JDBCExecuteUpdateOpDispenser(DriverAdapter adapter, LongFunction connectionLongFunc, ParsedOp op, LongFunction targetFunction) { - super(adapter, op); - this.connectionLongFunction = connectionLongFunc; - this.targetFunction = targetFunction; - this.statementLongFunction = createStmtFunc(op); - } - - protected LongFunction createStmtFunc(ParsedOp cmd) { - try { - LongFunction basefunc = l -> { - try { - return this.connectionLongFunction.apply(l).createStatement(); - } catch (SQLException e) { - String err_msg = "Exception occurred while attempting to construct the statement"; - logger.error(err_msg, e); - throw new RuntimeException(err_msg, e); - } - }; - return basefunc; - } catch (Exception ex) { - String err_msg = "Error while attempting to create the jdbc statement from the connection"; - logger.error(err_msg, ex); - throw new RuntimeException(err_msg, ex); - } - } - - @Override - public JDBCExecuteUpdateOp apply(long cycle) { - return new JDBCExecuteUpdateOp(this.connectionLongFunction.apply(cycle), this.statementLongFunction.apply(cycle), targetFunction.apply(cycle)); - } -} diff --git a/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/optypes/JDBCExecuteOp.java b/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/optypes/JDBCExecuteOp.java index d32205450..afe62b844 100644 --- a/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/optypes/JDBCExecuteOp.java +++ b/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/optypes/JDBCExecuteOp.java @@ -32,19 +32,19 @@ public class JDBCExecuteOp extends JDBCOp { @Override public void run() { - logger.info(() -> "Executing JDBCExecuteOp for the given cycle."); + logger.debug(() -> "Executing JDBCExecuteOp for the given cycle."); try { this.getConnection().setAutoCommit(false); - if(logger.isDebugEnabled()) { + if (logger.isDebugEnabled()) { logger.debug(() -> "JDBC Query is: " + this.getQueryString()); } boolean isResultSet = this.getStatement().execute(this.getQueryString()); - if(isResultSet) { - logger.info(() -> ">>>>>>>>>>Executed a SELECT operation [" + this.getQueryString() + "]<<<<<<<<<<"); - } else if(!isResultSet) { - logger.info(() -> { + if (isResultSet) { + logger.debug(() -> ">>>>>>>>>>Executed a SELECT operation [" + this.getQueryString() + "]<<<<<<<<<<"); + } else if (!isResultSet) { + logger.debug(() -> { try { - return ">>>>>>>>>>Executed a normal DDL/DML (non-SELECT) operation [" + this.getStatement().getUpdateCount() + "]<<<<<<<<<<"; + return ">>>>>>>>>>Executed a normal DDL/DML (non-SELECT) operation. 
Number of rows affected is [" + this.getStatement().getUpdateCount() + "]<<<<<<<<<<"; } catch (SQLException e) { String err_msg = "Exception occurred while attempting to fetch the update count of the query operation"; logger.error(err_msg, e); @@ -55,7 +55,7 @@ int countResults = 0; ResultSet rs = this.getStatement().getResultSet(); countResults += rs.getRow(); - while(!this.getStatement().getMoreResults() && 0 < this.getStatement().getUpdateCount()) { + while (!this.getStatement().getMoreResults() && 0 < this.getStatement().getUpdateCount()) { rs = this.getStatement().getResultSet(); countResults += rs.getRow(); //rs.close(); Optional as getMoreResults() will already close it. @@ -64,7 +64,7 @@ logger.debug(() -> ">>>>>>>>>>Total number of rows processed is [" + finalCountResults + "]<<<<<<<<<<"); } this.getConnection().commit(); - logger.info(() -> "Executed the JDBC statement & committed the connection successfully"); + logger.debug(() -> "Executed the JDBC statement & committed the connection successfully"); } catch (SQLException sqlException) { logger.error("ERROR: { state => {}, cause => {}, message => {} }\n", sqlException.getSQLState(), sqlException.getCause(), sqlException.getMessage(), sqlException); diff --git a/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/optypes/JDBCExecuteQueryOp.java b/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/optypes/JDBCExecuteQueryOp.java deleted file mode 100644 index 6f9b1d43d..000000000 --- a/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/optypes/JDBCExecuteQueryOp.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright (c) 2023 nosqlbench - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package io.nosqlbench.adapter.jdbc.optypes; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; - -import java.sql.Connection; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Statement; - -public class JDBCExecuteQueryOp extends JDBCOp { - private final static Logger logger = LogManager.getLogger(JDBCExecuteQueryOp.class); - - public JDBCExecuteQueryOp(Connection connection, Statement statement, String queryString) { - super(connection, statement, queryString); - } - - @Override - public void run() { - logger.debug(() -> "Executing JDBCExecuteQueryOp for the given cycle."); - - try { - this.getConnection().setAutoCommit(false); - logger.debug(() -> "JDBC Query is: " + this.getQueryString()); - ResultSet resultSet = this.getStatement().executeQuery(this.getQueryString()); - this.getConnection().commit(); - logger.debug(() -> "Executed the JDBC statement & committed the connection successfully fetching a total of "+ resultSet.toString() + " records."); - } catch (SQLException sqlException) { - logger.error("JDBCExecuteQueryOp ERROR: { state => {}, cause => {}, message => {} }\n", - sqlException.getSQLState(), sqlException.getCause(), sqlException.getMessage(), sqlException); - throw new RuntimeException(sqlException); - } catch (Exception ex) { - String exMsg = String.format("Exception while attempting to run the jdbc statement %s", getStatement()); - logger.error(exMsg, ex); - throw new RuntimeException(exMsg, ex); - } - } -} diff --git a/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/optypes/JDBCExecuteUpdateOp.java b/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/optypes/JDBCExecuteUpdateOp.java deleted file mode 100644 index 7f7838e51..000000000 --- a/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/optypes/JDBCExecuteUpdateOp.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Copyright (c) 2023 nosqlbench - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package io.nosqlbench.adapter.jdbc.optypes; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; - -import java.sql.Connection; -import java.sql.SQLException; -import java.sql.Statement; - -public class JDBCExecuteUpdateOp extends JDBCOp { - private final static Logger logger = LogManager.getLogger(JDBCExecuteUpdateOp.class); - - public JDBCExecuteUpdateOp(Connection connection, Statement statement, String queryString) { - super(connection, statement, queryString); - } - - @Override - public void run() { - logger.debug(() -> "Executing JDBCExecuteUpdateOp for the given cycle."); - - try { - this.getConnection().setAutoCommit(false); - logger.debug(() -> "JDBC Query is: " + this.getQueryString()); - int rowsAffected = this.getStatement().executeUpdate(this.getQueryString()); - this.getConnection().commit(); - logger.debug(() -> "Executed the JDBC statement & committed the connection successfully impacting a total of "+ rowsAffected + " records."); - } catch (SQLException sqlException) { - logger.error("JDBCExecuteUpdateOp ERROR: { state => {}, cause => {}, message => {} }\n", - sqlException.getSQLState(), sqlException.getCause(), sqlException.getMessage(), sqlException); - throw new RuntimeException(sqlException); - } catch (Exception ex) { - String exMsg = String.format("Exception while attempting to run the jdbc statement %s", getStatement()); - logger.error(exMsg, ex); - throw new RuntimeException(exMsg, ex); - } - } -} diff --git a/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/optypes/JDBCOp.java b/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/optypes/JDBCOp.java index 9a51eb61c..5ae8da771 100644 --- a/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/optypes/JDBCOp.java +++ b/adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/optypes/JDBCOp.java @@ -33,9 +33,10 @@ import java.sql.Statement; * https://www.cockroachlabs.com/docs/v22.2/connection-parameters#supported-options-parameters * https://www.cockroachlabs.com/docs/v22.2/sql-statements.html#query-management-statements * https://docs.yugabyte.com/preview/drivers-orms/java/yugabyte-jdbc/ + * * @see HikariCP connection pooling for details. 
*/ -public abstract class JDBCOp implements RunnableOp/*CycleOp*/ { +public abstract class JDBCOp implements RunnableOp { private final static Logger logger = LogManager.getLogger(JDBCOp.class); private final Connection connection; @@ -55,7 +56,6 @@ public abstract class JDBCOp implements RunnableOp/*CycleOp*/ { } /** - * * @param connection * @param statement * @param queryString diff --git a/adapter-jdbc/src/main/resources/activities.baselinesv2/cockroachdb-keyvalue.yaml b/adapter-jdbc/src/main/resources/activities.baselinesv2/cockroachdb-keyvalue.yaml index 02c861bc6..6142a41cd 100644 --- a/adapter-jdbc/src/main/resources/activities.baselinesv2/cockroachdb-keyvalue.yaml +++ b/adapter-jdbc/src/main/resources/activities.baselinesv2/cockroachdb-keyvalue.yaml @@ -1,4 +1,4 @@ -# run driver=jdbc workload="/path/to/cockroachdb-keyvalue.yaml" tags="block:schema" threads=1 cycles=4 url="jdbc:postgresql://insectdb0-3153.g8z.cockroachlabs.cloud:26257/defaultdb?sslmode=verify-ca&password=CHANGE_ME&user=newuser" databaseName="defaultdb" portNumber=26257 user="newuser" password="CHANGE_ME" sslmode="verify-full" serverName="insectdb" sslrootcert="/path/to/postgresql_certs/root.crt" -vv --show-stacktraces +# run driver=jdbc workload="/path/to/cockroachdb-keyvalue.yaml" tags="block:schema" threads=AUTO cycles=4 url="jdbc:postgresql://crdb0-3153.g8z.cockroachlabs.cloud:26257/defaultdb?sslmode=verify-ca&password=CHANGE_ME&user=newuser" databaseName="defaultdb" portNumber=26257 user="newuser" password="CHANGE_ME" sslmode="verify-full" serverName=insectdb sslrootcert="/path/to/postgresql_certs/root.crt" -vv --show-stacktraces min_version: "5.17.1" description: | @@ -7,11 +7,13 @@ description: | scenarios: default: - schema: run driver=jdbc tags=="block:schema" threads==1 cycles==4 url="jdbc:postgresql://insectdb0-3153.g8z.cockroachlabs.cloud" user="newuser" password="CHANGE_ME" sslmode="verify-ca" + schema: run driver=jdbc workload="/path/to/cockroachdb-keyvalue.yaml" tags==block:schema threads=1 cycles==UNDEF url="jdbc:postgresql://crdb0-3153.g8z.cockroachlabs.cloud:26257/defaultdb?sslmode=verify-ca&password=CHANGE_ME&user=newuser" databaseName="defaultdb" portNumber=26257 user="newuser" password="CHANGE_ME" sslmode="verify-full" serverName="crdb" sslrootcert="/path/to/postgresql_certs/root.crt" + rampup: run driver=jdbc workload="/path/to/cockroachdb-keyvalue.yaml" tags==block:rampup threads=AUTO cycles===TEMPLATE(rampup-cycles,10000000) url="jdbc:postgresql://crdb0-3153.g8z.cockroachlabs.cloud:26257/defaultdb?sslmode=verify-ca&password=CHANGE_ME&user=newuser" databaseName="defaultdb" portNumber=26257 user="newuser" password="CHANGE_ME" sslmode="verify-full" serverName="crdb" sslrootcert="/path/to/postgresql_certs/root.crt" + main: run driver=jdbc workload="/path/to/cockroachdb-keyvalue.yaml" tags=="block:main.*" threads=AUTO cycles===TEMPLATE(main-cycles,10000000) url="jdbc:postgresql://crdb0-3153.g8z.cockroachlabs.cloud:26257/defaultdb?sslmode=verify-ca&password=CHANGE_ME&user=newuser" databaseName="defaultdb" portNumber=26257 user="newuser" password="CHANGE_ME" sslmode="verify-full" serverName="crdb" sslrootcert="/path/to/postgresql_certs/root.crt" bindings: - seq_key: Mod(<>); ToString() -> String - seq_value: Hash(); Mod(<>); ToString() -> String + seq_key: Mod(TEMPLATE(keycount,1000000000)); ToString() -> String + seq_value: Hash(); Mod(TEMPLATE(valuecount,1000000000)); ToString() -> String rw_key: <int>>; ToString() -> String rw_value: Hash(); <int>>; ToString() -> String @@ -19,37 +21,38 @@ 
blocks: schema: ops: drop-database: - ddl: | + jdbcQuery: | DROP DATABASE IF EXISTS TEMPLATE(database,baselines); create-database: - create: | + jdbcQuery: | CREATE DATABASE IF NOT EXISTS TEMPLATE(database,baselines); drop-table: - drop: | + jdbcQuery: | DROP TABLE IF EXISTS TEMPLATE(database,baselines).TEMPLATE(table,keyvalue); create-table: - create: | + jdbcQuery: | CREATE TABLE IF NOT EXISTS TEMPLATE(database,baselines).TEMPLATE(table,keyvalue) (key STRING PRIMARY KEY, value STRING); rampup: params: ops: - rampup-insert: | - INSERT INTO TEMPLATE(database,baselines).TEMPLATE(table,keyvalue) - (key, value) - VALUES ({seq_key},{seq_value}); + rampup-insert: + jdbcQuery: | + INSERT INTO TEMPLATE(database,baselines).TEMPLATE(table,keyvalue) + (key, value) VALUES ({seq_key},{seq_value}); + main-read: params: - ratio: 5 - cl: TEMPLATE(read_cl,LOCAL_QUORUM) - statements: - main-select: | - SELECT * FROM TEMPLATE(keyspace,baselines).TEMPLATE(table,keyvalue) WHERE key={rw_key}; + ratio: TEMPLATE(read_ratio,5) + ops: + main-select: + jdbcQuery: | + SELECT * FROM TEMPLATE(keyspace,baselines).TEMPLATE(table,keyvalue) WHERE key='{rw_key}'; main-write: params: - ratio: 5 - cl: TEMPLATE(write_cl,LOCAL_QUORUM) - statements: - main-insert: | - INSERT INTO TEMPLATE(keyspace,baselines).TEMPLATE(table,keyvalue) - (key, value) VALUES ({rw_key}, {rw_value}); + ratio: TEMPLATE(write_ratio,5) + ops: + main-insert: + jdbcQuery: | + INSERT INTO TEMPLATE(keyspace,baselines).TEMPLATE(table,keyvalue) + (key, value) VALUES ('{rw_key}', '{rw_value}'); diff --git a/adapter-jdbc/src/main/resources/activities.baselinesv2/cockroachdb-tabular.yaml b/adapter-jdbc/src/main/resources/activities.baselinesv2/cockroachdb-tabular.yaml new file mode 100644 index 000000000..1dccf6263 --- /dev/null +++ b/adapter-jdbc/src/main/resources/activities.baselinesv2/cockroachdb-tabular.yaml @@ -0,0 +1,137 @@ +min_version: "5.17.1" + +description: | + A tabular workload with partitions, clusters, and data fields + This workload contains partitioning and cluster along with a set + of 8 fields of varying length. The field values vary in size according + to the fibonacci sequence times a base size factor of 10, with + an additional 10% variance for each field. + The read patterns have a variety of field subsets specified. + + During rampup, all rows will be written partition by partition, + filling in all rows of that partition before moving on to the next. + Example: With a partition size of 1000 and 1B rows, there will be + 1000000 partitions. + + During main phase, the read patterns are varied with different + field sets. As well, the number of rows which will be returned + is varied between 1 and 10. + + By default, reads occur at the same ratio as writes, with main + phase writes writing full rows. + + You can bulk up the size of the payloads by 10x with addzeroes='0', + by 100x with addzeroes='00', and so on, but if you want to go higher + than 100x, you'll need to modify the workload with a larger reference + file in the HashedFileExtractToString(...) bindings. 
+ +scenarios: + default: + schema: run driver=jdbc tags==block:schema threads==1 cycles==UNDEF + rampup: run driver=jdbc tags==block:rampup cycles===TEMPLATE(rampup-cycles,10B) threads=auto + main: run driver=jdbc tags==block:"main.*" cycles===TEMPLATE(main-cycles,100M) threads=auto + +params: + instrument: true + +bindings: + # for ramp-up and verify phases + # + part_layout: Div(<>); ToString() -> String + clust_layout: Mod(<>); ToString() -> String + # todo: update these definitions to use the simpler 10,0.1, 20, 0.2, ... + data0: Add(10); HashedFileExtractToString('data/lorem_ipsum_full.txt',9TEMPLATE(addzeroes,),11TEMPLATE(addzeroes,)) + data1: Add(20); HashedFileExtractToString('data/lorem_ipsum_full.txt',18TEMPLATE(addzeroes,),22TEMPLATE(addzeroes,)) + data2: Add(30); HashedFileExtractToString('data/lorem_ipsum_full.txt',27TEMPLATE(addzeroes,),33TEMPLATE(addzeroes,)) + data3: Add(40); HashedFileExtractToString('data/lorem_ipsum_full.txt',45TEMPLATE(addzeroes,),55TEMPLATE(addzeroes,)) + data4: Add(50); HashedFileExtractToString('data/lorem_ipsum_full.txt',72TEMPLATE(addzeroes,),88TEMPLATE(addzeroes,)) + data5: Add(60); HashedFileExtractToString('data/lorem_ipsum_full.txt',107TEMPLATE(addzeroes,),143TEMPLATE(addzeroes,)) + data6: Add(70); HashedFileExtractToString('data/lorem_ipsum_full.txt',189TEMPLATE(addzeroes,),231TEMPLATE(addzeroes,)) + data7: Add(80); HashedFileExtractToString('data/lorem_ipsum_full.txt',306TEMPLATE(addzeroes,),374TEMPLATE(addzeroes,)) + + # for main phase + # for write + part_write: Hash(); Uniform(0,TEMPLATE(partcount,100))->int; ToString() -> String + clust_write: Hash(); Add(1); Uniform(0,TEMPLATE(partsize,1000000))->int; ToString() -> String + data_write: Hash(); HashedFileExtractToString('data/lorem_ipsum_full.txt',50,150) -> String + + # for read + limit: Uniform(1,10) -> int + part_read: Uniform(0,TEMPLATE(partcount,100))->int; ToString() -> String + clust_read: Add(1); Uniform(0,TEMPLATE(partsize,1000000))->int; ToString() -> String + +blocks: + schema: + params: + prepared: false + ops: + #drop-database: + # jdbcQuery: | + # DROP DATABASE IF EXISTS TEMPLATE(database,baselines); + create-database: + jdbcQuery: | + CREATE DATABASE IF NOT EXISTS TEMPLATE(database,baselines); + drop-table: + jdbcQuery: | + DROP TABLE IF EXISTS TEMPLATE(database,baselines).TEMPLATE(table,tabular); + create-table: + jdbcQuery: | + CREATE TABLE IF NOT EXISTS TEMPLATE(keyspace,baselines).TEMPLATE(table,tabular) ( + part STRING, + clust STRING, + data0 STRING, data1 STRING, data2 STRING, data3 STRING, + data4 STRING, data5 STRING, data6 STRING, data7 STRING, + PRIMARY KEY (part,clust) + ); + rampup: + params: + ops: + rampup-insert: + jdbcQuery: | + INSERT INTO TEMPLATE(keyspace,baselines).TEMPLATE(table,tabular) + (part,clust,data0,data1,data2,data3,data4,data5,data6,data7) + VALUES ('{part_layout}','{clust_layout}','{data0}','{data1}','{data2}','{data3}','{data4}','{data5}','{data6}','{data7}') + verify: + params: + cl: TEMPLATE(read_cl,LOCAL_QUORUM) + ops: + verify-select: + jdbcQuery: | + SELECT * FROM TEMPLATE(keyspace,baselines).TEMPLATE(table,tabular) WHERE part='{part_layout}' AND clust='{clust_layout}' + main-read: + params: + ratio: TEMPLATE(read_ratio,1) + ops: + main-select-all: + jdbcQuery: | + SELECT * FROM TEMPLATE(keyspace,baselines).TEMPLATE(table,tabular) WHERE part='{part_read}' LIMIT {limit}; + main-select-01: + jdbcQuery: | + SELECT data0,data1 from TEMPLATE(keyspace,baselines).TEMPLATE(table,tabular) WHERE part='{part_read}' LIMIT {limit}; + 
main-select-0246: + jdbcQuery: | + SELECT data0,data2,data4,data6 FROM TEMPLATE(keyspace,baselines).TEMPLATE(table,tabular) WHERE part='{part_read}' LIMIT {limit}; + main-select-1357: + jdbcQuery: | + SELECT data1,data3,data5,data7 FROM TEMPLATE(keyspace,baselines).TEMPLATE(table,tabular) WHERE part='{part_read}' LIMIT {limit}; + main-select-0123: + jdbcQuery: | + SELECT data0,data1,data2,data3 FROM TEMPLATE(keyspace,baselines).TEMPLATE(table,tabular) WHERE part='{part_read}' LIMIT {limit}; + main-select-4567: + jdbcQuery: | + SELECT data4,data5,data6,data7 FROM TEMPLATE(keyspace,baselines).TEMPLATE(table,tabular) WHERE part='{part_read}' LIMIT {limit}; + main-select-67: + jdbcQuery: | + SELECT data6,data7 FROM TEMPLATE(keyspace,baselines).TEMPLATE(table,tabular) WHERE part='{part_read}' LIMIT {limit}; + main-select: + jdbcQuery: | + SELECT data0,data1,data2,data3,data4,data5,data6,data7 FROM TEMPLATE(keyspace,baselines).TEMPLATE(table,tabular) WHERE part='{part_read}' LIMIT {limit}; + main-write: + params: + ratio: TEMPLATE(write_ratio,8) + ops: + main-write: + jdbcQuery: | + INSERT INTO TEMPLATE(keyspace,baselines).TEMPLATE(table,tabular) + (part, clust, data0,data1,data2,data3,data4,data5,data6,data7) + VALUES ('{part_write}','{clust_write}','{data0}','{data1}','{data2}','{data3}','{data4}','{data5}','{data6}','{data7}') diff --git a/adapter-jdbc/src/main/resources/activities.baselinesv2/cockroachdb-timeseries.yaml b/adapter-jdbc/src/main/resources/activities.baselinesv2/cockroachdb-timeseries.yaml new file mode 100644 index 000000000..38679d6e8 --- /dev/null +++ b/adapter-jdbc/src/main/resources/activities.baselinesv2/cockroachdb-timeseries.yaml @@ -0,0 +1,80 @@ +# java -jar nb5.jar cockroachdb-timeseries default -vv --show-stacktraces +min_version: "5.17.1" + +description: | + This workload emulates a time-series data model and access patterns. 
+ +scenarios: + default: + schema: run driver=jdbc tags==block:schema threads==1 cycles==UNDEF url="jdbc:postgresql://crdb0-3153.g8z.cockroachlabs.cloud:26257/defaultdb?sslmode=verify-ca&password=CHANGE_ME&user=newuser" databaseName="defaultdb" portNumber=26257 user="newuser" password="CHANGE_ME" sslmode="verify-full" serverName="crdb" sslrootcert="/path/to/postgresql_certs/root.crt" + rampup: run driver=jdbc tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto url="jdbc:postgresql://crdb0-3153.g8z.cockroachlabs.cloud:26257/defaultdb?sslmode=verify-ca&password=CHANGE_ME&user=newuser" databaseName="defaultdb" portNumber=26257 user="newuser" password="CHANGE_ME" sslmode="verify-full" serverName="crdb" sslrootcert="/path/to/postgresql_certs/root.crt" + main: run driver=jdbc tags==block:"main.*" cycles===TEMPLATE(main-cycles,10000000) threads=auto url="jdbc:postgresql://crdb0-3153.g8z.cockroachlabs.cloud:26257/defaultdb?sslmode=verify-ca&password=CHANGE_ME&user=newuser" databaseName="defaultdb" portNumber=26257 user="newuser" password="CHANGE_ME" sslmode="verify-full" serverName="crdb" sslrootcert="/path/to/postgresql_certs/root.crt" + +params: + instrument: TEMPLATE(instrument,false) + +bindings: + machine_id: Mod(TEMPLATE(sources,10000)); ToHashedUUID() -> java.util.UUID + sensor_name: HashedLineToString('data/variable_words.txt') + time: Mul(TEMPLATE(timespeed,100)L); Div(TEMPLATE(sources,10000)L); ToJavaInstant() + cell_timestamp: Mul(TEMPLATE(timespeed,100)L); Div(TEMPLATE(sources,10000)L); Mul(1000L) + sensor_value: Normal(0.0,5.0); Add(100.0) -> double + station_id: Div(TEMPLATE(sources,10000));Mod(TEMPLATE(stations,100)); ToHashedUUID() -> java.util.UUID + data: HashedFileExtractToString('data/lorem_ipsum_full.txt',800TEMPLATE(addzeroes,),1200TEMPLATE(addzeroes,)) + +blocks: + schema: + params: + ops: + drop-database: + jdbcQuery: | + DROP DATABASE IF EXISTS TEMPLATE(database,baselines); + create-database: + jdbcQuery: | + CREATE DATABASE IF NOT EXISTS TEMPLATE(database,baselines); + drop-table: + jdbcQuery: | + DROP TABLE IF EXISTS TEMPLATE(database,baselines).TEMPLATE(table,iot); + create-table: + jdbcQuery: | + CREATE TABLE IF NOT EXISTS TEMPLATE(keyspace,baselines).TEMPLATE(table,iot) ( + machine_id UUID, + sensor_name STRING, + time TIMESTAMP, + sensor_value FLOAT, + station_id UUID, + data STRING, + PRIMARY KEY (machine_id, sensor_name, time) + ); + + rampup: + params: + ops: + insert-rampup: + jdbcQuery: | + INSERT INTO TEMPLATE(keyspace,baselines).TEMPLATE(table,iot) + (machine_id, sensor_name, time, sensor_value, station_id, data) + VALUES ('{machine_id}', '{sensor_name}', '{time}', {sensor_value}, '{station_id}', '{data}') + + #using timestamp {cell_timestamp} + + main-read: + params: + ratio: TEMPLATE(read_ratio,1) + ops: + select-read: + jdbcQuery: | + SELECT * FROM TEMPLATE(keyspace,baselines).TEMPLATE(table,iot) + WHERE machine_id='{machine_id}' and sensor_name='{sensor_name}' + LIMIT TEMPLATE(limit,10) + main-write: + params: + ratio: TEMPLATE(write_ratio,9) + ops: + insert-main: + jdbcQuery: | + INSERT INTO TEMPLATE(keyspace,baselines).TEMPLATE(table,iot) + (machine_id, sensor_name, time, sensor_value, station_id, data) + VALUES ('{machine_id}', '{sensor_name}', '{time}', {sensor_value}, '{station_id}', '{data}') + + #using timestamp {cell_timestamp} diff --git a/adapter-jdbc/src/main/resources/jdbc.md b/adapter-jdbc/src/main/resources/jdbc.md index 895065b52..14a0588b4 100644 --- a/adapter-jdbc/src/main/resources/jdbc.md +++ 
b/adapter-jdbc/src/main/resources/jdbc.md @@ -8,7 +8,7 @@ This JDBC driver leverages [Hikari Connection Pool](https://github.com/brettwool # Executing JDBC Workload The following is an example of invoking a JDBC workload. -```yaml +```shell run driver=jdbc workload="/path/to/workload.yaml" cycles=1000 threads=100 url="jdbc:postgresql://" serverName=localhost portNumber=5432 databaseName=defaultdb ... -vv --show-stacktraces ``` In the above NB command, following are JDBC driver specific parameters: @@ -18,17 +18,14 @@ In the above NB command, following are JDBC driver specific parameters: * `serverName`: The database name. The default is to connect to a database with the same name as the user name used to connect to the server. Other NB engine parameters are straight forward: -* `driver`: must be `jdbc` +* `driver`: *must* be `jdbc`. * `threads`: depending on the workload type, the NB thread number determines how many clients will be created. All the clients will share the Connection originated from the Hikari Connection Pool. -* `*.yaml`: the NB jdbc scenario definition workload yaml file +* `*.yaml`: the NB jdbc scenario definition workload yaml file. +* ``: either `./nb` (when using the binary) or `java -jar nb5.jar`. # Configuration -There are three main configuration with which we could issue a query and process the results back based on the [PostgreSQL® Query](https://jdbc.postgresql.org/documentation/query/) pattern. +There is one main configuration with which we can issue a query and process its results, based on the [PostgreSQL® Query](https://jdbc.postgresql.org/documentation/query/) pattern. ## Config Sources -* `execute`: This is to issue any DDL statements such `CREATE DATABASE|TABLE` or `DROP DATABASE|TABLE` operations which returns nothing. -* `executeUpdate`: This is to issue DML statements such as `INSERT|UPDATE|DELETE` operations that will return how many number of rows were affected by that operation. -* `executeQuery`: This is to issue DML statement such as `SELECT` operation which would return a `ResultSet` object to process. +* `jdbcQuery`: used to issue any supported statement. DDL statements such as `CREATE DATABASE|TABLE` or `DROP DATABASE|TABLE` return nothing; DML statements such as `INSERT|UPDATE|DELETE` return the number of rows affected by the operation; DML statements such as `SELECT` return a `ResultSet` object to process. ### Examples - - - +Check out the default activities under the [activities.baselinesv2](./activities.baselinesv2) directory. diff --git a/engine-api/src/main/java/io/nosqlbench/engine/api/activityapi/core/RunState.java b/engine-api/src/main/java/io/nosqlbench/engine/api/activityapi/core/RunState.java index 0476dc762..17180dfc9 100644 --- a/engine-api/src/main/java/io/nosqlbench/engine/api/activityapi/core/RunState.java +++ b/engine-api/src/main/java/io/nosqlbench/engine/api/activityapi/core/RunState.java @@ -106,7 +106,7 @@ public enum RunState { case Stopping, Finished, Errored -> true;// A motor has exhausted its input, and is finished with its work default -> false; }; - case Stopping -> (target == Stopped||target==Finished); // A motor was stopped by request before exhausting input + case Stopping -> (target == Stopped||target==Finished||target==Errored); // A motor was stopped by request before exhausting input case Finished -> (target == Running); // A motor was restarted? 
case Errored -> target==Errored; }; diff --git a/engine-core/src/main/java/io/nosqlbench/engine/core/lifecycle/activity/ActivityExecutor.java b/engine-core/src/main/java/io/nosqlbench/engine/core/lifecycle/activity/ActivityExecutor.java index bfdaf695e..8b55b6717 100644 --- a/engine-core/src/main/java/io/nosqlbench/engine/core/lifecycle/activity/ActivityExecutor.java +++ b/engine-core/src/main/java/io/nosqlbench/engine/core/lifecycle/activity/ActivityExecutor.java @@ -87,10 +87,10 @@ public class ActivityExecutor implements ActivityController, ParameterMap.Listen activity.setRunState(RunState.Stopping); motors.forEach(Motor::requestStop); - tally.awaitNoneOther(RunState.Stopped, RunState.Finished); + tally.awaitNoneOther(RunState.Stopped, RunState.Finished, RunState.Errored); shutdownExecutorService(Integer.MAX_VALUE); - tally.awaitNoneOther(RunState.Stopped, RunState.Finished); + tally.awaitNoneOther(RunState.Stopped, RunState.Finished, RunState.Errored); activity.setRunState(RunState.Stopped); logger.info(() -> "stopped: " + this.getActivityDef().getAlias() + " with " + motors.size() + " slots"); @@ -349,7 +349,7 @@ public class ActivityExecutor implements ActivityController, ParameterMap.Listen return activity; } - public void notifyException(Thread t, Throwable e) { + public synchronized void notifyException(Thread t, Throwable e) { logger.debug(() -> "Uncaught exception in activity thread forwarded to activity executor: " + e.getMessage()); this.exception = new RuntimeException("Error in activity thread " + t.getName(), e); this.requestStopMotors(); diff --git a/mvn-defaults/pom.xml b/mvn-defaults/pom.xml index 86b3f7919..96d7baf94 100644 --- a/mvn-defaults/pom.xml +++ b/mvn-defaults/pom.xml @@ -79,7 +79,7 @@ org.snakeyaml snakeyaml-engine - 2.5 + 2.6