jdbc workload yaml files

This commit is contained in:
Madhavan Sridharan
2023-02-03 21:07:01 -05:00
17 changed files with 292 additions and 358 deletions

View File

@@ -17,8 +17,6 @@
package io.nosqlbench.adapter.jdbc;
import io.nosqlbench.adapter.jdbc.opdispensers.JDBCExecuteOpDispenser;
import io.nosqlbench.adapter.jdbc.opdispensers.JDBCExecuteQueryOpDispenser;
import io.nosqlbench.adapter.jdbc.opdispensers.JDBCExecuteUpdateOpDispenser;
import io.nosqlbench.adapter.jdbc.optypes.JDBCOp;
import io.nosqlbench.api.config.standard.NBConfiguration;
import io.nosqlbench.engine.api.activityimpl.OpDispenser;
@@ -51,13 +49,10 @@ public class JDBCOpMapper implements OpMapper<JDBCOp> {
LongFunction<String> spaceNameF = op.getAsFunctionOr("space", "default");
LongFunction<JDBCSpace> spaceFunc = l -> spaceCache.get(spaceNameF.apply(l));
// Since the only needed thing in the JDBCSpace is the connection, we can short-circuit
// Since the only needed thing in the JDBCSpace is the Connection, we can short-circuit
// to it here instead of stepping down from the cycle to the space to the connection.
LongFunction<Connection> connectionLongFunc = l -> spaceCache.get(spaceNameF.apply(l)).getConnection();
//return new JDBCExecuteQueryOpDispenser(adapter, spaceFunc, op);working
/*
* If the user provides a body element, then they want to provide the JSON or
* a data structure that can be converted into JSON, bypassing any further
@@ -65,26 +60,22 @@ public class JDBCOpMapper implements OpMapper<JDBCOp> {
*/
if (op.isDefined("body")) {
throw new RuntimeException("This mode is reserved for later. Do not use the 'body' op field.");
}
else {
} else {
TypeAndTarget<JDBCOpType, String> opType = op.getTypeAndTarget(JDBCOpType.class, String.class, "type", "stmt");
logger.info(() -> "Using " + opType.enumId + " statement form for '" + op.getName());
//return new JDBCExecuteQueryOpDispenser(adapter, spaceFunc, op/*, opType.targetFunction*/);
return switch (opType.enumId) {
// SELECT uses 'executeQuery' and returns a 'ResultSet'
// https://jdbc.postgresql.org/documentation/query/#example51processing-a-simple-query-in-jdbc
case executeQuery, select -> new JDBCExecuteQueryOpDispenser(adapter, connectionLongFunc, op, opType.targetFunction);
// INSERT|UPDATE|DELETE uses 'executeUpdate' and returns an 'int'
// https://jdbc.postgresql.org/documentation/query/#performing-updates
case dml, executeUpdate, update, insert, delete -> new JDBCExecuteUpdateOpDispenser(adapter, connectionLongFunc, op, opType.targetFunction);
// CREATE|DROP TABLE|VIEW uses 'execute' (as opposed to 'executeQuery' which returns a 'ResultSet')
// https://jdbc.postgresql.org/documentation/query/#example54dropping-a-table-in-jdbc
case ddl, create, drop, execute -> new JDBCExecuteOpDispenser(adapter, connectionLongFunc, op, opType.targetFunction)/*.apply(op)*/;
case jdbcQuery -> new JDBCExecuteOpDispenser(adapter, connectionLongFunc, op, opType.targetFunction);
};
}
}

View File

@@ -17,22 +17,13 @@
package io.nosqlbench.adapter.jdbc;
/**
* Op templates which are supported by the NoSQLBench CockroachDB driver are
* Op templates which are supported by the NoSQLBench JDBC driver are
* enumerated below. These command names should mirror those in the official
* CockroachDB API exactly. See the official API for more details.
* CockroachDB API exactly, as an example. See the official API for more details.
*
* @see <a href="https://www.cockroachlabs.com/docs/v22.2/sql-statements.html#data-definition-statements">CockroachDB API Reference</a>
*/
public enum JDBCOpType {
//See https://jdbc.postgresql.org/documentation/query/ for the underlying JDBC call semantics.
executeQuery, // SELECT-style operations; dispatched to Statement.executeQuery (returns a ResultSet)
executeUpdate, // INSERT/UPDATE/DELETE operations; dispatched to Statement.executeUpdate (returns an int row count)
execute, // DDL / database-object operations; dispatched to Statement.execute (returns a boolean)
create, // DDL alias - CREATE, executed via Statement.execute
drop, // DDL alias - DROP, executed via Statement.execute
insert, // DML alias - INSERT, executed via Statement.executeUpdate
update, // DML alias - UPDATE, executed via Statement.executeUpdate
delete, // DML alias - DELETE, executed via Statement.executeUpdate
select, // DML alias - SELECT, executed via Statement.executeQuery
dml, // generic DML - SELECT|INSERT|UPDATE|DELETE, leveraging `executeUpdate` & `executeQuery`
ddl, // generic DDL - CREATE|DROP DATABASE|TABLE, leveraging `execute`
jdbcQuery, // generic catch-all op type; mapped to the plain-execute dispenser (see JDBCOpMapper)
}

View File

@@ -25,16 +25,13 @@ import io.nosqlbench.api.config.standard.Param;
import io.nosqlbench.api.errors.OpConfigError;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.postgresql.ds.PGSimpleDataSource;
import javax.sql.DataSource;
import java.sql.Connection;
import java.util.Optional;
public class JDBCSpace implements AutoCloseable {
private final static Logger logger = LogManager.getLogger(JDBCSpace.class);
private final String spaceName;
private DataSource ds;
private HikariConfig hikariConfig;
private HikariDataSource hikariDataSource;
private Connection connection;
@@ -53,111 +50,90 @@ public class JDBCSpace implements AutoCloseable {
}
private HikariDataSource createClient(NBConfiguration cfg) {
PGSimpleDataSource ds = new PGSimpleDataSource();
hikariConfig = new HikariConfig();
Optional<String> url = cfg.getOptional("url");
if(url.isEmpty()) {
if (url.isEmpty()) {
throw new OpConfigError("url option is required.");
} else {
ds.setURL(url.get());
hikariConfig.setJdbcUrl(url.get());
}
Optional<String> serverNames = cfg.getOptional("serverName");
if(serverNames.isPresent()) {
ds.setServerNames(new String[]{serverNames.get()});
//hds.setServerNames(new String[] {serverNames.get()});
if (serverNames.isPresent()) {
hikariConfig.addDataSourceProperty("serverName", serverNames.get());
} else {
throw new OpConfigError("Server name option is required.");
}
Optional<String> databaseName = cfg.getOptional("databaseName");
if(databaseName.isPresent()) {
ds.setDatabaseName(databaseName.get());
if (databaseName.isPresent()) {
hikariConfig.addDataSourceProperty("databaseName", databaseName.get());
} else {
throw new OpConfigError("Database name option is required.");
}
//Optional<Integer> portNumber = cfg.getOptional(Integer.class, "portNumber");
int portNumber = Integer.parseInt(cfg.getOptional("portNumber").orElse("26257"));
//ds.setPortNumbers(new int[] { portNumber.orElse(26257) });
ds.setPortNumbers(new int[] { portNumber });
hikariConfig.addDataSourceProperty("portNumber", portNumber);
Optional<String> user = cfg.getOptional("user");
if(user.isPresent()) {
ds.setUser(user.get());
if (user.isPresent()) {
hikariConfig.setUsername(user.get());
}
Optional<String> password = cfg.getOptional("password");
if(password.isPresent()) {
if(user.isEmpty()) {
if (password.isPresent()) {
if (user.isEmpty()) {
throw new OpConfigError("Both user and password options are required. Only password is supplied in this case.");
}
ds.setPassword(password.get());
hikariConfig.setPassword(password.get());
} else {
if(user.isPresent()) {
if (user.isPresent()) {
throw new OpConfigError("Both user and password options are required. Only user is supplied in this case.");
// Maybe simply ignore user and move on as opposed to throwing an error?
//logger.warn(() -> "Both user and password options are required. Only user is supplied in this case and it will be ignored.");
}
}
Optional<Boolean> ssl = cfg.getOptional(Boolean.class,"ssl");
if(ssl.isPresent()) {
ds.setSsl(ssl.get());
Optional<Boolean> ssl = cfg.getOptional(Boolean.class, "ssl");
if (ssl.isPresent()) {
hikariConfig.addDataSourceProperty("ssl", ssl.get());
} else {
ds.setSsl(false);
hikariConfig.addDataSourceProperty("ssl", false);
}
Optional<String> sslMode = cfg.getOptional("sslmode");
if(sslMode.isPresent()) {
ds.setSslMode(sslMode.get());
if (sslMode.isPresent()) {
hikariConfig.addDataSourceProperty("sslmode", sslMode.get());
} else {
ds.setSslMode("verify-full");
hikariConfig.addDataSourceProperty("sslmode", "verify-full");
}
Optional<String> sslCert = cfg.getOptional("sslcert");
if(sslCert.isPresent()) {
ds.setSslcert(sslCert.get());
if (sslCert.isPresent()) {
hikariConfig.addDataSourceProperty("sslcert", sslCert.get());
} /*else if(sslMode.isPresent() && (!"disable".equalsIgnoreCase(sslMode.get()) || !"allow".equalsIgnoreCase(sslMode.get())) || !"prefer".equalsIgnoreCase(sslMode.get())) {
throw new OpConfigError("When sslmode is true, sslcert should be provided.");
}*/
Optional<String> sslRootCert = cfg.getOptional("sslrootcert");
if(sslRootCert.isPresent()) {
ds.setSslRootCert(sslRootCert.get());
if (sslRootCert.isPresent()) {
hikariConfig.addDataSourceProperty("sslrootcert", sslRootCert.get());
}
Optional<String> applicationName = cfg.getOptional("applicationName");
if(applicationName.isPresent()) {
ds.setApplicationName(applicationName.get());
hikariConfig.addDataSourceProperty("applicationName", applicationName.orElse("NoSQLBench CRDB"));
} else {
ds.setApplicationName("NoSQLBench CRDB");
hikariConfig.addDataSourceProperty("applicationName", "NoSQLBench CRDB");
}
hikariConfig.addDataSourceProperty("applicationName", applicationName.orElse("NoSQLBench"));
Optional<Boolean> rewriteBatchedInserts = cfg.getOptional(Boolean.class, "rewriteBatchedInserts");
ds.setReWriteBatchedInserts(rewriteBatchedInserts.orElse(true));
hikariConfig.addDataSourceProperty("rewriteBatchedInserts", rewriteBatchedInserts.orElse(true));
//Maybe always disable auto-commit since we manage ourselves?
Optional<Boolean> autoCommit = cfg.getOptional(Boolean.class, "autoCommit");
hikariConfig.setAutoCommit(autoCommit.orElse(false));
//Optional<Integer> maximumPoolSize = cfg.getOptional(Integer.class,"maximumPoolSize");
int maximumPoolSize = Integer.parseInt(cfg.getOptional("maximumPoolSize").orElse("40"));
hikariConfig.setMaximumPoolSize(maximumPoolSize);
//Optional<Integer> keepaliveTime = cfg.getOptional(Integer.class,"keepaliveTime");
int keepaliveTime = Integer.parseInt(cfg.getOptional("keepaliveTime").orElse("150000"));
hikariConfig.setKeepaliveTime(keepaliveTime);
@@ -165,7 +141,7 @@ public class JDBCSpace implements AutoCloseable {
try {
this.connection = hds.getConnection();
} catch (Exception ex) {
String exp = "Exception occurred while attempting to create a connection using the Hikari Data Source";
String exp = "Exception occurred while attempting to create a connection using the HikariDataSource";
logger.error(exp, ex);
throw new RuntimeException(exp, ex);
}

View File

@@ -54,7 +54,7 @@ public class JDBCExecuteOpDispenser extends BaseOpDispenser<JDBCOp, JDBCSpace> {
}
};
return basefunc;
} catch(Exception ex) {
} catch (Exception ex) {
String err_msg = "Error while attempting to create the jdbc statement from the connection";
logger.error(err_msg, ex);
throw new RuntimeException(err_msg, ex);

View File

@@ -1,68 +0,0 @@
/*
* Copyright (c) 2023 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.nosqlbench.adapter.jdbc.opdispensers;
import io.nosqlbench.adapter.jdbc.JDBCSpace;
import io.nosqlbench.adapter.jdbc.optypes.JDBCExecuteQueryOp;
import io.nosqlbench.engine.api.activityimpl.BaseOpDispenser;
import io.nosqlbench.engine.api.activityimpl.uniform.DriverAdapter;
import io.nosqlbench.engine.api.templating.ParsedOp;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import java.sql.Connection;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.function.LongFunction;
public class JDBCExecuteQueryOpDispenser extends BaseOpDispenser<JDBCExecuteQueryOp, JDBCSpace> {
    private static final Logger logger = LogManager.getLogger(JDBCExecuteQueryOpDispenser.class);
    private final LongFunction<String> targetFunction;
    private final LongFunction<Connection> connectionLongFunction;
    private final LongFunction<Statement> statementLongFunction;

    /**
     * Dispenses {@link JDBCExecuteQueryOp} instances (SELECT-style ops run via
     * {@code Statement.executeQuery}).
     *
     * @param adapter            the driver adapter this dispenser belongs to
     * @param connectionLongFunc maps a cycle number to the JDBC {@link Connection} to use
     * @param op                 the parsed op template
     * @param targetFunction     maps a cycle number to the rendered SQL statement text
     */
    public JDBCExecuteQueryOpDispenser(DriverAdapter adapter, LongFunction<Connection> connectionLongFunc, ParsedOp op, LongFunction<String> targetFunction) {
        super(adapter, op);
        this.connectionLongFunction = connectionLongFunc;
        this.targetFunction = targetFunction;
        this.statementLongFunction = createStmtFunc(op);
    }

    /**
     * Builds a per-cycle function that creates a fresh JDBC {@link Statement} from the
     * cycle's connection.
     *
     * Note: the original outer try/catch around the lambda construction was dead code —
     * merely constructing a lambda cannot throw — so it has been removed. Any
     * {@link SQLException} raised at apply-time is wrapped in a {@link RuntimeException}.
     *
     * @param cmd the parsed op template (currently unused; kept for interface stability)
     */
    protected LongFunction<Statement> createStmtFunc(ParsedOp cmd) {
        return l -> {
            try {
                return this.connectionLongFunction.apply(l).createStatement();
            } catch (SQLException e) {
                String err_msg = "Exception occurred while attempting to construct the statement";
                logger.error(err_msg, e);
                throw new RuntimeException(err_msg, e);
            }
        };
    }

    /**
     * @param cycle the cycle number
     * @return a ready-to-run query op bound to this cycle's connection, statement, and SQL text
     */
    @Override
    public JDBCExecuteQueryOp apply(long cycle) {
        return new JDBCExecuteQueryOp(this.connectionLongFunction.apply(cycle), this.statementLongFunction.apply(cycle), targetFunction.apply(cycle));
    }
}

View File

@@ -1,67 +0,0 @@
/*
* Copyright (c) 2023 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.nosqlbench.adapter.jdbc.opdispensers;
import io.nosqlbench.adapter.jdbc.JDBCSpace;
import io.nosqlbench.adapter.jdbc.optypes.JDBCExecuteUpdateOp;
import io.nosqlbench.engine.api.activityimpl.BaseOpDispenser;
import io.nosqlbench.engine.api.activityimpl.uniform.DriverAdapter;
import io.nosqlbench.engine.api.templating.ParsedOp;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import java.sql.Connection;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.function.LongFunction;
public class JDBCExecuteUpdateOpDispenser extends BaseOpDispenser<JDBCExecuteUpdateOp, JDBCSpace> {
    private static final Logger logger = LogManager.getLogger(JDBCExecuteUpdateOpDispenser.class);
    private final LongFunction<String> targetFunction;
    private final LongFunction<Connection> connectionLongFunction;
    private final LongFunction<Statement> statementLongFunction;

    /**
     * Dispenses {@link JDBCExecuteUpdateOp} instances (INSERT/UPDATE/DELETE ops run via
     * {@code Statement.executeUpdate}).
     *
     * @param adapter            the driver adapter this dispenser belongs to
     * @param connectionLongFunc maps a cycle number to the JDBC {@link Connection} to use
     * @param op                 the parsed op template
     * @param targetFunction     maps a cycle number to the rendered SQL statement text
     */
    public JDBCExecuteUpdateOpDispenser(DriverAdapter adapter, LongFunction<Connection> connectionLongFunc, ParsedOp op, LongFunction<String> targetFunction) {
        super(adapter, op);
        this.connectionLongFunction = connectionLongFunc;
        this.targetFunction = targetFunction;
        this.statementLongFunction = createStmtFunc(op);
    }

    /**
     * Builds a per-cycle function that creates a fresh JDBC {@link Statement} from the
     * cycle's connection.
     *
     * Note: the original outer try/catch around the lambda construction was dead code —
     * merely constructing a lambda cannot throw — so it has been removed. Any
     * {@link SQLException} raised at apply-time is wrapped in a {@link RuntimeException}.
     *
     * @param cmd the parsed op template (currently unused; kept for interface stability)
     */
    protected LongFunction<Statement> createStmtFunc(ParsedOp cmd) {
        return l -> {
            try {
                return this.connectionLongFunction.apply(l).createStatement();
            } catch (SQLException e) {
                String err_msg = "Exception occurred while attempting to construct the statement";
                logger.error(err_msg, e);
                throw new RuntimeException(err_msg, e);
            }
        };
    }

    /**
     * @param cycle the cycle number
     * @return a ready-to-run update op bound to this cycle's connection, statement, and SQL text
     */
    @Override
    public JDBCExecuteUpdateOp apply(long cycle) {
        return new JDBCExecuteUpdateOp(this.connectionLongFunction.apply(cycle), this.statementLongFunction.apply(cycle), targetFunction.apply(cycle));
    }
}

View File

@@ -32,19 +32,19 @@ public class JDBCExecuteOp extends JDBCOp {
@Override
public void run() {
logger.info(() -> "Executing JDBCExecuteOp for the given cycle.");
logger.debug(() -> "Executing JDBCExecuteOp for the given cycle.");
try {
this.getConnection().setAutoCommit(false);
if(logger.isDebugEnabled()) {
if (logger.isDebugEnabled()) {
logger.debug(() -> "JDBC Query is: " + this.getQueryString());
}
boolean isResultSet = this.getStatement().execute(this.getQueryString());
if(isResultSet) {
logger.info(() -> ">>>>>>>>>>Executed a SELECT operation [" + this.getQueryString() + "]<<<<<<<<<<");
} else if(!isResultSet) {
logger.info(() -> {
if (isResultSet) {
logger.debug(() -> ">>>>>>>>>>Executed a SELECT operation [" + this.getQueryString() + "]<<<<<<<<<<");
} else if (!isResultSet) {
logger.debug(() -> {
try {
return ">>>>>>>>>>Executed a normal DDL/DML (non-SELECT) operation [" + this.getStatement().getUpdateCount() + "]<<<<<<<<<<";
return ">>>>>>>>>>Executed a normal DDL/DML (non-SELECT) operation. Objects affected is [" + this.getStatement().getUpdateCount() + "]<<<<<<<<<<";
} catch (SQLException e) {
String err_msg = "Exception occurred while attempting to fetch the update count of the query operation";
logger.error(err_msg, e);
@@ -55,7 +55,7 @@ public class JDBCExecuteOp extends JDBCOp {
int countResults = 0;
ResultSet rs = this.getStatement().getResultSet();
countResults += rs.getRow();
while(!this.getStatement().getMoreResults() && 0 < this.getStatement().getUpdateCount()) {
while (!this.getStatement().getMoreResults() && 0 < this.getStatement().getUpdateCount()) {
rs = this.getStatement().getResultSet();
countResults += rs.getRow();
//rs.close(); Optional as getMoreResults() will already close it.
@@ -64,7 +64,7 @@ public class JDBCExecuteOp extends JDBCOp {
logger.debug(() -> ">>>>>>>>>>Total number of rows processed is [" + finalCountResults + "]<<<<<<<<<<");
}
this.getConnection().commit();
logger.info(() -> "Executed the JDBC statement & committed the connection successfully");
logger.debug(() -> "Executed the JDBC statement & committed the connection successfully");
} catch (SQLException sqlException) {
logger.error("ERROR: { state => {}, cause => {}, message => {} }\n",
sqlException.getSQLState(), sqlException.getCause(), sqlException.getMessage(), sqlException);

View File

@@ -1,54 +0,0 @@
/*
* Copyright (c) 2023 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.nosqlbench.adapter.jdbc.optypes;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
public class JDBCExecuteQueryOp extends JDBCOp {
    private static final Logger logger = LogManager.getLogger(JDBCExecuteQueryOp.class);

    /**
     * A runnable op that executes a SELECT-style statement via
     * {@code Statement.executeQuery} and commits the (non-auto-commit) transaction.
     *
     * @param connection  the JDBC connection to run against
     * @param statement   the statement object used to execute the query
     * @param queryString the SQL text to execute
     */
    public JDBCExecuteQueryOp(Connection connection, Statement statement, String queryString) {
        super(connection, statement, queryString);
    }

    @Override
    public void run() {
        logger.debug(() -> "Executing JDBCExecuteQueryOp for the given cycle.");
        try {
            this.getConnection().setAutoCommit(false);
            logger.debug(() -> "JDBC Query is: " + this.getQueryString());
            ResultSet resultSet = this.getStatement().executeQuery(this.getQueryString());
            // Fix: the original logged resultSet.toString() as the record count, which is
            // just an object identity string. Count the rows instead. We must iterate
            // BEFORE commit(): with CLOSE_CURSORS_AT_COMMIT holdability the ResultSet is
            // closed by the commit and would no longer be readable.
            int rowCount = 0;
            while (resultSet.next()) {
                rowCount++;
            }
            this.getConnection().commit();
            final int finalRowCount = rowCount;
            logger.debug(() -> "Executed the JDBC statement & committed the connection successfully fetching a total of " + finalRowCount + " records.");
        } catch (SQLException sqlException) {
            logger.error("JDBCExecuteQueryOp ERROR: { state => {}, cause => {}, message => {} }\n",
                sqlException.getSQLState(), sqlException.getCause(), sqlException.getMessage(), sqlException);
            throw new RuntimeException(sqlException);
        } catch (Exception ex) {
            String exMsg = String.format("Exception while attempting to run the jdbc statement %s", getStatement());
            logger.error(exMsg, ex);
            throw new RuntimeException(exMsg, ex);
        }
    }
}

View File

@@ -1,52 +0,0 @@
/*
* Copyright (c) 2023 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.nosqlbench.adapter.jdbc.optypes;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import java.sql.Connection;
import java.sql.SQLException;
import java.sql.Statement;
public class JDBCExecuteUpdateOp extends JDBCOp {
    private static final Logger logger = LogManager.getLogger(JDBCExecuteUpdateOp.class);

    /**
     * A runnable op that executes a DML statement (INSERT/UPDATE/DELETE) via
     * {@code Statement.executeUpdate} and commits the (non-auto-commit) transaction.
     *
     * @param connection  the JDBC connection to run against
     * @param statement   the statement object used to execute the update
     * @param queryString the SQL text to execute
     */
    public JDBCExecuteUpdateOp(Connection connection, Statement statement, String queryString) {
        super(connection, statement, queryString);
    }

    @Override
    public void run() {
        logger.debug(() -> "Executing JDBCExecuteUpdateOp for the given cycle.");
        try {
            final Connection conn = this.getConnection();
            final String query = this.getQueryString();
            // Updates are managed as an explicit transaction: disable auto-commit,
            // execute, then commit.
            conn.setAutoCommit(false);
            logger.debug(() -> "JDBC Query is: " + query);
            final int rowsAffected = this.getStatement().executeUpdate(query);
            conn.commit();
            logger.debug(() -> "Executed the JDBC statement & committed the connection successfully impacting a total of " + rowsAffected + " records.");
        } catch (SQLException sqlException) {
            // Surface driver-level detail (SQLSTATE, cause chain) before rethrowing.
            logger.error("JDBCExecuteUpdateOp ERROR: { state => {}, cause => {}, message => {} }\n",
                sqlException.getSQLState(), sqlException.getCause(), sqlException.getMessage(), sqlException);
            throw new RuntimeException(sqlException);
        } catch (Exception ex) {
            final String exMsg = String.format("Exception while attempting to run the jdbc statement %s", getStatement());
            logger.error(exMsg, ex);
            throw new RuntimeException(exMsg, ex);
        }
    }
}

View File

@@ -33,9 +33,10 @@ import java.sql.Statement;
* https://www.cockroachlabs.com/docs/v22.2/connection-parameters#supported-options-parameters
* https://www.cockroachlabs.com/docs/v22.2/sql-statements.html#query-management-statements
* https://docs.yugabyte.com/preview/drivers-orms/java/yugabyte-jdbc/
*
* @see <a href="https://github.com/brettwooldridge/HikariCP">HikariCP connection pooling</a> for details.
*/
public abstract class JDBCOp implements RunnableOp/*CycleOp<Object>*/ {
public abstract class JDBCOp implements RunnableOp {
private final static Logger logger = LogManager.getLogger(JDBCOp.class);
private final Connection connection;
@@ -55,7 +56,6 @@ public abstract class JDBCOp implements RunnableOp/*CycleOp<Object>*/ {
}
/**
*
* @param connection
* @param statement
* @param queryString

View File

@@ -1,4 +1,4 @@
# run driver=jdbc workload="/path/to/cockroachdb-keyvalue.yaml" tags="block:schema" threads=1 cycles=4 url="jdbc:postgresql://insectdb0-3153.g8z.cockroachlabs.cloud:26257/defaultdb?sslmode=verify-ca&password=CHANGE_ME&user=newuser" databaseName="defaultdb" portNumber=26257 user="newuser" password="CHANGE_ME" sslmode="verify-full" serverName="insectdb" sslrootcert="/path/to/postgresql_certs/root.crt" -vv --show-stacktraces
# run driver=jdbc workload="/path/to/cockroachdb-keyvalue.yaml" tags="block:schema" threads=AUTO cycles=4 url="jdbc:postgresql://crdb0-3153.g8z.cockroachlabs.cloud:26257/defaultdb?sslmode=verify-ca&password=CHANGE_ME&user=newuser" databaseName="defaultdb" portNumber=26257 user="newuser" password="CHANGE_ME" sslmode="verify-full" serverName=insectdb sslrootcert="/path/to/postgresql_certs/root.crt" -vv --show-stacktraces
min_version: "5.17.1"
description: |
@@ -7,11 +7,13 @@ description: |
scenarios:
default:
schema: run driver=jdbc tags=="block:schema" threads==1 cycles==4 url="jdbc:postgresql://insectdb0-3153.g8z.cockroachlabs.cloud" user="newuser" password="CHANGE_ME" sslmode="verify-ca"
schema: run driver=jdbc workload="/path/to/cockroachdb-keyvalue.yaml" tags==block:schema threads=1 cycles==UNDEF url="jdbc:postgresql://crdb0-3153.g8z.cockroachlabs.cloud:26257/defaultdb?sslmode=verify-ca&password=CHANGE_ME&user=newuser" databaseName="defaultdb" portNumber=26257 user="newuser" password="CHANGE_ME" sslmode="verify-full" serverName="crdb" sslrootcert="/path/to/postgresql_certs/root.crt"
rampup: run driver=jdbc workload="/path/to/cockroachdb-keyvalue.yaml" tags==block:rampup threads=AUTO cycles===TEMPLATE(rampup-cycles,10000000) url="jdbc:postgresql://crdb0-3153.g8z.cockroachlabs.cloud:26257/defaultdb?sslmode=verify-ca&password=CHANGE_ME&user=newuser" databaseName="defaultdb" portNumber=26257 user="newuser" password="CHANGE_ME" sslmode="verify-full" serverName="crdb" sslrootcert="/path/to/postgresql_certs/root.crt"
main: run driver=jdbc workload="/path/to/cockroachdb-keyvalue.yaml" tags=="block:main.*" threads=AUTO cycles===TEMPLATE(main-cycles,10000000) url="jdbc:postgresql://crdb0-3153.g8z.cockroachlabs.cloud:26257/defaultdb?sslmode=verify-ca&password=CHANGE_ME&user=newuser" databaseName="defaultdb" portNumber=26257 user="newuser" password="CHANGE_ME" sslmode="verify-full" serverName="crdb" sslrootcert="/path/to/postgresql_certs/root.crt"
bindings:
seq_key: Mod(<<keycount:1000000000>>); ToString() -> String
seq_value: Hash(); Mod(<<valuecount:1000000000>>); ToString() -> String
seq_key: Mod(TEMPLATE(keycount,1000000000)); ToString() -> String
seq_value: Hash(); Mod(TEMPLATE(valuecount,1000000000)); ToString() -> String
rw_key: <<keydist:Uniform(0,1000000000)->int>>; ToString() -> String
rw_value: Hash(); <<valdist:Uniform(0,1000000000)->int>>; ToString() -> String
@@ -19,37 +21,38 @@ blocks:
schema:
ops:
drop-database:
ddl: |
jdbcQuery: |
DROP DATABASE IF EXISTS TEMPLATE(database,baselines);
create-database:
create: |
jdbcQuery: |
CREATE DATABASE IF NOT EXISTS TEMPLATE(database,baselines);
drop-table:
drop: |
jdbcQuery: |
DROP TABLE IF EXISTS TEMPLATE(database,baselines).TEMPLATE(table,keyvalue);
create-table:
create: |
jdbcQuery: |
CREATE TABLE IF NOT EXISTS TEMPLATE(database,baselines).TEMPLATE(table,keyvalue) (key STRING PRIMARY KEY, value STRING);
rampup:
params:
ops:
rampup-insert: |
INSERT INTO TEMPLATE(database,baselines).TEMPLATE(table,keyvalue)
(key, value)
VALUES ({seq_key},{seq_value});
rampup-insert:
jdbcQuery: |
INSERT INTO TEMPLATE(database,baselines).TEMPLATE(table,keyvalue)
(key, value) VALUES ({seq_key},{seq_value});
main-read:
params:
ratio: 5
cl: TEMPLATE(read_cl,LOCAL_QUORUM)
statements:
main-select: |
SELECT * FROM TEMPLATE(keyspace,baselines).TEMPLATE(table,keyvalue) WHERE key={rw_key};
ratio: TEMPLATE(read_ratio,5)
ops:
main-select:
jdbcQuery: |
SELECT * FROM TEMPLATE(keyspace,baselines).TEMPLATE(table,keyvalue) WHERE key='{rw_key}';
main-write:
params:
ratio: 5
cl: TEMPLATE(write_cl,LOCAL_QUORUM)
statements:
main-insert: |
INSERT INTO TEMPLATE(keyspace,baselines).TEMPLATE(table,keyvalue)
(key, value) VALUES ({rw_key}, {rw_value});
ratio: TEMPLATE(write_ratio,5)
ops:
main-insert:
jdbcQuery: |
INSERT INTO TEMPLATE(keyspace,baselines).TEMPLATE(table,keyvalue)
(key, value) VALUES ('{rw_key}', '{rw_value}');

View File

@@ -0,0 +1,137 @@
min_version: "5.17.1"
description: |
A tabular workload with partitions, clusters, and data fields
This workload contains partitioning and cluster along with a set
of 8 fields of varying length. The field values vary in size according
to the fibonacci sequence times a base size factor of 10, with
an additional 10% variance for each field.
The read patterns have a variety of field subsets specified.
During rampup, all rows will be written partition by partition,
filling in all rows of that partition before moving on to the next.
Example: With a partition size of 1000 and 1B rows, there will be
1000000 partitions.
During main phase, the read patterns are varied with different
field sets. As well, the number of rows which will be returned
is varied between 1 and 10.
By default, reads occur at the same ratio as writes, with main
phase writes writing full rows.
You can bulk up the size of the payloads by 10x with addzeroes='0',
by 100x with addzeroes='00', and so on, but if you want to go higher
than 100x, you'll need to modify the workload with a larger reference
file in the HashedFileExtractToString(...) bindings.
scenarios:
default:
schema: run driver=jdbc tags==block:schema threads==1 cycles==UNDEF
rampup: run driver=jdbc tags==block:rampup cycles===TEMPLATE(rampup-cycles,10B) threads=auto
main: run driver=jdbc tags==block:"main.*" cycles===TEMPLATE(main-cycles,100M) threads=auto
params:
instrument: true
bindings:
# for ramp-up and verify phases
#
part_layout: Div(<<partsize:1000>>); ToString() -> String
clust_layout: Mod(<<partsize:1000>>); ToString() -> String
# todo: update these definitions to use the simpler 10,0.1, 20, 0.2, ...
data0: Add(10); HashedFileExtractToString('data/lorem_ipsum_full.txt',9TEMPLATE(addzeroes,),11TEMPLATE(addzeroes,))
data1: Add(20); HashedFileExtractToString('data/lorem_ipsum_full.txt',18TEMPLATE(addzeroes,),22TEMPLATE(addzeroes,))
data2: Add(30); HashedFileExtractToString('data/lorem_ipsum_full.txt',27TEMPLATE(addzeroes,),33TEMPLATE(addzeroes,))
data3: Add(40); HashedFileExtractToString('data/lorem_ipsum_full.txt',45TEMPLATE(addzeroes,),55TEMPLATE(addzeroes,))
data4: Add(50); HashedFileExtractToString('data/lorem_ipsum_full.txt',72TEMPLATE(addzeroes,),88TEMPLATE(addzeroes,))
data5: Add(60); HashedFileExtractToString('data/lorem_ipsum_full.txt',107TEMPLATE(addzeroes,),143TEMPLATE(addzeroes,))
data6: Add(70); HashedFileExtractToString('data/lorem_ipsum_full.txt',189TEMPLATE(addzeroes,),231TEMPLATE(addzeroes,))
data7: Add(80); HashedFileExtractToString('data/lorem_ipsum_full.txt',306TEMPLATE(addzeroes,),374TEMPLATE(addzeroes,))
# for main phase
# for write
part_write: Hash(); Uniform(0,TEMPLATE(partcount,100))->int; ToString() -> String
clust_write: Hash(); Add(1); Uniform(0,TEMPLATE(partsize,1000000))->int; ToString() -> String
data_write: Hash(); HashedFileExtractToString('data/lorem_ipsum_full.txt',50,150) -> String
# for read
limit: Uniform(1,10) -> int
part_read: Uniform(0,TEMPLATE(partcount,100))->int; ToString() -> String
clust_read: Add(1); Uniform(0,TEMPLATE(partsize,1000000))->int; ToString() -> String
blocks:
schema:
params:
prepared: false
ops:
#drop-database:
# jdbcQuery: |
# DROP DATABASE IF EXISTS TEMPLATE(database,baselines);
create-database:
jdbcQuery: |
CREATE DATABASE IF NOT EXISTS TEMPLATE(database,baselines);
drop-table:
jdbcQuery: |
DROP TABLE IF EXISTS TEMPLATE(database,baselines).TEMPLATE(table,tabular);
create-table:
jdbcQuery: |
CREATE TABLE IF NOT EXISTS TEMPLATE(keyspace,baselines).TEMPLATE(table,tabular) (
part STRING,
clust STRING,
data0 STRING, data1 STRING, data2 STRING, data3 STRING,
data4 STRING, data5 STRING, data6 STRING, data7 STRING,
PRIMARY KEY (part,clust)
);
rampup:
params:
ops:
rampup-insert:
jdbcQuery: |
INSERT INTO TEMPLATE(keyspace,baselines).TEMPLATE(table,tabular)
(part,clust,data0,data1,data2,data3,data4,data5,data6,data7)
VALUES ('{part_layout}','{clust_layout}','{data0}','{data1}','{data2}','{data3}','{data4}','{data5}','{data6}','{data7}')
verify:
params:
cl: TEMPLATE(read_cl,LOCAL_QUORUM)
ops:
verify-select:
jdbcQuery: |
SELECT * FROM TEMPLATE(keyspace,baselines).TEMPLATE(table,tabular) WHERE part='{part_layout}' AND clust='{clust_layout}'
main-read:
params:
ratio: TEMPLATE(read_ratio,1)
ops:
main-select-all:
jdbcQuery: |
SELECT * FROM TEMPLATE(keyspace,baselines).TEMPLATE(table,tabular) WHERE part='{part_read}' LIMIT {limit};
main-select-01:
jdbcQuery: |
SELECT data0,data1 from TEMPLATE(keyspace,baselines).TEMPLATE(table,tabular) WHERE part='{part_read}' LIMIT {limit};
main-select-0246:
jdbcQuery: |
SELECT data0,data2,data4,data6 FROM TEMPLATE(keyspace,baselines).TEMPLATE(table,tabular) WHERE part='{part_read}' LIMIT {limit};
main-select-1357:
jdbcQuery: |
SELECT data1,data3,data5,data7 FROM TEMPLATE(keyspace,baselines).TEMPLATE(table,tabular) WHERE part='{part_read}' LIMIT {limit};
main-select-0123:
jdbcQuery: |
SELECT data0,data1,data2,data3 FROM TEMPLATE(keyspace,baselines).TEMPLATE(table,tabular) WHERE part='{part_read}' LIMIT {limit};
main-select-4567:
jdbcQuery: |
SELECT data4,data5,data6,data7 FROM TEMPLATE(keyspace,baselines).TEMPLATE(table,tabular) WHERE part='{part_read}' LIMIT {limit};
main-select-67:
jdbcQuery: |
SELECT data6,data7 FROM TEMPLATE(keyspace,baselines).TEMPLATE(table,tabular) WHERE part='{part_read}' LIMIT {limit};
main-select:
jdbcQuery: |
SELECT data0,data1,data2,data3,data4,data5,data6,data7 FROM TEMPLATE(keyspace,baselines).TEMPLATE(table,tabular) WHERE part='{part_read}' LIMIT {limit};
main-write:
params:
ratio: TEMPLATE(write_ratio,8)
ops:
main-write:
jdbcQuery: |
INSERT INTO TEMPLATE(keyspace,baselines).TEMPLATE(table,tabular)
(part, clust, data0,data1,data2,data3,data4,data5,data6,data7)
VALUES ('{part_write}','{clust_write}','{data0}','{data1}','{data2}','{data3}','{data4}','{data5}','{data6}','{data7}')

View File

@@ -0,0 +1,80 @@
# java -jar nb5.jar cockroachdb-timeseries default -vv --show-stacktraces
min_version: "5.17.1"
description: |
This workload emulates a time-series data model and access patterns.
scenarios:
default:
schema: run driver=jdbc tags==block:schema threads==1 cycles==UNDEF url="jdbc:postgresql://crdb0-3153.g8z.cockroachlabs.cloud:26257/defaultdb?sslmode=verify-full&password=CHANGE_ME&user=newuser" databaseName="defaultdb" portNumber=26257 user="newuser" password="CHANGE_ME" sslmode="verify-full" serverName="crdb" sslrootcert="/path/to/postgresql_certs/root.crt"
rampup: run driver=jdbc tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto url="jdbc:postgresql://crdb0-3153.g8z.cockroachlabs.cloud:26257/defaultdb?sslmode=verify-full&password=CHANGE_ME&user=newuser" databaseName="defaultdb" portNumber=26257 user="newuser" password="CHANGE_ME" sslmode="verify-full" serverName="crdb" sslrootcert="/path/to/postgresql_certs/root.crt"
main: run driver=jdbc tags==block:"main.*" cycles===TEMPLATE(main-cycles,10000000) threads=auto url="jdbc:postgresql://crdb0-3153.g8z.cockroachlabs.cloud:26257/defaultdb?sslmode=verify-full&password=CHANGE_ME&user=newuser" databaseName="defaultdb" portNumber=26257 user="newuser" password="CHANGE_ME" sslmode="verify-full" serverName="crdb" sslrootcert="/path/to/postgresql_certs/root.crt"
params:
instrument: TEMPLATE(instrument,false)
bindings:
machine_id: Mod(TEMPLATE(sources,10000)); ToHashedUUID() -> java.util.UUID
sensor_name: HashedLineToString('data/variable_words.txt')
time: Mul(TEMPLATE(timespeed,100)L); Div(TEMPLATE(sources,10000)L); ToJavaInstant()
cell_timestamp: Mul(TEMPLATE(timespeed,100)L); Div(TEMPLATE(sources,10000)L); Mul(1000L)
sensor_value: Normal(0.0,5.0); Add(100.0) -> double
station_id: Div(TEMPLATE(sources,10000));Mod(TEMPLATE(stations,100)); ToHashedUUID() -> java.util.UUID
data: HashedFileExtractToString('data/lorem_ipsum_full.txt',800TEMPLATE(addzeroes,),1200TEMPLATE(addzeroes,))
blocks:
schema:
params:
ops:
drop-database:
jdbcQuery: |
DROP DATABASE IF EXISTS TEMPLATE(database,baselines);
create-database:
jdbcQuery: |
CREATE DATABASE IF NOT EXISTS TEMPLATE(database,baselines);
drop-table:
jdbcQuery: |
DROP TABLE IF EXISTS TEMPLATE(keyspace,baselines).TEMPLATE(table,iot);
create-table:
jdbcQuery: |
CREATE TABLE IF NOT EXISTS TEMPLATE(keyspace,baselines).TEMPLATE(table,iot) (
machine_id UUID,
sensor_name STRING,
time TIMESTAMP,
sensor_value FLOAT,
station_id UUID,
data STRING,
PRIMARY KEY (machine_id, sensor_name, time)
);
rampup:
params:
ops:
insert-rampup:
jdbcQuery: |
INSERT INTO TEMPLATE(keyspace,baselines).TEMPLATE(table,iot)
(machine_id, sensor_name, time, sensor_value, station_id, data)
VALUES ('{machine_id}', '{sensor_name}', '{time}', {sensor_value}, '{station_id}', '{data}')
#using timestamp {cell_timestamp}
main-read:
params:
ratio: TEMPLATE(read_ratio,1)
ops:
select-read:
jdbcQuery: |
SELECT * FROM TEMPLATE(keyspace,baselines).TEMPLATE(table,iot)
WHERE machine_id='{machine_id}' and sensor_name='{sensor_name}'
LIMIT TEMPLATE(limit,10)
main-write:
params:
ratio: TEMPLATE(write_ratio,9)
ops:
insert-main:
jdbcQuery: |
INSERT INTO TEMPLATE(keyspace,baselines).TEMPLATE(table,iot)
(machine_id, sensor_name, time, sensor_value, station_id, data)
VALUES ('{machine_id}', '{sensor_name}', '{time}', {sensor_value}, '{station_id}', '{data}')
#using timestamp {cell_timestamp}

View File

@@ -8,7 +8,7 @@ This JDBC driver leverages [Hikari Connection Pool](https://github.com/brettwool
# Executing JDBC Workload
The following is an example of invoking a JDBC workload.
```yaml
```shell
<nb_cmd> run driver=jdbc workload="/path/to/workload.yaml" cycles=1000 threads=100 url="jdbc:postgresql://" serverName=localhost portNumber=5432 databaseName=defaultdb ... -vv --show-stacktraces
```
In the above NB command, following are JDBC driver specific parameters:
@@ -18,17 +18,14 @@ In the above NB command, following are JDBC driver specific parameters:
* `serverName`: The host name of the server running the database.
Other NB engine parameters are straight forward:
* `driver`: must be `jdbc`
* `driver`: *must* be `jdbc`.
* `threads`: depending on the workload type, the NB thread number determines how many clients will be created. All the clients will share the Connection originated from the Hikari Connection Pool.
* `*.yaml`: the NB jdbc scenario definition workload yaml file
* `*.yaml`: the NB jdbc scenario definition workload yaml file.
* `<nb_cmd>`: is `./nb` (using binary) or the `java -jar nb5.jar`.
# Configuration
There are three main configuration with which we could issue a query and process the results back based on the [PostgreSQL® Query](https://jdbc.postgresql.org/documentation/query/) pattern.
There is now a single main configuration with which we can issue a query and process the results, based on the [PostgreSQL® Query](https://jdbc.postgresql.org/documentation/query/) pattern.
## Config Sources
* `execute`: This is to issue any DDL statements such `CREATE DATABASE|TABLE` or `DROP DATABASE|TABLE` operations which returns nothing.
* `executeUpdate`: This is to issue DML statements such as `INSERT|UPDATE|DELETE` operations that will return how many number of rows were affected by that operation.
* `executeQuery`: This is to issue DML statement such as `SELECT` operation which would return a `ResultSet` object to process.
* `jdbcQuery`: This single op type handles all statement forms: DDL statements such as `CREATE DATABASE|TABLE` or `DROP DATABASE|TABLE` (which return nothing), DML statements such as `INSERT|UPDATE|DELETE` (which return the number of rows affected by the operation), and query statements such as `SELECT` (which return a `ResultSet` object to process).
### Examples
Check out the default activities under the [activities.baselinesv2](./activities.baselinesv2) directory.