Merge remote-tracking branch 'origin/main' into nosqlbench-948-testcontainers

MikeYaacoubStax 2023-02-10 08:26:37 -05:00
commit 67ba6e270d
19 changed files with 1199 additions and 4 deletions

adapter-jdbc/pom.xml Normal file

@@ -0,0 +1,74 @@
<!--
~ Copyright (c) 2023 nosqlbench
~
~ Licensed under the Apache License, Version 2.0 (the "License");
~ you may not use this file except in compliance with the License.
~ You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>io.nosqlbench</groupId>
<artifactId>mvn-defaults</artifactId>
<version>${revision}</version>
<relativePath>../mvn-defaults</relativePath>
</parent>
<artifactId>adapter-jdbc</artifactId>
<packaging>jar</packaging>
<name>${project.artifactId}</name>
<description>
A JDBC driver for nosqlbench. This provides the ability to inject synthetic data
into a PostgreSQL®-compatible database, leveraging HikariCP.
</description>
<dependencies>
<!-- core dependencies -->
<dependency>
<groupId>io.nosqlbench</groupId>
<artifactId>adapters-api</artifactId>
<version>${revision}</version>
</dependency>
<dependency>
<groupId>io.nosqlbench</groupId>
<artifactId>engine-api</artifactId>
<version>${revision}</version>
</dependency>
<dependency>
<groupId>io.nosqlbench</groupId>
<artifactId>nb-annotations</artifactId>
<version>${revision}</version>
<scope>compile</scope>
</dependency>
<!-- https://search.maven.org/artifact/org.postgresql/postgresql -->
<dependency>
<groupId>org.postgresql</groupId>
<artifactId>postgresql</artifactId>
<version>42.5.2</version>
</dependency>
<!-- https://search.maven.org/artifact/com.zaxxer/HikariCP -->
<dependency>
<groupId>com.zaxxer</groupId>
<artifactId>HikariCP</artifactId>
<version>5.0.1</version>
</dependency>
</dependencies>
<build>
<plugins>
</plugins>
</build>
</project>

adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/JDBCDriverAdapter.java Normal file

@@ -0,0 +1,52 @@
/*
* Copyright (c) 2023 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.nosqlbench.adapter.jdbc;
import io.nosqlbench.adapter.jdbc.optypes.JDBCOp;
import io.nosqlbench.api.config.standard.NBConfigModel;
import io.nosqlbench.api.config.standard.NBConfiguration;
import io.nosqlbench.engine.api.activityimpl.OpMapper;
import io.nosqlbench.engine.api.activityimpl.uniform.BaseDriverAdapter;
import io.nosqlbench.engine.api.activityimpl.uniform.DriverAdapter;
import io.nosqlbench.engine.api.activityimpl.uniform.DriverSpaceCache;
import io.nosqlbench.nb.annotations.Service;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import java.util.function.Function;
@Service(value = DriverAdapter.class, selector = "jdbc")
public class JDBCDriverAdapter extends BaseDriverAdapter<JDBCOp, JDBCSpace> {
private final static Logger logger = LogManager.getLogger(JDBCDriverAdapter.class);
@Override
public OpMapper<JDBCOp> getOpMapper() {
DriverSpaceCache<? extends JDBCSpace> spaceCache = getSpaceCache();
NBConfiguration adapterConfig = getConfiguration();
return new JDBCOpMapper(this, adapterConfig, spaceCache);
}
@Override
public Function<String, ? extends JDBCSpace> getSpaceInitializer(NBConfiguration cfg) {
return (s) -> new JDBCSpace(s, cfg);
}
@Override
public NBConfigModel getConfigModel() {
return super.getConfigModel().add(JDBCSpace.getConfigModel());
}
}

adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/JDBCOpMapper.java Normal file

@@ -0,0 +1,86 @@
/*
* Copyright (c) 2023 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.nosqlbench.adapter.jdbc;
import io.nosqlbench.adapter.jdbc.opdispensers.JDBCExecuteOpDispenser;
import io.nosqlbench.adapter.jdbc.opdispensers.JDBCExecuteQueryOpDispenser;
import io.nosqlbench.adapter.jdbc.optypes.JDBCOp;
import io.nosqlbench.api.config.standard.NBConfiguration;
import io.nosqlbench.engine.api.activityimpl.OpDispenser;
import io.nosqlbench.engine.api.activityimpl.OpMapper;
import io.nosqlbench.engine.api.activityimpl.uniform.DriverAdapter;
import io.nosqlbench.engine.api.activityimpl.uniform.DriverSpaceCache;
import io.nosqlbench.engine.api.templating.ParsedOp;
import io.nosqlbench.engine.api.templating.TypeAndTarget;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import java.sql.Connection;
import java.util.function.LongFunction;
public class JDBCOpMapper implements OpMapper<JDBCOp> {
private final static Logger logger = LogManager.getLogger(JDBCOpMapper.class);
private final DriverAdapter adapter;
private final NBConfiguration cfg;
private final DriverSpaceCache<? extends JDBCSpace> spaceCache;
public JDBCOpMapper(DriverAdapter adapter, NBConfiguration cfg, DriverSpaceCache<? extends JDBCSpace> spaceCache) {
this.adapter = adapter;
this.cfg = cfg;
this.spaceCache = spaceCache;
}
@Override
public OpDispenser<? extends JDBCOp> apply(ParsedOp op) {
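// Each op template may name a 'space' (a cached connection scope); ops which do not set one share the default space.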
LongFunction<String> spaceNameF = op.getAsFunctionOr("space", "default");
LongFunction<JDBCSpace> spaceFunc = l -> spaceCache.get(spaceNameF.apply(l));
// Since the only needed thing in the JDBCSpace is the Connection, we can short-circuit
// to it here instead of stepping down from the cycle to the space to the connection.
LongFunction<Connection> connectionLongFunc = l -> spaceCache.get(spaceNameF.apply(l)).getConnection();
/*
* If the user provides a body element, then they want to provide the JSON or
* a data structure that can be converted into JSON, bypassing any further
* specialized type-checking or op-type specific features
*/
if (op.isDefined("body")) {
throw new RuntimeException("This mode is reserved for later. Do not use the 'body' op field.");
} else {
TypeAndTarget<JDBCOpType, String> opType = op.getTypeAndTarget(JDBCOpType.class, String.class, "type", "stmt");
logger.info(() -> "Using " + opType.enumId + " statement form for '" + op.getName() + "'");
return switch (opType.enumId) {
// SELECT uses 'executeQuery' and returns a 'ResultSet'
// https://jdbc.postgresql.org/documentation/query/#example51processing-a-simple-query-in-jdbc
case query ->
new JDBCExecuteQueryOpDispenser(adapter, connectionLongFunc, op, opType.targetFunction);
// INSERT|UPDATE|DELETE uses 'executeUpdate' and returns an 'int'
// https://jdbc.postgresql.org/documentation/query/#performing-updates
// CREATE|DROP TABLE|VIEW uses 'execute' (as opposed to 'executeQuery' which returns a 'ResultSet')
// https://jdbc.postgresql.org/documentation/query/#example54dropping-a-table-in-jdbc
case execute, update ->
new JDBCExecuteOpDispenser(adapter, connectionLongFunc, op, opType.targetFunction);
};
}
}
}

adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/JDBCOpType.java Normal file

@@ -0,0 +1,31 @@
/*
* Copyright (c) 2023 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.nosqlbench.adapter.jdbc;
/**
* Op templates supported by the NoSQLBench JDBC driver are enumerated below.
* As an example, these command names mirror the statement categories in the
* official CockroachDB SQL reference. See the official documentation for more details.
*
* @see <a href="https://www.cockroachlabs.com/docs/v22.2/sql-statements.html#data-definition-statements">CockroachDB API Reference</a>
*/
public enum JDBCOpType {
//See https://jdbc.postgresql.org/documentation/query/
execute, // Used for CREATE|DROP DATABASE|TABLE operation. Returns nothing.
query, // Used for SELECT operation. Returns a ResultSet object.
update // Used for updating records such as INSERT|UPDATE|DELETE. Returns the number of rows affected.
}

adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/JDBCSpace.java Normal file

@@ -0,0 +1,252 @@
/*
* Copyright (c) 2023 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.nosqlbench.adapter.jdbc;
import com.zaxxer.hikari.HikariConfig;
import com.zaxxer.hikari.HikariDataSource;
import io.nosqlbench.api.config.standard.ConfigModel;
import io.nosqlbench.api.config.standard.NBConfigModel;
import io.nosqlbench.api.config.standard.NBConfiguration;
import io.nosqlbench.api.config.standard.Param;
import io.nosqlbench.api.errors.OpConfigError;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import java.sql.Connection;
import java.util.Optional;
public class JDBCSpace implements AutoCloseable {
private final static Logger logger = LogManager.getLogger(JDBCSpace.class);
private final String spaceName;
private HikariConfig hikariConfig;
private HikariDataSource hikariDataSource;
private Connection connection;
public JDBCSpace(String spaceName, NBConfiguration cfg) {
this.spaceName = spaceName;
this.hikariDataSource = createClient(cfg);
}
public Connection getConnection() {
return this.connection;
}
public HikariDataSource getHikariDataSource() {
return this.hikariDataSource;
}
private HikariDataSource createClient(NBConfiguration cfg) {
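// Build the HikariCP configuration from the adapter's activity parameters, then open the pool and a shared connection.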
hikariConfig = new HikariConfig();
hikariConfig.setJdbcUrl(cfg.get("url"));
hikariConfig.addDataSourceProperty("serverName", cfg.get("serverName"));
Optional<String> databaseName = cfg.getOptional("databaseName");
if (databaseName.isPresent()) {
hikariConfig.addDataSourceProperty("databaseName", databaseName.get());
}
int portNumber = Integer.parseInt(cfg.get("portNumber"));
hikariConfig.addDataSourceProperty("portNumber", portNumber);
Optional<String> user = cfg.getOptional("user");
if (user.isPresent()) {
hikariConfig.setUsername(user.get());
}
Optional<String> password = cfg.getOptional("password");
if (password.isPresent()) {
if (user.isEmpty()) {
throw new OpConfigError("Both user and password options are required. Only password is supplied in this case.");
}
hikariConfig.setPassword(password.get());
} else {
if (user.isPresent()) {
throw new OpConfigError("Both user and password options are required. Only user is supplied in this case.");
}
}
Optional<Boolean> ssl = cfg.getOptional(Boolean.class, "ssl");
hikariConfig.addDataSourceProperty("ssl", ssl.orElse(false));
Optional<String> sslMode = cfg.getOptional("sslmode");
if (sslMode.isPresent()) {
hikariConfig.addDataSourceProperty("sslmode", sslMode.get());
} else {
hikariConfig.addDataSourceProperty("sslmode", "prefer");
}
Optional<String> sslCert = cfg.getOptional("sslcert");
if (sslCert.isPresent()) {
hikariConfig.addDataSourceProperty("sslcert", sslCert.get());
} /*else if(sslMode.isPresent() && (!"disable".equalsIgnoreCase(sslMode.get()) || !"allow".equalsIgnoreCase(sslMode.get())) || !"prefer".equalsIgnoreCase(sslMode.get())) {
throw new OpConfigError("When sslmode is true, sslcert should be provided.");
}*/
Optional<String> sslRootCert = cfg.getOptional("sslrootcert");
if (sslRootCert.isPresent()) {
hikariConfig.addDataSourceProperty("sslrootcert", sslRootCert.get());
}
hikariConfig.addDataSourceProperty("applicationName", cfg.get("applicationName"));
hikariConfig.addDataSourceProperty("rewriteBatchedInserts", cfg.getOrDefault("rewriteBatchedInserts", true));
// We're managing the auto-commit behavior of connections ourselves and hence disabling the auto-commit.
//Optional<Boolean> autoCommit = cfg.getOptional(Boolean.class, "autoCommit");
hikariConfig.setAutoCommit(false);
hikariConfig.setMaximumPoolSize(Integer.parseInt(cfg.get("maximumPoolSize")));
hikariConfig.setKeepaliveTime(Integer.parseInt(cfg.get("keepaliveTime")));
HikariDataSource hds = new HikariDataSource(hikariConfig);
try {
this.connection = hds.getConnection();
// We're taking an opinionated approach here and managing the commit ourselves.
this.getConnection().setAutoCommit(false);
} catch (Exception ex) {
String exp = "Exception occurred while attempting to create a connection using the HikariDataSource";
logger.error(exp, ex);
throw new RuntimeException(exp, ex);
}
return hds;
}
public static NBConfigModel getConfigModel() {
return ConfigModel.of(JDBCSpace.class)
.add(Param.defaultTo("url", "jdbc:postgresql:/")
.setDescription("The connection URL used to connect to the DBMS. Defaults to 'jdbc:postgresql:/'"))
.add(Param.defaultTo("serverName", "localhost")
.setDescription("The host name of the server. Defaults to 'localhost'"))
.add(Param.optional("databaseName")
.setDescription("The database name. The default is to connect to a database with the same name as the user name used to connect to the server."))
// See https://github.com/brettwooldridge/HikariCP/tree/dev#gear-configuration-knobs-baby & https://jdbc.postgresql.org/documentation/use/
.add(Param.defaultTo("portNumber", "5432")
.setDescription("The port number the server is listening on. Defaults to the PostgreSQL® standard port number (5432)."))
.add(Param.optional("user")
.setDescription("The database user on whose behalf the connection is being made."))
.add(Param.optional("password")
.setDescription("The database users password."))
.add(Param.optional("ssl")
.setDescription("Whether to connect using SSL. Default is false."))
.add(Param.optional("sslmode")
.setDescription("Possible values include disable , allow , prefer , require , verify-ca and verify-full." +
" require , allow and prefer all default to a non-validating SSL factory and do not check the validity of the certificate or the host name." +
" verify-ca validates the certificate, but does not verify the hostname." +
" verify-full will validate that the certificate is correct and verify the host connected to has the same hostname as the certificate." +
" Default is prefer."))
.add(Param.optional("sslcert")
.setDescription("Provide the full path for the certificate file. Defaults to defaultdir/postgresql.crt, where defaultdir is ${user.home}/.postgresql/ in *nix systems and %appdata%/postgresql/ on windows."))
.add(Param.optional("sslrootcert")
.setDescription("File name of the SSL root certificate."))
.add(Param.defaultTo("applicationName", "NoSQLBench")
.setDescription("The application name to be used. Default is 'NoSQLBench'."))
.add(Param.optional("rewriteBatchedInserts")
.setDescription("This will change batch inserts from insert into foo (col1, col2, col3) values (1, 2, 3) into insert into foo (col1, col2, col3) values (1, 2, 3), (4, 5, 6) this provides 2-3x performance improvement. " +
"Default is true"))
.add(Param.optional("autoCommit")
.setDescription("This property controls the default auto-commit behavior of connections returned from the pool. " +
"It is a boolean value. Default: false. This cannot be changed."))
.add(Param.optional("connectionTimeout")
.setDescription("See https://github.com/brettwooldridge/HikariCP/tree/dev#gear-configuration-knobs-baby for details. " +
"This property is not exposed and hence cannot be changed."))
.add(Param.optional("idleTimeout")
.setDescription("See https://github.com/brettwooldridge/HikariCP/tree/dev#gear-configuration-knobs-baby for details. " +
"This property is not exposed and hence cannot be changed."))
.add(Param.defaultTo("keepaliveTime", "150000")
.setDescription("See https://github.com/brettwooldridge/HikariCP/tree/dev#gear-configuration-knobs-baby for details. " +
"This property is not exposed and hence cannot be changed."))
.add(Param.optional("maxLifetime")
.setDescription("See https://github.com/brettwooldridge/HikariCP/tree/dev#gear-configuration-knobs-baby for details. " +
"This property is not exposed and hence cannot be changed."))
.add(Param.optional("connectionTestQuery")
.setDescription("See https://github.com/brettwooldridge/HikariCP/tree/dev#gear-configuration-knobs-baby for details. " +
"This property is not exposed and hence cannot be changed."))
.add(Param.optional("minimumIdle")
.setDescription("See https://github.com/brettwooldridge/HikariCP/tree/dev#gear-configuration-knobs-baby for details. " +
"This property is not exposed and hence cannot be changed."))
.add(Param.defaultTo("maximumPoolSize", "40")
.setDescription("See https://github.com/brettwooldridge/HikariCP/tree/dev#gear-configuration-knobs-baby for details. Default value is 40 and cannot be changed."))
.add(Param.optional("metricRegistry")
.setDescription("See https://github.com/brettwooldridge/HikariCP/tree/dev#gear-configuration-knobs-baby for details. " +
"This property is not exposed and hence cannot be changed."))
.add(Param.optional("healthCheckRegistry")
.setDescription("See https://github.com/brettwooldridge/HikariCP/tree/dev#gear-configuration-knobs-baby for details. " +
"This property is not exposed and hence cannot be changed."))
.add(Param.optional("poolName")
.setDescription("See https://github.com/brettwooldridge/HikariCP/tree/dev#gear-configuration-knobs-baby for details. " +
"This property is not exposed and hence cannot be changed."))
.add(Param.optional("initializationFailTimeout")
.setDescription("See https://github.com/brettwooldridge/HikariCP/tree/dev#gear-configuration-knobs-baby for details. " +
"This property is not exposed and hence cannot be changed."))
.add(Param.optional("isolateInternalQueries")
.setDescription("See https://github.com/brettwooldridge/HikariCP/tree/dev#gear-configuration-knobs-baby for details. " +
"This property is not exposed and hence cannot be changed."))
.add(Param.optional("allowPoolSuspension")
.setDescription("See https://github.com/brettwooldridge/HikariCP/tree/dev#gear-configuration-knobs-baby for details. " +
"This property is not exposed and hence cannot be changed."))
.add(Param.optional("readOnly")
.setDescription("See https://github.com/brettwooldridge/HikariCP/tree/dev#gear-configuration-knobs-baby for details. " +
"This property is not exposed and hence cannot be changed."))
.add(Param.optional("registerMbeans")
.setDescription("See https://github.com/brettwooldridge/HikariCP/tree/dev#gear-configuration-knobs-baby for details. " +
"This property is not exposed and hence cannot be changed."))
.add(Param.optional("catalog")
.setDescription("See https://github.com/brettwooldridge/HikariCP/tree/dev#gear-configuration-knobs-baby for details. " +
"This property is not exposed and hence cannot be changed."))
.add(Param.optional("connectionInitSql")
.setDescription("See https://github.com/brettwooldridge/HikariCP/tree/dev#gear-configuration-knobs-baby for details. " +
"This property is not exposed and hence cannot be changed."))
.add(Param.optional("driverClassName")
.setDescription("See https://github.com/brettwooldridge/HikariCP/tree/dev#gear-configuration-knobs-baby for details. " +
"This property is not exposed and hence cannot be changed."))
.add(Param.optional("transactionIsolation")
.setDescription("See https://github.com/brettwooldridge/HikariCP/tree/dev#gear-configuration-knobs-baby for details. " +
"This property is not exposed and hence cannot be changed."))
.add(Param.optional("validationTimeout")
.setDescription("See https://github.com/brettwooldridge/HikariCP/tree/dev#gear-configuration-knobs-baby for details. " +
"This property is not exposed and hence cannot be changed."))
.add(Param.optional("leakDetectionThreshold")
.setDescription("See https://github.com/brettwooldridge/HikariCP/tree/dev#gear-configuration-knobs-baby for details. " +
"This property is not exposed and hence cannot be changed."))
.add(Param.optional("dataSource")
.setDescription("See https://github.com/brettwooldridge/HikariCP/tree/dev#gear-configuration-knobs-baby for details. " +
"This property is not exposed and hence cannot be changed."))
.add(Param.optional("schema")
.setDescription("See https://github.com/brettwooldridge/HikariCP/tree/dev#gear-configuration-knobs-baby for details. " +
"This property is not exposed and hence cannot be changed."))
.add(Param.optional("threadFactory")
.setDescription("See https://github.com/brettwooldridge/HikariCP/tree/dev#gear-configuration-knobs-baby for details. " +
"This property is not exposed and hence cannot be changed."))
.add(Param.optional("scheduledExecutor")
.setDescription("See https://github.com/brettwooldridge/HikariCP/tree/dev#gear-configuration-knobs-baby for details. " +
"This property is not exposed and hence cannot be changed."))
.asReadOnly();
}
@Override
public void close() {
try {
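// Close the shared connection first, then shut down the pool itself.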
this.getConnection().close();
this.getHikariDataSource().close();
} catch (Exception e) {
logger.error("auto-closeable jdbc connection threw exception in jdbc space(" + this.spaceName + "): " + e);
throw new RuntimeException(e);
}
}
}

adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/opdispensers/JDBCBaseOpDispenser.java Normal file

@@ -0,0 +1,64 @@
/*
* Copyright (c) 2023 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.nosqlbench.adapter.jdbc.opdispensers;
import io.nosqlbench.adapter.jdbc.JDBCSpace;
import io.nosqlbench.adapter.jdbc.optypes.JDBCOp;
import io.nosqlbench.engine.api.activityimpl.BaseOpDispenser;
import io.nosqlbench.engine.api.activityimpl.uniform.DriverAdapter;
import io.nosqlbench.engine.api.templating.ParsedOp;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import java.sql.Connection;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.function.LongFunction;
public abstract class JDBCBaseOpDispenser extends BaseOpDispenser<JDBCOp, JDBCSpace> {
private static final Logger logger = LogManager.getLogger(JDBCBaseOpDispenser.class);
protected static final String ERROR_STATEMENT_CREATION = "Error while attempting to create the jdbc statement from the connection";
protected final LongFunction<String> targetFunction;
protected final LongFunction<Connection> connectionLongFunction;
protected final LongFunction<Statement> statementLongFunction;
public JDBCBaseOpDispenser(DriverAdapter<JDBCOp, JDBCSpace> adapter, LongFunction<Connection> connectionLongFunc, ParsedOp op, LongFunction<String> targetFunction) {
super(adapter, op);
this.connectionLongFunction = connectionLongFunc;
this.targetFunction = targetFunction;
this.statementLongFunction = createStmtFunc(op);
}
protected LongFunction<Statement> createStmtFunc(ParsedOp cmd) {
// Statement creation is deferred until apply time, when a connection is actually requested.
return l -> {
try {
return this.connectionLongFunction.apply(l).createStatement();
} catch (SQLException e) {
logger.error(ERROR_STATEMENT_CREATION, e);
throw new RuntimeException(ERROR_STATEMENT_CREATION, e);
}
};
}
}

adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/opdispensers/JDBCExecuteOpDispenser.java Normal file

@@ -0,0 +1,38 @@
/*
* Copyright (c) 2023 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.nosqlbench.adapter.jdbc.opdispensers;
import io.nosqlbench.adapter.jdbc.JDBCSpace;
import io.nosqlbench.adapter.jdbc.optypes.JDBCExecuteOp;
import io.nosqlbench.adapter.jdbc.optypes.JDBCOp;
import io.nosqlbench.engine.api.activityimpl.uniform.DriverAdapter;
import io.nosqlbench.engine.api.templating.ParsedOp;
import java.sql.Connection;
import java.util.function.LongFunction;
public class JDBCExecuteOpDispenser extends JDBCBaseOpDispenser {
public JDBCExecuteOpDispenser(DriverAdapter<JDBCOp, JDBCSpace> adapter, LongFunction<Connection> connectionLongFunc, ParsedOp op, LongFunction<String> targetFunction) {
super(adapter, connectionLongFunc, op, targetFunction);
}
@Override
public JDBCExecuteOp apply(long cycle) {
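// Resolve the connection, statement, and SQL text for this cycle and bundle them into a runnable op.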
return new JDBCExecuteOp(this.connectionLongFunction.apply(cycle), this.statementLongFunction.apply(cycle), targetFunction.apply(cycle));
}
}

adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/opdispensers/JDBCExecuteQueryOpDispenser.java Normal file

@@ -0,0 +1,41 @@
/*
* Copyright (c) 2023 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.nosqlbench.adapter.jdbc.opdispensers;
import io.nosqlbench.adapter.jdbc.JDBCSpace;
import io.nosqlbench.adapter.jdbc.optypes.JDBCExecuteQueryOp;
import io.nosqlbench.adapter.jdbc.optypes.JDBCOp;
import io.nosqlbench.engine.api.activityimpl.uniform.DriverAdapter;
import io.nosqlbench.engine.api.templating.ParsedOp;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import java.sql.Connection;
import java.util.function.LongFunction;
public class JDBCExecuteQueryOpDispenser extends JDBCBaseOpDispenser {
private static final Logger logger = LogManager.getLogger(JDBCExecuteQueryOpDispenser.class);
public JDBCExecuteQueryOpDispenser(DriverAdapter<JDBCOp, JDBCSpace> adapter, LongFunction<Connection> connectionLongFunc, ParsedOp op, LongFunction<String> targetFunction) {
super(adapter, connectionLongFunc, op, targetFunction);
}
@Override
public JDBCExecuteQueryOp apply(long cycle) {
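// Resolve the connection, statement, and SQL text for this cycle and wrap them in a query op.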
return new JDBCExecuteQueryOp(this.connectionLongFunction.apply(cycle), this.statementLongFunction.apply(cycle), targetFunction.apply(cycle));
}
}

adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/optypes/JDBCExecuteOp.java Normal file

@@ -0,0 +1,59 @@
/*
* Copyright (c) 2023 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.nosqlbench.adapter.jdbc.optypes;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import java.sql.Connection;
import java.sql.SQLException;
import java.sql.Statement;
public class JDBCExecuteOp extends JDBCOp {
private static final Logger LOGGER = LogManager.getLogger(JDBCExecuteOp.class);
private static final String LOG_UPDATE_COUNT_ERROR = "Exception occurred while attempting to fetch the update count of the query operation";
private static final String LOG_UPDATE_COUNT = "Executed a normal DDL/DML (non-SELECT) operation. DML query updated [%d] records";
public JDBCExecuteOp(Connection connection, Statement statement, String queryString) {
super(connection, statement, queryString);
}
@Override
public void run() {
try {
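// Statement.execute() returns false when the statement produces an update count (or no result) rather than a ResultSet.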
if (!statement.execute(queryString)) {
LOGGER.debug(() -> {
try {
return String.format(LOG_UPDATE_COUNT, statement.getUpdateCount());
} catch (SQLException e) {
LOGGER.error(LOG_UPDATE_COUNT_ERROR, e);
throw new RuntimeException(LOG_UPDATE_COUNT_ERROR, e);
}
});
}
connection.commit();
LOGGER.debug(() -> LOG_COMMIT_SUCCESS);
} catch (SQLException sqlException) {
String exMsg = String.format("ERROR: [ state => %s, cause => %s, message => %s ]",
sqlException.getSQLState(), sqlException.getCause(), sqlException.getMessage());
LOGGER.error(exMsg, sqlException);
throw new RuntimeException(exMsg, sqlException);
} catch (Exception ex) {
LOGGER.error(LOG_GENERIC_ERROR, ex);
throw new RuntimeException(LOG_GENERIC_ERROR, ex);
}
}
}

adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/optypes/JDBCExecuteQueryOp.java Normal file

@@ -0,0 +1,68 @@
/*
* Copyright (c) 2023 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.nosqlbench.adapter.jdbc.optypes;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.Objects;
public class JDBCExecuteQueryOp extends JDBCOp {
private static final Logger LOGGER = LogManager.getLogger(JDBCExecuteQueryOp.class);
public JDBCExecuteQueryOp(Connection connection, Statement statement, String queryString) {
super(connection, statement, queryString);
}
@Override
public void run() {
try {
boolean isResultSet = statement.execute(queryString);
if (isResultSet) {
int countResults = 0;
ResultSet rs = statement.getResultSet();
Objects.requireNonNull(rs);
// Walk this result set and any subsequent ones, counting the rows returned.
while (rs != null) {
while (rs.next()) {
countResults++;
}
rs = statement.getMoreResults() ? statement.getResultSet() : null;
}
finalResultCount = countResults;
LOGGER.debug(this::logRowsProcessed);
}
connection.commit();
LOGGER.debug(() -> LOG_COMMIT_SUCCESS);
} catch (SQLException sqlException) {
String exMsg = String.format("ERROR: [ state => %s, cause => %s, message => %s ]",
sqlException.getSQLState(), sqlException.getCause(), sqlException.getMessage());
LOGGER.error(exMsg, sqlException);
throw new RuntimeException(exMsg, sqlException);
} catch (Exception ex) {
LOGGER.error(LOG_GENERIC_ERROR, ex);
throw new RuntimeException(LOG_GENERIC_ERROR, ex);
}
}
}

adapter-jdbc/src/main/java/io/nosqlbench/adapter/jdbc/optypes/JDBCOp.java Normal file

@@ -0,0 +1,63 @@
/*
* Copyright (c) 2023 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.nosqlbench.adapter.jdbc.optypes;
import io.nosqlbench.engine.api.activityimpl.uniform.flowtypes.RunnableOp;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import java.sql.Connection;
import java.sql.Statement;
/**
* References:
* https://docs.oracle.com/javase/tutorial/jdbc/basics/gettingstarted.html
* https://docs.oracle.com/javase/17/docs/api/java/sql/package-summary.html
* https://docs.oracle.com/en/java/javase/17/docs/api/java.sql/java/sql/package-summary.html
* https://jdbc.postgresql.org/documentation/query/
* https://www.cockroachlabs.com/docs/v22.2/connection-pooling.html
* https://www.cockroachlabs.com/docs/v22.2/connection-parameters#supported-options-parameters
* https://www.cockroachlabs.com/docs/v22.2/sql-statements.html#query-management-statements
* https://docs.yugabyte.com/preview/drivers-orms/java/yugabyte-jdbc/
*
* @see <a href="https://github.com/brettwooldridge/HikariCP">HikariCP connection pooling</a> for details.
*/
public abstract class JDBCOp implements RunnableOp {
private static final Logger LOGGER = LogManager.getLogger(JDBCOp.class);
protected static final String LOG_COMMIT_SUCCESS = "Executed the JDBC statement & committed the connection successfully";
protected final String LOG_GENERIC_ERROR;
protected final Connection connection;
protected final Statement statement;
protected final String queryString;
protected int finalResultCount;
protected String logRowsProcessed() {
return "Total number of rows processed is [" + finalResultCount + "]";
}
/**
* @param connection the pooled JDBC connection on which the operation executes and commits
* @param statement the JDBC statement created from that connection
* @param queryString the raw SQL text to execute
*/
public JDBCOp(Connection connection, Statement statement, String queryString) {
this.connection = connection;
this.statement = statement;
this.queryString = queryString;
LOG_GENERIC_ERROR = String.format("Exception while attempting to run the jdbc query %s", queryString);
LOGGER.debug(() -> "Query to be executed: " + queryString);
}
}

View File

@@ -0,0 +1,62 @@
# run driver=jdbc workload="/path/to/postgresql-keyvalue.yaml" tags="block:schema" threads=AUTO cycles=4 url="jdbc:postgresql://host:port/database" databaseName="defaultdb" portNumber=5432 user="newuser" password="CHANGE_ME" sslmode="prefer" serverName=insectdb sslrootcert="/path/to/postgresql_certs/root.crt" -vv --show-stacktraces
min_version: "5.17.1"
description: |
  A workload with only text keys and text values. This is based on the CQL keyvalue workloads as found
  in cql-keyvalue2.yaml.
scenarios:
  default:
    schema: run driver=jdbc workload="/path/to/postgresql-keyvalue.yaml" tags==block:schema threads=1 cycles==UNDEF url="jdbc:postgresql://host:port/" databaseName="defaultdb" portNumber=5432 user="newuser" password="CHANGE_ME" sslmode="prefer" serverName="pgsql" sslrootcert="/path/to/postgresql_certs/root.crt"
    rampup: run driver=jdbc workload="/path/to/postgresql-keyvalue.yaml" tags==block:rampup threads=AUTO cycles===TEMPLATE(rampup-cycles,100) url="jdbc:postgresql://host:port/" databaseName="defaultdb" portNumber=5432 user="newuser" password="CHANGE_ME" sslmode="prefer" serverName="pgsql" sslrootcert="/path/to/postgresql_certs/root.crt"
    main: run driver=jdbc workload="/path/to/postgresql-keyvalue.yaml" tags==block:'main.*' threads=AUTO cycles===TEMPLATE(main-cycles,100) url="jdbc:postgresql://host:port/" databaseName="defaultdb" portNumber=5432 user="newuser" password="CHANGE_ME" sslmode="prefer" serverName="pgsql" sslrootcert="/path/to/postgresql_certs/root.crt"
params:
  instrument: TEMPLATE(instrument,false)
bindings:
  seq_key: Mod(TEMPLATE(keycount,1000000000)); ToString() -> String
  seq_value: Hash(); Mod(TEMPLATE(valuecount,1000000000)); ToString() -> String
  rw_key: <<keydist:Uniform(0,1000000000)->int>>; ToString() -> String
  rw_value: Hash(); <<valdist:Uniform(0,1000000000)->int>>; ToString() -> String
blocks:
  schema:
    ops:
      drop-database:
        execute: |
          DROP DATABASE IF EXISTS TEMPLATE(database,baselines);
      create-database:
        execute: |
          CREATE DATABASE IF NOT EXISTS TEMPLATE(database,baselines);
      drop-table:
        execute: |
          DROP TABLE IF EXISTS TEMPLATE(database,baselines).TEMPLATE(table,keyvalue);
      create-table:
        execute: |
          CREATE TABLE IF NOT EXISTS TEMPLATE(database,baselines).TEMPLATE(table,keyvalue)
          (key STRING PRIMARY KEY, value STRING);
  rampup:
    params:
    ops:
      rampup-insert:
        update: |
          INSERT INTO TEMPLATE(database,baselines).TEMPLATE(table,keyvalue)
          (key, value) VALUES ({seq_key},{seq_value});
  main-read:
    params:
      ratio: TEMPLATE(read_ratio,5)
    ops:
      main-select:
        query: |
          SELECT * FROM TEMPLATE(keyspace,baselines).TEMPLATE(table,keyvalue) WHERE key='{rw_key}';
  main-write:
    params:
      ratio: TEMPLATE(write_ratio,5)
    ops:
      main-insert:
        update: |
          INSERT INTO TEMPLATE(keyspace,baselines).TEMPLATE(table,keyvalue)
          (key, value) VALUES ('{rw_key}', '{rw_value}');

View File

@@ -0,0 +1,157 @@
min_version: "5.17.1"
description: |
  A tabular workload with partitions, clusters, and data fields.
  This workload contains partitioning and clustering along with a set
  of 8 fields of varying length. The field values vary in size according
  to the fibonacci sequence times a base size factor of 10, with
  an additional 10% variance for each field.
  The read patterns have a variety of field subsets specified.
  During rampup, all rows will be written partition by partition,
  filling in all rows of that partition before moving on to the next.
  Example: With a partition size of 1000 and 1B rows, there will be
  1000000 partitions.
  During the main phase, the read patterns are varied with different
  field sets. As well, the number of rows which will be returned
  is varied between 1 and 10.
  By default, reads occur at the same ratio as writes, with main
  phase writes writing full rows.
  You can bulk up the size of the payloads by 10x with addzeroes='0',
  by 100x with addzeroes='00', and so on, but if you want to go higher
  than 100x, you'll need to modify the workload with a larger reference
  file in the HashedFileExtractToString(...) bindings.
scenarios:
  default:
    schema: run driver=jdbc tags==block:schema cycles==UNDEF threads==1
    rampup: run driver=jdbc tags==block:rampup cycles===TEMPLATE(rampup-cycles,100) threads=auto
    main: run driver=jdbc tags==block:'main.*' cycles===TEMPLATE(main-cycles,100) threads=auto
params:
  instrument: TEMPLATE(instrument,false)
bindings:
  # for ramp-up and verify phases
  #
  part_layout: Div(<<partsize:1000>>); ToString() -> String
  clust_layout: Mod(<<partsize:1000>>); ToString() -> String
  # todo: update these definitions to use the simpler 10,0.1, 20, 0.2, ...
  data0: Add(10); HashedFileExtractToString('data/lorem_ipsum_full.txt',9TEMPLATE(addzeroes,),11TEMPLATE(addzeroes,))
  data1: Add(20); HashedFileExtractToString('data/lorem_ipsum_full.txt',18TEMPLATE(addzeroes,),22TEMPLATE(addzeroes,))
  data2: Add(30); HashedFileExtractToString('data/lorem_ipsum_full.txt',27TEMPLATE(addzeroes,),33TEMPLATE(addzeroes,))
  data3: Add(40); HashedFileExtractToString('data/lorem_ipsum_full.txt',45TEMPLATE(addzeroes,),55TEMPLATE(addzeroes,))
  data4: Add(50); HashedFileExtractToString('data/lorem_ipsum_full.txt',72TEMPLATE(addzeroes,),88TEMPLATE(addzeroes,))
  data5: Add(60); HashedFileExtractToString('data/lorem_ipsum_full.txt',107TEMPLATE(addzeroes,),143TEMPLATE(addzeroes,))
  data6: Add(70); HashedFileExtractToString('data/lorem_ipsum_full.txt',189TEMPLATE(addzeroes,),231TEMPLATE(addzeroes,))
  data7: Add(80); HashedFileExtractToString('data/lorem_ipsum_full.txt',306TEMPLATE(addzeroes,),374TEMPLATE(addzeroes,))
  # for main phase
  # for write
  part_write: Hash(); Uniform(0,TEMPLATE(partcount,100))->int; ToString() -> String
  clust_write: Hash(); Add(1); Uniform(0,TEMPLATE(partsize,1000000))->int; ToString() -> String
  data_write: Hash(); HashedFileExtractToString('data/lorem_ipsum_full.txt',50,150) -> String
  # for read
  limit: Uniform(1,10) -> int
  part_read: Uniform(0,TEMPLATE(partcount,100))->int; ToString() -> String
  clust_read: Add(1); Uniform(0,TEMPLATE(partsize,1000000))->int; ToString() -> String
blocks:
  schema:
    params:
      prepared: false
    ops:
      #drop-database:
      #  execute: |
      #    DROP DATABASE IF EXISTS TEMPLATE(database,baselines);
      create-database:
        execute: |
          CREATE DATABASE IF NOT EXISTS TEMPLATE(database,baselines);
      drop-table:
        execute: |
          DROP TABLE IF EXISTS TEMPLATE(database,baselines).TEMPLATE(table,tabular);
      create-table:
        execute: |
          CREATE TABLE IF NOT EXISTS TEMPLATE(keyspace,baselines).TEMPLATE(table,tabular) (
          part STRING,
          clust STRING,
          data0 STRING, data1 STRING, data2 STRING, data3 STRING,
          data4 STRING, data5 STRING, data6 STRING, data7 STRING,
          PRIMARY KEY (part,clust)
          );
  rampup:
    params:
    ops:
      rampup-insert:
        update: |
          INSERT INTO TEMPLATE(keyspace,baselines).TEMPLATE(table,tabular)
          (part,clust,data0,data1,data2,data3,data4,data5,data6,data7)
          VALUES (
          '{part_layout}','{clust_layout}','{data0}','{data1}','{data2}',
          '{data3}','{data4}','{data5}','{data6}','{data7}'
          );
  verify:
    params:
      cl: TEMPLATE(read_cl,LOCAL_QUORUM)
    ops:
      verify-select:
        query: |
          SELECT * FROM TEMPLATE(keyspace,baselines).TEMPLATE(table,tabular)
          WHERE part='{part_layout}'
          AND clust='{clust_layout}'
  main-read:
    params:
      ratio: TEMPLATE(read_ratio,1)
    ops:
      main-select-all:
        query: |
          SELECT * FROM TEMPLATE(keyspace,baselines).TEMPLATE(table,tabular)
          WHERE part='{part_read}' LIMIT {limit};
      main-select-01:
        query: |
          SELECT data0,data1 from TEMPLATE(keyspace,baselines).TEMPLATE(table,tabular)
          WHERE part='{part_read}' LIMIT {limit};
      main-select-0246:
        query: |
          SELECT data0,data2,data4,data6 FROM TEMPLATE(keyspace,baselines).TEMPLATE(table,tabular)
          WHERE part='{part_read}' LIMIT {limit};
      main-select-1357:
        query: |
          SELECT data1,data3,data5,data7 FROM TEMPLATE(keyspace,baselines).TEMPLATE(table,tabular)
          WHERE part='{part_read}' LIMIT {limit};
      main-select-0123:
        query: |
          SELECT data0,data1,data2,data3 FROM TEMPLATE(keyspace,baselines).TEMPLATE(table,tabular)
          WHERE part='{part_read}' LIMIT {limit};
      main-select-4567:
        query: |
          SELECT data4,data5,data6,data7 FROM TEMPLATE(keyspace,baselines).TEMPLATE(table,tabular)
          WHERE part='{part_read}' LIMIT {limit};
      main-select-67:
        query: |
          SELECT data6,data7 FROM TEMPLATE(keyspace,baselines).TEMPLATE(table,tabular)
          WHERE part='{part_read}' LIMIT {limit};
      main-select:
        query: |
          SELECT data0,data1,data2,data3,data4,data5,data6,data7
          FROM TEMPLATE(keyspace,baselines).TEMPLATE(table,tabular)
          WHERE part='{part_read}' LIMIT {limit};
  main-write:
    params:
      ratio: TEMPLATE(write_ratio,8)
    ops:
      main-write:
        update: |
          INSERT INTO TEMPLATE(keyspace,baselines).TEMPLATE(table,tabular)
          (part, clust, data0,data1,data2,data3,data4,data5,data6,data7)
          VALUES (
          '{part_write}','{clust_write}','{data0}','{data1}','{data2}',
          '{data3}','{data4}','{data5}','{data6}','{data7}'
          );

View File

@@ -0,0 +1,84 @@
# java -jar nb5.jar cockroachdb-timeseries default -vv --show-stacktraces
min_version: "5.17.1"
description: |
  This workload emulates a time-series data model and access patterns.
scenarios:
  default:
    schema: run driver=jdbc tags==block:schema cycles==UNDEF threads==1 url="jdbc:postgresql://host:port/" databaseName="defaultdb" portNumber=5432 user="newuser" password="CHANGE_ME" sslmode="prefer" serverName="pgsql" sslrootcert="/path/to/postgresql_certs/root.crt"
    rampup: run driver=jdbc tags==block:rampup cycles===TEMPLATE(rampup-cycles,100) threads=auto url="jdbc:postgresql://host:port/" databaseName="defaultdb" portNumber=5432 user="newuser" password="CHANGE_ME" sslmode="prefer" serverName="pgsql" sslrootcert="/path/to/postgresql_certs/root.crt"
    main: run driver=jdbc tags==block:'main.*' cycles===TEMPLATE(main-cycles,100) threads=auto url="jdbc:postgresql://host:port/" databaseName="defaultdb" portNumber=5432 user="newuser" password="CHANGE_ME" sslmode="prefer" serverName="pgsql" sslrootcert="/path/to/postgresql_certs/root.crt"
params:
  instrument: TEMPLATE(instrument,false)
bindings:
  machine_id: Mod(TEMPLATE(sources,10000)); ToHashedUUID() -> java.util.UUID
  sensor_name: HashedLineToString('data/variable_words.txt')
  time: Mul(TEMPLATE(timespeed,100)L); Div(TEMPLATE(sources,10000)L); ToJavaInstant()
  cell_timestamp: Mul(TEMPLATE(timespeed,100)L); Div(TEMPLATE(sources,10000)L); Mul(1000L)
  sensor_value: Normal(0.0,5.0); Add(100.0) -> double
  station_id: Div(TEMPLATE(sources,10000));Mod(TEMPLATE(stations,100)); ToHashedUUID() -> java.util.UUID
  data: HashedFileExtractToString('data/lorem_ipsum_full.txt',800TEMPLATE(addzeroes,),1200TEMPLATE(addzeroes,))
blocks:
  schema:
    params:
    ops:
      #drop-database:
      #  execute: |
      #    DROP DATABASE IF EXISTS TEMPLATE(database,baselines);
      create-database:
        execute: |
          CREATE DATABASE IF NOT EXISTS TEMPLATE(database,baselines);
      drop-table:
        execute: |
          DROP TABLE IF EXISTS TEMPLATE(database,baselines).TEMPLATE(table,iot);
      create-table:
        execute: |
          CREATE TABLE IF NOT EXISTS TEMPLATE(keyspace,baselines).TEMPLATE(table,iot) (
          machine_id UUID,
          sensor_name STRING,
          time TIMESTAMP,
          sensor_value FLOAT,
          station_id UUID,
          data STRING,
          PRIMARY KEY (machine_id, sensor_name, time)
          );
  rampup:
    params:
    ops:
      insert-rampup:
        update: |
          INSERT INTO TEMPLATE(keyspace,baselines).TEMPLATE(table,iot)
          (machine_id, sensor_name, time, sensor_value, station_id, data)
          VALUES (
          '{machine_id}', '{sensor_name}', '{time}', {sensor_value}, '{station_id}', '{data}'
          );
        #using timestamp {cell_timestamp}
  main-read:
    params:
      ratio: TEMPLATE(read_ratio,1)
    ops:
      select-read:
        query: |
          SELECT * FROM TEMPLATE(keyspace,baselines).TEMPLATE(table,iot)
          WHERE machine_id='{machine_id}' and sensor_name='{sensor_name}'
          LIMIT TEMPLATE(limit,10);
  main-write:
    params:
      ratio: TEMPLATE(write_ratio,9)
    ops:
      insert-main:
        update: |
          INSERT INTO TEMPLATE(keyspace,baselines).TEMPLATE(table,iot)
          (machine_id, sensor_name, time, sensor_value, station_id, data)
          VALUES (
          '{machine_id}', '{sensor_name}', '{time}', {sensor_value}, '{station_id}', '{data}'
          );
        #using timestamp {cell_timestamp}

View File

@@ -0,0 +1,50 @@
# JDBC driver
This JDBC driver leverages the [Hikari Connection Pool](https://github.com/brettwooldridge/HikariCP/wiki) for connection pooling and works with PostgreSQL®. It enables NoSQLBench-based workload generation and performance testing against any PostgreSQL-compatible database cluster, for example CockroachDB® or YugabyteDB® (YSQL API).
# Executing JDBC Workload
The following is an example of invoking a JDBC workload.
```shell
<nb_cmd> run driver=jdbc workload="/path/to/workload.yaml" cycles=1000 threads=100 url="jdbc:postgresql://" serverName=localhost portNumber=5432 databaseName=defaultdb ... -vv --show-stacktraces
```
In the above NB command, the following are JDBC driver-specific parameters:
* `url`: URL of the database cluster. Default is `jdbc:postgresql://`.
* `serverName`: Default is `localhost`.
* `portNumber`: Default is `5432`.
* `databaseName`: The database name. The default is to connect to a database with the same name as the user name used to connect to the server.
Other NB engine parameters are straightforward:
* `driver`: *must* be `jdbc`.
* `threads`: depending on the workload type, the NB thread count determines how many clients are created. All the clients will share the Connection obtained from the Hikari Connection Pool.
* `*.yaml`: the NB jdbc scenario definition workload yaml file.
* `<nb_cmd>`: either `./nb` (when using the binary) or `java -jar nb5.jar`.
# Configuration
These are the main configuration options with which you can issue a query and process its results, based on the [PostgreSQL® Query](https://jdbc.postgresql.org/documentation/query/) pattern.
## Config Sources
* `execute`: used to issue DDL statements such as `CREATE DATABASE|TABLE` or `DROP DATABASE|TABLE` operations, which return nothing.
* `query`: used to issue DML statements such as a `SELECT` operation, which returns a `ResultSet` object to process.
* `update`: used to issue DML statements such as `INSERT|UPDATE|DELETE` operations, which return the number of rows affected.
## Statement Forms
The syntax for specifying these types is simplified as well: a single `type` field accepts the values `execute`, `query`, and `update`, with the raw statement given in the `stmt` field. Alternatively, one can use one of the type names directly as the op field and provide the raw statement as its value, as shown below.
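The following two op templates are equivalent: the first names the op type explicitly via `type` and `stmt`, while the second uses the type name directly as the op field. (This is a minimal sketch; the statement text and the `rw_key` binding are illustrative.)
````yaml
ops:
  read-key-explicit:
    type: query
    stmt: |
      SELECT value FROM TEMPLATE(database,baselines).TEMPLATE(table,keyvalue) WHERE key='{rw_key}';
  read-key-direct:
    query: |
      SELECT value FROM TEMPLATE(database,baselines).TEMPLATE(table,keyvalue) WHERE key='{rw_key}';
````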
### Examples
Check out the default activities under the [activities.baselinesv2](./activities.baselinesv2) directory.
#### Op Template Examples
````yaml
ops:
  drop-database:
    type: execute
    stmt: |
      DROP DATABASE IF EXISTS TEMPLATE(database,baselines);
  create-table:
    execute: |
      CREATE TABLE IF NOT EXISTS TEMPLATE(database,baselines).TEMPLATE(table,keyvalue);
  select-table:
    query: |
      SELECT one, two, three FROM TEMPLATE(database,baselines).TEMPLATE(table,keyvalue) WHERE ...;
  update-table:
    update: |
      UPDATE TEMPLATE(database,baselines).TEMPLATE(table,keyvalue) SET key = 'value' WHERE ...;
````

adapter-kafka/pom.xml

@@ -34,7 +34,7 @@
</description>
<properties>
<kafka.version>3.3.2</kafka.version>
<kafka.version>3.4.0</kafka.version>
</properties>
<dependencies>

View File

@@ -388,13 +388,13 @@
<dependency>
<groupId>org.graalvm.js</groupId>
<artifactId>js</artifactId>
<version>22.3.0</version>
<version>22.3.1</version>
<scope>runtime</scope>
</dependency>
<dependency>
<groupId>org.graalvm.js</groupId>
<artifactId>js-scriptengine</artifactId>
<version>22.3.0</version>
<version>22.3.1</version>
</dependency>
<dependency>
<groupId>org.graalvm.tools</groupId>

View File

@@ -131,6 +131,12 @@
<!-- </exclusions>-->
</dependency>
<dependency>
<groupId>io.nosqlbench</groupId>
<artifactId>adapter-jdbc</artifactId>
<version>${revision}</version>
</dependency>
</dependencies>
<build>

pom.xml

@@ -64,6 +64,7 @@
<module.adapter-pulsar>adapter-pulsar</module.adapter-pulsar>
<module.adapter-s4j>adapter-s4j</module.adapter-s4j>
<module.adapter-kafka>adapter-kafka</module.adapter-kafka>
<module.adapter-jdbc>adapter-jdbc</module.adapter-jdbc>
<!-- VIRTDATA MODULES -->
<module.virtdata-api>virtdata-api</module.virtdata-api>
@@ -107,6 +108,7 @@
<module>adapter-pulsar</module>
<module>adapter-s4j</module>
<module>adapter-kafka</module>
<module>adapter-jdbc</module>
<!-- VIRTDATA MODULES -->
<module>virtdata-api</module>
@@ -165,7 +167,7 @@
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-project-info-reports-plugin</artifactId>
<version>3.4.1</version>
<version>3.4.2</version>
</plugin>
</plugins>
</reporting>
@@ -183,6 +185,12 @@
<organization>nosqlbench.io</organization>
<organizationUrl>http://nosqlbench.io/</organizationUrl>
</developer>
<developer>
<name>Madhavan S.</name>
<url>https://github.com/msmygit</url>
<organization>nosqlbench.io</organization>
<organizationUrl>http://nosqlbench.io/</organizationUrl>
</developer>
</developers>
<repositories>