Mirror of https://github.com/nosqlbench/nosqlbench.git (synced 2025-02-25 18:55:28 -06:00)

Commit: Initial draft for JDBC adapter
adapter-cockroachdb/pom.xml (deleted)
@@ -1,62 +0,0 @@
<!--
  ~ Copyright (c) 2022 nosqlbench
  ~
  ~ Licensed under the Apache License, Version 2.0 (the "License");
  ~ you may not use this file except in compliance with the License.
  ~ You may obtain a copy of the License at
  ~
  ~     http://www.apache.org/licenses/LICENSE-2.0
  ~
  ~ Unless required by applicable law or agreed to in writing, software
  ~ distributed under the License is distributed on an "AS IS" BASIS,
  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  ~ See the License for the specific language governing permissions and
  ~ limitations under the License.
  -->

<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <parent>
        <groupId>io.nosqlbench</groupId>
        <artifactId>mvn-defaults</artifactId>
        <version>4.17.32-SNAPSHOT</version>
        <relativePath>../mvn-defaults</relativePath>
    </parent>

    <artifactId>adapter-cockroachdb</artifactId>
    <packaging>jar</packaging>
    <name>${project.artifactId}</name>

    <description>
        A DriverAdapter driver for CockroachDB
    </description>

    <dependencies>
        <!-- core dependencies -->

        <dependency>
            <groupId>io.nosqlbench</groupId>
            <artifactId>adapters-api</artifactId>
            <version>4.17.32-SNAPSHOT</version>
        </dependency>

        <dependency>
            <groupId>org.postgresql</groupId>
            <artifactId>postgresql</artifactId>
            <version>42.5.1</version>
        </dependency>
<!--        <dependency>-->
<!--            <groupId>org.postgresql</groupId>-->
<!--            <artifactId>postgresql</artifactId>-->
<!--            <version>42.3.3</version>-->
<!--        </dependency>-->
        <dependency>
            <groupId>com.zaxxer</groupId>
            <artifactId>HikariCP</artifactId>
            <version>5.0.1</version>
        </dependency>

    </dependencies>

</project>
CockroachDBCmdType.java (deleted)
@@ -1,26 +0,0 @@
/*
 * Copyright (c) 2023 nosqlbench
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.nosqlbench.adapter.cockroachdb;

/**
 * Op templates which are supported by the NoSQLBench CockroachDB driver are
 * enumerated below. These command names should mirror those in the official
 * CockroachDB API exactly.
 */
public class CockroachDBCmdType {

}
CockroachDBSpace.java (deleted)
@@ -1,121 +0,0 @@
/*
 * Copyright (c) 2023 nosqlbench
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.nosqlbench.adapter.cockroachdb;

import io.nosqlbench.api.config.standard.NBConfiguration;
import io.nosqlbench.api.errors.OpConfigError;
import org.postgresql.ds.PGSimpleDataSource;

import javax.sql.DataSource;
import java.sql.Connection;
import java.util.Optional;

public class CockroachDBSpace {
    private final String name;
    private final DataSource ds = null;
//    private final HikariConfig hikariConfig = null;
//    private final HikariDataSource hikariDataSource = null;
    private Connection connection;

    public CockroachDBSpace(String name, NBConfiguration cfg) {
        this.name = name;
        PGSimpleDataSource client = createClient(cfg);
        // dynamoDB= new DynamoDB(client);
    }

    private PGSimpleDataSource createClient(NBConfiguration cfg) {
        PGSimpleDataSource ds = new PGSimpleDataSource();

        Optional<String> url = cfg.getOptional("url");
        if(url.isEmpty()) {
            throw new OpConfigError("url option is required.");
        } else {
            ds.setURL(url.get());
        }

        Optional<String> serverNames = cfg.getOptional("serverName");
        if(serverNames.isPresent()) {
            ds.setServerNames(new String[]{serverNames.get()});
        } else {
            throw new OpConfigError("Server name option is required.");
        }

        Optional<String> databaseName = cfg.getOptional("databaseName");
        if(databaseName.isPresent()) {
            ds.setDatabaseName(databaseName.get());
        } else {
            throw new OpConfigError("Database name option is required.");
        }

        Optional<Integer> portNumber = cfg.getOptional(Integer.class, "portNumber");
        ds.setPortNumbers(new int[] { portNumber.orElse(26257) });

        Optional<String> user = cfg.getOptional("user");
        if(user.isPresent()) {
            ds.setUser(user.get());
        }

        Optional<String> password = cfg.getOptional("password");
        if(password.isPresent()) {
            if(user.isEmpty()) {
                throw new OpConfigError("Both user and password options are required. Only password is supplied in this case.");
            }
            ds.setPassword(password.get());
        } else {
            if(user.isPresent()) {
                throw new OpConfigError("Both user and password options are required. Only user is supplied in this case.");
            }
        }

        Optional<String> sslMode = cfg.getOptional("sslMode");
        if(sslMode.isPresent()) {
            ds.setSslMode(sslMode.get());
        } else {
            ds.setSslMode("verify-full");
        }

        Optional<String> applicationName = cfg.getOptional("applicationName");
        if(applicationName.isPresent()) {
            ds.setApplicationName(applicationName.get());
        } else {
            ds.setApplicationName("NoSQLBench");
        }
        Optional<Boolean> rewriteBatchedInserts = cfg.getOptional(Boolean.class, "rewriteBatchedInserts");
        ds.setReWriteBatchedInserts(rewriteBatchedInserts.orElse(false));

        return ds;
    }

    public static NBConfigModel getConfigModel() {
        return ConfigModel.of(CockroachDBSpace.class)
            .add(Param.optional("url"))
            .add(Param.optional("serverName"))
            .add(Param.optional("databaseName"))
            //TODO remove these below
            .add(Param.optional("client_socket_timeout"))
            .add(Param.optional("client_execution_timeout"))
            .add(Param.optional("client_max_connections"))
            .add(Param.optional("client_max_error_retry"))
            .add(Param.optional("client_user_agent_prefix"))
            .add(Param.optional("client_consecutive_retries_before_throttling"))
            .add(Param.optional("client_gzip"))
            .add(Param.optional("client_tcp_keepalive"))
            .add(Param.optional("client_disable_socket_proxy"))
            .add(Param.optional("client_so_send_size_hint"))
            .add(Param.optional("client_so_recv_size_hint"))
            .asReadOnly();
    }
}
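The deleted space class above configures a pgjdbc PGSimpleDataSource field by field. As a point of reference, here is a minimal standalone sketch of the same DataSource setup outside of NoSQLBench; the host, credentials, and database names are placeholders, not values from this commit.

import org.postgresql.ds.PGSimpleDataSource;

import java.sql.Connection;
import java.sql.SQLException;

public class PgDataSourceSketch {
    public static void main(String[] args) throws SQLException {
        PGSimpleDataSource ds = new PGSimpleDataSource();
        // Placeholder connection details; substitute real values.
        ds.setServerNames(new String[]{"localhost"});
        ds.setPortNumbers(new int[]{26257});      // CockroachDB default port, as in the space class
        ds.setDatabaseName("defaultdb");
        ds.setUser("app_user");
        ds.setPassword("app_password");
        ds.setSslMode("verify-full");             // same default the space class applies
        ds.setApplicationName("NoSQLBench");
        ds.setReWriteBatchedInserts(false);

        try (Connection conn = ds.getConnection()) {
            // Simple connectivity check
            System.out.println("Connected to: " + conn.getMetaData().getDatabaseProductName());
        }
    }
}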
CockroachDB driver README (deleted)
@@ -1 +0,0 @@
# cockroachdb driver
adapter-jdbc/pom.xml (new file, 115 lines)
@@ -0,0 +1,115 @@
<!--
  ~ Copyright (c) 2022 nosqlbench
  ~
  ~ Licensed under the Apache License, Version 2.0 (the "License");
  ~ you may not use this file except in compliance with the License.
  ~ You may obtain a copy of the License at
  ~
  ~     http://www.apache.org/licenses/LICENSE-2.0
  ~
  ~ Unless required by applicable law or agreed to in writing, software
  ~ distributed under the License is distributed on an "AS IS" BASIS,
  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  ~ See the License for the specific language governing permissions and
  ~ limitations under the License.
  -->

<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <parent>
        <groupId>io.nosqlbench</groupId>
        <artifactId>mvn-defaults</artifactId>
        <version>5.17.1-SNAPSHOT</version>
        <relativePath>../mvn-defaults</relativePath>
    </parent>

    <artifactId>adapter-jdbc</artifactId>
    <packaging>jar</packaging>
    <name>${project.artifactId}</name>

    <description>
        A DriverAdapter driver for JDBC via PostgreSQL with HikariCP.
    </description>

    <dependencies>
        <!-- core dependencies -->
        <dependency>
            <groupId>io.nosqlbench</groupId>
            <artifactId>adapters-api</artifactId>
            <version>5.17.1-SNAPSHOT</version>
        </dependency>
        <dependency>
            <groupId>io.nosqlbench</groupId>
            <artifactId>engine-api</artifactId>
            <version>5.17.1-SNAPSHOT</version>
        </dependency>
        <dependency>
            <groupId>io.nosqlbench</groupId>
            <artifactId>nb-annotations</artifactId>
            <version>5.17.1-SNAPSHOT</version>
            <scope>compile</scope>
        </dependency>

        <!-- https://search.maven.org/artifact/org.postgresql/postgresql -->
        <dependency>
            <groupId>org.postgresql</groupId>
            <artifactId>postgresql</artifactId>
            <version>42.5.1</version>
        </dependency>

        <!-- https://search.maven.org/artifact/com.zaxxer/HikariCP -->
        <dependency>
            <groupId>com.zaxxer</groupId>
            <artifactId>HikariCP</artifactId>
            <version>5.0.1</version>
        </dependency>
    </dependencies>

    <build>
        <plugins>
            <plugin>
                <groupId>org.jacoco</groupId>
                <artifactId>jacoco-maven-plugin</artifactId>
                <version>0.8.8</version>
                <executions>
                    <execution>
                        <id>prepare-agent</id>
                        <goals>
                            <goal>prepare-agent</goal>
                        </goals>
                    </execution>
                    <execution>
                        <id>report</id>
                        <phase>test</phase>
                        <goals>
                            <goal>report</goal>
                        </goals>
                    </execution>
                    <execution>
                        <id>jacoco-check</id>
                        <phase>verify</phase>
                        <goals>
                            <goal>check</goal>
                        </goals>
                        <configuration>
                            <rules>
                                <rule>
                                    <element>BUNDLE</element>
                                    <limits>
                                        <limit>
                                            <counter>INSTRUCTION</counter>
                                            <value>COVEREDRATIO</value>
                                            <minimum>0.00</minimum>
                                            <maximum>1.00</maximum>
                                        </limit>
                                    </limits>
                                </rule>
                            </rules>
                        </configuration>
                    </execution>
                </executions>
            </plugin>
        </plugins>
    </build>

</project>
JDBCDriverAdapter.java (new file)
@@ -0,0 +1,36 @@
package io.nosqlbench.adapter.jdbc;

import io.nosqlbench.adapter.jdbc.optypes.JDBCOp;
import io.nosqlbench.api.config.standard.NBConfigModel;
import io.nosqlbench.api.config.standard.NBConfiguration;
import io.nosqlbench.engine.api.activityimpl.OpMapper;
import io.nosqlbench.engine.api.activityimpl.uniform.BaseDriverAdapter;
import io.nosqlbench.engine.api.activityimpl.uniform.DriverAdapter;
import io.nosqlbench.engine.api.activityimpl.uniform.DriverSpaceCache;
import io.nosqlbench.nb.annotations.Service;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

import java.util.function.Function;

@Service(value = DriverAdapter.class, selector = "jdbc")
public class JDBCDriverAdapter extends BaseDriverAdapter<JDBCOp, JDBCSpace> {
    private final static Logger logger = LogManager.getLogger(JDBCDriverAdapter.class);

    @Override
    public OpMapper<JDBCOp> getOpMapper() {
        DriverSpaceCache<? extends JDBCSpace> spaceCache = getSpaceCache();
        NBConfiguration adapterConfig = getConfiguration();
        return new JDBCOpMapper(this, adapterConfig, spaceCache);
    }

    @Override
    public Function<String, ? extends JDBCSpace> getSpaceInitializer(NBConfiguration cfg) {
        return (s) -> new JDBCSpace(s, cfg);
    }

    @Override
    public NBConfigModel getConfigModel() {
        return super.getConfigModel().add(JDBCSpace.getConfigModel());
    }
}
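The adapter above wires three concepts together: a space that holds shared state, an op mapper that turns a parsed op template into a dispenser, and the dispenser that yields one executable op per cycle. The following is a simplified, framework-free analogue of that pipeline (all names are hypothetical, not NoSQLBench API), meant only to show the shape of the data flow.

import java.util.function.LongFunction;

// Hypothetical analogue of the adapter pipeline: template -> dispenser -> per-cycle op.
final class PipelineSketch {
    interface Op { void run(); }

    // A dispenser maps a cycle number to an executable op.
    interface Dispenser extends LongFunction<Op> {}

    // A "mapper" turns an op template (here just a SQL string) into a dispenser.
    static Dispenser map(String sqlTemplate) {
        return cycle -> () -> System.out.printf("cycle %d: %s%n", cycle, sqlTemplate);
    }

    public static void main(String[] args) {
        Dispenser d = map("SELECT 1");
        d.apply(0L).run();
        d.apply(1L).run();
    }
}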
JDBCOpMapper.java (new file)
@@ -0,0 +1,70 @@
package io.nosqlbench.adapter.jdbc;

import io.nosqlbench.adapter.jdbc.opdispensers.JDBCDDLOpDispenser;
import io.nosqlbench.adapter.jdbc.optypes.JDBCOp;
import io.nosqlbench.api.config.standard.NBConfiguration;
import io.nosqlbench.engine.api.activityimpl.OpDispenser;
import io.nosqlbench.engine.api.activityimpl.OpMapper;
import io.nosqlbench.engine.api.activityimpl.uniform.DriverAdapter;
import io.nosqlbench.engine.api.activityimpl.uniform.DriverSpaceCache;
import io.nosqlbench.engine.api.templating.ParsedOp;
import io.nosqlbench.engine.api.templating.TypeAndTarget;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

import java.sql.Connection;
import java.util.function.LongFunction;

public class JDBCOpMapper implements OpMapper<JDBCOp> {
    private final static Logger logger = LogManager.getLogger(JDBCOpMapper.class);

    private final DriverAdapter adapter;
    private final NBConfiguration cfg;
    private final DriverSpaceCache<? extends JDBCSpace> spaceCache;

    public JDBCOpMapper(DriverAdapter adapter, NBConfiguration cfg, DriverSpaceCache<? extends JDBCSpace> spaceCache) {
        this.adapter = adapter;
        this.cfg = cfg;
        this.spaceCache = spaceCache;
    }

    @Override
    public OpDispenser<? extends JDBCOp> apply(ParsedOp op) {
        LongFunction<String> spaceNameF = op.getAsFunctionOr("space", "default");
        LongFunction<JDBCSpace> spaceFunc = l -> spaceCache.get(spaceNameF.apply(l));
        // Since the only thing needed from the JDBCSpace is the connection, we can short-circuit
        // to it here instead of stepping down from the cycle to the space to the connection.
        LongFunction<Connection> connectionLongFunc = l -> spaceCache.get(spaceNameF.apply(l)).getConnection();

        // CREATE|DROP TABLE|VIEW uses execute (as opposed to executeQuery which returns a ResultSet)
        // https://jdbc.postgresql.org/documentation/query/#example54dropping-a-table-in-jdbc
        //return new JDBCQueryOpDispenser(adapter, spaceFunc, op);working

        /*
         * If the user provides a body element, then they want to provide the JSON or
         * a data structure that can be converted into JSON, bypassing any further
         * specialized type-checking or op-type specific features
         */
        if (op.isDefined("body")) {
            throw new RuntimeException("This mode is reserved for later. Do not use the 'body' op field.");
        }
        else {
            TypeAndTarget<JDBCOpType, String> opType = op.getTypeAndTarget(JDBCOpType.class, String.class, "type", "stmt");

            logger.info(() -> "Using " + opType.enumId + " statement form for '" + op.getName() + "'");

            //return new JDBCQueryOpDispenser(adapter, spaceFunc, op/*, opType.targetFunction*/);

            return switch (opType.enumId) {
                // CREATE|DROP TABLE|VIEW uses execute (as opposed to executeQuery which returns a ResultSet)
                // https://jdbc.postgresql.org/documentation/query/#example54dropping-a-table-in-jdbc
                case select -> null;
                case update -> null;
                case create, drop, ddl -> new JDBCDDLOpDispenser(adapter, connectionLongFunc, op, opType.targetFunction)/*.apply(op)*/;
            };
        }
    }
}
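The mapper above resolves the named "space" lazily on each cycle through the DriverSpaceCache. A rough standalone analogue of that keyed, lazily initialized cache is shown below using a plain ConcurrentHashMap; class and method names are hypothetical and only illustrate the pattern, not the NoSQLBench implementation.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;

// Rough analogue of a per-name space cache: the first op that references a
// space name pays the construction cost, later ops reuse the cached instance.
final class SpaceCacheSketch<S> {
    private final Map<String, S> spaces = new ConcurrentHashMap<>();
    private final Function<String, S> initializer;

    SpaceCacheSketch(Function<String, S> initializer) {
        this.initializer = initializer;
    }

    S get(String name) {
        return spaces.computeIfAbsent(name, initializer);
    }
}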
JDBCOpType.java (new file)
@@ -0,0 +1,17 @@
package io.nosqlbench.adapter.jdbc;

/**
 * Op templates which are supported by the NoSQLBench JDBC driver are
 * enumerated below. Each op type corresponds to one of the JDBC statement
 * execution styles. See the official API for more details.
 * @see <a href="https://www.cockroachlabs.com/docs/v22.2/sql-statements.html#data-definition-statements">CockroachDB API Reference</a>
 */
public enum JDBCOpType {
    //See https://jdbc.postgresql.org/documentation/query/
    select, // used for SELECT operations; matches executeQuery
    update, // used for updates such as INSERT/UPDATE/DELETE; matches executeUpdate
    ddl,    // used for creating/modifying database objects; matches execute
    //JdbcQuery, // generic placeholder TODO - implement this differently
    create, // used for CREATE operations; matches execute
    drop,   // used for DROP operations; matches execute
}
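The enum comments map each op type onto one of the three JDBC execution methods. The sketch below shows that mapping with plain java.sql calls; the table and statements are placeholders, not part of this commit.

import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;

final class ExecutionStyles {
    static void demo(Connection conn) throws SQLException {
        try (Statement stmt = conn.createStatement()) {
            // ddl / create / drop -> execute(...): no result set expected
            stmt.execute("CREATE TABLE IF NOT EXISTS accounts (id UUID PRIMARY KEY, balance INT8)");

            // update -> executeUpdate(...): returns the affected row count
            int rows = stmt.executeUpdate("INSERT INTO accounts (id, balance) VALUES (gen_random_uuid(), 100)");
            System.out.println(rows + " row(s) inserted");

            // select -> executeQuery(...): returns a ResultSet
            try (ResultSet rs = stmt.executeQuery("SELECT count(*) FROM accounts")) {
                while (rs.next()) {
                    System.out.println("accounts: " + rs.getLong(1));
                }
            }
        }
    }
}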
JDBCSpace.java (new file)
@@ -0,0 +1,225 @@
/*
 * Copyright (c) 2023 nosqlbench
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.nosqlbench.adapter.jdbc;

import com.zaxxer.hikari.HikariConfig;
import com.zaxxer.hikari.HikariDataSource;
import io.nosqlbench.api.config.standard.ConfigModel;
import io.nosqlbench.api.config.standard.NBConfigModel;
import io.nosqlbench.api.config.standard.NBConfiguration;
import io.nosqlbench.api.config.standard.Param;
import io.nosqlbench.api.errors.OpConfigError;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.postgresql.ds.PGSimpleDataSource;

import javax.sql.DataSource;
import java.sql.Connection;
import java.util.Optional;

public class JDBCSpace implements AutoCloseable {
    private final static Logger logger = LogManager.getLogger(JDBCSpace.class);
    private final String spaceName;
    private DataSource ds;
    private HikariConfig hikariConfig;
    private HikariDataSource hikariDataSource;
    private Connection connection;

    public JDBCSpace(String spaceName, NBConfiguration cfg) {
        this.spaceName = spaceName;
        this.hikariDataSource = createClient(cfg);
    }

    public Connection getConnection() {
        return this.connection;
    }

    public HikariDataSource getHikariDataSource() {
        return this.hikariDataSource;
    }

    private HikariDataSource createClient(NBConfiguration cfg) {
        PGSimpleDataSource ds = new PGSimpleDataSource();
        hikariConfig = new HikariConfig();

        Optional<String> url = cfg.getOptional("url");
        if(url.isEmpty()) {
            throw new OpConfigError("url option is required.");
        } else {
            ds.setURL(url.get());
            hikariConfig.setJdbcUrl(url.get());
        }

        Optional<String> serverNames = cfg.getOptional("serverName");
        if(serverNames.isPresent()) {
            ds.setServerNames(new String[]{serverNames.get()});
            //hds.setServerNames(new String[] {serverNames.get()});
            hikariConfig.addDataSourceProperty("serverName", serverNames.get());
        } else {
            throw new OpConfigError("Server name option is required.");
        }

        Optional<String> databaseName = cfg.getOptional("databaseName");
        if(databaseName.isPresent()) {
            ds.setDatabaseName(databaseName.get());
            hikariConfig.addDataSourceProperty("databaseName", databaseName.get());
        } else {
            throw new OpConfigError("Database name option is required.");
        }

        Optional<Integer> portNumber = cfg.getOptional(Integer.class, "portNumber");
        ds.setPortNumbers(new int[] { portNumber.orElse(26257) });
        hikariConfig.addDataSourceProperty("portNumber", portNumber.orElse(26257));

        Optional<String> user = cfg.getOptional("user");
        if(user.isPresent()) {
            ds.setUser(user.get());
            hikariConfig.setUsername(user.get());
        }

        Optional<String> password = cfg.getOptional("password");
        if(password.isPresent()) {
            if(user.isEmpty()) {
                throw new OpConfigError("Both user and password options are required. Only password is supplied in this case.");
            }
            ds.setPassword(password.get());
            hikariConfig.setPassword(password.get());
        } else {
            if(user.isPresent()) {
                throw new OpConfigError("Both user and password options are required. Only user is supplied in this case.");
            }
        }

        Optional<Boolean> ssl = cfg.getOptional(Boolean.class,"ssl");
        if(ssl.isPresent()) {
            ds.setSsl(ssl.get());
            hikariConfig.addDataSourceProperty("ssl", ssl.get());
        } else {
            ds.setSsl(false);
            hikariConfig.addDataSourceProperty("ssl", false);
        }

        Optional<String> sslMode = cfg.getOptional("sslmode");
        if(sslMode.isPresent()) {
            ds.setSslMode(sslMode.get());
            hikariConfig.addDataSourceProperty("sslmode", sslMode.get());
        } else {
            ds.setSslMode("verify-full");
            hikariConfig.addDataSourceProperty("sslmode", "verify-full");
        }

        Optional<String> sslCert = cfg.getOptional("sslcert");
        if(sslCert.isPresent()) {
            ds.setSslcert(sslCert.get());
            hikariConfig.addDataSourceProperty("sslcert", sslCert.get());
        } /*else if(sslMode.isPresent() && (!"disable".equalsIgnoreCase(sslMode.get()) || !"allow".equalsIgnoreCase(sslMode.get())) || !"prefer".equalsIgnoreCase(sslMode.get())) {
            throw new OpConfigError("When sslmode is true, sslcert should be provided.");
        }*/

        Optional<String> sslRootCert = cfg.getOptional("sslrootcert");
        if(sslRootCert.isPresent()) {
            ds.setSslRootCert(sslRootCert.get());
            hikariConfig.addDataSourceProperty("sslrootcert", sslRootCert.get());
        }

        Optional<String> applicationName = cfg.getOptional("applicationName");
        if(applicationName.isPresent()) {
            ds.setApplicationName(applicationName.get());
            hikariConfig.addDataSourceProperty("applicationName", applicationName.orElse("NoSQLBench CRDB"));
        } else {
            ds.setApplicationName("NoSQLBench CRDB");
            hikariConfig.addDataSourceProperty("applicationName", "NoSQLBench CRDB");
        }
        Optional<Boolean> rewriteBatchedInserts = cfg.getOptional(Boolean.class, "rewriteBatchedInserts");
        ds.setReWriteBatchedInserts(rewriteBatchedInserts.orElse(true));
        hikariConfig.addDataSourceProperty("rewriteBatchedInserts", rewriteBatchedInserts.orElse(true));

        Optional<Boolean> autoCommit = cfg.getOptional(Boolean.class, "autoCommit");
        hikariConfig.setAutoCommit(autoCommit.orElse(false));

        Optional<Integer> maximumPoolSize = cfg.getOptional(Integer.class,"maximumPoolSize");
        hikariConfig.setMaximumPoolSize(maximumPoolSize.orElse(40));

        Optional<Integer> keepaliveTime = cfg.getOptional(Integer.class,"keepaliveTime");
        hikariConfig.setKeepaliveTime(keepaliveTime.orElse(150000));

        HikariDataSource hds = new HikariDataSource(hikariConfig);
        try {
            this.connection = hds.getConnection();
        } catch (Exception ex) {
            String exp = "Exception occurred while attempting to create a connection using the Hikari Data Source";
            logger.error(exp, ex);
            throw new RuntimeException(exp, ex);
        }

        return hds;
    }

    public static NBConfigModel getConfigModel() {
        return ConfigModel.of(JDBCSpace.class)
            .add(Param.defaultTo("url", "jdbc:postgresql:/").setDescription("The connection URL used to connect to the DBMS. Defaults to 'jdbc:postgresql:/'"))
            .add(Param.defaultTo("serverName", "localhost").setDescription("The host name of the server. Defaults to 'localhost'"))
            .add(Param.optional("databaseName").setDescription("The database name. The default is to connect to a database with the same name as the user name used to connect to the server."))
            // See https://github.com/brettwooldridge/HikariCP/tree/dev#gear-configuration-knobs-baby & https://jdbc.postgresql.org/documentation/use/
            .add(Param.defaultTo("portNumber", 5432).setDescription("The port number the server is listening on. Defaults to the PostgreSQL® standard port number (5432)"))
            .add(Param.optional("user"))
            .add(Param.optional("password"))
            .add(Param.optional("ssl"))
            .add(Param.optional("sslmode"))
            .add(Param.optional("sslcert"))
            .add(Param.optional("sslrootcert"))
            .add(Param.optional("applicationName"))
            .add(Param.optional("rewriteBatchedInserts"))
            .add(Param.optional("autoCommit"))
            .add(Param.optional("connectionTimeout"))
            .add(Param.optional("idleTimeout"))
            .add(Param.optional("keepaliveTime"))
            .add(Param.optional("maxLifetime"))
            .add(Param.optional("connectionTestQuery"))
            .add(Param.optional("minimumIdle"))
            .add(Param.optional("maximumPoolSize"))
            .add(Param.optional("metricRegistry"))
            .add(Param.optional("healthCheckRegistry"))
            .add(Param.optional("poolName"))
            .add(Param.optional("initializationFailTimeout"))
            .add(Param.optional("isolateInternalQueries"))
            .add(Param.optional("allowPoolSuspension"))
            .add(Param.optional("readOnly"))
            .add(Param.optional("registerMbeans"))
            .add(Param.optional("catalog"))
            .add(Param.optional("connectionInitSql"))
            .add(Param.optional("driverClassName"))
            .add(Param.optional("transactionIsolation"))
            .add(Param.optional("validationTimeout"))
            .add(Param.optional("leakDetectionThreshold"))
            .add(Param.optional("dataSource"))
            .add(Param.optional("schema"))
            .add(Param.optional("threadFactory"))
            .add(Param.optional("scheduledExecutor"))
            .asReadOnly();
    }

    @Override
    public void close() {
        try {
            this.getConnection().close();
        } catch (Exception e) {
            logger.warn("auto-closeable jdbc connection threw exception in jdbc space(" + this.spaceName + "): " + e);
            throw new RuntimeException(e);
        }
    }
}
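The space above builds a HikariCP pool from the adapter parameters and holds one connection from it. Below is a trimmed-down sketch of the same HikariCP setup using only standard HikariConfig knobs; the JDBC URL and credentials are placeholders, and the defaults mirror the values the space applies (autoCommit=false, maximumPoolSize=40, keepaliveTime=150000 ms).

import com.zaxxer.hikari.HikariConfig;
import com.zaxxer.hikari.HikariDataSource;

import java.sql.Connection;
import java.sql.SQLException;

final class HikariPoolSketch {
    static HikariDataSource buildPool() {
        HikariConfig config = new HikariConfig();
        // Placeholder URL and credentials; the space derives these from NBConfiguration.
        config.setJdbcUrl("jdbc:postgresql://localhost:26257/defaultdb");
        config.setUsername("app_user");
        config.setPassword("app_password");
        config.setAutoCommit(false);          // matches the space's default
        config.setMaximumPoolSize(40);        // matches the space's default
        config.setKeepaliveTime(150_000);     // milliseconds, matches the space's default
        // Driver-level properties are passed through to the underlying pgjdbc DataSource.
        config.addDataSourceProperty("sslmode", "verify-full");
        config.addDataSourceProperty("rewriteBatchedInserts", true);
        return new HikariDataSource(config);
    }

    public static void main(String[] args) throws SQLException {
        try (HikariDataSource pool = buildPool();
             Connection conn = pool.getConnection()) {
            System.out.println("pool up, autoCommit=" + conn.getAutoCommit());
        }
    }
}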
JDBCDDLOpDispenser.java (new file)
@@ -0,0 +1,51 @@
package io.nosqlbench.adapter.jdbc.opdispensers;

import io.nosqlbench.adapter.jdbc.JDBCSpace;
import io.nosqlbench.adapter.jdbc.optypes.JDBCOp;
import io.nosqlbench.engine.api.activityimpl.BaseOpDispenser;
import io.nosqlbench.engine.api.activityimpl.uniform.DriverAdapter;
import io.nosqlbench.engine.api.templating.ParsedOp;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

import java.sql.Connection;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.function.LongFunction;

public class JDBCDDLOpDispenser extends BaseOpDispenser<JDBCOp, JDBCSpace> {
    private static final Logger logger = LogManager.getLogger(JDBCDDLOpDispenser.class);
    private final LongFunction<String> targetFunction;
    private final LongFunction<Connection> connectionLongFunction;
    private final LongFunction<Statement> statementLongFunction;

    public JDBCDDLOpDispenser(DriverAdapter<JDBCOp, JDBCSpace> adapter, LongFunction<Connection> connectionLongFunc, ParsedOp op, LongFunction<String> targetFunction) {
        super(adapter, op);

        this.connectionLongFunction = connectionLongFunc;
        this.targetFunction = targetFunction;
        this.statementLongFunction = createStmtFunc(op);
    }

    protected LongFunction<Statement> createStmtFunc(ParsedOp cmd) {
        try {
            LongFunction<Statement> basefunc = l -> {
                try {
                    return this.connectionLongFunction.apply(l).createStatement();
                } catch (SQLException e) {
                    throw new RuntimeException(e);
                }
            };
            return basefunc;
        } catch(Exception ex) {
            String err_msg = "Error while attempting to create the jdbc statement from the connection";
            logger.error(err_msg, ex);
            throw new RuntimeException(err_msg, ex);
        }
    }

    @Override
    public JDBCOp apply(long cycle) {
        return new JDBCOp(this.connectionLongFunction.apply(cycle), this.statementLongFunction.apply(cycle), targetFunction.apply(cycle));
    }
}
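The dispenser above defers both connection lookup and Statement creation to cycle time through LongFunctions, converting the checked SQLException to an unchecked one. A compact sketch of that deferral pattern on its own is shown below; names are hypothetical.

import java.sql.Connection;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.function.LongFunction;

final class DeferredStatementSketch {
    // Given a per-cycle connection lookup, build a per-cycle statement factory
    // that wraps the checked SQLException in an unchecked exception.
    static LongFunction<Statement> statementPerCycle(LongFunction<Connection> connectionPerCycle) {
        return cycle -> {
            try {
                return connectionPerCycle.apply(cycle).createStatement();
            } catch (SQLException e) {
                throw new RuntimeException("could not create statement for cycle " + cycle, e);
            }
        };
    }
}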
JDBCQueryOpDispenser.java (new file)
@@ -0,0 +1,130 @@
package io.nosqlbench.adapter.jdbc.opdispensers;

import io.nosqlbench.adapter.jdbc.JDBCSpace;
import io.nosqlbench.adapter.jdbc.optypes.JDBCOp;
import io.nosqlbench.engine.api.activityimpl.BaseOpDispenser;
import io.nosqlbench.engine.api.activityimpl.uniform.DriverAdapter;
import io.nosqlbench.engine.api.templating.ParsedOp;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

import javax.sql.DataSource;
import java.sql.Statement;
import java.util.function.LongFunction;

public class JDBCQueryOpDispenser extends BaseOpDispenser<JDBCOp, JDBCSpace> {
    private final static Logger logger = LogManager.getLogger(JDBCQueryOpDispenser.class);
    private final DataSource dataSource;
    private final LongFunction<JDBCOp> jdbcOpLongFunction;
//    private final LongFunction<String> tableNameFunc;
    //private final LongFunction<String> targetFunction;

    public JDBCQueryOpDispenser(DriverAdapter adapter, LongFunction<JDBCSpace> jdbcSpaceLongFunction, ParsedOp op/*, LongFunction<String> targetFunction*/) {
        super(adapter, op);
        this.jdbcOpLongFunction = getOpFunc(jdbcSpaceLongFunction, op);
        //this.targetFunction = targetFunction;
        //TODO -- implement this
        dataSource = null;
    }

    public JDBCQueryOpDispenser(DriverAdapter<JDBCOp, JDBCSpace> adapter, ParsedOp op) {
        super(adapter, op);
        //TODO -- implement this
        this.jdbcOpLongFunction = null;
        this.dataSource = null;
        //this.targetFunction = null;
    }

    protected LongFunction<Statement> createStmtFunc(ParsedOp cmd) {
        LongFunction<Statement> basefunc = l -> null;//targetFunction.apply(l));
        return null;
    }

    private LongFunction<JDBCOp> getOpFunc(LongFunction<JDBCSpace> jdbcSpaceLongFunction, ParsedOp op) {
        /*
        LongFunction<HttpRequest.Builder> builderF = l -> HttpRequest.newBuilder();
        LongFunction<String> bodyF = op.getAsFunctionOr("body", null);
        LongFunction<HttpRequest.BodyPublisher> bodyPublisherF =
            l -> Optional.ofNullable(bodyF.apply(l)).map(HttpRequest.BodyPublishers::ofString).orElse(
                HttpRequest.BodyPublishers.noBody()
            );

        LongFunction<String> methodF = op.getAsFunctionOr("method", "GET");
        LongFunction<HttpRequest.Builder> initBuilderF =
            l -> builderF.apply(l).method(methodF.apply(l), bodyPublisherF.apply(l));

        initBuilderF = op.enhanceFuncOptionally(
            initBuilderF, "version", String.class,
            (b, v) -> b.version(HttpClient.Version.valueOf(
                    v.replaceAll("/1.1", "_1_1")
                        .replaceAll("/2.0", "_2")
                )
            )
        );

        Optional<LongFunction<String>> optionalUriFunc = op.getAsOptionalFunction("uri", String.class);
        LongFunction<String> urifunc;
        // Add support for URLENCODE on the uri field if either it statically or dynamically contains the E or URLENCODE pattern,
        // OR the enable_urlencode op field is set to true.
        if (optionalUriFunc.isPresent()) {
            String testUriValue = optionalUriFunc.get().apply(0L);
            if (HttpFormatParser.URLENCODER_PATTERN.matcher(testUriValue).find()
                || op.getStaticConfigOr("enable_urlencode", true)) {
                initBuilderF =
                    op.enhanceFuncOptionally(
                        initBuilderF,
                        "uri",
                        String.class,
                        (b, v) -> b.uri(URI.create(HttpFormatParser.rewriteExplicitSections(v)))
                    );
            }
        } else {
            initBuilderF = op.enhanceFuncOptionally(initBuilderF, "uri", String.class, (b, v) -> b.uri(URI.create(v)));
        }

        op.getOptionalStaticValue("follow_redirects", boolean.class);

        List<String> headerNames = op.getDefinedNames().stream()
            .filter(n -> n.charAt(0) >= 'A')
            .filter(n -> n.charAt(0) <= 'Z')
            .toList();
        if (headerNames.size() > 0) {
            for (String headerName : headerNames) {
                initBuilderF = op.enhanceFunc(initBuilderF, headerName, String.class, (b, h) -> b.header(headerName, h));
            }
        }

        initBuilderF = op.enhanceFuncOptionally(initBuilderF, "timeout", long.class, (b, v) -> b.timeout(Duration.ofMillis(v)));

        LongFunction<HttpRequest.Builder> finalInitBuilderF = initBuilderF;
        LongFunction<HttpRequest> reqF = l -> finalInitBuilderF.apply(l).build();

        Pattern ok_status = op.getOptionalStaticValue("ok-status", String.class)
            .map(Pattern::compile)
            .orElse(Pattern.compile(DEFAULT_OK_STATUS));

        Pattern ok_body = op.getOptionalStaticValue("ok-body", String.class)
            .map(Pattern::compile)
            .orElse(null);

        LongFunction<HttpOp> opFunc = cycle -> new HttpOp(
            jdbcSpaceLongFunction.apply(cycle).getClient(),
            reqF.apply(cycle),
            ok_status,
            ok_body,
            jdbcSpaceLongFunction.apply(cycle), cycle
        );
        */
        //return null;
        LongFunction<JDBCOp> jdbcOpLongFunction = cycle -> new JDBCOp(jdbcSpaceLongFunction.apply(cycle), "DUMMY_STRINGcycle");
        return jdbcOpLongFunction;
    }

    @Override
    public JDBCOp apply(long value) {
        JDBCOp op = this.jdbcOpLongFunction.apply(value);
        return op;
    }
}
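The query dispenser above is still a stub: its statement function returns null and most of the body is commented-out op-mapping code carried over from the HTTP adapter as a reference. For orientation only, here is a hedged sketch of what a read-path op would ultimately have to do per cycle with plain JDBC; names are hypothetical and this is not the adapter's implementation.

import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;

final class QueryOpSketch implements Runnable {
    private final Connection connection;
    private final String query;   // the rendered SELECT for this cycle

    QueryOpSketch(Connection connection, String query) {
        this.connection = connection;
        this.query = query;
    }

    @Override
    public void run() {
        try (Statement stmt = connection.createStatement();
             ResultSet rs = stmt.executeQuery(query)) {
            int rows = 0;
            while (rs.next()) {
                rows++;           // a real op would verify or sample the rows here
            }
            System.out.println(rows + " rows read");
        } catch (SQLException e) {
            throw new RuntimeException("query op failed: " + query, e);
        }
    }
}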
JDBCOp.java (new file)
@@ -0,0 +1,601 @@
package io.nosqlbench.adapter.jdbc.optypes;

import io.nosqlbench.adapter.jdbc.JDBCSpace;
import io.nosqlbench.engine.api.activityimpl.uniform.flowtypes.RunnableOp;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

import javax.sql.DataSource;
import java.sql.Connection;
import java.sql.SQLException;
import java.sql.Statement;

/*
 * @see <a href="https://github.com/brettwooldridge/HikariCP">HikariCP connection pooling</a> for details.
 */
public /*abstract*/ class JDBCOp implements RunnableOp/*CycleOp<Object>*/ {
    private final static Logger logger = LogManager.getLogger(JDBCOp.class);
    protected DataSource dataSource;
    private final JDBCSpace jdbcSpace;

    public String getTargetStatement() {
        return targetStatement;
    }

    private final String targetStatement;

    public DataSource getDataSource() {
        return dataSource;
    }

    public JDBCSpace getJdbcSpace() {
        return jdbcSpace;
    }

    public Connection getConnection() {
        return connection;
    }

    public Statement getStatement() {
        return statement;
    }

    private final Connection connection;
    private final Statement statement;

    /**
     * Unused.
     * @param jdbcSpace
     * @param targetStatement
     */
    public JDBCOp(JDBCSpace jdbcSpace, String targetStatement) {
        //TODO - implement code
        //this.dataSource = new HikariDataSource();
        this.jdbcSpace = jdbcSpace;
        this.targetStatement = targetStatement;
        this.connection = null;
        this.statement = null;
    }

    /**
     *
     * @param connection
     * @param statement
     * @param targetStatement
     */
    public JDBCOp(Connection connection, Statement statement, String targetStatement) {
        this.connection = connection;
        this.statement = statement;
        this.targetStatement = targetStatement;
        this.jdbcSpace = null;
    }

    @Override
    public void run() {
        logger.info(() -> "Executing JDBCOp for the given cycle.");
        try {
            this.getConnection().setAutoCommit(false);
            if(logger.isDebugEnabled()) {
                logger.debug(() -> "JDBC Query is: " + this.getTargetStatement());
            }
            this.getStatement().execute(this.getTargetStatement());
            this.getConnection().commit();
            logger.info(() -> "Executed the JDBC statement & committed the connection successfully");
        } catch (SQLException sqlException) {
            logger.error("JDBCOp ERROR: { state => %s, cause => %s, message => %s }\n",
                sqlException.getSQLState(), sqlException.getCause(), sqlException.getMessage(), sqlException);
            throw new RuntimeException(sqlException);
        } catch (Exception ex) {
            String exMsg = String.format("Exception while attempting to run the jdbc statement %s", getStatement());
            logger.error(exMsg, ex);
            throw new RuntimeException(exMsg, ex);
        }

        /*
        //TODO - implement code
        dataSource = jdbcSpace.getHikariDataSource();
        if(null != dataSource) {
            try (Connection connection = dataSource.getConnection()) {
                connection.setAutoCommit(false);

                Statement stmt = connection.createStatement();
                stmt.execute("DROP TABLE IF EXISTS accounts");
                if (null != stmt) {
                    stmt.close();
                }
                connection.commit();
                logger.info(() -> "Executed the JDBC statement & committed the connection successfully");
            } catch (SQLException sqlException) {
                logger.error("JDBCOp ERROR: { state => %s, cause => %s, message => %s }\n",
                    sqlException.getSQLState(), sqlException.getCause(), sqlException.getMessage(), sqlException);
                throw new RuntimeException(sqlException);
            } catch(Exception exception) {
                logger.error("Exception while executing JDBCOp", exception);
                throw new RuntimeException(exception);
            } finally {
                dataSource = null;
            }
        } else {
            logger.error(() -> "Datasource is found to be null. Exiting operation.");
        }*/
    }
}
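The run() method above disables auto-commit, executes the rendered statement, and commits. Below is a minimal standalone version of that execute-then-commit flow, with an explicit rollback on failure (which the op above currently leaves to the pool); it assumes a plain JDBC connection and is only a sketch, not the adapter's code.

import java.sql.Connection;
import java.sql.SQLException;
import java.sql.Statement;

final class ExecuteAndCommitSketch {
    static void executeInTx(Connection conn, String sql) throws SQLException {
        conn.setAutoCommit(false);
        try (Statement stmt = conn.createStatement()) {
            stmt.execute(sql);      // execute() covers DDL as well as DML
            conn.commit();
        } catch (SQLException e) {
            conn.rollback();        // not done by the op above; shown for completeness
            throw e;
        }
    }
}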
/**
|
||||||
|
* References:
|
||||||
|
* https://docs.oracle.com/javase/tutorial/jdbc/basics/gettingstarted.html
|
||||||
|
* https://docs.oracle.com/javase/9/docs/api/java/sql/package-summary.html
|
||||||
|
* https://jdbc.postgresql.org/documentation/query/
|
||||||
|
* https://www.cockroachlabs.com/docs/v22.2/connection-pooling.html
|
||||||
|
* https://www.cockroachlabs.com/docs/v22.2/connection-parameters#supported-options-parameters
|
||||||
|
* https://www.cockroachlabs.com/docs/v22.2/sql-statements.html#query-management-statements
|
||||||
|
* https://docs.yugabyte.com/preview/drivers-orms/java/yugabyte-jdbc/
|
||||||
|
* @author Created by Madhavan Sridharan on Jan 21, 2023 6:26:09 PM.
|
||||||
|
|
||||||
|
package com.jdbc.example.postgresql;
|
||||||
|
|
||||||
|
import java.math.BigDecimal;
|
||||||
|
import java.sql.Connection;
|
||||||
|
import java.sql.PreparedStatement;
|
||||||
|
import java.sql.ResultSet;
|
||||||
|
import java.sql.ResultSetMetaData;
|
||||||
|
import java.sql.SQLException;
|
||||||
|
import java.sql.Statement;
|
||||||
|
import java.time.LocalTime;
|
||||||
|
import java.util.HashMap;
|
||||||
|
import java.util.Map;
|
||||||
|
import java.util.Random;
|
||||||
|
import java.util.UUID;
|
||||||
|
|
||||||
|
import javax.sql.DataSource;
|
||||||
|
|
||||||
|
import org.postgresql.ds.PGSimpleDataSource;
|
||||||
|
|
||||||
|
|
||||||
|
* @author madhavan.sridharan
|
||||||
|
*
|
||||||
|
|
||||||
|
public class crdb {
|
||||||
|
public static void main(String... args) {
|
||||||
|
PGSimpleDataSource ds = new PGSimpleDataSource();
|
||||||
|
//ds.setUrl("jdbc:postgresql://insectdb0-3153.g8z.cockroachlabs.cloud:26257/defaultdb?sslmode=verify-full");
|
||||||
|
ds.setUrl("jdbc:postgresql://insectdb0-3153.g8z.cockroachlabs.cloud:26257/defaultdb?sslmode=verify-ca");
|
||||||
|
ds.setUser(System.getProperty("CRDB_USER"));
|
||||||
|
ds.setPassword(System.getProperty("CRDB_PSWD"));
|
||||||
|
ds.setSslRootCert(System.getProperty("CRDB_SSLCERT"));
|
||||||
|
|
||||||
|
try (Connection con = ds.getConnection()) {
|
||||||
|
|
||||||
|
} catch (SQLException e) {
|
||||||
|
// TODO Auto-generated catch block
|
||||||
|
e.printStackTrace();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create DAO.
|
||||||
|
BasicExampleDAO dao = new BasicExampleDAO(ds);
|
||||||
|
|
||||||
|
// Test our retry handling logic if FORCE_RETRY is true. This
|
||||||
|
// method is only used to test the retry logic. It is not
|
||||||
|
// necessary in production code.
|
||||||
|
dao.testRetryHandling();
|
||||||
|
|
||||||
|
dao.dropAccountTable();
|
||||||
|
dao.createAccountTable();
|
||||||
|
|
||||||
|
// Insert a few accounts "by hand", using INSERTs on the backend.
|
||||||
|
Map<String, String> balances = new HashMap<>();
|
||||||
|
UUID id1 = UUID.randomUUID();
|
||||||
|
UUID id2 = UUID.randomUUID();
|
||||||
|
balances.put(id1.toString(), "1000");
|
||||||
|
balances.put(id2.toString(), "250");
|
||||||
|
int updatedAccounts = dao.updateAccounts(balances);
|
||||||
|
System.out.printf("BasicExampleDAO.updateAccounts:\n => %s total updated accounts\n", updatedAccounts);
|
||||||
|
|
||||||
|
// How much money is in these accounts?
|
||||||
|
BigDecimal balance1 = dao.getAccountBalance(id1);
|
||||||
|
BigDecimal balance2 = dao.getAccountBalance(id2);
|
||||||
|
System.out.printf("main:\n => Account balances at time '%s':\n ID %s => $%s\n ID %s => $%s\n",
|
||||||
|
LocalTime.now(), 1, balance1, 2, balance2);
|
||||||
|
|
||||||
|
// Transfer $100 from account 1 to account 2
|
||||||
|
UUID fromAccount = UUID.randomUUID();
|
||||||
|
UUID toAccount = UUID.randomUUID();
|
||||||
|
BigDecimal transferAmount = BigDecimal.valueOf(100);
|
||||||
|
int transferredAccounts = dao.transferFunds(fromAccount, toAccount, transferAmount);
|
||||||
|
if (transferredAccounts != -1) {
|
||||||
|
System.out.printf(
|
||||||
|
"BasicExampleDAO.transferFunds:\n => $%s transferred between accounts %s and %s, %s rows updated\n",
|
||||||
|
transferAmount, fromAccount, toAccount, transferredAccounts);
|
||||||
|
}
|
||||||
|
|
||||||
|
balance1 = dao.getAccountBalance(id1);
|
||||||
|
balance2 = dao.getAccountBalance(id2);
|
||||||
|
System.out.printf("main:\n => Account balances at time '%s':\n ID %s => $%s\n ID %s => $%s\n",
|
||||||
|
LocalTime.now(), 1, balance1, 2, balance2);
|
||||||
|
|
||||||
|
// Bulk insertion example using JDBC's batching support.
|
||||||
|
int totalRowsInserted = dao.bulkInsertRandomAccountData();
|
||||||
|
System.out.printf("\nBasicExampleDAO.bulkInsertRandomAccountData:\n => finished, %s total rows inserted\n",
|
||||||
|
totalRowsInserted);
|
||||||
|
|
||||||
|
// Print out 10 account values.
|
||||||
|
int accountsRead = dao.readAccounts(10);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
*
|
||||||
|
* Data access object used by 'BasicExample'. Abstraction over some common
|
||||||
|
* CockroachDB operations, including:
|
||||||
|
*
|
||||||
|
* - Auto-handling transaction retries in the 'runSQL' method
|
||||||
|
*
|
||||||
|
* - Example of bulk inserts in the 'bulkInsertRandomAccountData' method
|
||||||
|
|
||||||
|
|
||||||
|
class BasicExampleDAO {
|
||||||
|
|
||||||
|
private static final int MAX_RETRY_COUNT = 3;
|
||||||
|
private static final String RETRY_SQL_STATE = "40001";
|
||||||
|
private static final boolean FORCE_RETRY = false;
|
||||||
|
|
||||||
|
private final DataSource ds;
|
||||||
|
|
||||||
|
private final Random rand = new Random();
|
||||||
|
|
||||||
|
BasicExampleDAO(DataSource ds) {
|
||||||
|
this.ds = ds;
|
||||||
|
}
|
||||||
|
|
||||||
|
*
|
||||||
|
* Used to test the retry logic in 'runSQL'. It is not necessary in production
|
||||||
|
* code.
|
||||||
|
|
||||||
|
void testRetryHandling() {
|
||||||
|
if (BasicExampleDAO.FORCE_RETRY) {
|
||||||
|
runSQL("SELECT crdb_internal.force_retry('1s':::INTERVAL)");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
*
|
||||||
|
* Run SQL code in a way that automatically handles the transaction retry logic
|
||||||
|
* so we don't have to duplicate it in various places.
|
||||||
|
*
|
||||||
|
* @param sqlCode a String containing the SQL code you want to execute. Can have
|
||||||
|
* placeholders, e.g., "INSERT INTO accounts (id, balance) VALUES
|
||||||
|
* (?, ?)".
|
||||||
|
*
|
||||||
|
* @param args String Varargs to fill in the SQL code's placeholders.
|
||||||
|
* @return Integer Number of rows updated, or -1 if an error is thrown.
|
||||||
|
|
||||||
|
public Integer runSQL(String sqlCode, String... args) {
|
||||||
|
|
||||||
|
// This block is only used to emit class and method names in
|
||||||
|
// the program output. It is not necessary in production
|
||||||
|
// code.
|
||||||
|
StackTraceElement[] stacktrace = Thread.currentThread().getStackTrace();
|
||||||
|
StackTraceElement elem = stacktrace[2];
|
||||||
|
String callerClass = elem.getClassName();
|
||||||
|
String callerMethod = elem.getMethodName();
|
||||||
|
|
||||||
|
int rv = 0;
|
||||||
|
|
||||||
|
try (Connection connection = ds.getConnection()) {
|
||||||
|
|
||||||
|
// We're managing the commit lifecycle ourselves so we can
|
||||||
|
// automatically issue transaction retries.
|
||||||
|
connection.setAutoCommit(false);
|
||||||
|
|
||||||
|
int retryCount = 0;
|
||||||
|
|
||||||
|
while (retryCount <= MAX_RETRY_COUNT) {
|
||||||
|
|
||||||
|
if (retryCount == MAX_RETRY_COUNT) {
|
||||||
|
String err = String.format("hit max of %s retries, aborting", MAX_RETRY_COUNT);
|
||||||
|
throw new RuntimeException(err);
|
||||||
|
}
|
||||||
|
|
||||||
|
// This block is only used to test the retry logic.
|
||||||
|
// It is not necessary in production code. See also
|
||||||
|
// the method 'testRetryHandling()'.
|
||||||
|
if (FORCE_RETRY) {
|
||||||
|
forceRetry(connection); // SELECT 1
|
||||||
|
}
|
||||||
|
|
||||||
|
try (PreparedStatement pstmt = connection.prepareStatement(sqlCode)) {
|
||||||
|
|
||||||
|
// Loop over the args and insert them into the
|
||||||
|
// prepared statement based on their types. In
|
||||||
|
// this simple example we classify the argument
|
||||||
|
// types as "integers" and "everything else"
|
||||||
|
// (a.k.a. strings).
|
||||||
|
for (int i = 0; i < args.length; i++) {
|
||||||
|
int place = i + 1;
|
||||||
|
String arg = args[i];
|
||||||
|
|
||||||
|
try {
|
||||||
|
int val = Integer.parseInt(arg);
|
||||||
|
pstmt.setInt(place, val);
|
||||||
|
} catch (NumberFormatException e) {
|
||||||
|
pstmt.setString(place, arg);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (pstmt.execute()) {
|
||||||
|
// We know that `pstmt.getResultSet()` will
|
||||||
|
// not return `null` if `pstmt.execute()` was
|
||||||
|
// true
|
||||||
|
ResultSet rs = pstmt.getResultSet();
|
||||||
|
ResultSetMetaData rsmeta = rs.getMetaData();
|
||||||
|
int colCount = rsmeta.getColumnCount();
|
||||||
|
|
||||||
|
// This printed output is for debugging and/or demonstration
|
||||||
|
// purposes only. It would not be necessary in production code.
|
||||||
|
System.out.printf("\n%s.%s:\n '%s'\n", callerClass, callerMethod, pstmt);
|
||||||
|
|
||||||
|
while (rs.next()) {
|
||||||
|
for (int i = 1; i <= colCount; i++) {
|
||||||
|
String name = rsmeta.getColumnName(i);
|
||||||
|
String type = rsmeta.getColumnTypeName(i);
|
||||||
|
|
||||||
|
// In this "bank account" example we know we are only handling
|
||||||
|
// integer values (technically 64-bit INT8s, the CockroachDB
|
||||||
|
// default). This code could be made into a switch statement
|
||||||
|
// to handle the various SQL types needed by the application.
|
||||||
|
if ("int8".equals(type)) {
|
||||||
|
int val = rs.getInt(name);
|
||||||
|
|
||||||
|
// This printed output is for debugging and/or demonstration
|
||||||
|
// purposes only. It would not be necessary in production code.
|
||||||
|
System.out.printf(" %-8s => %10s\n", name, val);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
int updateCount = pstmt.getUpdateCount();
|
||||||
|
rv += updateCount;
|
||||||
|
|
||||||
|
// This printed output is for debugging and/or demonstration
|
||||||
|
// purposes only. It would not be necessary in production code.
|
||||||
|
System.out.printf("\n%s.%s:\n '%s'\n", callerClass, callerMethod, pstmt);
|
||||||
|
}
|
||||||
|
|
||||||
|
connection.commit();
|
||||||
|
break;
|
||||||
|
|
||||||
|
} catch (SQLException e) {
|
||||||
|
|
||||||
|
if (RETRY_SQL_STATE.equals(e.getSQLState())) {
|
||||||
|
// Since this is a transaction retry error, we
|
||||||
|
// roll back the transaction and sleep a
|
||||||
|
// little before trying again. Each time
|
||||||
|
// through the loop we sleep for a little
|
||||||
|
// longer than the last time
|
||||||
|
// (A.K.A. exponential backoff).
|
||||||
|
System.out.printf(
|
||||||
|
"retryable exception occurred:\n sql state = [%s]\n message = [%s]\n retry counter = %s\n",
|
||||||
|
e.getSQLState(), e.getMessage(), retryCount);
|
||||||
|
connection.rollback();
|
||||||
|
retryCount++;
|
||||||
|
int sleepMillis = (int) (Math.pow(2, retryCount) * 100) + rand.nextInt(100);
|
||||||
|
System.out.printf("Hit 40001 transaction retry error, sleeping %s milliseconds\n", sleepMillis);
|
||||||
|
try {
|
||||||
|
Thread.sleep(sleepMillis);
|
||||||
|
} catch (InterruptedException ignored) {
|
||||||
|
// Necessary to allow the Thread.sleep()
|
||||||
|
// above so the retry loop can continue.
|
||||||
|
}
|
||||||
|
|
||||||
|
rv = -1;
|
||||||
|
} else {
|
||||||
|
rv = -1;
|
||||||
|
throw e;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} catch (SQLException e) {
|
||||||
|
System.out.printf("BasicExampleDAO.runSQL ERROR: { state => %s, cause => %s, message => %s }\n",
|
||||||
|
e.getSQLState(), e.getCause(), e.getMessage());
|
||||||
|
rv = -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
return rv;
|
||||||
|
}
|
||||||
|
|
||||||
|
*
|
||||||
|
* Helper method called by 'testRetryHandling'. It simply issues a "SELECT 1"
|
||||||
|
* inside the transaction to force a retry. This is necessary to take the
|
||||||
|
* connection's session out of the AutoRetry state, since otherwise the other
|
||||||
|
* statements in the session will be retried automatically, and the client (us)
|
||||||
|
* will not see a retry error. Note that this information is taken from the
|
||||||
|
* following test:
|
||||||
|
* https://github.com/cockroachdb/cockroach/blob/master/pkg/sql/logictest/testdata/logic_test/manual_retry
|
||||||
|
*
|
||||||
|
* @param connection Connection
|
||||||
|
|
||||||
|
private void forceRetry(Connection connection) throws SQLException {
|
||||||
|
try (PreparedStatement statement = connection.prepareStatement("SELECT 1")) {
|
||||||
|
statement.executeQuery();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
    public void dropAccountTable() {
        try (Connection connection = ds.getConnection()) {
            connection.setAutoCommit(false);

            // Drop the accounts table if it exists.
            Statement stmt = connection.createStatement();
            boolean executed = stmt.execute("DROP TABLE IF EXISTS accounts");
            if (null != stmt) { stmt.close(); }
            connection.commit();
            System.out.printf("DROPPED accounts table successfully\n");
        } catch (SQLException e) {
            System.out.printf("BasicExampleDAO.dropAccountTable ERROR: { state => %s, cause => %s, message => %s }\n",
                e.getSQLState(), e.getCause(), e.getMessage());
        }
    }

    public void createAccountTable() {
        try (Connection connection = ds.getConnection()) {
            // Create the accounts table if it does not already exist.
            connection.createStatement()
                .execute("CREATE TABLE IF NOT EXISTS accounts (id UUID PRIMARY KEY, balance int8)");

            System.out.printf("Created accounts table successfully\n");
        } catch (SQLException e) {
            System.out.printf("BasicExampleDAO.createAccountTable ERROR: { state => %s, cause => %s, message => %s }\n",
                e.getSQLState(), e.getCause(), e.getMessage());
        }

    }

    *
    * Update accounts by passing in a Map of (ID, Balance) pairs.
    *
    * @param accounts (Map)
    * @return The number of updated accounts (int)

    public int updateAccounts(Map<String, String> accounts) {
        int rows = 0;
        for (Map.Entry<String, String> account : accounts.entrySet()) {

            String k = account.getKey();
            String v = account.getValue();

            String[] args = { k, v };
            rows += runSQL("INSERT INTO accounts (id, balance) VALUES (?, ?)", args);
        }
        return rows;
    }

    * Transfer funds between one account and another. Handles transaction retries
    * in case of conflict automatically on the backend.
    *
    * @param fromId (UUID)
    * @param toId (UUID)
    * @param amount (BigDecimal)
    * @return The number of updated accounts (int)

    public int transferFunds(UUID fromId, UUID toId, BigDecimal amount) {
        String sFromId = fromId.toString();
        String sToId = toId.toString();
        String sAmount = amount.toPlainString();

        // We have omitted explicit BEGIN/COMMIT statements for
        // brevity. Individual statements are treated as implicit
        // transactions by CockroachDB (see
        // https://www.cockroachlabs.com/docs/stable/transactions.html#individual-statements).

        String sqlCode = "UPSERT INTO accounts (id, balance) VALUES"
            + "(?, ((SELECT balance FROM accounts WHERE id = ?) - ?)),"
            + "(?, ((SELECT balance FROM accounts WHERE id = ?) + ?))";

        return runSQL(sqlCode, sFromId, sFromId, sAmount, sToId, sToId, sAmount);
    }

    * Get the account balance for one account.
    *
    * We skip using the retry logic in 'runSQL()' here for the following reasons:
    *
    * 1. Since this is a single read ("SELECT"), we don't expect any transaction
    * conflicts to handle
    *
    * 2. We need to return the balance as a BigDecimal
    *
    * @param id (UUID)
    * @return balance (BigDecimal)

    public BigDecimal getAccountBalance(UUID id) {
        BigDecimal balance = BigDecimal.valueOf(0);

        try (Connection connection = ds.getConnection()) {

            // Check the current balance.
            ResultSet res = connection.createStatement()
                .executeQuery(String.format("SELECT balance FROM accounts WHERE id = '%s'", id.toString()));
            if (!res.next()) {
                System.out.printf("No users in the table with id %s", id);
            } else {
                balance = res.getBigDecimal("balance");
            }
        } catch (SQLException e) {
            System.out.printf("BasicExampleDAO.getAccountBalance ERROR: { state => %s, cause => %s, message => %s }\n",
                e.getSQLState(), e.getCause(), e.getMessage());
        }

        return balance;
    }

    * Insert randomized account data (ID, balance) using the JDBC fast path for
    * bulk inserts. The fastest way to get data into CockroachDB is the IMPORT
    * statement. However, if you must bulk ingest from the application using INSERT
    * statements, the best option is the method shown here. It will require the
    * following:
    *
    * 1. Add `rewriteBatchedInserts=true` to your JDBC connection settings (see the
    * connection info in 'BasicExample.main').
    *
    * 2. Inserting in batches of 128 rows, as used inside this method (see
    * BATCH_SIZE), since the PGJDBC driver's logic works best with powers of two,
    * such that a batch of size 128 can be 6x faster than a batch of size 250.
    *
    * @return The number of new accounts inserted (int)

    public int bulkInsertRandomAccountData() {

        Random random = new Random();
        int BATCH_SIZE = 128;
        int totalNewAccounts = 0;

        try (Connection connection = ds.getConnection()) {

            // We're managing the commit lifecycle ourselves so we can
            // control the size of our batch inserts.
            connection.setAutoCommit(false);

            // In this example we are adding 500 rows to the database,
            // but it could be any number. What's important is that
            // the batch size is 128.
            try (PreparedStatement pstmt = connection
                .prepareStatement("INSERT INTO accounts (id, balance) VALUES (?, ?)")) {
                for (int i = 0; i <= (500 / BATCH_SIZE); i++) {
                    for (int j = 0; j < BATCH_SIZE; j++) {
                        String id = UUID.randomUUID().toString();
                        BigDecimal balance = BigDecimal.valueOf(random.nextInt(1000000000));
                        pstmt.setString(1, id);
                        pstmt.setBigDecimal(2, balance);
                        pstmt.addBatch();
                    }
                    int[] count = pstmt.executeBatch();
                    totalNewAccounts += count.length;
                    System.out.printf("\nBasicExampleDAO.bulkInsertRandomAccountData:\n '%s'\n", pstmt.toString());
                    System.out.printf(" => %s row(s) updated in this batch\n", count.length);
                }
                connection.commit();
            } catch (SQLException e) {
                System.out.printf(
                    "BasicExampleDAO.bulkInsertRandomAccountData ERROR: { state => %s, cause => %s, message => %s }\n",
                    e.getSQLState(), e.getCause(), e.getMessage());
            }
        } catch (SQLException e) {
            System.out.printf(
                "BasicExampleDAO.bulkInsertRandomAccountData ERROR: { state => %s, cause => %s, message => %s }\n",
                e.getSQLState(), e.getCause(), e.getMessage());
        }
        return totalNewAccounts;
    }

    * Read out a subset of accounts from the data store.
    *
    * @param limit (int)
    * @return Number of accounts read (int)

    public int readAccounts(int limit) {
        return runSQL("SELECT id, balance FROM accounts LIMIT ?", Integer.toString(limit));
    }
}
*/
@@ -0,0 +1,19 @@
package io.nosqlbench.adapter.jdbc.optypes;

import io.nosqlbench.adapter.jdbc.JDBCSpace;
import io.nosqlbench.engine.api.activityimpl.uniform.flowtypes.RunnableOp;

public class SelectJDBCOp implements RunnableOp {
    public SelectJDBCOp(JDBCSpace jdbcSpace, long cycle) {
        super();
    }

    public SelectJDBCOp() {

    }

    @Override
    public void run() {

    }
}
28  adapter-jdbc/src/main/resources/jdbc.md  Normal file

@@ -0,0 +1,28 @@
---
weight: 0
title: jdbc
---

# JDBC driver

This JDBC driver uses HikariCP for connection pooling and works with PostgreSQL. It lets NoSQLBench generate workloads and run performance tests against any PostgreSQL-compatible database cluster, for example CockroachDB or YugabyteDB (YSQL API).

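The draft does not yet show how the pool is wired up. As a rough sketch only, the underlying HikariCP DataSource over the PostgreSQL JDBC driver could be built as below; the URL, credentials, and pool size are placeholders rather than adapter defaults, and these are standard HikariCP/PGJDBC settings, not adapter-specific options.

```java
// Rough sketch of building a HikariCP pool over the PostgreSQL JDBC driver.
// The JDBC URL, credentials, and pool size are placeholders, not adapter defaults.
import com.zaxxer.hikari.HikariConfig;
import com.zaxxer.hikari.HikariDataSource;

public class JdbcPoolSketch {
    public static HikariDataSource buildDataSource() {
        HikariConfig config = new HikariConfig();
        config.setJdbcUrl("jdbc:postgresql://localhost:26257/defaultdb?sslmode=disable");
        config.setUsername("root");
        config.setPassword("");
        config.setMaximumPoolSize(100);
        // Lets PGJDBC collapse batched INSERTs, as noted in the bulk-insert example above.
        config.addDataSourceProperty("rewriteBatchedInserts", "true");
        return new HikariDataSource(config);
    }
}
```
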
# Executing JDBC Workload

The following is an example of invoking a JDBC workload.

```
<nb_cmd> run driver=jdbc workload=/path/to/workload.yaml cycles=1000 threads=100 ...
```
In the above NB command, the following parameters are specific to the JDBC driver:
* take1
* take2

Other NB engine parameters are straightforward:
* `driver`: must be `jdbc`
* `threads`: depending on the workload type, the number of NB threads determines how many clients are created. All of the clients share the connections originating from the HikariCP connection pool.
* `*.yaml`: the NB jdbc scenario definition workload yaml file

# Configuration

## Config Sources

### Examples

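This section is still empty in the draft. As a placeholder illustration of querying through the pooled DataSource built above, a minimal sketch might look like the following; the class name and the `accounts` table are illustrative assumptions carried over from the commented-out example, not part of the adapter.

```java
// Illustrative sketch only: issue one query through a pooled JDBC connection.
import com.zaxxer.hikari.HikariDataSource;

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;

public class PooledQuerySketch {
    public static void printBalances(HikariDataSource ds) throws SQLException {
        // Borrow a connection from the pool; try-with-resources returns it on close.
        try (Connection connection = ds.getConnection();
             PreparedStatement pstmt = connection.prepareStatement(
                 "SELECT id, balance FROM accounts LIMIT ?")) {
            pstmt.setInt(1, 10);
            try (ResultSet rs = pstmt.executeQuery()) {
                while (rs.next()) {
                    System.out.printf("%s => %s%n", rs.getString("id"), rs.getBigDecimal("balance"));
                }
            }
        }
    }
}
```
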
@@ -133,8 +133,8 @@
         <dependency>
             <groupId>io.nosqlbench</groupId>
-            <artifactId>adapter-cockroachdb</artifactId>
-            <version>4.17.32-SNAPSHOT</version>
+            <artifactId>adapter-jdbc</artifactId>
+            <version>5.17.1-SNAPSHOT</version>
         </dependency>

     </dependencies>
18  pom.xml

@@ -61,6 +61,7 @@
         <module.adapter-pulsar>adapter-pulsar</module.adapter-pulsar>
         <module.adapter-s4j>adapter-s4j</module.adapter-s4j>
         <module.adapter-kafka>adapter-kafka</module.adapter-kafka>
+        <module.adapter-jdbc>adapter-jdbc</module.adapter-jdbc>

         <!-- VIRTDATA MODULES -->
         <module.virtdata-api>virtdata-api</module.virtdata-api>
@@ -102,7 +103,7 @@
         <module>adapter-pulsar</module>
         <module>adapter-s4j</module>
         <module>adapter-kafka</module>
-        <module>adapter-cockroachdb</module>
+        <module>adapter-jdbc</module>

         <!-- VIRTDATA MODULES -->
         <module>virtdata-api</module>
@@ -234,6 +235,9 @@
                 <fileset dir="${basedir}/${module.adapter-kafka}/target">
                     <include name="jacoco.exec" />
                 </fileset>
+                <fileset dir="${basedir}/${module.adapter-jdbc}/target">
+                    <include name="jacoco.exec" />
+                </fileset>
                 <fileset dir="${basedir}/${module.virtdata-api}/target">
                     <include name="jacoco.exec" />
                 </fileset>
@@ -322,6 +326,9 @@
                 <fileset dir="${basedir}/${module.adapter-kafka}/target/classes">
                     <include name="io/nosqlbench/**/*.class" />
                 </fileset>
+                <fileset dir="${basedir}/${module.adapter-jdbc}/target/classes">
+                    <include name="io/nosqlbench/**/*.class" />
+                </fileset>
                 <fileset dir="${basedir}/${module.virtdata-api}/target/classes">
                     <include name="io/nosqlbench/**/*.class" />
                 </fileset>
@@ -368,6 +375,7 @@
                 <fileset dir="${basedir}/${module.adapter-pulsar}/src/main/java" />
                 <fileset dir="${basedir}/${module.adapter-s4j}/src/main/java" />
                 <fileset dir="${basedir}/${module.adapter-kafka}/src/main/java" />
+                <fileset dir="${basedir}/${module.adapter-jdbc}/src/main/java" />
                 <fileset dir="${basedir}/${module.virtdata-api}/src/main/java" />
                 <fileset dir="${basedir}/${module.virtdata-lang}/src/main/java" />
                 <fileset dir="${basedir}/${module.virtdata-realdata}/src/main/java" />
@@ -404,7 +412,7 @@
             <plugin>
                 <groupId>org.apache.maven.plugins</groupId>
                 <artifactId>maven-project-info-reports-plugin</artifactId>
-                <version>3.4.1</version>
+                <version>3.4.2</version>
             </plugin>
         </plugins>
     </reporting>
@@ -422,6 +430,12 @@
             <organization>nosqlbench.io</organization>
             <organizationUrl>http://nosqlbench.io/</organizationUrl>
         </developer>
+        <developer>
+            <name>Madhavan S</name>
+            <email>madhavan_5k@yahoo.com</email>
+            <organization>nosqlbench.io</organization>
+            <organizationUrl>http://nosqlbench.io/</organizationUrl>
+        </developer>
     </developers>

     <repositories>