mirror of https://github.com/nosqlbench/nosqlbench.git
synced 2025-02-25 18:55:28 -06:00

commit 94d0ea29dc: Merge remote-tracking branch 'upstream/master'
23  .github/workflows/docker.yml  (vendored)
@@ -1,23 +0,0 @@
-name: Docker Release
-
-on:
-  push:
-    tags:
-      - 'nosqlbench-*'
-
-jobs:
-  build:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@master
-      - name: Build nosqlbench docker
-        run: cd nb && docker build -t nosqlbench -f ./Dockerfile-build ./
-      - name: Publish to Registry
-        uses: elgohr/Publish-Docker-Github-Action@master
-        with:
-          name: nosqlbench/nosqlbench
-          username: ${{ secrets.DOCKER_USERNAME }}
-          password: ${{ secrets.DOCKER_PASSWORD }}
-          tag_names: true
-          dockerfile: Dockerfile
-          workdir: nb
34  .github/workflows/dockerhub.yml  (vendored)
@@ -1,34 +0,0 @@
-# This is a basic workflow to help you get started with Actions
-
-name: dockerhub
-
-on:
-  push:
-    # Sequence of patterns matched against refs/tags
-    tags:
-      - 'nosqlbench-*' # Push events to matching nosqlbench-[version]
-
-# A workflow run is made up of one or more jobs that can run sequentially or in parallel
-jobs:
-  # This workflow contains a single job called "build"
-  build:
-    # The type of runner that the job will run on
-    runs-on: ubuntu-latest
-
-    # Steps represent a sequence of tasks that will be executed as part of the job
-    steps:
-      # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
-      - uses: actions/checkout@v2
-      - name: Login to DockerHub Registry
-        run: echo ${{ secrets.DOCKERHUB_PASSWORD }} | docker login -u ${{ secrets.DOCKERHUB_USERNAME }} --password-stdin
-      - name: Get the version
-        id: vars
-        run: echo ::set-output name=tag::$(echo ${GITHUB_REF:10})
-      - name: Build the tagged Docker image
-        run: docker build ./nb/ --file Dockerfile --tag nosqlbench/nosqlbench:${{steps.vars.outputs.tag}}
-      - name: Push the tagged Docker image
-        run: docker push nosqlbench/nosqlbench:${{steps.vars.outputs.tag}}
-      - name: Build the latest Docker image
-        run: docker build ./nb/ --file Dockerfile --tag nosqlbench/nosqlbench:latest
-      - name: Push the latest Docker image
-        run: docker push nosqlbench/nosqlbench:latest
73  .github/workflows/release.yml  (vendored)
@@ -9,8 +9,12 @@ jobs:
   release:
     runs-on: ubuntu-18.04
    steps:
-      - uses: actions/checkout@v2
-      - uses: actions/setup-java@v1
+
+      - name: checkout repo
+        uses: actions/checkout@v2
+
+      - name: setup java
+        uses: actions/setup-java@v1
         with:
           java-version: '14'
           java-package: jdk
@@ -21,7 +25,7 @@ jobs:
         env:
           GIT_RELEASE_BOT_NAME: "nb-droid"

-      - name: capture tty
+      - name: capture tty for gpg
        run: |
          echo "::set-env name=TTY::"$(tty)
          echo "::set-env name=GPG_TTY::"$(tty)
@@ -37,6 +41,7 @@ jobs:

       - name: set git username
         run: git config --global user.email "${{ secrets.NBDROID_EMAIL }}"

       - name: set git email
         run: git config --global user.name "${{ secrets.NBDROID_NAME }}"
+
@@ -69,7 +74,7 @@ jobs:
           echo "::set-env name=RELEASE_VERSION::${RELEASE_VERSION}"
           echo "::set-env name=RELEASE_TAGNAME::${RELEASE_TAGNAME}"

-      - name: Prepare Summary
+      - name: prepare release summary
         id: prepare_summary
         run: |
           summary=$(scripts/release-notes.sh)
@@ -111,25 +116,37 @@ jobs:
           MAVEN_REPO_SERVER_USERNAME: ${{ secrets.MVN_REPO_PRIVATE_REPO_USER }}
           MAVEN_REPO_SERVER_PASSWORD: ${{ secrets.MVN_REPO_PRIVATE_REPO_PASSWORD }}


-      - name: upload artifacts
+      - name: bundle artifacts
         run: |
           pwd
           ls -l
           mkdir staging && cp nb/target/nb.jar nb/target/nb staging
-      - uses: actions/upload-artifact@v1
+
+      - name: upload artifacts
+        uses: actions/upload-artifact@v1
         with:
           name: binaries
           path: staging

-      - name: upload guidebook
+      - name: docker push
+        uses: docker/build-push-action@v1
+        with:
+          username: ${{ secrets.DOCKER_USERNAME }}
+          password: ${{ secrets.DOCKER_PASSWORD }}
+          repository: nosqlbench/nosqlbench
+          tags: latest, ${{ env.RELEASE_VERSION }}
+          tag_with_ref: false
+
+      - name: bundle guidebook
         run: mkdir guidebook && cp -R nb/target/guidebook guidebook
-      - uses: actions/upload-artifact@v1
+
+      - name: upload guidebook
+        uses: actions/upload-artifact@v1
         with:
           name: guidebook
           path: guidebook

-      - name: Create Release
+      - name: create release
         id: create_release
         uses: actions/create-release@v1
         env:
@@ -140,7 +157,8 @@ jobs:
           draft: false
           prerelease: false
           body: ${{ steps.prepare_summary.outputs.release_summary }}
-      - name: Upload nb.jar
+
+      - name: upload nb.jar
         id: upload-nb-jar
         uses: actions/upload-release-asset@v1
         env:
@@ -150,7 +168,8 @@ jobs:
           asset_path: nb/target/nb.jar
           asset_name: nb.jar
           asset_content_type: application/octet-stream
-      - name: Upload nb
+
+      - name: upload nb binary
         id: upload-nb-binary
         uses: actions/upload-release-asset@v1
         env:
@@ -161,43 +180,25 @@ jobs:
           asset_name: nb
           asset_content_type: application/octet-stream

-#      - name: find latest release
-#        run: |
-#          LATEST_GH_RELEASE_ID=$(curl --silent "https://api.github.com/repos/nosqlbench/nosqlbench/releases/latest" | jq -r .id)
-#          echo "::set-env name=LATEST_GH_RELEASE_ID::${LATEST_GH_RELEASE_ID}"
-#      - name: upload jar
-#        uses: actions/upload-release-asset@v1
-#        env:
-#          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-#        with:
-#          asset_path: nb/target/nb.jar
-#          asset_name: nb.jar
-#          asset_content_type: application/octet-stream
-#          upload_url: https://uploads.github.com/repos/nosqlbench/nosqlbench/releases/${{ env.LATEST_GH_RELEASE_ID }}/assets{?name,label}
-#      - name: upload binary
-#        uses: actions/upload-release-asset@v1
-#        env:
-#          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-#        with:
-#          asset_path: nb/target/nb
-#          asset_name: nb
-#          asset_content_type: application/octet-stream
-#          upload_url: https://uploads.github.com/repos/nosqlbench/nosqlbench/releases/${{ env.LATEST_GH_RELEASE_ID }}/assets{?name,label}

   docs:
     needs: release
     runs-on: ubuntu-18.04
     steps:

       - name: set git username
         run: git config --global user.email "${{ secrets.NBDROID_EMAIL }}"

       - name: set git email
         run: git config --global user.name "${{ secrets.NBDROID_NAME }}"

       - name: download guidebook
         uses: actions/download-artifact@v1
         with:
           name: guidebook
           path: guidebook

       - run: ls -la

       - name: clone nosqlbench-docs
         env:
           NBDROID_NAME: ${{ secrets.NBDROID_NAME }}
@@ -209,6 +210,7 @@ jobs:
           find .
           git remote set-url origin https://${{secrets.NBDROID_NAME}}:${{secrets.NBDROID_TOKEN}}@github.com/nosqlbench/nosqlbench-docs.git
           git remote -v
+
       - name: push changes
         env:
           NBDROID_NAME: ${{ secrets.NBDROID_NAME }}
@@ -229,4 +231,3 @@ jobs:
 
 
 
-
3  Dockerfile  (Normal file)
@@ -0,0 +1,3 @@
+FROM openjdk:14-alpine
+COPY nb/target/nb.jar nb.jar
+ENTRYPOINT ["java","-jar", "nb.jar"]
@@ -1,23 +1 @@
-7b61ee3a (HEAD -> master) Don't swallow exception in VirtdataComposer
-2d4bf8d0 DateRangeFunc allows flexible signatures
-8cad4414 improve debugger view of virtdata AST
-2de8df4e incremental cql-d4 work
-7fb0eb83 make cql-d4 optional via profile
-be160856 organize virtdata entry points
-4f2b2929 remove extraneous build file
-6e74b5ab virtdata composer considers all arg type combinations
-526dc5de longflow example
-52501f40 (HEAD -> master) support graal-js in nashorn compat mode
-9d0403a5 polyglot mode now does full type introspection
-ae8506ca incremental work on cql-d4
-302c3ca4 higher order functions now consider all possible matches without explicity input and output types
-5f63092e misc AST cleanups
-087c0b80 (origin/master, origin/HEAD) release commit
-2d4bf8d0 DateRangeFunc allows flexible signatures
-8cad4414 improve debugger view of virtdata AST
-2de8df4e incremental cql-d4 work
-7fb0eb83 make cql-d4 optional via profile
-be160856 organize virtdata entry points
-4f2b2929 remove extraneous build file
-6e74b5ab virtdata composer considers all arg type combinations
-526dc5de longflow example
+2a1284c3 (HEAD -> master) sync up mongo version and enable
36  devdocs/docstructure/docsketch.md  (Normal file)
@@ -0,0 +1,36 @@
+# Doc System
+
+This is a consolidation of all the doc system work thus far. This draft is meant to outline the basic features of the
+doc system at a high level, but with suitable detail for an initial refactoring. In general this builds on existing work
+in the doc system but with some adaptations for current needs, across CLI, apps, and reference material.
+
+## Content Organization
+
+All content loaded from any source is organized internally into a tree of sections by:
+
+* Front Matter Topics
+* Header Level
+
+The source path of content does not matter. However, each unit of source material is considered its own section, with
+zero or more additional subsections.
+
+A root section is the container of all sections which are not homed under another section.
+
+## Headings
+
+In some cases, it is appropriate to consolidate individual docs into larger views. In order to facilitate this, all
+sections within markdown structure are enumerated according to:
+
+- The front matter in the content source, specifically the topics assigned
+- The heading structure within the doc
+
+Thus, when the doc content is processed into the cohesive view needed by a user, all sections of all provided content
+are cross-referenced and organized into sections.
+
+The location of a document within the source filesystem or archive is not important. Topics and heading structure alone determine where content appears.
+
+## Content Naming
+
+
+
+## Content Searching
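To make the section-tree model sketched in docsketch.md above concrete, here is a minimal, hypothetical Java sketch. All names here are illustrative assumptions, not part of any existing nosqlbench doc API.

    import java.util.ArrayList;
    import java.util.List;

    // Hypothetical sketch only: models "a tree of sections keyed by front
    // matter topics and heading level" as described in the draft above.
    class DocSection {
        final List<String> topics;       // from the source's front matter
        final int headerLevel;           // markdown heading depth; 0 for the root section
        final List<DocSection> subsections = new ArrayList<>();

        DocSection(List<String> topics, int headerLevel) {
            this.topics = topics;
            this.headerLevel = headerLevel;
        }

        // A section can home another section one heading level below it;
        // sections with no home attach to the root, per the draft above.
        boolean canHome(DocSection other) {
            return other.headerLevel == this.headerLevel + 1;
        }
    }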
@@ -9,7 +9,7 @@
     <parent>
         <artifactId>mvn-defaults</artifactId>
         <groupId>io.nosqlbench</groupId>
-        <version>3.12.107-SNAPSHOT</version>
+        <version>3.12.119-SNAPSHOT</version>
         <relativePath>../mvn-defaults</relativePath>
     </parent>

@@ -18,7 +18,7 @@
         <dependency>
             <groupId>io.nosqlbench</groupId>
             <artifactId>nb-api</artifactId>
-            <version>3.12.107-SNAPSHOT</version>
+            <version>3.12.119-SNAPSHOT</version>
         </dependency>

         <dependency>
@@ -112,7 +112,7 @@
         <dependency>
             <groupId>io.nosqlbench</groupId>
             <artifactId>virtdata-api</artifactId>
-            <version>3.12.107-SNAPSHOT</version>
+            <version>3.12.119-SNAPSHOT</version>
         </dependency>

     </dependencies>
@@ -4,7 +4,7 @@
     <parent>
         <groupId>io.nosqlbench</groupId>
         <artifactId>mvn-defaults</artifactId>
-        <version>3.12.107-SNAPSHOT</version>
+        <version>3.12.119-SNAPSHOT</version>
         <relativePath>../mvn-defaults</relativePath>
     </parent>

@@ -23,7 +23,7 @@
        <dependency>
            <groupId>io.nosqlbench</groupId>
            <artifactId>engine-api</artifactId>
-            <version>3.12.107-SNAPSHOT</version>
+            <version>3.12.119-SNAPSHOT</version>
        </dependency>

        <dependency>
@@ -136,7 +136,7 @@ public class CqlActivity extends SimpleActivity implements Activity, ActivityDef
    private void initSequencer() {

        Session session = getSession();
-        Map<String,Object> fconfig = Map.of("cluster",session.getCluster());
+        Map<String,Object> fconfig = Map.of("session",session);

        SequencerType sequencerType = SequencerType.valueOf(
            getParams().getOptionalString("seq").orElse("bucket")
@@ -195,10 +195,8 @@ activity types.
 The above traces every 1000th cycle to stdout.
 If the trace log is not specified, then 'tracelog' is assumed.
 If the filename is specified as stdout, then traces are dumped to stdout.
-- **clusterid** - names the configuration to be used for this activity. Within
-  a given scenario, any activities that use the same name for clusterid will
-  share a session and cluster.
-  default: 'default'
+- **sessionid** - names the configuration to be used for this activity. Within a given scenario, any activities that use
+  the same name for sessionid will share a session and cluster. default: 'default'
 - **drivermetrics** - enable reporting of driver metrics.
   default: false
 - **driverprefix** - set the metrics name that will prefix all CQL driver metrics.
@@ -1,5 +1,16 @@
 package com.datastax.driver.core;

+import com.datastax.oss.driver.api.core.ProtocolVersion;
+import com.datastax.oss.driver.api.core.cql.Statement;
+import com.datastax.oss.driver.api.core.metadata.Metadata;
+import com.datastax.oss.driver.api.core.metadata.TokenMap;
+import com.datastax.oss.driver.api.core.metadata.token.Token;
+import com.datastax.oss.driver.api.core.metadata.token.TokenRange;
+import com.datastax.oss.driver.api.core.session.Session;
+import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry;
+import com.datastax.oss.driver.internal.core.metadata.token.Murmur3Token;
+import org.jetbrains.annotations.NotNull;
+
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.List;
@@ -7,40 +18,32 @@ import java.util.OptionalLong;
 import java.util.Set;

 public class M3PTokenFilter {
-    private final TokenRange[] ranges;
-    private final ProtocolVersion protocolVersion;
-    private final CodecRegistry codecRegistry;
-    private final Metadata clusterMetadata;
-    private final Token.Factory factory;
-
-    public M3PTokenFilter(Set<TokenRange> ranges, Cluster cluster) {
-        protocolVersion = cluster.getConfiguration().getProtocolOptions().getProtocolVersion();
-        codecRegistry = cluster.getConfiguration().getCodecRegistry();
-        clusterMetadata = cluster.getMetadata();
-        factory = Token.getFactory(clusterMetadata.partitioner);
-        List<TokenRange> rangeList = new ArrayList<>();
+    private final TokenRange[] ranges;
+
+    public M3PTokenFilter(Set<TokenRange> ranges, Session session) {
+        TokenMap tokenMap = session.getMetadata().getTokenMap().orElseThrow();
+
+        List<TokenRange> rangelist = new ArrayList<>();
+
         for (TokenRange range : ranges) {
-            if (!range.getStart().getType().equals(DataType.bigint())) {
-                throw new RuntimeException("This filter only works with bigint valued token types");
-            }
-            rangeList.add(range);
+            rangelist.add(range);
         }
-        this.ranges=rangeList.toArray(new TokenRange[0]);
+        this.ranges = rangelist.toArray(new TokenRange[0]);
         if (this.ranges.length<1) {
            throw new RuntimeException("There were no tokens found. Please check your keyspace and cluster settings.");
        }
    }

-    public OptionalLong matches(Statement statement) {
-        ByteBuffer routingKey = statement.getRoutingKey(protocolVersion, codecRegistry);
-        Token token = factory.hash(routingKey);
+    public boolean matches(Statement statement) {
+        Token token = statement.getRoutingToken();

        for (TokenRange range : ranges) {
            if (range.contains(token)) {
-                return OptionalLong.of((long)token.getValue());
+                return true;
            }
        }
-        return OptionalLong.empty();
+        return false;
    }

}
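For orientation, here is a hedged sketch of how the reworked filter might be driven from a driver-4 session. The session setup and the statement values are illustrative assumptions, not part of this commit. Note that the new matches() reads Statement#getRoutingToken(), so the statement must carry an explicitly set routing token.

    import com.datastax.driver.core.M3PTokenFilter; // the class shown in the diff above
    import com.datastax.oss.driver.api.core.CqlSession;
    import com.datastax.oss.driver.api.core.cql.SimpleStatement;
    import com.datastax.oss.driver.api.core.metadata.TokenMap;
    import com.datastax.oss.driver.api.core.metadata.token.Token;
    import com.datastax.oss.driver.api.core.metadata.token.TokenRange;
    import com.datastax.oss.driver.api.core.type.codec.TypeCodecs;
    import java.util.Set;

    public class FilterSketch {
        public static void main(String[] args) {
            // Assumes contact points and local datacenter come from application.conf.
            try (CqlSession session = CqlSession.builder().build()) {
                TokenMap tokenMap = session.getMetadata().getTokenMap().orElseThrow();
                Set<TokenRange> ranges = tokenMap.getTokenRanges();
                M3PTokenFilter filter = new M3PTokenFilter(ranges, session);

                // matches() inspects the routing token, so set one explicitly
                // by hashing an example partition key value.
                Token token = tokenMap.newToken(
                    TypeCodecs.INT.encode(42, session.getContext().getProtocolVersion()));
                SimpleStatement stmt = SimpleStatement
                    .newInstance("SELECT * FROM ks.t WHERE pk = ?", 42)
                    .setRoutingToken(token);

                boolean included = filter.matches(stmt); // true iff the token falls in a range
                System.out.println("included=" + included);
            }
        }
    }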
@@ -1,5 +1,15 @@
 package com.datastax.driver.core;

+import com.datastax.oss.driver.api.core.ProtocolVersion;
+import com.datastax.oss.driver.api.core.cql.Statement;
+import com.datastax.oss.driver.api.core.metadata.Metadata;
+import com.datastax.oss.driver.api.core.metadata.token.Token;
+import com.datastax.oss.driver.api.core.metadata.token.TokenRange;
+import com.datastax.oss.driver.api.core.session.Session;
+import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry;
+import com.datastax.oss.driver.internal.core.metadata.token.Murmur3Token;
+import com.datastax.oss.driver.internal.core.metadata.token.Murmur3TokenFactory;
+import com.datastax.oss.driver.internal.core.metadata.token.Murmur3TokenRange;
+import io.nosqlbench.activitytype.cqld4.api.StatementFilter;

 import java.nio.ByteBuffer;
@@ -13,41 +23,41 @@ public class TokenRangeStmtFilter implements StatementFilter {
     private final Metadata clusterMetadata;
     private final ProtocolVersion protocolVersion;
     private final CodecRegistry codecRegistry;
-    private final Token.Factory factory;
+    // private final Token.Factory factory;
     private TokenRange[] ranges;

-    public TokenRangeStmtFilter(Cluster cluster, String rangesSpec) {
-        clusterMetadata = cluster.getMetadata();
-        protocolVersion = cluster.getConfiguration().getProtocolOptions().getProtocolVersion();
-        codecRegistry = cluster.getConfiguration().getCodecRegistry();
-        factory = Token.getFactory(clusterMetadata.partitioner);
-        ranges = parseRanges(factory, rangesSpec);
+    public TokenRangeStmtFilter(Session session, String rangesSpec) {
+        clusterMetadata = session.getMetadata();
+        protocolVersion = session.getContext().getProtocolVersion();
+        codecRegistry = session.getContext().getCodecRegistry();
+        ranges = parseRanges(session, rangesSpec);
     }

-    private TokenRange[] parseRanges(Token.Factory factory, String rangesStr) {
+    private TokenRange[] parseRanges(Session session, String rangesStr) {
         String[] ranges = rangesStr.split(",");
         List<TokenRange> tr = new ArrayList<>();

         for (String range : ranges) {
             String[] interval = range.split(":");
-            Token start = factory.fromString(interval[0]);
-            Token end = factory.fromString(interval[1]);
-            TokenRange tokenRange = new TokenRange(start, end, factory);
+            Murmur3TokenFactory m3f = new Murmur3TokenFactory();
+            Token start = m3f.parse(interval[0]);
+            Token end = m3f.parse(interval[1]);
+            TokenRange tokenRange = m3f.range(start,end);
             tr.add(tokenRange);
         }
-        return tr.toArray(new TokenRange[tr.size()]);
+        return tr.toArray(new TokenRange[0]);
     }

     @Override
-    public boolean matches(Statement statement) {
-        ByteBuffer routingKey = statement.getRoutingKey(protocolVersion, codecRegistry);
-        Token token = factory.hash(routingKey);
+    public boolean matches(Statement<?> statement) {
+        Token routingToken = statement.getRoutingToken();
         for (TokenRange range : ranges) {
-            if (range.contains(token)) {
+            if (range.contains(routingToken)) {
                 return true;
             }
         }
         return false;

     }

     @Override
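The rangesSpec parsed above is a comma-separated list of colon-separated Murmur3 token pairs. A hedged construction sketch follows; the session argument and the token values are illustrative assumptions, not taken from the commit.

    import com.datastax.driver.core.TokenRangeStmtFilter; // the class shown above
    import com.datastax.oss.driver.api.core.CqlSession;
    import io.nosqlbench.activitytype.cqld4.api.StatementFilter;

    class RangeSpecSketch {
        // Spec format: "<startToken>:<endToken>[,<startToken>:<endToken>...]",
        // with Murmur3 token values; this pair of ranges spans the full ring.
        static StatementFilter filterForFullRing(CqlSession session) {
            return new TokenRangeStmtFilter(
                session, "-9223372036854775808:-1,0:9223372036854775807");
        }
    }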
@@ -1,71 +0,0 @@
-package com.datastax.driver.core;
-
-import java.io.BufferedWriter;
-import java.io.FileWriter;
-import java.io.IOException;
-import java.util.Comparator;
-import java.util.Set;
-
-public class TokenRangeUtil {
-
-    private final Metadata clusterMetadata;
-    private final ProtocolVersion protocolVersion;
-    private final CodecRegistry codecRegistry;
-    private final Token.Factory factory;
-    private final Cluster cluster;
-
-    public TokenRangeUtil(Cluster cluster) {
-        this.cluster= cluster;
-        clusterMetadata = cluster.getMetadata();
-        protocolVersion = cluster.getConfiguration().getProtocolOptions().getProtocolVersion();
-        codecRegistry = cluster.getConfiguration().getCodecRegistry();
-        factory = Token.getFactory(clusterMetadata.partitioner);
-    }
-
-    public Set<TokenRange> getTokenRangesFor(String keyspace, String hostaddress) {
-        Host host=null;
-        if (hostaddress.matches("\\d+")) {
-            int hostenum = Integer.parseInt(hostaddress);
-            host = clusterMetadata.getAllHosts().stream()
-                .sorted(Comparator.comparing(h -> h.getAddress().toString()))
-                .skip(hostenum)
-                .findFirst()
-                .orElseThrow();
-        } else if (!hostaddress.isEmpty()) {
-            host = clusterMetadata.getAllHosts().stream()
-                .filter(h -> h.getAddress().toString().replaceAll("/","").equals(hostaddress))
-                .findFirst()
-                .orElseThrow();
-        } else {
-            throw new RuntimeException("You must specify a host enum in order or a host address.");
-        }
-        return clusterMetadata.getTokenRanges(keyspace,host);
-    }
-
-
-    public void printRanges(String tokensks) {
-        Set<Host> hosts = clusterMetadata.getAllHosts();
-
-        for (Host host : hosts) {
-            String address = host.getAddress().toString().substring(1);
-            BufferedWriter writer = null;
-            try {
-                writer = new BufferedWriter(new FileWriter("ranges-"+address));
-                String ranges = getTokenRangesFor(tokensks, address).toString();
-                writer.write(ranges);
-
-                writer.close();
-            } catch (IOException e) {
-                e.printStackTrace();
-                throw new RuntimeException("Can't write token range files");
-            }
-        }
-
-    }
-
-
-    public M3PTokenFilter getFilterFor(Set<TokenRange> ranges) {
-        return new M3PTokenFilter(ranges, this.cluster);
-    }
-
-}
@@ -3,5 +3,5 @@ package io.nosqlbench.activitytype.cqld4.api;
 import com.datastax.oss.driver.api.core.cql.Statement;

 public interface StatementFilter {
-    boolean matches(Statement statement);
+    boolean matches(Statement<?> statement);
 }
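The wildcard matters because driver 4's Statement type is generic over its own subtype. A minimal illustrative implementation of the updated interface, not taken from the commit:

    import com.datastax.oss.driver.api.core.cql.Statement;
    import io.nosqlbench.activitytype.cqld4.api.StatementFilter;

    // Accepts every statement; the Statement<?> wildcard lets one filter
    // handle SimpleStatement, BoundStatement, and any other subtype.
    class MatchAllFilter implements StatementFilter {
        @Override
        public boolean matches(Statement<?> statement) {
            return true;
        }
    }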
@@ -13,11 +13,10 @@ public class UDTCodecInjector {
     private final static Logger logger = LoggerFactory.getLogger(UDTCodecInjector.class);

     private List<UserCodecProvider> codecProviders = new ArrayList<>();
-    private List<UserType> userTypes = new ArrayList<>();

     public void injectUserProvidedCodecs(Session session, boolean allowAcrossKeyspaces) {

-        CodecRegistry registry = session.getCluster().getConfiguration().getCodecRegistry();
+        CodecRegistry registry = session.getContext().getCodecRegistry();

         ServiceLoader<UserCodecProvider> codecLoader = ServiceLoader.load(UserCodecProvider.class);

@@ -0,0 +1,92 @@
+package io.nosqlbench.activitytype.cqld4.config;
+
+import com.datastax.oss.driver.api.core.config.DriverOption;
+import com.datastax.oss.driver.api.core.config.OptionsMap;
+import com.datastax.oss.driver.api.core.config.TypedDriverOption;
+import com.datastax.oss.driver.api.core.data.CqlDuration;
+
+import java.math.BigDecimal;
+import java.math.BigInteger;
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+import java.nio.ByteBuffer;
+import java.nio.charset.StandardCharsets;
+import java.time.*;
+import java.util.UUID;
+
+public class CQLD4OptionsMapper {
+
+    public static void apply(OptionsMap optionsMap, String name, String value) {
+
+        for (TypedDriverOption<?> builtin : TypedDriverOption.builtInValues()) {
+            DriverOption rawOption = builtin.getRawOption();
+            String path = rawOption.getPath();
+            if (name.equals(path)) {
+                Class<?> rawType = builtin.getExpectedType().getRawType();
+                Object convertedValue = adaptTypeValue(value, rawType, name);
+                TypedDriverOption<? super Object> option = (TypedDriverOption<? super Object>) builtin;
+                optionsMap.put(option, convertedValue);
+                return;
+            }
+        }
+
+        throw new RuntimeException("Driver option " + name + " was not found in the available options.");
+    }
+
+    private static Object adaptTypeValue(String value, Class<?> rawOption, String optionName) {
+        switch (rawOption.getCanonicalName()) {
+            case "java.lang.Boolean":
+                return Boolean.parseBoolean(value);
+            case "java.lang.Byte":
+                return Byte.parseByte(value);
+            case "java.lang.Double":
+                return Double.parseDouble(value);
+            case "java.lang.Float":
+                return Float.parseFloat(value);
+            case "java.lang.Integer":
+                return Integer.parseInt(value);
+            case "java.lang.Long":
+                return Long.parseLong(value);
+            case "java.lang.Short":
+                return Short.parseShort(value);
+            case "java.time.Instant":
+                return Instant.parse(value);
+            case "java.time.ZonedDateTime":
+                return ZonedDateTime.parse(value);
+            case "java.time.LocalDate":
+                return LocalDate.parse(value);
+            case "java.time.LocalTime":
+                return LocalTime.parse(value);
+            case "java.nio.ByteBuffer":
+                return ByteBuffer.wrap(value.getBytes(StandardCharsets.UTF_8)); // What else to do here?
+            case "java.lang.String":
+                return value;
+            case "java.math.BigInteger":
+                return new BigInteger(value);
+            case "java.math.BigDecimal":
+                return new BigDecimal(value);
+            case "java.util.UUID":
+                return UUID.fromString(value);
+            case "java.net.InetAddress":
+                try {
+                    return InetAddress.getByName(value);
+                } catch (UnknownHostException e) {
+                    throw new RuntimeException(e);
+                }
+            case "com.datastax.oss.driver.api.core.data.CqlDuration":
+                return CqlDuration.from(value);
+            case "java.time.Duration":
+                return Duration.parse(value);
+            default:
+                // These appear to be valid types, but there is no record of them used in driver configuration,
+                // nor a convenient way to convert them directly from known type and string value without invoking
+                // connected metadata machinery from an active session.
+                // case "com.datastax.oss.driver.api.core.data.TupleValue":
+                // case "com.datastax.oss.driver.api.core.data.UdtValue":
+
+                throw new RuntimeException("The type converter for driver option named " + optionName + " was not " +
+                    "found, or is unimplemented. Please file an issue at nosqlbench.io");
+        }
+    }
+
+}
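A hedged sketch of how the new mapper could be used to seed a driver configuration. The option path and value are illustrative; OptionsMap.driverDefaults and DriverConfigLoader.fromMap are standard driver-4 APIs.

    import com.datastax.oss.driver.api.core.CqlSession;
    import com.datastax.oss.driver.api.core.config.DriverConfigLoader;
    import com.datastax.oss.driver.api.core.config.OptionsMap;
    import io.nosqlbench.activitytype.cqld4.config.CQLD4OptionsMapper;

    class OptionsMapperSketch {
        static CqlSession sessionWithTimeout() {
            OptionsMap options = OptionsMap.driverDefaults();
            // "basic.request.timeout" is typed as java.time.Duration, so the
            // string is parsed by the Duration case in adaptTypeValue above.
            CQLD4OptionsMapper.apply(options, "basic.request.timeout", "PT5S");
            return CqlSession.builder()
                .withConfigLoader(DriverConfigLoader.fromMap(options))
                .build();
        }
    }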
@@ -45,7 +45,7 @@ public class CQLBindHelper {

         for (ColumnDefinition def : defs) {
             ByteBuffer byteBuffer = row.getByteBuffer(def.getName());
-            bound.setBytesUnsafe(def.getName(), byteBuffer);
+            bound=bound.setBytesUnsafe(def.getName(), byteBuffer);
         }
         return bound;
     }
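The reassignment above reflects a driver-4 behavior worth calling out: statement instances are immutable, and every setter returns a new instance rather than mutating in place. A minimal sketch of the same pattern; the prepared statement and column name are assumed for illustration.

    import com.datastax.oss.driver.api.core.cql.BoundStatement;
    import com.datastax.oss.driver.api.core.cql.PreparedStatement;

    class ImmutabilitySketch {
        static BoundStatement bind(PreparedStatement prepared) {
            BoundStatement bound = prepared.bind();
            // setInt returns a NEW statement; without the reassignment the
            // value would silently be lost, exactly the bug fixed above.
            bound = bound.setInt("id", 42);
            return bound;
        }
    }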
@@ -1,226 +1,202 @@
 package io.nosqlbench.activitytype.cqld4.core;

 import com.datastax.oss.driver.api.core.connection.ReconnectionPolicy;
 import com.datastax.oss.driver.api.core.context.DriverContext;
 import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy;
 import com.datastax.oss.driver.api.core.retry.RetryPolicy;
 import com.datastax.oss.driver.api.core.session.Session;
 import com.datastax.oss.driver.api.core.specex.SpeculativeExecutionPolicy;
 import com.datastax.oss.driver.internal.core.connection.ConstantReconnectionPolicy;
 import com.datastax.oss.driver.internal.core.connection.ExponentialReconnectionPolicy;
 import com.datastax.oss.driver.internal.core.context.NettyOptions;
 import com.datastax.oss.driver.internal.core.retry.DefaultRetryPolicy;
 import com.datastax.oss.driver.internal.core.specex.ConstantSpeculativeExecutionPolicy;
 import io.netty.util.HashedWheelTimer;
 import io.nosqlbench.nb.api.errors.BasicError;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

 import java.net.InetSocketAddress;
 import java.util.*;
 import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.TimeUnit;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 import java.util.stream.Collectors;

 public class CQLOptions {
-    private final static Logger logger = LoggerFactory.getLogger(CQLOptions.class);
-
-    private final static Pattern CORE_AND_MAX_RQ_PATTERN = Pattern.compile("(?<core>\\d+)(:(?<max>\\d+)(:(?<rq>\\d+))?)?(,(?<rcore>\\d+)(:(?<rmax>\\d+)(:(?<rrq>\\d+))?)?)?(,?heartbeat_interval_s:(?<heartbeatinterval>\\d+))?(,?idle_timeout_s:(?<idletimeout>\\d+))?(,?pool_timeout_ms:(?<pooltimeout>\\d+))?");
-    private final static Pattern PERCENTILE_EAGER_PATTERN = Pattern.compile("^p(?<pctile>[^:]+)(:(?<executions>\\d+))?(:(?<tracked>\\d+)ms)?$");
-    private final static Pattern CONSTANT_EAGER_PATTERN = Pattern.compile("^((?<msThreshold>\\d++)ms)(:(?<executions>\\d+))?$");
-
-    private static ConstantSpeculativeExecutionPolicy constantPolicy(DriverContext context, int threshold, int executions) {
-        return new ConstantSpeculativeExecutionPolicy(threshold, executions);
-    }
-
-    private static SpeculativeExecutionPolicy percentilePolicy(long tracked, double threshold, int executions) {
-        PerHostPercentileTracker tracker = newTracker(tracked);
-        return new PercentileSpeculativeExecutionPolicy(tracker, threshold, executions);
-    }
-
-    private static PerHostPercentileTracker newTracker(long millis) {
-        return PerHostPercentileTracker.builder(millis).build();
-    }
-
-    public static PoolingOptions poolingOptionsFor(String spec) {
-        Matcher matcher = CORE_AND_MAX_RQ_PATTERN.matcher(spec);
-        if (matcher.matches()) {
-            PoolingOptions poolingOptions = new PoolingOptions();
-
-            Optional.ofNullable(matcher.group("core")).map(Integer::valueOf)
-                .ifPresent(core -> poolingOptions.setCoreConnectionsPerHost(HostDistance.LOCAL, core));
-            Optional.ofNullable(matcher.group("max")).map(Integer::valueOf)
-                .ifPresent(max -> poolingOptions.setMaxConnectionsPerHost(HostDistance.LOCAL, max));
-            Optional.ofNullable(matcher.group("rq")).map(Integer::valueOf)
-                .ifPresent(rq -> poolingOptions.setMaxRequestsPerConnection(HostDistance.LOCAL, rq));
-
-            Optional.ofNullable(matcher.group("rcore")).map(Integer::valueOf)
-                .ifPresent(rcore -> poolingOptions.setCoreConnectionsPerHost(HostDistance.REMOTE, rcore));
-            Optional.ofNullable(matcher.group("rmax")).map(Integer::valueOf)
-                .ifPresent(rmax -> poolingOptions.setMaxConnectionsPerHost(HostDistance.REMOTE, rmax));
-            Optional.ofNullable(matcher.group("rrq")).map(Integer::valueOf)
-                .ifPresent(rrq -> poolingOptions.setMaxRequestsPerConnection(HostDistance.REMOTE, rrq));
-
-            Optional.ofNullable(matcher.group("heartbeatinterval")).map(Integer::valueOf)
-                .ifPresent(poolingOptions::setHeartbeatIntervalSeconds);
-
-            Optional.ofNullable(matcher.group("idletimeout")).map(Integer::valueOf)
-                .ifPresent(poolingOptions::setIdleTimeoutSeconds);
-
-            Optional.ofNullable(matcher.group("pooltimeout")).map(Integer::valueOf)
-                .ifPresent(poolingOptions::setPoolTimeoutMillis);
-
-            return poolingOptions;
-        }
-        throw new RuntimeException("No pooling options could be parsed from spec: " + spec);
-
-    }
-
-    public static RetryPolicy retryPolicyFor(String spec, Session session) {
-        Set<String> retryBehaviors = Arrays.stream(spec.split(",")).map(String::toLowerCase).collect(Collectors.toSet());
-        RetryPolicy retryPolicy = new DefaultRetryPolicy(session.getContext(),"default");
-
-        if (retryBehaviors.contains("default")) {
-            return retryPolicy;
-        } // add other mutually-exclusive behaviors here with checks, if we want to extend beyond "default"
-
-        if (retryBehaviors.contains("logging")) {
-            retryPolicy = new LoggingRetryPolicy(retryPolicy);
-        }
-
-        return retryPolicy;
-    }
-
-    public static ReconnectionPolicy reconnectPolicyFor(String spec, Session session) {
-        if(spec.startsWith("exponential(")){
-            String argsString = spec.substring(12);
-            String[] args = argsString.substring(0, argsString.length() - 1).split("[,;]");
-            if (args.length != 2){
-                throw new BasicError("Invalid reconnectionpolicy, try reconnectionpolicy=exponential(<baseDelay>, <maxDelay>)");
-            }
-            long baseDelay = Long.parseLong(args[0]);
-            long maxDelay = Long.parseLong(args[1]);
-            ExponentialReconnectionPolicy exponentialReconnectionPolicy = new ExponentialReconnectionPolicy(session.getContext());
-        }else if(spec.startsWith("constant(")){
-            String argsString = spec.substring(9);
-            long constantDelayMs= Long.parseLong(argsString.substring(0, argsString.length() - 1));
-            return new ConstantReconnectionPolicy(constantDelayMs);
-        }
-        throw new BasicError("Invalid reconnectionpolicy, try reconnectionpolicy=exponential(<baseDelay>, <maxDelay>) or constant(<constantDelayMs>)");
-    }
-
-    public static SocketOptions socketOptionsFor(String spec) {
-        String[] assignments = spec.split("[,;]");
-        Map<String, String> values = new HashMap<>();
-        for (String assignment : assignments) {
-            String[] namevalue = assignment.split("[:=]", 2);
-            String name = namevalue[0];
-            String value = namevalue[1];
-            values.put(name, value);
-        }
-
-        SocketOptions options = new SocketOptions();
-        Optional.ofNullable(values.get("read_timeout_ms")).map(Integer::parseInt).ifPresent(
-            options::setReadTimeoutMillis
-        );
-        Optional.ofNullable(values.get("connect_timeout_ms")).map(Integer::parseInt).ifPresent(
-            options::setConnectTimeoutMillis
-        );
-        Optional.ofNullable(values.get("keep_alive")).map(Boolean::parseBoolean).ifPresent(
-            options::setKeepAlive
-        );
-        Optional.ofNullable(values.get("reuse_address")).map(Boolean::parseBoolean).ifPresent(
-            options::setReuseAddress
-        );
-        Optional.ofNullable(values.get("so_linger")).map(Integer::parseInt).ifPresent(
-            options::setSoLinger
-        );
-        Optional.ofNullable(values.get("tcp_no_delay")).map(Boolean::parseBoolean).ifPresent(
-            options::setTcpNoDelay
-        );
-        Optional.ofNullable(values.get("receive_buffer_size")).map(Integer::parseInt).ifPresent(
-            options::setReceiveBufferSize
-        );
-        Optional.ofNullable(values.get("send_buffer_size")).map(Integer::parseInt).ifPresent(
-            options::setSendBufferSize
-        );
-
-        return options;
-    }
-
-    public static SpeculativeExecutionPolicy defaultSpeculativePolicy() {
-        PerHostPercentileTracker tracker = PerHostPercentileTracker
-            .builder(15000)
-            .build();
-        PercentileSpeculativeExecutionPolicy defaultSpecPolicy =
-            new PercentileSpeculativeExecutionPolicy(tracker, 99.0, 5);
-        return defaultSpecPolicy;
-    }
-
-    public static SpeculativeExecutionPolicy speculativeFor(String spec) {
-        Matcher pctileMatcher = PERCENTILE_EAGER_PATTERN.matcher(spec);
-        Matcher constantMatcher = CONSTANT_EAGER_PATTERN.matcher(spec);
-        if (pctileMatcher.matches()) {
-            double pctile = Double.valueOf(pctileMatcher.group("pctile"));
-            if (pctile > 100.0 || pctile < 0.0) {
-                throw new RuntimeException("pctile must be between 0.0 and 100.0");
-            }
-            String executionsSpec = pctileMatcher.group("executions");
-            String trackedSpec = pctileMatcher.group("tracked");
-            int executions = (executionsSpec != null && !executionsSpec.isEmpty()) ? Integer.valueOf(executionsSpec) : 5;
-            int tracked = (trackedSpec != null && !trackedSpec.isEmpty()) ? Integer.valueOf(trackedSpec) : 15000;
-            logger.debug("speculative: Creating new percentile tracker policy from spec '" + spec + "'");
-            return percentilePolicy(tracked, pctile, executions);
-        } else if (constantMatcher.matches()) {
-            int threshold = Integer.valueOf(constantMatcher.group("msThreshold"));
-            String executionsSpec = constantMatcher.group("executions");
-            int executions = (executionsSpec != null && !executionsSpec.isEmpty()) ? Integer.valueOf(executionsSpec) : 5;
-            logger.debug("speculative: Creating new constant policy from spec '" + spec + "'");
-            return constantPolicy(threshold, executions);
-        } else {
-            throw new RuntimeException("Unable to parse pattern for speculative option: " + spec + ", it must be in " +
-                "an accepted form, like p99.0:5:15000, or p99.0:5, or 5000ms:5");
-        }
-
-    }
-
-    public static LoadBalancingPolicy whitelistFor(String s, LoadBalancingPolicy innerPolicy) {
-        String[] addrSpecs = s.split(",");
-        List<InetSocketAddress> sockAddrs = Arrays.stream(addrSpecs)
-            .map(CQLOptions::toSocketAddr)
-            .collect(Collectors.toList());
-        if (innerPolicy == null) {
-            innerPolicy = new RoundRobinPolicy();
-        }
-        return new WhiteListPolicy(innerPolicy, sockAddrs);
-    }
-
-    public static NettyOptions withTickDuration(String tick) {
-        logger.info("Cluster builder using custom tick duration value for HashedWheelTimer: " + tick + " milliseconds");
-        int tickDuration = Integer.valueOf(tick);
-        return new NettyOptions() {
-            public io.netty.util.Timer timer(ThreadFactory threadFactory) {
-                return new HashedWheelTimer(
-                    threadFactory, tickDuration, TimeUnit.MILLISECONDS);
-            }
-        };
-    }
-
-    private static InetSocketAddress toSocketAddr(String addr) {
-        String[] addrs = addr.split(":", 2);
-        String inetHost = addrs[0];
-        String inetPort = (addrs.length == 2) ? addrs[1] : "9042";
-        return new InetSocketAddress(inetHost, Integer.valueOf(inetPort));
-    }
-
-    public static ProtocolOptions.Compression withCompression(String compspec) {
-        try {
-            return ProtocolOptions.Compression.valueOf(compspec);
-        } catch (IllegalArgumentException iae) {
-            throw new RuntimeException("Compression option '" + compspec + "' was specified, but only " +
-                Arrays.toString(ProtocolOptions.Compression.values()) + " are available.");
-        }
-    }
+//    private final static Logger logger = LoggerFactory.getLogger(CQLOptions.class);
+//
+//    private final static Pattern CORE_AND_MAX_RQ_PATTERN = Pattern.compile("(?<core>\\d+)(:(?<max>\\d+)(:(?<rq>\\d+))?)?(,(?<rcore>\\d+)(:(?<rmax>\\d+)(:(?<rrq>\\d+))?)?)?(,?heartbeat_interval_s:(?<heartbeatinterval>\\d+))?(,?idle_timeout_s:(?<idletimeout>\\d+))?(,?pool_timeout_ms:(?<pooltimeout>\\d+))?");
+//    private final static Pattern PERCENTILE_EAGER_PATTERN = Pattern.compile("^p(?<pctile>[^:]+)(:(?<executions>\\d+))?(:(?<tracked>\\d+)ms)?$");
+//    private final static Pattern CONSTANT_EAGER_PATTERN = Pattern.compile("^((?<msThreshold>\\d++)ms)(:(?<executions>\\d+))?$");
+//
+//    private static ConstantSpeculativeExecutionPolicy constantPolicy(DriverContext context, int threshold, int executions) {
+//        return new ConstantSpeculativeExecutionPolicy(threshold, executions);
+//    }
+//
+//    private static SpeculativeExecutionPolicy percentilePolicy(long tracked, double threshold, int executions) {
+//        PerHostPercentileTracker tracker = newTracker(tracked);
+//        return new PercentileSpeculativeExecutionPolicy(tracker, threshold, executions);
+//    }
+//
+//    private static PerHostPercentileTracker newTracker(long millis) {
+//        return PerHostPercentileTracker.builder(millis).build();
+//    }
+//
+//    public static PoolingOptions poolingOptionsFor(String spec) {
+//        Matcher matcher = CORE_AND_MAX_RQ_PATTERN.matcher(spec);
+//        if (matcher.matches()) {
+//            PoolingOptions poolingOptions = new PoolingOptions();
+//
+//            Optional.ofNullable(matcher.group("core")).map(Integer::valueOf)
+//                .ifPresent(core -> poolingOptions.setCoreConnectionsPerHost(HostDistance.LOCAL, core));
+//            Optional.ofNullable(matcher.group("max")).map(Integer::valueOf)
+//                .ifPresent(max -> poolingOptions.setMaxConnectionsPerHost(HostDistance.LOCAL, max));
+//            Optional.ofNullable(matcher.group("rq")).map(Integer::valueOf)
+//                .ifPresent(rq -> poolingOptions.setMaxRequestsPerConnection(HostDistance.LOCAL, rq));
+//
+//            Optional.ofNullable(matcher.group("rcore")).map(Integer::valueOf)
+//                .ifPresent(rcore -> poolingOptions.setCoreConnectionsPerHost(HostDistance.REMOTE, rcore));
+//            Optional.ofNullable(matcher.group("rmax")).map(Integer::valueOf)
+//                .ifPresent(rmax -> poolingOptions.setMaxConnectionsPerHost(HostDistance.REMOTE, rmax));
+//            Optional.ofNullable(matcher.group("rrq")).map(Integer::valueOf)
+//                .ifPresent(rrq -> poolingOptions.setMaxRequestsPerConnection(HostDistance.REMOTE, rrq));
+//
+//            Optional.ofNullable(matcher.group("heartbeatinterval")).map(Integer::valueOf)
+//                .ifPresent(poolingOptions::setHeartbeatIntervalSeconds);
+//
+//            Optional.ofNullable(matcher.group("idletimeout")).map(Integer::valueOf)
+//                .ifPresent(poolingOptions::setIdleTimeoutSeconds);
+//
+//            Optional.ofNullable(matcher.group("pooltimeout")).map(Integer::valueOf)
+//                .ifPresent(poolingOptions::setPoolTimeoutMillis);
+//
+//            return poolingOptions;
+//        }
+//        throw new RuntimeException("No pooling options could be parsed from spec: " + spec);
+//
+//    }
+//
+//    public static RetryPolicy retryPolicyFor(String spec, Session session) {
+//        Set<String> retryBehaviors = Arrays.stream(spec.split(",")).map(String::toLowerCase).collect(Collectors.toSet());
+//        RetryPolicy retryPolicy = new DefaultRetryPolicy(session.getContext(),"default");
+//
+//        if (retryBehaviors.contains("default")) {
+//            return retryPolicy;
+//        } // add other mutually-exclusive behaviors here with checks, if we want to extend beyond "default"
+//
+//        if (retryBehaviors.contains("logging")) {
+//            retryPolicy = new LoggingRetryPolicy(retryPolicy);
+//        }
+//
+//        return retryPolicy;
+//    }
+//
+//    public static ReconnectionPolicy reconnectPolicyFor(String spec, Session session) {
+//        if(spec.startsWith("exponential(")){
+//            String argsString = spec.substring(12);
+//            String[] args = argsString.substring(0, argsString.length() - 1).split("[,;]");
+//            if (args.length != 2){
+//                throw new BasicError("Invalid reconnectionpolicy, try reconnectionpolicy=exponential(<baseDelay>, <maxDelay>)");
+//            }
+//            long baseDelay = Long.parseLong(args[0]);
+//            long maxDelay = Long.parseLong(args[1]);
+//            ExponentialReconnectionPolicy exponentialReconnectionPolicy = new ExponentialReconnectionPolicy(session.getContext());
+//        }else if(spec.startsWith("constant(")){
+//            String argsString = spec.substring(9);
+//            long constantDelayMs= Long.parseLong(argsString.substring(0, argsString.length() - 1));
+//            return new ConstantReconnectionPolicy(constantDelayMs);
+//        }
+//        throw new BasicError("Invalid reconnectionpolicy, try reconnectionpolicy=exponential(<baseDelay>, <maxDelay>) or constant(<constantDelayMs>)");
+//    }
+//
+//    public static SocketOptions socketOptionsFor(String spec) {
+//        String[] assignments = spec.split("[,;]");
+//        Map<String, String> values = new HashMap<>();
+//        for (String assignment : assignments) {
+//            String[] namevalue = assignment.split("[:=]", 2);
+//            String name = namevalue[0];
+//            String value = namevalue[1];
+//            values.put(name, value);
+//        }
+//
+//        SocketOptions options = new SocketOptions();
+//        Optional.ofNullable(values.get("read_timeout_ms")).map(Integer::parseInt).ifPresent(
+//            options::setReadTimeoutMillis
+//        );
+//        Optional.ofNullable(values.get("connect_timeout_ms")).map(Integer::parseInt).ifPresent(
+//            options::setConnectTimeoutMillis
+//        );
+//        Optional.ofNullable(values.get("keep_alive")).map(Boolean::parseBoolean).ifPresent(
+//            options::setKeepAlive
+//        );
+//        Optional.ofNullable(values.get("reuse_address")).map(Boolean::parseBoolean).ifPresent(
+//            options::setReuseAddress
+//        );
+//        Optional.ofNullable(values.get("so_linger")).map(Integer::parseInt).ifPresent(
+//            options::setSoLinger
+//        );
+//        Optional.ofNullable(values.get("tcp_no_delay")).map(Boolean::parseBoolean).ifPresent(
+//            options::setTcpNoDelay
+//        );
+//        Optional.ofNullable(values.get("receive_buffer_size")).map(Integer::parseInt).ifPresent(
+//            options::setReceiveBufferSize
+//        );
+//        Optional.ofNullable(values.get("send_buffer_size")).map(Integer::parseInt).ifPresent(
+//            options::setSendBufferSize
+//        );
+//
+//        return options;
+//    }
+//
+//    public static SpeculativeExecutionPolicy defaultSpeculativePolicy() {
+//        PerHostPercentileTracker tracker = PerHostPercentileTracker
+//            .builder(15000)
+//            .build();
+//        PercentileSpeculativeExecutionPolicy defaultSpecPolicy =
+//            new PercentileSpeculativeExecutionPolicy(tracker, 99.0, 5);
+//        return defaultSpecPolicy;
+//    }
+//
+//    public static SpeculativeExecutionPolicy speculativeFor(String spec) {
+//        Matcher pctileMatcher = PERCENTILE_EAGER_PATTERN.matcher(spec);
+//        Matcher constantMatcher = CONSTANT_EAGER_PATTERN.matcher(spec);
+//        if (pctileMatcher.matches()) {
+//            double pctile = Double.valueOf(pctileMatcher.group("pctile"));
+//            if (pctile > 100.0 || pctile < 0.0) {
+//                throw new RuntimeException("pctile must be between 0.0 and 100.0");
+//            }
+//            String executionsSpec = pctileMatcher.group("executions");
+//            String trackedSpec = pctileMatcher.group("tracked");
+//            int executions = (executionsSpec != null && !executionsSpec.isEmpty()) ? Integer.valueOf(executionsSpec) : 5;
+//            int tracked = (trackedSpec != null && !trackedSpec.isEmpty()) ? Integer.valueOf(trackedSpec) : 15000;
+//            logger.debug("speculative: Creating new percentile tracker policy from spec '" + spec + "'");
+//            return percentilePolicy(tracked, pctile, executions);
+//        } else if (constantMatcher.matches()) {
+//            int threshold = Integer.valueOf(constantMatcher.group("msThreshold"));
+//            String executionsSpec = constantMatcher.group("executions");
+//            int executions = (executionsSpec != null && !executionsSpec.isEmpty()) ? Integer.valueOf(executionsSpec) : 5;
+//            logger.debug("speculative: Creating new constant policy from spec '" + spec + "'");
+//            return constantPolicy(threshold, executions);
+//        } else {
+//            throw new RuntimeException("Unable to parse pattern for speculative option: " + spec + ", it must be in " +
+//                "an accepted form, like p99.0:5:15000, or p99.0:5, or 5000ms:5");
+//        }
+//
+//    }
+//
+//    public static LoadBalancingPolicy whitelistFor(String s, LoadBalancingPolicy innerPolicy) {
+//        String[] addrSpecs = s.split(",");
+//        List<InetSocketAddress> sockAddrs = Arrays.stream(addrSpecs)
+//            .map(CQLOptions::toSocketAddr)
+//            .collect(Collectors.toList());
+//        if (innerPolicy == null) {
+//            innerPolicy = new RoundRobinPolicy();
+//        }
+//        return new WhiteListPolicy(innerPolicy, sockAddrs);
+//    }
+//
+//    public static NettyOptions withTickDuration(String tick) {
+//        logger.info("Cluster builder using custom tick duration value for HashedWheelTimer: " + tick + " milliseconds");
+//        int tickDuration = Integer.valueOf(tick);
+//        return new NettyOptions() {
+//            public io.netty.util.Timer timer(ThreadFactory threadFactory) {
+//                return new HashedWheelTimer(
+//                    threadFactory, tickDuration, TimeUnit.MILLISECONDS);
+//            }
+//        };
+//    }
+//
+//    private static InetSocketAddress toSocketAddr(String addr) {
+//        String[] addrs = addr.split(":", 2);
+//        String inetHost = addrs[0];
+//        String inetPort = (addrs.length == 2) ? addrs[1] : "9042";
+//        return new InetSocketAddress(inetHost, Integer.valueOf(inetPort));
+//    }
+//
+//    public static ProtocolOptions.Compression withCompression(String compspec) {
+//        try {
+//            return ProtocolOptions.Compression.valueOf(compspec);
+//        } catch (IllegalArgumentException iae) {
+//            throw new RuntimeException("Compression option '" + compspec + "' was specified, but only " +
+//                Arrays.toString(ProtocolOptions.Compression.values()) + " are available.");
+//        }
+//    }
 }
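For reference while CQLOptions is parked in comments: the speculative-execution spec grammar it parsed accepts forms like p99.0:5:15000ms (percentile, executions, tracked window, where the window needs an ms suffix per the regex) or 5000ms:5 (constant threshold and executions). A runnable illustration of the grammar, with the pattern copied from the class above and the sample spec assumed:

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    class SpecGrammarSketch {
        // Copied verbatim from CQLOptions.PERCENTILE_EAGER_PATTERN above.
        static final Pattern PERCENTILE_EAGER_PATTERN =
            Pattern.compile("^p(?<pctile>[^:]+)(:(?<executions>\\d+))?(:(?<tracked>\\d+)ms)?$");

        public static void main(String[] args) {
            Matcher m = PERCENTILE_EAGER_PATTERN.matcher("p99.0:5:15000ms");
            if (m.matches()) {
                // Prints: pctile=99.0 executions=5 tracked=15000
                System.out.println("pctile=" + m.group("pctile")
                    + " executions=" + m.group("executions")
                    + " tracked=" + m.group("tracked"));
            }
        }
    }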
@@ -1,8 +1,9 @@
 package io.nosqlbench.activitytype.cqld4.core;

 import com.codahale.metrics.Timer;
-import com.datastax.oss.driver.api.core.cql.ResultSet;
-import com.datastax.oss.driver.api.core.cql.Statement;
+import com.datastax.oss.driver.api.core.config.DefaultDriverOption;
+import com.datastax.oss.driver.api.core.cql.*;
 import com.datastax.oss.driver.api.core.session.Session;
 import io.nosqlbench.activitytype.cqld4.api.ResultSetCycleOperator;
 import io.nosqlbench.activitytype.cqld4.api.RowCycleOperator;
 import io.nosqlbench.activitytype.cqld4.api.StatementFilter;
@@ -22,6 +23,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

 import java.util.List;
+import java.util.concurrent.CompletionStage;
 import java.util.concurrent.TimeUnit;

 @SuppressWarnings("Duplicates")
@@ -81,7 +83,7 @@ public class CqlAction implements SyncAction, MultiPhaseAction, ActivityDefObser
             totalRowsFetchedForQuery = 0L;

             Statement statement;
-            ResultSetFuture resultSetFuture;
+            CompletionStage<AsyncResultSet> resultSetFuture;
             ReadyCQLStatement readyCQLStatement;

             int tries = 0;
@@ -124,7 +126,7 @@ public class CqlAction implements SyncAction, MultiPhaseAction, ActivityDefObser
                 }

                 try (Timer.Context executeTime = cqlActivity.executeTimer.time()) {
-                    resultSetFuture = cqlActivity.getSession().executeAsync(statement);
+                    CompletionStage<AsyncResultSet> completion = cqlActivity.getSession().executeAsync(statement);
                 }

                 Timer.Context resultTime = cqlActivity.resultTimer.time();
@@ -149,7 +151,8 @@ public class CqlAction implements SyncAction, MultiPhaseAction, ActivityDefObser
                         Row row = resultSet.one();
                         ColumnDefinitions defs = row.getColumnDefinitions();
                         if (retryReplace) {
-                            statement = CQLBindHelper.rebindUnappliedStatement(statement, defs, row);
+                            statement =
+                                new CQLBindHelper(getCqlActivity().getSession()).rebindUnappliedStatement(statement, defs,row);
                         }

                         logger.trace(readyCQLStatement.getQueryString(cycleValue));
@@ -212,7 +215,7 @@ public class CqlAction implements SyncAction, MultiPhaseAction, ActivityDefObser
                             readyCQLStatement.getQueryString(cycleValue),
                             1,
                             cqlActivity.maxpages,
-                            cqlActivity.getSession().getCluster().getConfiguration().getQueryOptions().getFetchSize()
+                            cqlActivity.getSession().getContext().getConfig().getDefaultProfile().getInt(DefaultDriverOption.REQUEST_PAGE_SIZE)
                         );
                     }
                 }
@@ -302,7 +305,7 @@ public class CqlAction implements SyncAction, MultiPhaseAction, ActivityDefObser
                             pagingReadyStatement.getQueryString(cycleValue),
                             pagesFetched,
                             cqlActivity.maxpages,
-                            cqlActivity.getSession().getCluster().getConfiguration().getQueryOptions().getFetchSize()
+                            cqlActivity.getSession().getContext().getConfig().getDefaultProfile().getInt(DefaultDriverOption.REQUEST_PAGE_SIZE)
                         );
                     }
                     pagingResultSet = resultSet;
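The executeAsync change above follows from driver 4's async API shape: it returns a CompletionStage of AsyncResultSet rather than a ResultSetFuture. A small sketch of consuming that shape; the session and query are assumed for illustration.

    import com.datastax.oss.driver.api.core.CqlSession;
    import com.datastax.oss.driver.api.core.cql.AsyncResultSet;
    import com.datastax.oss.driver.api.core.cql.SimpleStatement;
    import java.util.concurrent.CompletionStage;

    class AsyncShapeSketch {
        static AsyncResultSet fetch(CqlSession session) {
            CompletionStage<AsyncResultSet> stage =
                session.executeAsync(SimpleStatement.newInstance("SELECT now() FROM system.local"));
            return stage.toCompletableFuture().join(); // blocking join, for illustration only
        }
    }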
@@ -4,6 +4,10 @@ import com.codahale.metrics.Histogram;
 import com.codahale.metrics.Meter;
 import com.codahale.metrics.Timer;
 import com.datastax.driver.core.*;
+import com.datastax.oss.driver.api.core.ConsistencyLevel;
+import com.datastax.oss.driver.api.core.CqlSession;
+import com.datastax.oss.driver.api.core.DefaultConsistencyLevel;
+import com.datastax.oss.driver.api.core.cql.*;
 import com.datastax.oss.driver.api.core.session.Session;
 import io.nosqlbench.activitytype.cqld4.codecsupport.UDTCodecInjector;
 import com.datastax.driver.core.TokenRangeStmtFilter;
@@ -73,7 +77,7 @@ public class CqlActivity extends SimpleActivity implements Activity, ActivityDef
     Meter rowsCounter;
     private HashedCQLErrorHandler errorHandler;
     private OpSequence<ReadyCQLStatement> opsequence;
-    private Session session;
+    private CqlSession session;
     private int maxTries;
     private StatementFilter statementFilter;
     private Boolean showcql;
@@ -85,6 +89,7 @@ public class CqlActivity extends SimpleActivity implements Activity, ActivityDef
     private long maxRetryDelay;
     private boolean retryReplace;
     private String pooling;
+    private String profileName;


     public CqlActivity(ActivityDef activityDef) {
@@ -103,7 +108,8 @@ public class CqlActivity extends SimpleActivity implements Activity, ActivityDef
     @Override
     public synchronized void initActivity() {
         logger.debug("initializing activity: " + this.activityDef.getAlias());
-        session = getSession();
+        profileName = getParams().getOptionalString("profile").orElse("default");
+        session = getSession(profileName);

         if (getParams().getOptionalBoolean("usercodecs").orElse(false)) {
             registerCodecs(session);
@@ -125,9 +131,9 @@ public class CqlActivity extends SimpleActivity implements Activity, ActivityDef
         logger.debug("activity fully initialized: " + this.activityDef.getAlias());
     }

-    public synchronized Session getSession() {
+    public synchronized CqlSession getSession(String profileName) {
         if (session == null) {
-            session = CQLSessionCache.get().getSession(this.getActivityDef());
+            session = CQLSessionCache.get().getSession(this.getActivityDef(), profileName);
         }
         return session;
     }
@@ -135,7 +141,7 @@ public class CqlActivity extends SimpleActivity implements Activity, ActivityDef
     private void initSequencer() {

         Session session = getSession();
-        Map<String,Object> fconfig = Map.of("cluster",session.getCluster());
+        Map<String, Object> fconfig = Map.of("session", session);

         SequencerType sequencerType = SequencerType.valueOf(
             getParams().getOptionalString("seq").orElse("bucket")
@@ -162,15 +168,6 @@ public class CqlActivity extends SimpleActivity implements Activity, ActivityDef
         boolean parametrized = Boolean.valueOf(stmtDef.getParams().getOrDefault("parametrized", "false"));
         long ratio = Long.valueOf(stmtDef.getParams().getOrDefault("ratio", "1"));

-        Optional<ConsistencyLevel> cl = Optional.ofNullable(
-            stmtDef.getParams().getOrDefault("cl", null)).map(ConsistencyLevel::valueOf);
-
-        Optional<ConsistencyLevel> serial_cl = Optional.ofNullable(
-            stmtDef.getParams().getOrDefault("serial_cl", null)).map(ConsistencyLevel::valueOf);
-
-        Optional<Boolean> idempotent = Optional.ofNullable(stmtDef.getParams().getOrDefault("idempotent", null))
-            .map(Boolean::valueOf);
-
         StringBuilder psummary = new StringBuilder();

         boolean instrument = Optional.ofNullable(stmtDef.getParams()
@@ -190,44 +187,56 @@ public class CqlActivity extends SimpleActivity implements Activity, ActivityDef

         ReadyCQLStatementTemplate template;
         String stmtForDriver = parsed.getPositionalStatement(s -> "?");
-        if (prepared) {
-            psummary.append(" prepared=>").append(prepared);
-            PreparedStatement prepare = getSession().prepare(stmtForDriver);
-            cl.ifPresent((conlvl) -> {
-                psummary.append(" consistency_level=>").append(conlvl);
-                prepare.setConsistencyLevel(conlvl);
-            });
-            serial_cl.ifPresent((scl) -> {
-                psummary.append(" serial_consistency_level=>").append(serial_cl);
-                prepare.setSerialConsistencyLevel(scl);
-            });
-            idempotent.ifPresent((i) -> {
-                psummary.append(" idempotent=").append(idempotent);
-                prepare.setIdempotent(i);
-            });
+
+        SimpleStatementBuilder stmtBuilder = SimpleStatement.builder(stmtForDriver);
+        psummary.append(" statement=>").append(stmtForDriver);
+
+        Optional.ofNullable(stmtDef.getParams().getOrDefault("cl", null))
+            .map(DefaultConsistencyLevel::valueOf)
+            .map(conlvl -> {
+                psummary.append(" consistency_level=>").append(conlvl);
+                return conlvl;
+            })
+            .ifPresent(stmtBuilder::setConsistencyLevel);
+
+        Optional.ofNullable(stmtDef.getParams().getOrDefault("serial_cl", null))
+            .map(DefaultConsistencyLevel::valueOf)
+            .map(sconlvel -> {
+                psummary.append(" serial_consistency_level=>").append(sconlvel);
+                return sconlvel;
+            })
+            .ifPresent(stmtBuilder::setSerialConsistencyLevel);
+
+        Optional.ofNullable(stmtDef.getParams().getOrDefault("idempotent", null))
+            .map(Boolean::valueOf)
+            .map(idempotent -> {
+                psummary.append(" idempotent=").append(idempotent);
+                return idempotent;
+            })
+            .ifPresent(stmtBuilder::setIdempotence);
+
+
+        if (prepared) {
+            PreparedStatement preparedStatement = getSession().prepare(stmtBuilder.build());

             CqlBinderTypes binderType = CqlBinderTypes.valueOf(stmtDef.getParams()
                 .getOrDefault("binder", CqlBinderTypes.DEFAULT.toString()));

-            template = new ReadyCQLStatementTemplate(fconfig, binderType, getSession(), prepare, ratio,
-                parsed.getName());
+            template = new ReadyCQLStatementTemplate(
+                fconfig,
+                binderType,
+                getSession(),
+                preparedStatement,
+                ratio,
+                parsed.getName()
+            );
         } else {
-            SimpleStatement simpleStatement = new SimpleStatement(stmtForDriver);
-            cl.ifPresent((conlvl) -> {
-                psummary.append(" consistency_level=>").append(conlvl);
-                simpleStatement.setConsistencyLevel(conlvl);
-            });
-            serial_cl.ifPresent((scl) -> {
-                psummary.append(" serial_consistency_level=>").append(scl);
-                simpleStatement.setSerialConsistencyLevel(scl);
-            });
-            idempotent.ifPresent((i) -> {
-                psummary.append(" idempotent=>").append(i);
-                simpleStatement.setIdempotent(i);
-            });
+            SimpleStatement simpleStatement = SimpleStatement.newInstance(stmtForDriver);
             template = new ReadyCQLStatementTemplate(fconfig, getSession(), simpleStatement, ratio,
                 parsed.getName(), parametrized);
         }

         Optional.ofNullable(stmtDef.getParams().getOrDefault("save", null))
             .map(s -> s.split("[,; ]"))
|
||||
.map(Save::new)
|
||||
@ -409,7 +418,7 @@ public class CqlActivity extends SimpleActivity implements Activity, ActivityDef
|
||||
|
||||
ParameterMap params = activityDef.getParams();
|
||||
Optional<String> fetchSizeOption = params.getOptionalString("fetchsize");
|
||||
Cluster cluster = getSession().getCluster();
|
||||
|
||||
if (fetchSizeOption.isPresent()) {
|
||||
int fetchSize = fetchSizeOption.flatMap(Unit::bytesFor).map(Double::intValue).orElseThrow(() -> new RuntimeException(
|
||||
"Unable to parse fetch size from " + fetchSizeOption.get()
|
||||
@ -420,6 +429,7 @@ public class CqlActivity extends SimpleActivity implements Activity, ActivityDef
|
||||
throw new RuntimeException("Setting the fetch size to " + fetchSize + " is likely to cause instability.");
|
||||
}
|
||||
logger.trace("setting fetchSize to " + fetchSize);
|
||||
|
||||
cluster.getConfiguration().getQueryOptions().setFetchSize(fetchSize);
|
||||
}
|
||||
@ -1,9 +1,9 @@
package io.nosqlbench.activitytype.cqld4.core;

import com.datastax.driver.core.LocalDate;
import com.datastax.driver.core.TupleValue;
import com.datastax.driver.core.UDTValue;
import com.datastax.oss.driver.api.core.data.TupleValue;
import com.datastax.oss.driver.api.core.type.UserDefinedType;
import io.nosqlbench.activitytype.cqld4.codecsupport.UDTJavaType;
import io.nosqlbench.engine.api.activityapi.core.ActionDispenser;
import io.nosqlbench.engine.api.activityapi.core.ActivityType;
import io.nosqlbench.engine.api.activityimpl.ActivityDef;
@ -14,6 +14,7 @@ import java.math.BigInteger;
import java.net.InetAddress;
import java.nio.ByteBuffer;
import java.time.Instant;
import java.time.LocalDate;
import java.time.LocalTime;
import java.util.*;

@ -76,7 +77,7 @@ public class CqlActivityType implements ActivityType<CqlActivity> {
typemap.put("timestamp", Instant.class);
typemap.put("tinyint",byte.class);
typemap.put("tuple", TupleValue.class);
typemap.put("<udt>", UDTValue.class);
typemap.put("<udt>", UserDefinedType.class);
typemap.put("uuid",UUID.class);
typemap.put("timeuuid",UUID.class);
typemap.put("varchar",String.class);
@ -2,7 +2,6 @@ package io.nosqlbench.activitytype.cqld4.core;

import com.codahale.metrics.Timer;
import com.datastax.oss.driver.api.core.cql.ResultSet;
import com.datastax.driver.core.ResultSetFuture;
import io.nosqlbench.activitytype.cqld4.api.ErrorResponse;
import io.nosqlbench.activitytype.cqld4.api.ResultSetCycleOperator;
import io.nosqlbench.activitytype.cqld4.api.RowCycleOperator;
@ -13,8 +12,6 @@ import io.nosqlbench.activitytype.cqld4.errorhandling.exceptions.CQLCycleWithSta
import io.nosqlbench.activitytype.cqld4.errorhandling.exceptions.ChangeUnappliedCycleException;
import io.nosqlbench.activitytype.cqld4.errorhandling.exceptions.UnexpectedPagingException;
import io.nosqlbench.activitytype.cqld4.statements.core.ReadyCQLStatement;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import io.nosqlbench.engine.api.activityapi.core.BaseAsyncAction;
import io.nosqlbench.engine.api.activityapi.core.ops.fluent.opfacets.FailedOp;
import io.nosqlbench.engine.api.activityapi.core.ops.fluent.opfacets.StartedOp;
@ -115,8 +112,8 @@ public class CqlAsyncAction extends BaseAsyncAction<CqlOpData, CqlActivity> {

// The execute timer covers only the point at which EB hands the op to the driver to be executed
try (Timer.Context executeTime = activity.executeTimer.time()) {
cqlop.future = activity.getSession().executeAsync(cqlop.statement);
Futures.addCallback(cqlop.future, cqlop);
cqlop.completionStage = activity.getSession().executeAsync(cqlop.statement);
Futures.addCallback(cqlop.completionStage, cqlop);
}
}

@ -234,8 +231,8 @@ public class CqlAsyncAction extends BaseAsyncAction<CqlOpData, CqlActivity> {
if (errorStatus.isRetryable() && cqlop.triesAttempted < maxTries) {
startedOp.retry();
try (Timer.Context executeTime = activity.executeTimer.time()) {
cqlop.future = activity.getSession().executeAsync(cqlop.statement);
Futures.addCallback(cqlop.future, cqlop);
cqlop.completionStage = activity.getSession().executeAsync(cqlop.statement);
Futures.addCallback(cqlop.completionStage, cqlop);
return;
}
}
@ -1,14 +1,17 @@
package io.nosqlbench.activitytype.cqld4.core;

import com.datastax.oss.driver.api.core.cql.ResultSet;
import com.datastax.driver.core.ResultSetFuture;
import com.datastax.oss.driver.api.core.cql.AsyncResultSet;
import com.datastax.oss.driver.api.core.cql.Row;
import com.datastax.oss.driver.api.core.cql.Statement;
import io.nosqlbench.activitytype.cqld4.statements.core.ReadyCQLStatement;
import com.google.common.util.concurrent.FutureCallback;
import io.nosqlbench.engine.api.activityapi.core.ops.fluent.opfacets.StartedOp;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;

public class CqlOpData extends CompletableFuture<AsyncResultSet> {

public class CqlOpData implements FutureCallback<ResultSet> {
final long cycle;
public CompletionStage<AsyncResultSet> completionStage;

// op state is managed via callbacks, we keep a ref here
StartedOp<CqlOpData> startedOp;
@ -19,8 +22,6 @@ public class CqlOpData implements FutureCallback<ResultSet> {

ReadyCQLStatement readyCQLStatement;
Statement statement;
ResultSetFuture future;
ResultSet resultSet;

long totalRowsFetchedForQuery;
long totalPagesFetchedForQuery;
@ -28,6 +29,7 @@ public class CqlOpData implements FutureCallback<ResultSet> {
public Throwable throwable;
public long resultAt;
private long errorAt;
private Iterable<Row> page;

public CqlOpData(long cycle, CqlAsyncAction action) {
this.cycle = cycle;
@ -35,18 +37,20 @@ public class CqlOpData implements FutureCallback<ResultSet> {
}

@Override
public void onSuccess(ResultSet result) {
this.resultSet = result;
this.resultAt = System.nanoTime();
action.onSuccess(startedOp);

public boolean completeExceptionally(Throwable ex) {
this.throwable=ex;
this.errorAt = System.nanoTime();
action.onFailure(startedOp);
return true;
}

@Override
public void onFailure(Throwable throwable) {
this.throwable=throwable;
this.errorAt = System.nanoTime();
action.onFailure(startedOp);
public boolean complete(AsyncResultSet value) {
this.page = value.currentPage();
this.resultAt = System.nanoTime();
action.onSuccess(startedOp);
return true;
// ? return !value.hasMorePages();
}

}
@ -33,6 +33,8 @@ public class ExceptionMap {

// DriverException subtypes

put(AllNodesFailedException.class, DriverException.class);
put(NoNodeAvailableException.class, AllNodesFailedException.class);
put(BusyConnectionException.class, DriverException.class);
@ -53,8 +55,8 @@ public class ExceptionMap {
put(UnavailableException.class, QueryExecutionException.class);
put(QueryValidationException.class, CoordinatorException.class);
put(AlreadyExistsException.class, QueryValidationException.class);
put(InvalidConfigurationInQueryException.class, QueryValidationException.class);
put(InvalidQueryException.class, QueryValidationException.class);
put(InvalidConfigurationInQueryException.class, QueryValidationException.class);
put(SyntaxError.class, QueryValidationException.class);
put(UnauthorizedException.class, QueryValidationException.class);
put(ServerError.class,CoordinatorException.class);
@ -67,20 +69,8 @@ public class ExceptionMap {
put(RequestThrottlingException.class,DriverException.class);
put(UnsupportedProtocolVersionException.class, DriverException.class);

put(UnpreparedException.class, QueryValidationException.class);
put(InvalidTypeException.class, DriverException.class);
put(FunctionExecutionException.class, QueryValidationException.class);
put(DriverInternalError.class, DriverException.class);
// package org.apache.tinkerpop.gremlin.driver.exception;
put(ConnectionException.class, DriverException.class);
put(TransportException.class, ConnectionException.class);
put(OperationTimedOutException.class, ConnectionException.class);
put(PagingStateException.class, DriverException.class);
put(UnresolvedUserTypeException.class, DriverException.class);
put(UnsupportedFeatureException.class, DriverException.class);

put(ChangeUnappliedCycleException.class, CqlGenericCycleException.class);
put(ResultSetVerificationException.class, CqlGenericCycleException.class);
@ -2,7 +2,6 @@ package io.nosqlbench.activitytype.cqld4.errorhandling;

import io.nosqlbench.activitytype.cqld4.api.ErrorResponse;
import io.nosqlbench.activitytype.cqld4.errorhandling.exceptions.CQLCycleWithStatementException;
import io.nosqlbench.activitytype.cqld4.errorhandling.exceptions.CQLExceptionDetailer;
import io.nosqlbench.engine.api.activityapi.errorhandling.CycleErrorHandler;
import io.nosqlbench.engine.api.metrics.ExceptionCountMetrics;
import io.nosqlbench.engine.api.metrics.ExceptionHistoMetrics;
@ -1,25 +0,0 @@
package io.nosqlbench.activitytype.cqld4.errorhandling.exceptions;

import com.datastax.driver.core.exceptions.ReadTimeoutException;
import com.datastax.driver.core.exceptions.WriteTimeoutException;

public class CQLExceptionDetailer {

public static String messageFor(long cycle, Throwable e) {

if (e instanceof ReadTimeoutException) {
ReadTimeoutException rte = (ReadTimeoutException) e;
return rte.getMessage() +
", coordinator: " + rte.getHost() +
", wasDataRetrieved: " + rte.wasDataRetrieved();
}

if (e instanceof WriteTimeoutException) {
WriteTimeoutException wte = (WriteTimeoutException) e;
return wte.getMessage() +
", coordinator: " + wte.getHost();
}

return e.getMessage();
}
}
@ -1,8 +1,8 @@
package io.nosqlbench.activitytype.cqld4.errorhandling.exceptions;

import com.datastax.driver.core.BoundStatement;
import com.datastax.oss.driver.api.core.cql.BoundStatement;
import com.datastax.oss.driver.api.core.cql.ResultSet;
import com.datastax.driver.core.SimpleStatement;
import com.datastax.oss.driver.api.core.cql.SimpleStatement;
import com.datastax.oss.driver.api.core.cql.Statement;

public abstract class CQLResultSetException extends CqlGenericCycleException {

@ -49,7 +49,9 @@ public class UnexpectedPagingException extends CqlGenericCycleException {
sb.append("Additional paging would be required to read the results from this query fully" +
", but the user has not explicitly indicated that paging was expected.")
.append(" fetched/allowed: ").append(fetchedPages).append("/").append(maxpages)
.append(" fetchSize(").append(fetchSize).append("): ").append(queryString);
.append(" fetchSize(").append(fetchSize).append("): ").append(queryString).append(", note this value " +
"is shown for reference from the default driver profile. If you are using a custom profile, it may be " +
"different.");
return sb.toString();
}
}
@ -5,23 +5,23 @@ import com.datastax.oss.driver.api.core.cql.Statement;
import com.datastax.oss.driver.api.core.session.Session;
import io.nosqlbench.virtdata.core.bindings.ValuesArrayBinder;

import java.util.function.Function;

public enum CqlBinderTypes {
direct_array,
unset_aware,
diagnostic;
direct_array(s -> new DirectArrayValuesBinder()),
unset_aware(UnsettableValuesBinder::new),
diagnostic(s -> new DiagnosticPreparedBinder());

private final Function<Session, ValuesArrayBinder<PreparedStatement, Statement<?>>> mapper;

CqlBinderTypes(Function<Session,ValuesArrayBinder<PreparedStatement,Statement<?>>> mapper) {
this.mapper = mapper;
}

public final static CqlBinderTypes DEFAULT = unset_aware;

public ValuesArrayBinder<PreparedStatement, Statement> get(Session session) {
if (this==direct_array) {
return new DirectArrayValuesBinder();
} else if (this== unset_aware) {
return new UnsettableValuesBinder(session);
} else if (this==diagnostic) {
return new DiagnosticPreparedBinder();
} else {
throw new RuntimeException("Impossible-ish statement branch");
}
public ValuesArrayBinder<PreparedStatement,Statement<?>> get(Session session) {
return mapper.apply(session);
}

}
@ -16,10 +16,10 @@ import java.util.List;
* Other binders will call to this one in an exception handler when needed in
* order to explain in more detail what is happening for users.
*/
public class DiagnosticPreparedBinder implements ValuesArrayBinder<PreparedStatement, Statement> {
public class DiagnosticPreparedBinder implements ValuesArrayBinder<PreparedStatement, Statement<?>> {
public static final Logger logger = LoggerFactory.getLogger(DiagnosticPreparedBinder.class);
@Override
public Statement bindValues(PreparedStatement prepared, Object[] values) {
public Statement<?> bindValues(PreparedStatement prepared, Object[] values) {
ColumnDefinitions columnDefinitions = prepared.getVariableDefinitions();
BoundStatement bound = prepared.bind();

@ -18,7 +18,7 @@ import java.util.Arrays;
* to be modeled, and at no clear benefit. Thus the {@link CqlBinderTypes#unset_aware} one
* will become the default.
*/
public class DirectArrayValuesBinder implements ValuesArrayBinder<PreparedStatement, Statement> {
public class DirectArrayValuesBinder implements ValuesArrayBinder<PreparedStatement, Statement<?>> {
public final static Logger logger = LoggerFactory.getLogger(DirectArrayValuesBinder.class);

@Override
@ -17,7 +17,7 @@ import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

public class UnsettableValuesBinder implements ValuesArrayBinder<PreparedStatement, Statement> {
public class UnsettableValuesBinder implements ValuesArrayBinder<PreparedStatement, Statement<?>> {
private final static Logger logger = LoggerFactory.getLogger(UnsettableValuesBinder.class);

private final Session session;
@ -1,10 +1,17 @@
package io.nosqlbench.activitytype.cqld4.statements.core;

import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.CqlSessionBuilder;
import com.datastax.oss.driver.api.core.config.*;
import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy;
import com.datastax.oss.driver.api.core.metadata.EndPoint;
import com.datastax.oss.driver.api.core.retry.RetryPolicy;
import com.datastax.oss.driver.api.core.session.Session;
import com.datastax.oss.driver.api.core.specex.SpeculativeExecutionPolicy;
import com.datastax.oss.driver.internal.core.config.map.MapBasedDriverConfigLoader;
import com.datastax.oss.driver.internal.core.config.typesafe.DefaultDriverConfigLoader;
import com.datastax.oss.driver.internal.core.retry.DefaultRetryPolicy;
import com.typesafe.config.ConfigFactory;
import io.nosqlbench.activitytype.cqld4.core.CQLOptions;
import io.nosqlbench.activitytype.cqld4.core.ProxyTranslator;
import io.nosqlbench.engine.api.activityapi.core.Shutdownable;
@ -13,6 +20,7 @@ import io.nosqlbench.engine.api.metrics.ActivityMetrics;
import io.nosqlbench.engine.api.scripting.NashornEvaluator;
import io.nosqlbench.engine.api.util.SSLKsFactory;
import org.apache.tinkerpop.gremlin.driver.Cluster;
import org.graalvm.options.OptionMap;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@ -23,13 +31,24 @@ import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;

public class CQLSessionCache implements Shutdownable {

private final static Logger logger = LoggerFactory.getLogger(CQLSessionCache.class);
private final static String DEFAULT_SESSION_ID = "default";
private static CQLSessionCache instance = new CQLSessionCache();
private Map<String, Session> sessionCache = new HashMap<>();
private Map<String, SessionConfig> sessionCache = new HashMap<>();

private final static class SessionConfig extends ConcurrentHashMap<String,String> {
public CqlSession session;
public Map<String,String> config = new ConcurrentHashMap<>();

public SessionConfig(CqlSession session) {
this.session = session;
}
}

private CQLSessionCache() {
}
@ -39,66 +58,83 @@ public class CQLSessionCache implements Shutdownable {
}

public void stopSession(ActivityDef activityDef) {
String key = activityDef.getParams().getOptionalString("clusterid").orElse(DEFAULT_SESSION_ID);
Session session = sessionCache.get(key);
session.close();
String key = activityDef.getParams().getOptionalString("sessionid").orElse(DEFAULT_SESSION_ID);
SessionConfig sessionConfig = sessionCache.get(key);
sessionConfig.session.close();
}

public Session getSession(ActivityDef activityDef) {
String key = activityDef.getParams().getOptionalString("clusterid").orElse(DEFAULT_SESSION_ID);
return sessionCache.computeIfAbsent(key, (cid) -> createSession(activityDef, key));
public CqlSession getSession(ActivityDef activityDef) {
String key = activityDef.getParams().getOptionalString("sessionid").orElse(DEFAULT_SESSION_ID);
String profileName = activityDef.getParams().getOptionalString("profile").orElse("default");
SessionConfig sessionConfig = sessionCache.computeIfAbsent(key, (cid) -> createSession(activityDef, key, profileName));
return sessionConfig.session;
}

// cbopts=\".withLoadBalancingPolicy(LatencyAwarePolicy.builder(new TokenAwarePolicy(new DCAwareRoundRobinPolicy(\"dc1-us-east\", 0, false))).build()).withRetryPolicy(new LoggingRetryPolicy(DefaultRetryPolicy.INSTANCE))\"

private Session createSession(ActivityDef activityDef, String sessid) {
private SessionConfig createSession(ActivityDef activityDef, String sessid, String profileName) {

String host = activityDef.getParams().getOptionalString("host").orElse("localhost");
int port = activityDef.getParams().getOptionalInteger("port").orElse(9042);

String driverType = activityDef.getParams().getOptionalString("cqldriver").orElse("dse");
activityDef.getParams().getOptionalString("cqldriver").ifPresent(v -> {
logger.warn("The cqldriver parameter is not needed in this version of the driver.");
});

Cluster.Builder builder =
driverType.toLowerCase().equals("dse") ? DseCluster.builder() :
driverType.toLowerCase().equals("oss") ? Cluster.builder() : null;

if (builder==null) {
throw new RuntimeException("The driver type '" + driverType + "' is not recognized");
// TODO: Figure out how to layer configs with the new TypeSafe Config layer in the Datastax Java Driver
// TODO: Or give up and bulk import options into the map, because the config API is a labyrinth

CqlSessionBuilder builder = CqlSession.builder();
//
// OptionsMap optionsMap = new OptionsMap();
//
// OptionsMap defaults = OptionsMap.driverDefaults();
// DriverConfigLoader cl = DriverConfigLoader.fromMap(defaults);
// DriverConfig cfg = cl.getInitialConfig();

OptionsMap optionsMap = OptionsMap.driverDefaults();

builder.withConfigLoader(new MapBasedDriverConfigLoader())
builder.withConfigLoader(optionsMap);

Optional<Path> scb = activityDef.getParams().getOptionalString("secureconnectbundle")
.map(Path::of);

Optional<List<String>> hosts = activityDef.getParams().getOptionalString("host", "hosts")
.map(h -> h.split(",")).map(Arrays::asList);

Optional<Integer> port1 = activityDef.getParams().getOptionalInteger("port");

if (scb.isPresent()) {
scb.map(b -> {
logger.debug("adding secureconnectbundle: " + b.toString());
return b;
}).ifPresent(builder::withCloudSecureConnectBundle);

if (hosts.isPresent()) {
logger.warn("The host parameter is not valid when using secureconnectbundle=");
}
if (port1.isPresent()) {
logger.warn("the port parameter is not used with CQL when using secureconnectbundle=");
}
} else {
hosts.orElse(List.of("localhost"))
.stream()
.map(h -> InetSocketAddress.createUnresolved(h,port))
.peek(h-> logger.debug("adding contact endpoint: " + h.getHostName()+":"+h.getPort()))
.forEachOrdered(builder::addContactPoint);
}

logger.info("Using driver type '" + driverType.toUpperCase() + "'");

Optional<String> scb = activityDef.getParams()
.getOptionalString("secureconnectbundle");
scb.map(File::new)
.ifPresent(builder::withCloudSecureConnectBundle);

activityDef.getParams()
.getOptionalString("insights").map(Boolean::parseBoolean)
.ifPresent(builder::withMonitorReporting);

String[] contactPoints = activityDef.getParams().getOptionalString("host")
.map(h -> h.split(",")).orElse(null);

if (contactPoints == null) {
contactPoints = activityDef.getParams().getOptionalString("hosts")
.map(h -> h.split(",")).orElse(null);
}
if (contactPoints == null && scb.isEmpty()) {
contactPoints = new String[]{"localhost"};
}

if (contactPoints != null) {
builder.addContactPoints(contactPoints);
}

activityDef.getParams().getOptionalInteger("port").ifPresent(builder::withPort);

builder.withCompression(ProtocolOptions.Compression.NONE);
// builder.withCompression(ProtocolOptions.Compression.NONE);
// TODO add map based configuration with compression defaults

Optional<String> usernameOpt = activityDef.getParams().getOptionalString("username");
Optional<String> passwordOpt = activityDef.getParams().getOptionalString("password");
Optional<String> passfileOpt = activityDef.getParams().getOptionalString("passfile");
Optional<String> authIdOpt = activityDef.getParams().getOptionalString("authid");

if (usernameOpt.isPresent()) {
String username = usernameOpt.get();
@ -119,7 +155,11 @@ public class CQLSessionCache implements Shutdownable {
logger.error(error);
throw new RuntimeException(error);
}
builder.withCredentials(username, password);
if (authIdOpt.isPresent()) {
builder.withAuthCredentials(username, password, authIdOpt.get());
} else {
builder.withAuthCredentials(username, password);
}
}

Optional<String> clusteropts = activityDef.getParams().getOptionalString("cbopts");
@ -1,70 +1,135 @@
# cql driver
# cql-d4 driver

This is the CQL version 4 driver for NoSQLBench. As it gets more use, we will make it the primary driver under the 'cql'
name. For now, the 'cql' refers to the version 1.9 driver, while 'cqld4' refers to this one. The drivers will have
identical features where possible, but new enhancements will be targeted at this one first.

In the alpha release of this NoSQLBench CQL driver, some of the options previously available on the CQL 1.9 driver will
not be supported. We are working to add these in an idiomatic way ASAP.

This is a driver which allows for the execution of CQL statements. This driver supports both sync and async modes, with
detailed metrics provided for both.

### Example activity definitions
TEMPORARY EDITOR'S NOTE: This will use a more consistent layout as shown below. The topics are meant to be searchable in
the newer doc system scheme.

Run a cql activity named 'cql1', with definitions from activities/cqldefs.yaml
~~~
... driver=cql alias=cql1 workload=cqldefs
~~~
## Activity Params

Run a cql activity defined by cqldefs.yaml, but with shortcut naming
~~~
... driver=cql workload=cqldefs
~~~
These are the parameters that you can provide when starting an activity with this driver.

Only run statement groups which match a tag regex
~~~
... driver=cql workload=cqldefs tags=group:'ddl.*'
~~~
Any parameter that is marked as *required* must be provided or an error will be thrown at activity startup. All other
parameters are marked as *optional*.

Run the matching 'dml' statements, with 100 cycles, from [1000..1100)
~~~
... driver=cql workload=cqldefs tags=group:'dml.*' cycles=1000..1100
~~~
This last example shows that the cycle range is [inclusive..exclusive),
to allow for stacking test intervals. This is standard across all
activity types.
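For example, `cycles=1000..1100` executes exactly 100 cycles, 1000 through 1099, so a following interval can safely start at 1100 without overlap.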
Any parameter that is marked as *static* may not be changed while an activity is running. All other parameters are
marked as *dynamic*, meaning that they may be changed while an activity is running via scripting.

### CQL ActivityType Parameters
#### sessionid

*optional*, *static*

The `sessionid` parameter allows you to logically assign a named instance of a session and session configuration to each
activity that you run. This allows for different driver settings to be used within the same scenario.

Default
: default

Example:
: `sessionid=test43`
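
For instance, a single scenario could run two activities with separately configured sessions (the aliases and session names here are hypothetical):

~~~
... driver=cqld4 alias=writes workload=cqldefs sessionid=session_writes
... driver=cqld4 alias=reads workload=cqldefs sessionid=session_reads
~~~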

#### profile

*optional*, *static*

Controls the configuration profile used by the driver. If you provide a value for this parameter, then a configuration
file under that name must exist, or an error will be thrown. This is a driver configuration file, as documented in
[DataStax Java Driver - Configuration](https://docs.datastax.com/en/developer/java-driver/4.6/manual/core/configuration/).

The profile is keyed to the sessionid, as each session id will be configured with the named profile just as you would
see with normal file-based driver configuration. Thus, changing the configuration within the profile will affect future
operations which share the same session.

While the profile itself is not changeable after it has been set, the parameters that are in the profile may be
dynamically changed, depending on how they are annotated below.

*All other driver settings provided to an activity are applied on top of the named profile, and will override the
values provided from the named profile unless otherwise stated. These overrides do not affect the named file, only the
runtime behavior of the driver.*

Default
: 'default'

Examples
: `profile=experimental-settings`
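
As a sketch only, assuming the standard `application.conf` format described in the driver documentation linked above, a
named profile such as `experimental-settings` might be declared like this:

~~~
datastax-java-driver {
  profiles {
    experimental-settings {
      basic.request.timeout = 5 seconds
      basic.request.consistency = LOCAL_QUORUM
    }
  }
}
~~~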

#### secureconnectbundle

*optional*, *static*

This parameter is used to connect to Astra Database as a Service. This option accepts a path to the secure connect
bundle that is downloaded from the Astra UI.

Default
: undefined

Examples
: `secureconnectbundle=/tmp/secure-connect-my_db.zip`
: `secureconnectbundle="/home/automaton/secure-connect-my_db.zip"`

#### hosts

*optional*, *static*

The host or hosts to use to connect to the cluster. If you specify multiple values here, use commas with no spaces.
*This option is not valid when the `secureconnectbundle` option is used.*

Default
: localhost

Examples
: `host=192.168.1.25`
: `host=192.168.1.25,testhost42`

#### port

*optional*, *static*

The port to connect with. *This option is not valid when the `secureconnectbundle` option is used.*

Default
: 9042

- **cqldriver** - default: dse - The type of driver to use, either dse, or oss. If you need DSE-specific features, use
the dse driver. If you are connecting to an OSS Apache Cassandra cluster, you must use the oss driver. The oss driver
option is only available in nosqlbench.
- **host** - The host or hosts to use for connection points to
the cluster. If you specify multiple values here, use commas
with no spaces.
Examples:
- `host=192.168.1.25`
- `host=192.168.1.25,testhost42`
- **workload** - The workload definition which holds the schema and statement defs.
see workload yaml location for additional details
(no default, required)
- **port** - The port to connect with
- **cl** - An override to consistency levels for the activity. If
this option is used, then all consistency levels will be replaced
by this one for the current activity, and a log line explaining
the difference with respect to the yaml will be emitted.
This is not a dynamic parameter. It will only be applied at
activity start.
- **cbopts** - default: none - this is how you customize the cluster
settings for the client, including policies, compression, etc. This
is a string of *Java*-like method calls just as you would use them
in the Cluster.Builder fluent API. They are evaluated inline
with the default Cluster.Builder options not covered below.
Example: cbopts=".withCompression(ProtocolOptions.Compression.NONE)"
- `port=9042`

#### cl

*optional*, *static*

An override to consistency levels for the activity. If this option is used, then all consistency levels will be set to
this by default for the current activity, and a log line explaining the difference with respect to the yaml will be
emitted. This is not a dynamic parameter. It will only be applied at activity start.
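
Examples
: `cl=LOCAL_QUORUM`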

#### whitelist

---- below this line needs to be curated for the new driver ----

- **whitelist** default: none - Applies a whitelist policy to the load balancing
policy in the driver. If used, a WhitelistPolicy(RoundRobinPolicy())
will be created and added to the cluster builder on startup.
Examples:
- whitelist=127.0.0.1
- whitelist=127.0.0.1:9042,127.0.0.2:1234

- **cbopts** - default: none - this is how you customize the cluster
settings for the client, including policies, compression, etc. This
is a string of *Java*-like method calls just as you would use them
in the Cluster.Builder fluent API. They are evaluated inline
with the default Cluster.Builder options not covered below.
Example: cbopts=".withCompression(ProtocolOptions.Compression.NONE)"
- **retrypolicy** default: none - Applies a retry policy in the driver
The only option supported for this version is `retrypolicy=logging`,
which uses the default retry policy, but with logging added.
@ -238,11 +303,6 @@ activity types.
code base. This is for dynamic codec loading with user-provided codecs mapped
via the internal UDT APIs.
default: false
- **secureconnectbundle** - used to connect to CaaS, accepts a path to the secure connect bundle
that is downloaded from the CaaS UI.
Examples:
- `secureconnectbundle=/tmp/secure-connect-my_db.zip`
- `secureconnectbundle="/home/automaton/secure-connect-my_db.zip"`
- **insights** - Set to false to disable the driver from sending insights monitoring information
- `insights=false`
- **tickduration** - sets the tickDuration (milliseconds) of HashedWheelTimer of the
@ -4,7 +4,7 @@
<parent>
<groupId>io.nosqlbench</groupId>
<artifactId>mvn-defaults</artifactId>
<version>3.12.107-SNAPSHOT</version>
<version>3.12.119-SNAPSHOT</version>
<relativePath>../mvn-defaults</relativePath>
</parent>

@ -24,7 +24,7 @@
<dependency>
<groupId>io.nosqlbench</groupId>
<artifactId>driver-cql-shaded</artifactId>
<version>3.12.107-SNAPSHOT</version>
<version>3.12.119-SNAPSHOT</version>
</dependency>

</dependencies>

@ -5,7 +5,7 @@
<parent>
<artifactId>mvn-defaults</artifactId>
<groupId>io.nosqlbench</groupId>
<version>3.12.107-SNAPSHOT</version>
<version>3.12.119-SNAPSHOT</version>
<relativePath>../mvn-defaults</relativePath>
</parent>

@ -20,7 +20,7 @@
<dependency>
<groupId>io.nosqlbench</groupId>
<artifactId>engine-api</artifactId>
<version>3.12.107-SNAPSHOT</version>
<version>3.12.119-SNAPSHOT</version>
</dependency>

<dependency>

@ -5,7 +5,7 @@
<parent>
<artifactId>mvn-defaults</artifactId>
<groupId>io.nosqlbench</groupId>
<version>3.12.107-SNAPSHOT</version>
<version>3.12.119-SNAPSHOT</version>
<relativePath>../mvn-defaults</relativePath>
</parent>

@ -18,10 +18,11 @@
</description>

<dependencies>

<dependency>
<groupId>io.nosqlbench</groupId>
<artifactId>engine-api</artifactId>
<version>3.12.107-SNAPSHOT</version>
<version>3.12.119-SNAPSHOT</version>
</dependency>

<!-- test scope only -->
85
driver-kafka/pom.xml
Normal file
@ -0,0 +1,85 @@
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>

<parent>
<artifactId>mvn-defaults</artifactId>
<groupId>io.nosqlbench</groupId>
<version>3.12.119-SNAPSHOT</version>
<relativePath>../mvn-defaults</relativePath>
</parent>

<artifactId>driver-kafka</artifactId>
<packaging>jar</packaging>
<name>${project.artifactId}</name>

<description>
A Kafka driver for nosqlbench. This provides the ability to inject synthetic data
into a kafka topic.
</description>

<dependencies>

<!-- core dependencies -->

<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka-clients</artifactId>
<version>2.0.0</version>
</dependency>

<dependency>
<groupId>io.nosqlbench</groupId>
<artifactId>engine-api</artifactId>
<version>3.12.119-SNAPSHOT</version>
</dependency>

<dependency>
<groupId>io.nosqlbench</groupId>
<artifactId>driver-stdout</artifactId>
<version>3.12.119-SNAPSHOT</version>
</dependency>

<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
<version>1.7.25</version>
</dependency>

<!-- test only scope -->

<dependency>
<groupId>org.testng</groupId>
<artifactId>testng</artifactId>
<version>6.13.1</version>
<scope>test</scope>
</dependency>

<dependency>
<groupId>org.assertj</groupId>
<artifactId>assertj-core-java8</artifactId>
<version>1.0.0m1</version>
<scope>test</scope>
</dependency>

</dependencies>

<!-- <profiles>-->
<!-- <profile>-->
<!-- <id>shade</id>-->
<!-- <activation>-->
<!-- <activeByDefault>true</activeByDefault>-->
<!-- </activation>-->
<!-- <build>-->
<!-- <plugins>-->
<!-- <plugin>-->
<!-- <artifactId>maven-shade-plugin</artifactId>-->
<!-- <configuration>-->
<!-- <finalName>${project.artifactId}</finalName>-->
<!-- </configuration>-->
<!-- </plugin>-->
<!-- </plugins>-->
<!-- </build>-->
<!-- </profile>-->
<!-- </profiles>-->

</project>
@ -0,0 +1,68 @@
package com.datastax.ebdrivers.kafkaproducer;

import io.nosqlbench.activitytype.stdout.StdoutActivity;
import io.nosqlbench.engine.api.activityimpl.ActivityDef;
import org.apache.kafka.clients.producer.*;
import org.apache.kafka.common.serialization.LongSerializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.Arrays;
import java.util.Properties;
import java.util.concurrent.Future;
import java.util.stream.Collectors;

public class KafkaProducerActivity extends StdoutActivity {
private final static Logger logger = LoggerFactory.getLogger(KafkaProducerActivity.class);
private Producer<Long,String> producer = null;
private String topic;

public KafkaProducerActivity(ActivityDef activityDef) {
super(activityDef);
}

public synchronized Producer<Long,String> getKafkaProducer() {
if (producer!=null) {
return producer;
}
Properties props = new Properties();
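// Build the bootstrap server list: default to localhost:9092, and append the default Kafka port 9092 to any host given without one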
String servers = Arrays.stream(activityDef.getParams().getOptionalString("host","hosts")
.orElse("localhost" + ":9092")
.split(","))
.map(x -> x.indexOf(':') == -1 ? x + ":9092" : x)
.collect(Collectors.joining(","));
String clientId = activityDef.getParams().getOptionalString("clientid","client.id","client_id")
.orElse("TestProducerClientId");
String key_serializer =
activityDef.getParams().getOptionalString("key_serializer").orElse(LongSerializer.class.getName());
String value_serializer =
activityDef.getParams().getOptionalString("value_serializer").orElse(StringSerializer.class.getName());

props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, servers);
props.put(ProducerConfig.CLIENT_ID_CONFIG, clientId);
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, key_serializer);
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, value_serializer);

producer = new KafkaProducer<>(props);
return producer;
}

@Override
public synchronized void write(String statement) {
Producer<Long, String> kafkaProducer = getKafkaProducer();
ProducerRecord<Long, String> record = new ProducerRecord<>(topic, statement);
Future<RecordMetadata> send = kafkaProducer.send(record);
try {
RecordMetadata result = send.get();
} catch (Exception e) {
throw new RuntimeException(e);
}
}

@Override
public void onActivityDefUpdate(ActivityDef activityDef) {
this.topic = activityDef.getParams().getOptionalString("topic").orElse("default-topic");
super.onActivityDefUpdate(activityDef);
}
}
@ -0,0 +1,40 @@
package com.datastax.ebdrivers.kafkaproducer;

import io.nosqlbench.activitytype.stdout.StdoutAction;
import io.nosqlbench.activitytype.stdout.StdoutActivity;
import io.nosqlbench.engine.api.activityapi.core.Action;
import io.nosqlbench.engine.api.activityapi.core.ActionDispenser;
import io.nosqlbench.engine.api.activityapi.core.ActivityType;
import io.nosqlbench.engine.api.activityimpl.ActivityDef;
import io.nosqlbench.nb.annotations.Service;

@Service(ActivityType.class)
public class KafkaProducerActivityType implements ActivityType<KafkaProducerActivity> {
@Override
public String getName() {
return "kafkaproducer";
}

@Override
public KafkaProducerActivity getActivity(ActivityDef activityDef) {
return new KafkaProducerActivity(activityDef);
}

private static class Dispenser implements ActionDispenser {
private StdoutActivity activity;

private Dispenser(StdoutActivity activity) {
this.activity = activity;
}

@Override
public Action getAction(int slot) {
return new StdoutAction(slot,this.activity);
}
}

@Override
public ActionDispenser getActionDispenser(KafkaProducerActivity activity) {
return new Dispenser(activity);
}
}
32
driver-kafka/src/main/resources/kafkaproducer.md
Normal file
@ -0,0 +1,32 @@
# kafkaproducer

This is an activity type which allows for a stream of data to be sent to a kafka topic. It is based on the stdout
activity statement format.

## Parameters

- **topic** - The topic to write to for this activity.
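
An illustrative invocation (the workload name here is hypothetical) might look like:

    ... driver=kafkaproducer workload=kafka-trades topic=trades host=kafkahost1,kafkahost2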
### Examples

Refer to the online standard YAML documentation for a detailed walk-through.
An example yaml is below for sending structured JSON to a kafka topic:

    bindings:
     price: Normal(10.0D,2.0D) -> double; Save('price') -> double;
     quantity: Normal(10000.0D,100.0D); Add(-10000.0D); Save('quantity') -> double;
     total: Identity(); Expr('price * quantity') -> double;
     client: WeightedStrings('ABC_TEST:3;DFG_TEST:3;STG_TEST:14');
     clientid: HashRange(0,1000000000) -> long;

    statements:
     - |
      \{
       "trade": \{
        "price": {price},
        "quantity": {quantity},
        "total": {total},
        "client": "{client}",
        "clientid":"{clientid}"
       \}
      \}
49
driver-mongodb/pom.xml
Normal file
@ -0,0 +1,49 @@
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>

<artifactId>driver-mongodb</artifactId>
<packaging>jar</packaging>

<parent>
<artifactId>mvn-defaults</artifactId>
<groupId>io.nosqlbench</groupId>
<version>3.12.119-SNAPSHOT</version>
<relativePath>../mvn-defaults</relativePath>
</parent>

<name>${project.artifactId}</name>
<description>
A nosqlbench ActivityType (AT) driver module;
MongoDB
</description>

<dependencies>
<dependency>
<groupId>io.nosqlbench</groupId>
<artifactId>engine-api</artifactId>
<version>3.12.119-SNAPSHOT</version>
</dependency>

<dependency>
<groupId>org.mongodb</groupId>
<artifactId>mongodb-driver-sync</artifactId>
<version>4.0.3</version>
</dependency>

<!-- test scope only -->

<dependency>
<groupId>org.assertj</groupId>
<artifactId>assertj-core</artifactId>
<scope>test</scope>
</dependency>

<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<scope>test</scope>
</dependency>

</dependencies>

</project>
@ -0,0 +1,81 @@
package io.nosqlbench.driver.mongodb;

import java.util.concurrent.TimeUnit;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.codahale.metrics.Timer;
import com.mongodb.ReadPreference;
import com.mongodb.client.MongoDatabase;
import io.nosqlbench.engine.api.activityapi.core.SyncAction;
import io.nosqlbench.engine.api.activityapi.planning.OpSequence;
import org.bson.Document;
import org.bson.conversions.Bson;

public class MongoAction implements SyncAction {

private final static Logger logger = LoggerFactory.getLogger(MongoAction.class);

private final MongoActivity activity;
private final int slot;

private OpSequence<ReadyMongoStatement> sequencer;

public MongoAction(MongoActivity activity, int slot) {
this.activity = activity;
this.slot = slot;
}

@Override
public void init() {
this.sequencer = activity.getOpSequencer();
}

@Override
public int runCycle(long cycleValue) {
ReadyMongoStatement rms;
Bson queryBson;
try (Timer.Context bindTime = activity.bindTimer.time()) {
rms = sequencer.get(cycleValue);
queryBson = rms.bind(cycleValue);

// Maybe show the query in log/console - only for diagnostic use
if (activity.isShowQuery()) {
logger.info("Query(cycle={}):\n{}", cycleValue, queryBson);
}
}

long nanoStartTime = System.nanoTime();
for (int i = 1; i <= activity.getMaxTries(); i++) {
activity.triesHisto.update(i);

try (Timer.Context resultTime = activity.resultTimer.time()) {
MongoDatabase database = activity.getDatabase();
ReadPreference readPreference = rms.getReadPreference();

// assuming the commands are one of these in the doc:
// https://docs.mongodb.com/manual/reference/command/nav-crud/
Document resultDoc = database.runCommand(queryBson, readPreference);

long resultNanos = System.nanoTime() - nanoStartTime;

// TODO: perhaps collect the operationTime from the resultDoc if any
// https://docs.mongodb.com/manual/reference/method/db.runCommand/#command-response
int ok = Double.valueOf((double) resultDoc.getOrDefault("ok", 0.0d)).intValue();
if (ok == 1) {
// success
activity.resultSuccessTimer.update(resultNanos, TimeUnit.NANOSECONDS);
}
activity.resultSetSizeHisto.update(resultDoc.getInteger("n", 0));

return ok == 1 ? 0 : 1;
} catch (Exception e) {
logger.error("Failed to runCommand {} on cycle {}, tries {}", queryBson, cycleValue, i, e);
}
}

throw new RuntimeException(String.format("Exhausted max tries (%s) on cycle %s",
activity.getMaxTries(), cycleValue));
}
}
@ -0,0 +1,139 @@
package io.nosqlbench.driver.mongodb;

import java.util.List;
import java.util.Objects;
import java.util.function.Function;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.codahale.metrics.Histogram;
import com.codahale.metrics.Timer;
import com.mongodb.client.MongoClient;
import com.mongodb.client.MongoClients;
import com.mongodb.client.MongoDatabase;
import io.nosqlbench.engine.api.activityapi.core.ActivityDefObserver;
import io.nosqlbench.engine.api.activityapi.planning.OpSequence;
import io.nosqlbench.engine.api.activityapi.planning.SequencePlanner;
import io.nosqlbench.engine.api.activityapi.planning.SequencerType;
import io.nosqlbench.engine.api.activityconfig.ParsedStmt;
import io.nosqlbench.engine.api.activityconfig.StatementsLoader;
import io.nosqlbench.engine.api.activityconfig.yaml.StmtDef;
import io.nosqlbench.engine.api.activityconfig.yaml.StmtsDocList;
import io.nosqlbench.engine.api.activityimpl.ActivityDef;
import io.nosqlbench.engine.api.activityimpl.SimpleActivity;
import io.nosqlbench.engine.api.metrics.ActivityMetrics;
import io.nosqlbench.engine.api.templating.StrInterpolator;
import io.nosqlbench.engine.api.util.TagFilter;

public class MongoActivity extends SimpleActivity implements ActivityDefObserver {

private final static Logger logger = LoggerFactory.getLogger(MongoActivity.class);

private String yamlLoc;
private String connectionString;
private String databaseName;

private MongoClient client;
private MongoDatabase mongoDatabase;
private boolean showQuery;
private int maxTries;

private OpSequence<ReadyMongoStatement> opSequence;

Timer bindTimer;
Timer resultTimer;
Timer resultSuccessTimer;
Histogram resultSetSizeHisto;
Histogram triesHisto;

public MongoActivity(ActivityDef activityDef) {
super(activityDef);
}

@Override
public synchronized void onActivityDefUpdate(ActivityDef activityDef) {
super.onActivityDefUpdate(activityDef);

// sanity check
yamlLoc = activityDef.getParams().getOptionalString("yaml", "workload")
.orElseThrow(() -> new IllegalArgumentException("yaml is not defined"));
connectionString = activityDef.getParams().getOptionalString("connection")
.orElseThrow(() -> new IllegalArgumentException("connection is not defined"));
// TODO: support multiple databases
databaseName = activityDef.getParams().getOptionalString("database")
.orElseThrow(() -> new IllegalArgumentException("database is not defined"));
}

@Override
public void initActivity() {
logger.debug("initializing activity: " + this.activityDef.getAlias());
onActivityDefUpdate(activityDef);

opSequence = initOpSequencer();
setDefaultsFromOpSequence(opSequence);

client = MongoClients.create(connectionString);
mongoDatabase = client.getDatabase(databaseName);
showQuery = activityDef.getParams().getOptionalBoolean("showquery")
.orElse(false);
maxTries = activityDef.getParams().getOptionalInteger("maxtries")
.orElse(10);

bindTimer = ActivityMetrics.timer(activityDef, "bind");
resultTimer = ActivityMetrics.timer(activityDef, "result");
resultSuccessTimer = ActivityMetrics.timer(activityDef, "result-success");
resultSetSizeHisto = ActivityMetrics.histogram(activityDef, "resultset-size");
triesHisto = ActivityMetrics.histogram(activityDef, "tries");
}

@Override
public void shutdownActivity() {
logger.debug("shutting down activity: " + this.activityDef.getAlias());
if (client != null) {
client.close();
}
}

OpSequence<ReadyMongoStatement> initOpSequencer() {
SequencerType sequencerType = SequencerType.valueOf(
activityDef.getParams().getOptionalString("seq").orElse("bucket")
);
SequencePlanner<ReadyMongoStatement> sequencer = new SequencePlanner<>(sequencerType);

StmtsDocList stmtsDocList = StatementsLoader.load(logger, yamlLoc, new StrInterpolator(activityDef), "activities");

String tagfilter = activityDef.getParams().getOptionalString("tags").orElse("");

TagFilter tagFilter = new TagFilter(tagfilter);
stmtsDocList.getStmts().stream().map(tagFilter::matchesTaggedResult).forEach(r -> logger.info(r.getLog()));

List<StmtDef> stmts = stmtsDocList.getStmts(tagfilter);
for (StmtDef stmt : stmts) {
ParsedStmt parsed = stmt.getParsed().orError();
String statement = parsed.getPositionalStatement(Function.identity());
Objects.requireNonNull(statement);

sequencer.addOp(new ReadyMongoStatement(stmt),
Long.parseLong(stmt.getParams().getOrDefault("ratio","1")));
}

return sequencer.resolve();
}

protected MongoDatabase getDatabase() {
return mongoDatabase;
}

protected OpSequence<ReadyMongoStatement> getOpSequencer() {
return opSequence;
}

protected boolean isShowQuery() {
return showQuery;
}

protected int getMaxTries() {
return maxTries;
}
}
@ -0,0 +1,41 @@
package io.nosqlbench.driver.mongodb;

import io.nosqlbench.engine.api.activityapi.core.Action;
import io.nosqlbench.engine.api.activityapi.core.ActionDispenser;
import io.nosqlbench.engine.api.activityapi.core.ActivityType;
import io.nosqlbench.engine.api.activityimpl.ActivityDef;
import io.nosqlbench.nb.annotations.Service;

@Service(ActivityType.class)
public class MongoActivityType implements ActivityType<MongoActivity> {

    @Override
    public String getName() {
        return "mongodb";
    }

    @Override
    public MongoActivity getActivity(ActivityDef activityDef) {
        return new MongoActivity(activityDef);
    }

    @Override
    public ActionDispenser getActionDispenser(MongoActivity activity) {
        return new MongoActionDispenser(activity);
    }

    private static class MongoActionDispenser implements ActionDispenser {

        private final MongoActivity activity;

        public MongoActionDispenser(MongoActivity activity) {
            this.activity = activity;
        }

        @Override
        public Action getAction(int slot) {
            return new MongoAction(activity, slot);
        }
    }
}
@ -0,0 +1,34 @@
package io.nosqlbench.driver.mongodb;

import com.mongodb.ReadPreference;
import io.nosqlbench.engine.api.activityconfig.yaml.StmtDef;
import io.nosqlbench.virtdata.core.bindings.BindingsTemplate;
import io.nosqlbench.virtdata.core.templates.ParsedTemplate;
import io.nosqlbench.virtdata.core.templates.StringBindings;
import io.nosqlbench.virtdata.core.templates.StringBindingsTemplate;
import org.bson.Document;
import org.bson.conversions.Bson;

public class ReadyMongoStatement {

    private StringBindings bindings;
    private ReadPreference readPreference;

    public ReadyMongoStatement(StmtDef stmtDef) {
        ParsedTemplate paramTemplate = new ParsedTemplate(stmtDef.getStmt(), stmtDef.getBindings());
        BindingsTemplate paramBindings = new BindingsTemplate(paramTemplate.getBindPoints());
        StringBindingsTemplate template = new StringBindingsTemplate(stmtDef.getStmt(), paramBindings);

        this.bindings = template.resolve();
        this.readPreference = ReadPreference.valueOf(stmtDef.getParams()
                .getOrDefault("readPreference","primary"));
    }

    public ReadPreference getReadPreference() {
        return readPreference;
    }

    public Bson bind(long value) {
        return Document.parse(bindings.bind(value));
    }
}
@ -0,0 +1,61 @@
# nb -v run driver=mongodb yaml=mongodb-basic connection=mongodb://127.0.0.1 database=testdb tags=phase:rampup
description: An example of a basic mongo insert and find.

scenarios:
  default:
    - run driver=mongodb tags==phase:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
    - run driver=mongodb tags==phase:main cycles===TEMPLATE(main-cycles,10000000) threads=auto

bindings:
  seq_key: Mod(<<keycount:1000000000>>); ToString() -> String
  seq_value: Hash(); Mod(<<valuecount:1000000000>>); ToString() -> String
  rw_key: <<keydist:Uniform(0,1000000000)->int>>; ToString() -> String
  rw_value: Hash(); <<valdist:Uniform(0,1000000000)->int>>; ToString() -> String

blocks:
  - name: rampup
    tags:
      phase: rampup
    statements:
      - rampup-insert: |
          {
            insert: "<<collection:keyvalue>>",
            documents: [ { key: {seq_key},
                           value: {seq_value} } ]
          }
        params:
          readPreference: primary
        tags:
          name: rampup-insert
  - name: main-read
    tags:
      phase: main
      type: read
    params:
      ratio: <<read_ratio:5>>
    statements:
      - main-find: |
          {
            find: "<<collection:keyvalue>>",
            filter: { key: {rw_key} }
          }
        params:
          readPreference: primary
        tags:
          name: main-find
  - name: main-write
    tags:
      phase: main
      type: write
    params:
      ratio: <<write_ratio:5>>
    statements:
      - main-insert: |
          {
            insert: "<<collection:keyvalue>>",
            documents: [ { key: {rw_key},
                           value: {rw_value} } ]
          }
        params:
          readPreference: primary
        tags:
          name: main-insert
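For orientation, at each cycle the `rampup-insert` template above renders into a concrete `db.runCommand` document once the bindings replace `{seq_key}` and `{seq_value}`; the key and value below are illustrative, not taken from a real run, and `<<collection:keyvalue>>` falls back to its default of `keyvalue`:

```
{
    insert: "keyvalue",
    documents: [ { key: "831", value: "224537" } ]
}
```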
20  driver-mongodb/src/main/resources/mongodb.md  Normal file
@ -0,0 +1,20 @@
# MongoDB Driver

This is a driver for MongoDB. It supports the `db.runCommand` API described [here](https://docs.mongodb.com/manual/reference/command/).

### Example activity definitions

Run a mongodb activity with definitions from activities/mongodb-basic.yaml
```
... driver=mongodb yaml=activities/mongodb-basic.yaml
```

### MongoDB ActivityType Parameters

- **connection** (Mandatory) - connection string of the target MongoDB.

  Example: `mongodb://127.0.0.1`

- **database** (Mandatory) - target database

  Example: `testdb`
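A complete invocation combining both mandatory parameters with a workload might look like the following sketch, mirroring the comment line at the top of mongodb-basic.yaml (host, database, and tag filter are placeholders):

```
./nb run driver=mongodb yaml=activities/mongodb-basic.yaml connection=mongodb://127.0.0.1 database=testdb tags=phase:rampup
```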
@ -0,0 +1,33 @@
package io.nosqlbench.driver.mongodb;

import org.junit.Before;
import org.junit.Test;

import io.nosqlbench.engine.api.activityapi.planning.OpSequence;
import io.nosqlbench.engine.api.activityimpl.ActivityDef;

import static org.assertj.core.api.Assertions.assertThat;

public class MongoActivityTest {

    private ActivityDef activityDef;

    @Before
    public void setup() {
        String[] params = {
                "yaml=activities/mongodb-basic.yaml",
                "connection=mongodb://127.0.0.1",
                "database=nosqlbench_testdb"
        };
        activityDef = ActivityDef.parseActivityDef(String.join(";", params));
    }

    @Test
    public void testInitOpSequencer() {
        MongoActivity mongoActivity = new MongoActivity(activityDef);
        mongoActivity.initActivity();

        OpSequence<ReadyMongoStatement> sequence = mongoActivity.initOpSequencer();
        assertThat(sequence.getOps()).hasSize(3);
    }
}
@ -0,0 +1,107 @@
package io.nosqlbench.driver.mongodb;

import java.util.List;
import java.util.Objects;
import java.util.function.Function;

import org.junit.Before;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import io.nosqlbench.engine.api.activityconfig.ParsedStmt;
import io.nosqlbench.engine.api.activityconfig.StatementsLoader;
import io.nosqlbench.engine.api.activityconfig.yaml.StmtDef;
import io.nosqlbench.engine.api.activityconfig.yaml.StmtsDocList;
import io.nosqlbench.engine.api.activityimpl.ActivityDef;
import io.nosqlbench.engine.api.templating.StrInterpolator;
import io.nosqlbench.virtdata.core.templates.BindPoint;
import org.bson.conversions.Bson;

import static org.assertj.core.api.Assertions.assertThat;

public class ReadyMongoStatementTest {
    private final static Logger logger = LoggerFactory.getLogger(ReadyMongoStatementTest.class);

    private ActivityDef activityDef;
    private StmtsDocList stmtsDocList;

    @Before
    public void setup() {
        String[] params = {
                "yaml=activities/mongodb-basic.yaml",
                "database=nosqlbench_testdb",
        };
        activityDef = ActivityDef.parseActivityDef(String.join(";", params));
        String yaml_loc = activityDef.getParams().getOptionalString("yaml", "workload").orElse("default");
        stmtsDocList = StatementsLoader.load(logger, yaml_loc, new StrInterpolator(activityDef), "activities");
    }

    @Test
    public void testResolvePhaseRampup() {
        String tagfilter = activityDef.getParams().getOptionalString("tags").orElse("phase:rampup");

        List<StmtDef> stmts = stmtsDocList.getStmts(tagfilter);
        assertThat(stmts).hasSize(1);
        for (StmtDef stmt : stmts) {
            ParsedStmt parsed = stmt.getParsed().orError();
            assertThat(parsed.getBindPoints()).hasSize(2);

            BindPoint seqKey = new BindPoint("seq_key", "Mod(1000000000); ToString() -> String");
            BindPoint seqValue = new BindPoint("seq_value", "Hash(); Mod(1000000000); ToString() -> String");
            assertThat(parsed.getBindPoints()).containsExactly(seqKey, seqValue);

            String statement = parsed.getPositionalStatement(Function.identity());
            Objects.requireNonNull(statement);

            ReadyMongoStatement readyMongoStatement = new ReadyMongoStatement(stmt);
            Bson bsonDoc = readyMongoStatement.bind(1L);
            assertThat(bsonDoc).isNotNull();
        }
    }

    @Test
    public void testResolvePhaseMainRead() {
        String tagfilter = activityDef.getParams().getOptionalString("tags").orElse("phase:main,name:main-find");

        List<StmtDef> stmts = stmtsDocList.getStmts(tagfilter);
        assertThat(stmts).hasSize(1);
        for (StmtDef stmt : stmts) {
            ParsedStmt parsed = stmt.getParsed().orError();
            assertThat(parsed.getBindPoints()).hasSize(1);

            BindPoint rwKey = new BindPoint("rw_key", "Uniform(0,1000000000)->int; ToString() -> String");
            assertThat(parsed.getBindPoints()).containsExactly(rwKey);

            String statement = parsed.getPositionalStatement(Function.identity());
            Objects.requireNonNull(statement);

            ReadyMongoStatement readyMongoStatement = new ReadyMongoStatement(stmt);
            Bson bsonDoc = readyMongoStatement.bind(1L);
            assertThat(bsonDoc).isNotNull();
        }
    }

    @Test
    public void testResolvePhaseMainWrite() {
        String tagfilter = activityDef.getParams().getOptionalString("tags").orElse("phase:main,name:main-insert");

        List<StmtDef> stmts = stmtsDocList.getStmts(tagfilter);
        assertThat(stmts).hasSize(1);
        for (StmtDef stmt : stmts) {
            ParsedStmt parsed = stmt.getParsed().orError();
            assertThat(parsed.getBindPoints()).hasSize(2);

            BindPoint rwKey = new BindPoint("rw_key", "Uniform(0,1000000000)->int; ToString() -> String");
            BindPoint rwValue = new BindPoint("rw_value", "Hash(); Uniform(0,1000000000)->int; ToString() -> String");
            assertThat(parsed.getBindPoints()).containsExactly(rwKey, rwValue);

            String statement = parsed.getPositionalStatement(Function.identity());
            Objects.requireNonNull(statement);

            ReadyMongoStatement readyMongoStatement = new ReadyMongoStatement(stmt);
            Bson bsonDoc = readyMongoStatement.bind(1L);
            assertThat(bsonDoc).isNotNull();
        }
    }
}
@ -7,7 +7,7 @@
<parent>
    <artifactId>mvn-defaults</artifactId>
    <groupId>io.nosqlbench</groupId>
    <version>3.12.107-SNAPSHOT</version>
    <version>3.12.119-SNAPSHOT</version>
    <relativePath>../mvn-defaults</relativePath>
</parent>

@ -21,7 +21,7 @@
<dependency>
    <groupId>io.nosqlbench</groupId>
    <artifactId>engine-api</artifactId>
    <version>3.12.107-SNAPSHOT</version>
    <version>3.12.119-SNAPSHOT</version>
</dependency>

<!-- test scope only -->

@ -7,7 +7,7 @@
<parent>
    <artifactId>mvn-defaults</artifactId>
    <groupId>io.nosqlbench</groupId>
    <version>3.12.107-SNAPSHOT</version>
    <version>3.12.119-SNAPSHOT</version>
    <relativePath>../mvn-defaults</relativePath>
</parent>

@ -23,13 +23,13 @@
<dependency>
    <groupId>io.nosqlbench</groupId>
    <artifactId>engine-api</artifactId>
    <version>3.12.107-SNAPSHOT</version>
    <version>3.12.119-SNAPSHOT</version>
</dependency>

<dependency>
    <groupId>io.nosqlbench</groupId>
    <artifactId>driver-stdout</artifactId>
    <version>3.12.107-SNAPSHOT</version>
    <version>3.12.119-SNAPSHOT</version>
</dependency>

<!-- test scope only -->

@ -7,7 +7,7 @@
<parent>
    <artifactId>mvn-defaults</artifactId>
    <groupId>io.nosqlbench</groupId>
    <version>3.12.107-SNAPSHOT</version>
    <version>3.12.119-SNAPSHOT</version>
    <relativePath>../mvn-defaults</relativePath>
</parent>

@ -22,7 +22,7 @@
<dependency>
    <groupId>io.nosqlbench</groupId>
    <artifactId>engine-api</artifactId>
    <version>3.12.107-SNAPSHOT</version>
    <version>3.12.119-SNAPSHOT</version>
</dependency>

@ -5,7 +5,7 @@
<parent>
    <artifactId>mvn-defaults</artifactId>
    <groupId>io.nosqlbench</groupId>
    <version>3.12.107-SNAPSHOT</version>
    <version>3.12.119-SNAPSHOT</version>
    <relativePath>../mvn-defaults</relativePath>
</parent>

@ -22,19 +22,19 @@
<dependency>
    <groupId>io.nosqlbench</groupId>
    <artifactId>nb-api</artifactId>
    <version>3.12.107-SNAPSHOT</version>
    <version>3.12.119-SNAPSHOT</version>
</dependency>

<dependency>
    <groupId>io.nosqlbench</groupId>
    <artifactId>nb-annotations</artifactId>
    <version>3.12.107-SNAPSHOT</version>
    <version>3.12.119-SNAPSHOT</version>
</dependency>

<dependency>
    <groupId>io.nosqlbench</groupId>
    <artifactId>virtdata-userlibs</artifactId>
    <version>3.12.107-SNAPSHOT</version>
    <version>3.12.119-SNAPSHOT</version>
</dependency>

<dependency>
@ -244,7 +244,7 @@ public class SimpleActivity implements Activity {
     * by the provided ratios. Also, modify the ActivityDef with reasonable defaults when requested.
     * @param seq - The {@link OpSequence} to derive the defaults from
     */
    public void setDefaultsFromOpSequence(OpSequence seq) {
    public void setDefaultsFromOpSequence(OpSequence<?> seq) {
        Optional<String> strideOpt = getParams().getOptionalString("stride");
        if (strideOpt.isEmpty()) {
            String stride = String.valueOf(seq.getSequence().length);
@ -36,6 +36,11 @@ import java.util.regex.Pattern;
public class ActivityMetrics {

    private final static Logger logger = LoggerFactory.getLogger(ActivityMetrics.class);

    public static final String HDRDIGITS_PARAM = "hdr_digits";
    public static final int DEFAULT_HDRDIGITS = 4;
    private static int _HDRDIGITS = DEFAULT_HDRDIGITS;

    private static MetricRegistry registry;

    public static MetricFilter METRIC_FILTER = (name, metric) -> {

@ -43,6 +48,15 @@ public class ActivityMetrics {
    };
    private static List<MetricsCloseable> metricsCloseables = new ArrayList<>();

    public static int getHdrDigits() {
        return _HDRDIGITS;
    }

    public static void setHdrDigits(int hdrDigits) {
        ActivityMetrics._HDRDIGITS = hdrDigits;
    }

    private ActivityMetrics() {
    }

@ -88,6 +102,10 @@ public class ActivityMetrics {
    }
    /**
     * <p>Create a timer associated with an activity.</p>
     *
     * <p>If the provided ActivityDef contains a parameter "hdr_digits", then it will be used to set the number of
     * significant digits on the histogram's precision.</p>
     *
     * <p>This method ensures that if multiple threads attempt to create the same-named metric on a given activity,
     * that only one of them succeeds.</p>
     *

@ -98,15 +116,25 @@ public class ActivityMetrics {
    public static Timer timer(ActivityDef activityDef, String name) {
        String fullMetricName = activityDef.getAlias() + "." + name;
        Timer registeredTimer = (Timer) register(activityDef, name, () ->
            new NicerTimer(fullMetricName, new DeltaHdrHistogramReservoir(fullMetricName, 4)));
            new NicerTimer(fullMetricName,
                new DeltaHdrHistogramReservoir(
                    fullMetricName,
                    activityDef.getParams().getOptionalInteger(HDRDIGITS_PARAM).orElse(_HDRDIGITS)
                )
            ));
        return registeredTimer;
    }

    /**
     * <p>Create a histogram associated with an activity.</p>
     * <p>Create an HDR histogram associated with an activity.</p>
     *
     * <p>If the provided ActivityDef contains a parameter "hdr_digits", then it will be used to set the number of
     * significant digits on the histogram's precision.</p>
     *
     * <p>This method ensures that if multiple threads attempt to create the same-named metric on a given activity,
     * that only one of them succeeds.</p>
     *
     * @param activityDef an associated activity def
     * @param name a simple, descriptive name for the histogram
     * @return the histogram, perhaps a different one if it has already been registered

@ -114,7 +142,13 @@ public class ActivityMetrics {
    public static Histogram histogram(ActivityDef activityDef, String name) {
        String fullMetricName = activityDef.getAlias() + "." + name;
        return (Histogram) register(activityDef, name, () ->
            new NicerHistogram(fullMetricName, new DeltaHdrHistogramReservoir(fullMetricName, 4)));
            new NicerHistogram(
                fullMetricName,
                new DeltaHdrHistogramReservoir(
                    fullMetricName,
                    activityDef.getParams().getOptionalInteger(HDRDIGITS_PARAM).orElse(_HDRDIGITS)
                )
            ));
    }

    /**
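To make the lookup above concrete, here is a minimal sketch of how a per-activity `hdr_digits` parameter flows into the reservoir, assuming only the APIs visible in this diff (`ActivityDef.parseActivityDef` and `ActivityMetrics.timer`); the alias and digit count are arbitrary illustrations:

```java
// hypothetical illustration: one activity opts into 2 significant digits
ActivityDef def = ActivityDef.parseActivityDef("alias=demo;hdr_digits=2");
Timer bindTimer = ActivityMetrics.timer(def, "bind");
// the DeltaHdrHistogramReservoir behind this timer is now built with 2 digits,
// while activities without the parameter fall back to the global _HDRDIGITS default
```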
|
@ -4,7 +4,7 @@
|
||||
<parent>
|
||||
<artifactId>mvn-defaults</artifactId>
|
||||
<groupId>io.nosqlbench</groupId>
|
||||
<version>3.12.107-SNAPSHOT</version>
|
||||
<version>3.12.119-SNAPSHOT</version>
|
||||
<relativePath>../mvn-defaults</relativePath>
|
||||
</parent>
|
||||
|
||||
@ -23,7 +23,7 @@
|
||||
<dependency>
|
||||
<groupId>io.nosqlbench</groupId>
|
||||
<artifactId>engine-core</artifactId>
|
||||
<version>3.12.107-SNAPSHOT</version>
|
||||
<version>3.12.119-SNAPSHOT</version>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
@ -47,7 +47,7 @@
|
||||
<dependency>
|
||||
<groupId>io.nosqlbench</groupId>
|
||||
<artifactId>engine-docker</artifactId>
|
||||
<version>3.12.107-SNAPSHOT</version>
|
||||
<version>3.12.119-SNAPSHOT</version>
|
||||
</dependency>
|
||||
|
||||
</dependencies>
|
||||
|
@ -81,6 +81,8 @@ public class NBCLI {

        ConsoleLogging.enableConsoleLogging(options.wantsConsoleLogLevel(), options.getConsoleLoggingPattern());

        ActivityMetrics.setHdrDigits(options.getHdrDigits());

        if (options.wantsBasicHelp()) {
            System.out.println(loadHelpFile("basic.md"));
            System.exit(0);
@ -53,6 +53,7 @@ public class NBCLIOptions {
    private static final String WAIT_MILLIS = "waitmillis";
    private static final String EXPORT_CYCLE_LOG = "--export-cycle-log";
    private static final String IMPORT_CYCLE_LOG = "--import-cycle-log";
    private static final String HDR_DIGITS = "--hdr-digits";

    // Execution Options

@ -127,7 +128,7 @@ public class NBCLIOptions {
    private final List<String> wantsToIncludePaths = new ArrayList<>();
    private Scenario.Engine engine = Scenario.Engine.Graalvm;
    private boolean graaljs_compat = false;

    private int hdr_digits = 4;

    public NBCLIOptions(String[] args) {
        parse(args);

@ -213,6 +214,10 @@ public class NBCLIOptions {
                arglist.removeFirst();
                logsDirectory = readWordOrThrow(arglist, "a log directory");
                break;
            case HDR_DIGITS:
                arglist.removeFirst();
                hdr_digits = Integer.parseInt(readWordOrThrow(arglist, "significant digits"));
                break;
            case LOGS_MAX:
                arglist.removeFirst();
                logsMax = Integer.parseInt(readWordOrThrow(arglist, "max logfiles to keep"));

@ -531,6 +536,10 @@ public class NBCLIOptions {
//    }

    public int getHdrDigits() {
        return hdr_digits;
    }

    public String getProgressSpec() {
        ProgressSpec spec = parseProgressSpec(this.progressSpec); // sanity check
        if (spec.indicatorMode == IndicatorMode.console
@ -5,7 +5,7 @@
<parent>
    <artifactId>mvn-defaults</artifactId>
    <groupId>io.nosqlbench</groupId>
    <version>3.12.107-SNAPSHOT</version>
    <version>3.12.119-SNAPSHOT</version>
    <relativePath>../mvn-defaults</relativePath>
</parent>

@ -28,7 +28,7 @@
<dependency>
    <groupId>io.nosqlbench</groupId>
    <artifactId>engine-api</artifactId>
    <version>3.12.107-SNAPSHOT</version>
    <version>3.12.119-SNAPSHOT</version>
</dependency>

<dependency>
@ -154,6 +154,7 @@ public class Scenario implements Callable<ScenarioResult> {
        scriptEngine.put("params", scenarioScriptParams);

        if (engine == Engine.Graalvm) {
            // https://github.com/graalvm/graaljs/blob/master/docs/user/JavaInterop.md
            if (wantsGraaljsCompatMode) {
                scriptEngine.put("scenario", scenarioController);
                scriptEngine.put("metrics", new NashornMetricRegistryBindings(metricRegistry));
@ -1,4 +1,4 @@
package io.nosqlbench.core;
package io.nosqlbench.engine.core;

import io.nosqlbench.engine.api.activityapi.core.*;
import io.nosqlbench.engine.api.activityapi.output.OutputDispenser;

@ -12,7 +12,6 @@ import io.nosqlbench.engine.api.activityimpl.input.CoreInputDispenser;
import io.nosqlbench.engine.api.activityimpl.input.AtomicInput;
import io.nosqlbench.engine.api.activityimpl.motor.CoreMotor;
import io.nosqlbench.engine.api.activityimpl.motor.CoreMotorDispenser;
import io.nosqlbench.engine.core.ActivityExecutor;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.junit.Test;

@ -1,4 +1,4 @@
package io.nosqlbench.core;
package io.nosqlbench.engine.core;

import io.nosqlbench.engine.api.activityapi.core.*;
import io.nosqlbench.engine.core.fortesting.BlockingSegmentInput;

@ -1,4 +1,4 @@
package io.nosqlbench.core;
package io.nosqlbench.engine.core;

import io.nosqlbench.engine.api.scripting.ScriptEnvBuffer;
import io.nosqlbench.engine.core.script.Scenario;
@ -0,0 +1,20 @@
package io.nosqlbench.engine.core.experimental;

import org.junit.Test;

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class CompletableTests {

    @Test
    public void testCompletionStages() {
        // exploratory check of CompletableFuture semantics; makes no assertions yet
        CompletableFuture<Object> f = new CompletableFuture<>();
        ExecutorService executorService = Executors.newCachedThreadPool();
        CompletableFuture<Object> objectCompletableFuture = f.completeAsync(() -> "foo", executorService);
        boolean bar = objectCompletableFuture.complete("bar");
    }
}
@ -4,7 +4,7 @@
<parent>
    <artifactId>mvn-defaults</artifactId>
    <groupId>io.nosqlbench</groupId>
    <version>3.12.107-SNAPSHOT</version>
    <version>3.12.119-SNAPSHOT</version>
    <relativePath>../mvn-defaults</relativePath>
</parent>

@ -65,7 +65,7 @@
<dependency>
    <groupId>io.nosqlbench</groupId>
    <artifactId>engine-api</artifactId>
    <version>3.12.107-SNAPSHOT</version>
    <version>3.12.119-SNAPSHOT</version>
</dependency>
</dependencies>

@ -4,7 +4,7 @@
<parent>
    <artifactId>mvn-defaults</artifactId>
    <groupId>io.nosqlbench</groupId>
    <version>3.12.107-SNAPSHOT</version>
    <version>3.12.119-SNAPSHOT</version>
    <relativePath>../mvn-defaults</relativePath>
</parent>

@ -28,7 +28,7 @@
<dependency>
    <groupId>io.nosqlbench</groupId>
    <artifactId>docsys</artifactId>
    <version>3.12.107-SNAPSHOT</version>
    <version>3.12.119-SNAPSHOT</version>
</dependency>

</dependencies>
@ -224,7 +224,7 @@ as any other parameter depending on the assignment operators as explained above.

### alias

The `alias` parameter is, by default, set to the expanded name of WORKLOAD_SCENARIO_STEP, which means that each activity
within the scenario has a distinct and symoblic name. This is important for distinguishing metrics from one another
within the scenario has a distinct and symbolic name. This is important for distinguishing metrics from one another
across workloads, named scenarios, and steps within a named scenario. The above words are interpolated into the alias as
follows:
@ -360,9 +360,21 @@ In detail, the rendering appears as `0.0(A), 0.0(B), 0.0(C), 0.25(A),
0.5(A), 0.5(B), 0.75(A)`, which yields `A B C A A B A` as the op
sequence.

This sequencer is most useful when you want a stable ordering of
operations from a rich mix of statement types, where each operation is
spaced as evenly as possible over time, and where it is not important to
control the cycle-by-cycle sequencing of statements.
This sequencer is most useful when you want a stable ordering of operations from a rich mix of statement types, where
each operation is spaced as evenly as possible over time, and where it is not important to control the cycle-by-cycle
sequencing of statements.

## hdr_digits

- `hdr_digits=3`
- _default_: `4`
- _required_: no
- _dynamic_: no

This parameter determines the number of significant digits used in all HDR histograms for metrics collected from this
activity. The default of 4 allows 4 significant digits, which means *up to* 10000 distinct histogram buckets per named
metric, per histogram interval. This does not mean that there _will be_ 10000 distinct buckets, but it means there could
be if there is significant volume and variety in the measurements.

If you are running a scenario that creates many activities, then you can set `hdr_digits=1` on some of them to save
client resources.
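As a sketch of the override in practice, a single low-priority activity can run with reduced precision while the rest of the scenario keeps the default (the driver and cycle count here are placeholders):

    nb run driver=stdout cycles=1000 hdr_digits=2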
@ -153,6 +153,17 @@ created for this name.

    --session-name <name>

If you want to control the number of significant digits in all of the HDR metrics, including histograms and timers, then
you can do so this way:

    --hdr-digits 4

The default is 4 digits, which creates 10000 equisized histogram buckets for every named metric in every reporting
interval. For longer running tests or for tests which do not require this level of precision in metrics, you can set
this down to 3 or 2. Note that this only sets the global default. Each activity can also override this value with the
hdr_digits parameter.

Enlist engineblock to stand up your metrics infrastructure using a local docker runtime:

    --docker-metrics
@ -4,7 +4,7 @@
<parent>
    <artifactId>mvn-defaults</artifactId>
    <groupId>io.nosqlbench</groupId>
    <version>3.12.107-SNAPSHOT</version>
    <version>3.12.119-SNAPSHOT</version>
    <relativePath>../mvn-defaults</relativePath>
</parent>

@ -22,7 +22,7 @@
<dependency>
    <groupId>io.nosqlbench</groupId>
    <artifactId>engine-api</artifactId>
    <version>3.12.107-SNAPSHOT</version>
    <version>3.12.119-SNAPSHOT</version>
</dependency>

<dependency>
@ -0,0 +1,59 @@
package io.nosqlbench.engine.extensions.http;

import java.io.IOException;
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class HttpPlugin {
    private HttpClient client = HttpClient.newHttpClient();

    public HttpResponse<String> get(String url) throws IOException, InterruptedException {
        HttpRequest.Builder builder = HttpRequest.newBuilder();
        URI uri = URI.create(url);
        HttpRequest request = builder
            .uri(uri)
            .build();

        HttpResponse<String> response = client.send(request,
            HttpResponse.BodyHandlers.ofString());

        return response;
    }

    public HttpResponse<String> post(String url) throws IOException, InterruptedException {
        return post(url, null, null);
    }

    public HttpResponse<String> post(String url, String data, String contentType) throws IOException, InterruptedException {
        HttpRequest.Builder builder = HttpRequest.newBuilder();
        URI uri = URI.create(url);

        HttpRequest request;
        // no content type means a bodyless POST; the original condition
        // (data == null && contentType == null || contentType == null) reduces to this check
        if (contentType == null) {
            request = builder
                .uri(uri)
                .POST(HttpRequest.BodyPublishers.noBody())
                .build();
        } else if (data == null) {
            // content type but no body
            request = builder
                .uri(uri)
                .header("Content-Type", contentType)
                .POST(HttpRequest.BodyPublishers.noBody())
                .build();
        } else {
            // content type and body
            request = builder
                .uri(uri)
                .header("Content-Type", contentType)
                .POST(HttpRequest.BodyPublishers.ofString(data))
                .build();
        }

        HttpResponse<String> response = client.send(request,
            HttpResponse.BodyHandlers.ofString());

        return response;
    }
}
@ -0,0 +1,27 @@
package io.nosqlbench.engine.extensions.http;

import com.codahale.metrics.MetricRegistry;
import io.nosqlbench.engine.api.extensions.ScriptingPluginInfo;
import io.nosqlbench.nb.annotations.Service;
import org.slf4j.Logger;

import javax.script.ScriptContext;

@Service(ScriptingPluginInfo.class)
public class HttpPluginData implements ScriptingPluginInfo<HttpPlugin> {
    @Override
    public String getDescription() {
        return "use http get and post in scripts";
    }

    @Override
    public HttpPlugin getExtensionObject(Logger logger, MetricRegistry metricRegistry, ScriptContext scriptContext) {
        return new HttpPlugin();
    }

    @Override
    public String getBaseVariableName() {
        return "http";
    }
}
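Per `getBaseVariableName()` above, scenario scripts address this plugin as `http`. As a plain-Java sketch of the same calls (the URLs are placeholders, and checked exceptions are elided for brevity):

```java
// hypothetical usage of the plugin outside the scripting runtime
HttpPlugin http = new HttpPlugin();
HttpResponse<String> ok = http.get("http://localhost:8080/health");
System.out.println(ok.statusCode() + " " + ok.body());        // status and body of the GET
http.post("http://localhost:8080/events", "{\"k\":1}", "application/json"); // JSON POST
```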
@ -3,7 +3,7 @@

<groupId>io.nosqlbench</groupId>
<artifactId>mvn-defaults</artifactId>
<version>3.12.107-SNAPSHOT</version>
<version>3.12.119-SNAPSHOT</version>
<packaging>pom</packaging>

<properties>

@ -5,7 +5,7 @@
<parent>
    <artifactId>mvn-defaults</artifactId>
    <groupId>io.nosqlbench</groupId>
    <version>3.12.107-SNAPSHOT</version>
    <version>3.12.119-SNAPSHOT</version>
    <relativePath>../mvn-defaults</relativePath>
</parent>

@ -5,7 +5,7 @@
<parent>
    <artifactId>mvn-defaults</artifactId>
    <groupId>io.nosqlbench</groupId>
    <version>3.12.107-SNAPSHOT</version>
    <version>3.12.119-SNAPSHOT</version>
    <relativePath>../mvn-defaults</relativePath>
</parent>

@ -31,7 +31,7 @@
<dependency>
    <groupId>io.nosqlbench</groupId>
    <artifactId>nb-annotations</artifactId>
    <version>3.12.107-SNAPSHOT</version>
    <version>3.12.119-SNAPSHOT</version>
</dependency>

<dependency>
@ -1,3 +0,0 @@
FROM openjdk:13-alpine
COPY target/ target
ENTRYPOINT ["java","-jar", "/target/nb.jar"]
47  nb/pom.xml
@ -5,7 +5,7 @@
<parent>
    <artifactId>mvn-defaults</artifactId>
    <groupId>io.nosqlbench</groupId>
    <version>3.12.107-SNAPSHOT</version>
    <version>3.12.119-SNAPSHOT</version>
    <relativePath>../mvn-defaults</relativePath>
</parent>

@ -24,31 +24,36 @@
<dependency>
    <groupId>io.nosqlbench</groupId>
    <artifactId>driver-web</artifactId>
    <version>3.12.107-SNAPSHOT</version>
    <version>3.12.119-SNAPSHOT</version>
</dependency>

<dependency>
    <groupId>io.nosqlbench</groupId>
    <artifactId>driver-kafka</artifactId>
    <version>3.12.119-SNAPSHOT</version>
</dependency>
<dependency>
    <groupId>io.nosqlbench</groupId>
    <artifactId>engine-cli</artifactId>
    <version>3.12.107-SNAPSHOT</version>
    <version>3.12.119-SNAPSHOT</version>
</dependency>

<dependency>
    <groupId>io.nosqlbench</groupId>
    <artifactId>engine-docs</artifactId>
    <version>3.12.107-SNAPSHOT</version>
    <version>3.12.119-SNAPSHOT</version>
</dependency>

<dependency>
    <groupId>io.nosqlbench</groupId>
    <artifactId>engine-core</artifactId>
    <version>3.12.107-SNAPSHOT</version>
    <version>3.12.119-SNAPSHOT</version>
</dependency>

<dependency>
    <groupId>io.nosqlbench</groupId>
    <artifactId>engine-extensions</artifactId>
    <version>3.12.107-SNAPSHOT</version>
    <version>3.12.119-SNAPSHOT</version>
</dependency>

<!-- <dependency>-->

@ -60,39 +65,44 @@
<dependency>
    <groupId>io.nosqlbench</groupId>
    <artifactId>driver-stdout</artifactId>
    <version>3.12.107-SNAPSHOT</version>
    <version>3.12.119-SNAPSHOT</version>
</dependency>

<dependency>
    <groupId>io.nosqlbench</groupId>
    <artifactId>driver-diag</artifactId>
    <version>3.12.107-SNAPSHOT</version>
    <version>3.12.119-SNAPSHOT</version>
</dependency>

<dependency>
    <groupId>io.nosqlbench</groupId>
    <artifactId>driver-tcp</artifactId>
    <version>3.12.107-SNAPSHOT</version>
    <version>3.12.119-SNAPSHOT</version>
</dependency>

<dependency>
    <groupId>io.nosqlbench</groupId>
    <artifactId>driver-http</artifactId>
    <version>3.12.107-SNAPSHOT</version>
    <version>3.12.119-SNAPSHOT</version>
</dependency>

<dependency>
    <groupId>io.nosqlbench</groupId>
    <artifactId>driver-cql-shaded</artifactId>
    <version>3.12.107-SNAPSHOT</version>
    <version>3.12.119-SNAPSHOT</version>
</dependency>

<dependency>
    <groupId>io.nosqlbench</groupId>
    <artifactId>driver-cqlverify</artifactId>
    <version>3.12.107-SNAPSHOT</version>
    <version>3.12.119-SNAPSHOT</version>
</dependency>

<dependency>
    <groupId>io.nosqlbench</groupId>
    <artifactId>driver-mongodb</artifactId>
    <version>3.12.119-SNAPSHOT</version>
</dependency>

<!-- <dependency>-->
<!-- <groupId>io.nosqlbench</groupId>-->

@ -240,6 +250,19 @@
        </dependency>
    </dependencies>
</profile>
<profile>
    <id>with-mongodb</id>
    <activation>
        <activeByDefault>true</activeByDefault>
    </activation>
    <dependencies>
        <dependency>
            <groupId>io.nosqlbench</groupId>
            <artifactId>driver-mongodb</artifactId>
            <version>3.12.119-SNAPSHOT</version>
        </dependency>
    </dependencies>
</profile>
<profile>
    <id>build-nb-appimage</id>
    <activation>
@ -4,7 +4,7 @@ description: |

scenarios:
  default:
    readout1: run driver===stdout format===readout cycles=1
    readout1: run driver===stdout format=readout cycles=1

bindings:
  cycle: Identity()
14  pom.xml
@ -7,7 +7,7 @@
<parent>
    <artifactId>mvn-defaults</artifactId>
    <groupId>io.nosqlbench</groupId>
    <version>3.12.107-SNAPSHOT</version>
    <version>3.12.119-SNAPSHOT</version>
    <relativePath>mvn-defaults</relativePath>
</parent>

@ -43,6 +43,8 @@
<module>driver-cql-shaded</module>
<module>driver-cqlverify</module>
<module>driver-web</module>
<module>driver-kafka</module>
<module>driver-mongodb</module>

<!-- VIRTDATA MODULES -->

@ -68,6 +70,16 @@
    <module>driver-cqld4</module>
</modules>
</profile>
<profile>
    <id>with-mongodb</id>
    <activation>
        <activeByDefault>true</activeByDefault>
    </activation>
    <modules>
        <module>driver-mongodb</module>
    </modules>
</profile>

</profiles>

<licenses>
@ -7,7 +7,7 @@
<parent>
    <groupId>io.nosqlbench</groupId>
    <artifactId>mvn-defaults</artifactId>
    <version>3.12.107-SNAPSHOT</version>
    <version>3.12.119-SNAPSHOT</version>
    <relativePath>../mvn-defaults</relativePath>
</parent>

@ -23,14 +23,14 @@

<dependency>
    <groupId>io.nosqlbench</groupId>
    <version>3.12.107-SNAPSHOT</version>
    <version>3.12.119-SNAPSHOT</version>
    <artifactId>nb-api</artifactId>
</dependency>

<dependency>
    <groupId>io.nosqlbench</groupId>
    <artifactId>virtdata-lang</artifactId>
    <version>3.12.107-SNAPSHOT</version>
    <version>3.12.119-SNAPSHOT</version>
</dependency>
@ -215,12 +215,15 @@ public class VirtDataComposer {
        }

        Object[][] combinations = new Object[modulo][];

        for (int row = 0; row < combinations.length; row++) {
            Object[] combination = new Object[allargs.length];
            int number = row;
            for (int pos = 0; pos < combination.length; pos++) {
                int selector = (int) (row / modulos[pos]);
                combination[pos] = allargs[pos][selector];
                int selector = (int) (number / modulos[pos]);
                Object[] allargspos = allargs[pos];
                Object objectatpos = allargspos[selector];
                combination[pos] = objectatpos;
                number %= modulos[pos];
            }
            combinations[row] = combination;
@ -7,7 +7,7 @@
<parent>
    <artifactId>mvn-defaults</artifactId>
    <groupId>io.nosqlbench</groupId>
    <version>3.12.107-SNAPSHOT</version>
    <version>3.12.119-SNAPSHOT</version>
    <relativePath>../mvn-defaults</relativePath>
</parent>

@ -7,7 +7,7 @@
<parent>
    <artifactId>mvn-defaults</artifactId>
    <groupId>io.nosqlbench</groupId>
    <version>3.12.107-SNAPSHOT</version>
    <version>3.12.119-SNAPSHOT</version>
    <relativePath>../mvn-defaults</relativePath>
</parent>

@ -20,7 +20,7 @@
<dependency>
    <groupId>io.nosqlbench</groupId>
    <artifactId>virtdata-api</artifactId>
    <version>3.12.107-SNAPSHOT</version>
    <version>3.12.119-SNAPSHOT</version>
</dependency>

<dependency>
@ -49,12 +49,9 @@ import java.util.function.LongFunction;
@ThreadSafeMapper
public class CSVFrequencySampler implements LongFunction<String> {

    private final String filename;
    private final String columnName;

    private final String[] lines;
    private final AliasSamplerDoubleInt sampler;
    private Hash hash;
    private final Hash hash;

    /**
     * Create a sampler of strings from the given CSV file. The CSV file must have plain CSV headers

@ -64,8 +61,7 @@ public class CSVFrequencySampler implements LongFunction<String> {
     */
    @Example({"CSVFrequencySampler('values.csv','modelno')","Read values.csv, count the frequency of values in 'modelno' column, and sample from this column proportionally"})
    public CSVFrequencySampler(String filename, String columnName) {
        this.filename = filename;
        this.columnName = columnName;
        String filename1 = filename;

        this.hash=new Hash();

@ -86,7 +82,7 @@ public class CSVFrequencySampler implements LongFunction<String> {
        }
        int i = 0;
        for (String value : values) {
            frequencies.add(new EvProbD(i++,Double.valueOf(freq.getCount(value))));
            frequencies.add(new EvProbD(i++, (double) freq.getCount(value)));
        }
        sampler = new AliasSamplerDoubleInt(frequencies);
        lines = values.toArray(new String[0]);
@ -4,7 +4,7 @@
<parent>
    <artifactId>mvn-defaults</artifactId>
    <groupId>io.nosqlbench</groupId>
    <version>3.12.107-SNAPSHOT</version>
    <version>3.12.119-SNAPSHOT</version>
    <relativePath>../mvn-defaults</relativePath>
</parent>

@ -22,13 +22,13 @@
<dependency>
    <groupId>io.nosqlbench</groupId>
    <artifactId>virtdata-api</artifactId>
    <version>3.12.107-SNAPSHOT</version>
    <version>3.12.119-SNAPSHOT</version>
</dependency>

<dependency>
    <groupId>io.nosqlbench</groupId>
    <artifactId>virtdata-lib-basics</artifactId>
    <version>3.12.107-SNAPSHOT</version>
    <version>3.12.119-SNAPSHOT</version>
</dependency>

<dependency>

@ -7,7 +7,7 @@
<parent>
    <artifactId>mvn-defaults</artifactId>
    <groupId>io.nosqlbench</groupId>
    <version>3.12.107-SNAPSHOT</version>
    <version>3.12.119-SNAPSHOT</version>
    <relativePath>../mvn-defaults</relativePath>
</parent>

@ -20,13 +20,13 @@
<dependency>
    <groupId>io.nosqlbench</groupId>
    <artifactId>virtdata-api</artifactId>
    <version>3.12.107-SNAPSHOT</version>
    <version>3.12.119-SNAPSHOT</version>
</dependency>

<dependency>
    <groupId>io.nosqlbench</groupId>
    <artifactId>virtdata-lib-basics</artifactId>
    <version>3.12.107-SNAPSHOT</version>
    <version>3.12.119-SNAPSHOT</version>
</dependency>

<dependency>

@ -4,7 +4,7 @@
<parent>
    <artifactId>mvn-defaults</artifactId>
    <groupId>io.nosqlbench</groupId>
    <version>3.12.107-SNAPSHOT</version>
    <version>3.12.119-SNAPSHOT</version>
    <relativePath>../mvn-defaults</relativePath>
</parent>

@ -24,7 +24,7 @@
<dependency>
    <groupId>io.nosqlbench</groupId>
    <artifactId>virtdata-lib-basics</artifactId>
    <version>3.12.107-SNAPSHOT</version>
    <version>3.12.119-SNAPSHOT</version>
</dependency>
</dependencies>

@ -7,7 +7,7 @@
<parent>
    <artifactId>mvn-defaults</artifactId>
    <groupId>io.nosqlbench</groupId>
    <version>3.12.107-SNAPSHOT</version>
    <version>3.12.119-SNAPSHOT</version>
    <relativePath>../mvn-defaults</relativePath>
</parent>

@ -18,7 +18,7 @@
<dependency>
    <groupId>io.nosqlbench</groupId>
    <artifactId>virtdata-api</artifactId>
    <version>3.12.107-SNAPSHOT</version>
    <version>3.12.119-SNAPSHOT</version>
</dependency>

<dependency>

@ -4,7 +4,7 @@
<parent>
    <artifactId>mvn-defaults</artifactId>
    <groupId>io.nosqlbench</groupId>
    <version>3.12.107-SNAPSHOT</version>
    <version>3.12.119-SNAPSHOT</version>
    <relativePath>../mvn-defaults</relativePath>
</parent>

@ -17,32 +17,32 @@
<dependency>
    <groupId>io.nosqlbench</groupId>
    <artifactId>virtdata-realdata</artifactId>
    <version>3.12.107-SNAPSHOT</version>
    <version>3.12.119-SNAPSHOT</version>
</dependency>

<dependency>
    <groupId>io.nosqlbench</groupId>
    <artifactId>virtdata-lib-realer</artifactId>
    <version>3.12.107-SNAPSHOT</version>
    <version>3.12.119-SNAPSHOT</version>
</dependency>
<dependency>
    <groupId>io.nosqlbench</groupId>
    <artifactId>virtdata-api</artifactId>
    <version>3.12.107-SNAPSHOT</version>
    <version>3.12.119-SNAPSHOT</version>
</dependency>
<dependency>
    <groupId>io.nosqlbench</groupId>
    <artifactId>virtdata-lib-random</artifactId>
    <version>3.12.107-SNAPSHOT</version>
    <version>3.12.119-SNAPSHOT</version>
</dependency>
<dependency>
    <groupId>io.nosqlbench</groupId>
    <version>3.12.107-SNAPSHOT</version>
    <version>3.12.119-SNAPSHOT</version>
    <artifactId>virtdata-lib-basics</artifactId>
</dependency>
<dependency>
    <groupId>io.nosqlbench</groupId>
    <version>3.12.107-SNAPSHOT</version>
    <version>3.12.119-SNAPSHOT</version>
    <artifactId>virtdata-lib-curves4</artifactId>
</dependency>

@ -50,7 +50,7 @@
<dependency>
    <groupId>io.nosqlbench</groupId>
    <artifactId>docsys</artifactId>
    <version>3.12.107-SNAPSHOT</version>
    <version>3.12.119-SNAPSHOT</version>
</dependency>

<dependency>
@ -116,7 +116,8 @@ public class VirtDataGenDocsApp implements Runnable {
                    Gson gson = new GsonBuilder().setPrettyPrinting().create();
                    writer.append(gson.toJson(docsForFuncName));
                } else if (format.equals(FORMAT_MARKDOWN)) {
                    writer.append(docsForFuncName.asMarkdown());
                    String markdown = docsForFuncName.asMarkdown();
                    writer.append(markdown);
                }
            }
        }

@ -165,10 +166,12 @@ public class VirtDataGenDocsApp implements Runnable {
        FDoc docsinfo = new FDoc();
        List<DocFuncData> allDocs = VirtDataDocs.getAllDocs();
        for (DocFuncData docFuncData : allDocs) {
            FDocFunc FDocFunc = new FDocFunc(docFuncData);
            for (Category categoryName : FDocFunc.getCategories()) {
            FDocFunc fDocFunc = new FDocFunc(docFuncData);
            Set<Category> categories =
                fDocFunc.getCategories().size()==0 ? Set.of(Category.general) : fDocFunc.getCategories();
            for (Category categoryName : categories) {
                FDocCat fDocCat = docsinfo.addCategory(categoryName.toString());
                fDocCat.addFunctionDoc(FDocFunc);
                fDocCat.addFunctionDoc(fDocFunc);
            }
        }
        return docsinfo;
@ -30,6 +30,6 @@ public class FDocCat implements Iterable<FDocFuncs> {
    public Iterator<FDocFuncs> iterator() {
        ArrayList<FDocFuncs> fdocs = new ArrayList<>(docsByFuncName.values());
        fdocs.sort(Comparator.comparing(FDocFuncs::getFunctionName));
        return docsByFuncName.values().iterator();
        return fdocs.iterator();
    }
}
@ -71,4 +71,12 @@ public class FDocFuncs implements Iterable<FDocFunc> {
            .replaceAll("java.net.","")
            .replaceAll("java.io.","");
    }

    @Override
    public String toString() {
        return "FDocFuncs{" +
            "functionsByPackage=" + functionsByPackage +
            ", functionName='" + functionName + '\'' +
            '}';
    }
}
@ -20,7 +20,7 @@ public class IntegratedAliasMethodTests {
    public void testCSVFrequencySampler() {
        CSVFrequencySampler names = new CSVFrequencySampler("data/countries", "COUNTRY_CODE");
        String n = names.apply(23);
        assertThat(n).isEqualTo("CZ");
        assertThat(n).isEqualTo("TK");
    }

    @Test

@ -31,6 +31,6 @@ public class IntegratedAliasMethodTests {
            ','
        );
        String n = names.apply(23);
        assertThat(n).isEqualTo("CZ");
        assertThat(n).isEqualTo("TK");
    }
}