diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml deleted file mode 100644 index bc63bdc68..000000000 --- a/.github/workflows/docker.yml +++ /dev/null @@ -1,23 +0,0 @@ -name: Docker Release - -on: - push: - tags: - - 'nosqlbench-*' - -jobs: - build: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@master - - name: Build nosqlbench docker - run: cd nb && docker build -t nosqlbench -f ./Dockerfile-build ./ - - name: Publish to Registry - uses: elgohr/Publish-Docker-Github-Action@master - with: - name: nosqlbench/nosqlbench - username: ${{ secrets.DOCKER_USERNAME }} - password: ${{ secrets.DOCKER_PASSWORD }} - tag_names: true - dockerfile: Dockerfile - workdir: nb diff --git a/.github/workflows/dockerhub.yml b/.github/workflows/dockerhub.yml deleted file mode 100644 index 281ca0576..000000000 --- a/.github/workflows/dockerhub.yml +++ /dev/null @@ -1,34 +0,0 @@ -# This is a basic workflow to help you get started with Actions - -name: dockerhub - -on: - push: - # Sequence of patterns matched against refs/tags - tags: - - 'nosqlbench-*' # Push events to matching nosqlbench-[version] - -# A workflow run is made up of one or more jobs that can run sequentially or in parallel -jobs: - # This workflow contains a single job called "build" - build: - # The type of runner that the job will run on - runs-on: ubuntu-latest - - # Steps represent a sequence of tasks that will be executed as part of the job - steps: - # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it - - uses: actions/checkout@v2 - - name: Login to DockerHub Registry - run: echo ${{ secrets.DOCKERHUB_PASSWORD }} | docker login -u ${{ secrets.DOCKERHUB_USERNAME }} --password-stdin - - name: Get the version - id: vars - run: echo ::set-output name=tag::$(echo ${GITHUB_REF:10}) - - name: Build the tagged Docker image - run: docker build ./nb/ --file Dockerfile --tag nosqlbench/nosqlbench:${{steps.vars.outputs.tag}} - - name: Push the tagged Docker image - 
run: docker push nosqlbench/nosqlbench:${{steps.vars.outputs.tag}} - - name: Build the latest Docker image - run: docker build ./nb/ --file Dockerfile --tag nosqlbench/nosqlbench:latest - - name: Push the latest Docker image - run: docker push nosqlbench/nosqlbench:latest diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 2da42c5a4..7b0934d3b 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -9,8 +9,12 @@ jobs: release: runs-on: ubuntu-18.04 steps: - - uses: actions/checkout@v2 - - uses: actions/setup-java@v1 + + - name: checkout repo + uses: actions/checkout@v2 + + - name: setup java + uses: actions/setup-java@v1 with: java-version: '14' java-package: jdk @@ -21,7 +25,7 @@ jobs: env: GIT_RELEASE_BOT_NAME: "nb-droid" - - name: capture tty + - name: capture tty for gpg run: | echo "::set-env name=TTY::"$(tty) echo "::set-env name=GPG_TTY::"$(tty) @@ -37,6 +41,7 @@ jobs: - name: set git username run: git config --global user.email "${{ secrets.NBDROID_EMAIL }}" + - name: set git email run: git config --global user.name "${{ secrets.NBDROID_NAME }}" @@ -69,7 +74,7 @@ jobs: echo "::set-env name=RELEASE_VERSION::${RELEASE_VERSION}" echo "::set-env name=RELEASE_TAGNAME::${RELEASE_TAGNAME}" - - name: Prepare Summary + - name: prepare release summary id: prepare_summary run: | summary=$(scripts/release-notes.sh) @@ -111,25 +116,37 @@ jobs: MAVEN_REPO_SERVER_USERNAME: ${{ secrets.MVN_REPO_PRIVATE_REPO_USER }} MAVEN_REPO_SERVER_PASSWORD: ${{ secrets.MVN_REPO_PRIVATE_REPO_PASSWORD }} - - - name: upload artifacts + - name: bundle artifacts run: | pwd ls -l mkdir staging && cp nb/target/nb.jar nb/target/nb staging - - uses: actions/upload-artifact@v1 + + - name: upload artifacts + uses: actions/upload-artifact@v1 with: name: binaries path: staging - - name: upload guidebook + - name: docker push + uses: docker/build-push-action@v1 + with: + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ 
secrets.DOCKER_PASSWORD }} + repository: nosqlbench/nosqlbench + tags: latest, ${{ env.RELEASE_VERSION }} + tag_with_ref: false + + - name: bundle guidebook run: mkdir guidebook && cp -R nb/target/guidebook guidebook - - uses: actions/upload-artifact@v1 + + - name: upload guidebook + uses: actions/upload-artifact@v1 with: name: guidebook path: guidebook - - name: Create Release + - name: create release id: create_release uses: actions/create-release@v1 env: @@ -140,7 +157,8 @@ jobs: draft: false prerelease: false body: ${{ steps.prepare_summary.outputs.release_summary }} - - name: Upload nb.jar + + - name: upload nb.jar id: upload-nb-jar uses: actions/upload-release-asset@v1 env: @@ -150,7 +168,8 @@ jobs: asset_path: nb/target/nb.jar asset_name: nb.jar asset_content_type: application/octet-stream - - name: Upload nb + + - name: upload nb binary id: upload-nb-binary uses: actions/upload-release-asset@v1 env: @@ -161,43 +180,25 @@ jobs: asset_name: nb asset_content_type: application/octet-stream -# - name: find latest release -# run: | -# LATEST_GH_RELEASE_ID=$(curl --silent "https://api.github.com/repos/nosqlbench/nosqlbench/releases/latest" | jq -r .id) -# echo "::set-env name=LATEST_GH_RELEASE_ID::${LATEST_GH_RELEASE_ID}" -# - name: upload jar -# uses: actions/upload-release-asset@v1 -# env: -# GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} -# with: -# asset_path: nb/target/nb.jar -# asset_name: nb.jar -# asset_content_type: application/octet-stream -# upload_url: https://uploads.github.com/repos/nosqlbench/nosqlbench/releases/${{ env.LATEST_GH_RELEASE_ID }}/assets{?name,label} -# - name: upload binary -# uses: actions/upload-release-asset@v1 -# env: -# GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} -# with: -# asset_path: nb/target/nb -# asset_name: nb -# asset_content_type: application/octet-stream -# upload_url: https://uploads.github.com/repos/nosqlbench/nosqlbench/releases/${{ env.LATEST_GH_RELEASE_ID }}/assets{?name,label} - docs: needs: release runs-on: ubuntu-18.04 
steps: + - name: set git username run: git config --global user.email "${{ secrets.NBDROID_EMAIL }}" + - name: set git email run: git config --global user.name "${{ secrets.NBDROID_NAME }}" + - name: download guidebook uses: actions/download-artifact@v1 with: name: guidebook path: guidebook + - run: ls -la + - name: clone nosqlbench-docs env: NBDROID_NAME: ${{ secrets.NBDROID_NAME }} @@ -209,6 +210,7 @@ jobs: find . git remote set-url origin https://${{secrets.NBDROID_NAME}}:${{secrets.NBDROID_TOKEN}}@github.com/nosqlbench/nosqlbench-docs.git git remote -v + - name: push changes env: NBDROID_NAME: ${{ secrets.NBDROID_NAME }} @@ -229,4 +231,3 @@ jobs: - diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 000000000..10b1df2c4 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,3 @@ +FROM openjdk:14-alpine +COPY nb/target/nb.jar nb.jar +ENTRYPOINT ["java","-jar", "nb.jar"] diff --git a/RELEASENOTES.md b/RELEASENOTES.md index e4dbece1e..21e1b409c 100644 --- a/RELEASENOTES.md +++ b/RELEASENOTES.md @@ -1,23 +1 @@ -7b61ee3a (HEAD -> master) Don't swallow exception in VirtdataComposer -2d4bf8d0 DateRangeFunc allows flexible signatures -8cad4414 improve debugger view of virtdata AST -2de8df4e incremental cql-d4 work -7fb0eb83 make cql-d4 optional via profile -be160856 organize virtdata entry points -4f2b2929 remove extraneous build file -6e74b5ab virtdata composer considers all arg type combinations -526dc5de longflow example -52501f40 (HEAD -> master) support graal-js in nashorn compat mode -9d0403a5 polyglot mode now does full type introspection -ae8506ca incremental work on cql-d4 -302c3ca4 higher order functions now consider all possible matches without explicity input and output types -5f63092e misc AST cleanups -087c0b80 (origin/master, origin/HEAD) release commit -2d4bf8d0 DateRangeFunc allows flexible signatures -8cad4414 improve debugger view of virtdata AST -2de8df4e incremental cql-d4 work -7fb0eb83 make cql-d4 optional via profile -be160856 organize virtdata 
entry points -4f2b2929 remove extraneous build file -6e74b5ab virtdata composer considers all arg type combinations -526dc5de longflow example +2a1284c3 (HEAD -> master) sync up mongo version and enable diff --git a/devdocs/bundled_docs.md b/devdocs/docstructure/bundled_docs.md similarity index 100% rename from devdocs/bundled_docs.md rename to devdocs/docstructure/bundled_docs.md diff --git a/devdocs/docstructure/docsketch.md b/devdocs/docstructure/docsketch.md new file mode 100644 index 000000000..24af27e0b --- /dev/null +++ b/devdocs/docstructure/docsketch.md @@ -0,0 +1,36 @@ +# Doc System + +This is a consolidation of all the doc system work thus far. This draft is meant to outline the basic features of the +doc system at a high level, but with suitable detail for an initial refactoring. In general this builds on existing work +in the doc system but with some adaptations for current needs, across CLI, apps, and reference material. + +## Content Organization + +All content loaded from any source is organized internally into a tree of sections by: + +* Front Matter Topics +* Header Level + +The source path of content does not matter. However, each unit of source material is considered its own section, with +zero or more additional subsections. + +A root section is the container of all sections which are not homed under another section. + +## Headings + +In some cases, it is appropriate to consolidate individual docs into larger views. In order to facilitate this, all +sections within markdown structure are enumerated according to + +- The front matter in the content source, specifically the topics assigned +- The heading structure within the doc + +Thus, when the doc content is processed into the cohesive view needed by a user, all sections of all provided content +are cross-referenced and organized into sections. + +The location of a document within the source filesystem or archive is not important. 
Topics + +## Content Naming + + + +## Content Searching diff --git a/devdocs/docsys.md b/devdocs/docstructure/docsys.md similarity index 100% rename from devdocs/docsys.md rename to devdocs/docstructure/docsys.md diff --git a/docsys/pom.xml b/docsys/pom.xml index a86d01555..7f9fb389f 100644 --- a/docsys/pom.xml +++ b/docsys/pom.xml @@ -9,7 +9,7 @@ mvn-defaults io.nosqlbench - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT ../mvn-defaults @@ -18,7 +18,7 @@ io.nosqlbench nb-api - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT @@ -112,7 +112,7 @@ io.nosqlbench virtdata-api - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT diff --git a/driver-cql-shaded/pom.xml b/driver-cql-shaded/pom.xml index 43b488f51..f56c4e59e 100644 --- a/driver-cql-shaded/pom.xml +++ b/driver-cql-shaded/pom.xml @@ -4,7 +4,7 @@ io.nosqlbench mvn-defaults - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT ../mvn-defaults @@ -23,7 +23,7 @@ io.nosqlbench engine-api - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT diff --git a/driver-cql-shaded/src/main/java/io/nosqlbench/activitytype/cql/core/CqlActivity.java b/driver-cql-shaded/src/main/java/io/nosqlbench/activitytype/cql/core/CqlActivity.java index 3ad70e671..4099f20d4 100644 --- a/driver-cql-shaded/src/main/java/io/nosqlbench/activitytype/cql/core/CqlActivity.java +++ b/driver-cql-shaded/src/main/java/io/nosqlbench/activitytype/cql/core/CqlActivity.java @@ -136,7 +136,7 @@ public class CqlActivity extends SimpleActivity implements Activity, ActivityDef private void initSequencer() { Session session = getSession(); - Map fconfig = Map.of("cluster",session.getCluster()); + Map fconfig = Map.of("session",session); SequencerType sequencerType = SequencerType.valueOf( getParams().getOptionalString("seq").orElse("bucket") diff --git a/driver-cql-shaded/src/main/resources/cql.md b/driver-cql-shaded/src/main/resources/cql.md index 8aeaf78d0..9b0c5b4b2 100644 --- a/driver-cql-shaded/src/main/resources/cql.md +++ b/driver-cql-shaded/src/main/resources/cql.md @@ -195,10 +195,8 @@ activity types. 
The above traces every 1000th cycle to stdout. If the trace log is not specified, then 'tracelog' is assumed. If the filename is specified as stdout, then traces are dumped to stdout. -- **clusterid** - names the configuration to be used for this activity. Within - a given scenario, any activities that use the same name for clusterid will - share a session and cluster. - default: 'default' +- **sessionid** - names the configuration to be used for this activity. Within a given scenario, any activities that use + the same name for clusterid will share a session and cluster. default: 'default' - **drivermetrics** - enable reporting of driver metrics. default: false - **driverprefix** - set the metrics name that will prefix all CQL driver metrics. diff --git a/driver-cqld4/src/main/java/com/datastax/driver/core/M3PTokenFilter.java b/driver-cqld4/src/main/java/com/datastax/driver/core/M3PTokenFilter.java index fd942c546..663d898cc 100644 --- a/driver-cqld4/src/main/java/com/datastax/driver/core/M3PTokenFilter.java +++ b/driver-cqld4/src/main/java/com/datastax/driver/core/M3PTokenFilter.java @@ -1,5 +1,16 @@ package com.datastax.driver.core; +import com.datastax.oss.driver.api.core.ProtocolVersion; +import com.datastax.oss.driver.api.core.cql.Statement; +import com.datastax.oss.driver.api.core.metadata.Metadata; +import com.datastax.oss.driver.api.core.metadata.TokenMap; +import com.datastax.oss.driver.api.core.metadata.token.Token; +import com.datastax.oss.driver.api.core.metadata.token.TokenRange; +import com.datastax.oss.driver.api.core.session.Session; +import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; +import com.datastax.oss.driver.internal.core.metadata.token.Murmur3Token; +import org.jetbrains.annotations.NotNull; + import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.List; @@ -7,40 +18,32 @@ import java.util.OptionalLong; import java.util.Set; public class M3PTokenFilter { - private final TokenRange[] ranges; - 
private final ProtocolVersion protocolVersion; - private final CodecRegistry codecRegistry; - private final Metadata clusterMetadata; - private final Token.Factory factory; - public M3PTokenFilter(Set ranges, Cluster cluster) { - protocolVersion = cluster.getConfiguration().getProtocolOptions().getProtocolVersion(); - codecRegistry = cluster.getConfiguration().getCodecRegistry(); - clusterMetadata = cluster.getMetadata(); - factory = Token.getFactory(clusterMetadata.partitioner); - List rangeList = new ArrayList<>(); + private final TokenRange[] ranges; + + public M3PTokenFilter(Set ranges, Session session) { + TokenMap tokenMap = session.getMetadata().getTokenMap().orElseThrow(); + + List rangelist = new ArrayList<>(); + for (TokenRange range : ranges) { - if (!range.getStart().getType().equals(DataType.bigint())) { - throw new RuntimeException("This filter only works with bigint valued token types"); - } - rangeList.add(range); + rangelist.add(range); } - this.ranges=rangeList.toArray(new TokenRange[0]); + this.ranges = rangelist.toArray(new TokenRange[0]); if (this.ranges.length<1) { throw new RuntimeException("There were no tokens found. 
Please check your keyspace and cluster settings."); } } - public OptionalLong matches(Statement statement) { - ByteBuffer routingKey = statement.getRoutingKey(protocolVersion, codecRegistry); - Token token = factory.hash(routingKey); + public boolean matches(Statement statement) { + Token token = statement.getRoutingToken(); for (TokenRange range : ranges) { if (range.contains(token)) { - return OptionalLong.of((long)token.getValue()); + return true; } } - return OptionalLong.empty(); + return false; } diff --git a/driver-cqld4/src/main/java/com/datastax/driver/core/TokenRangeStmtFilter.java b/driver-cqld4/src/main/java/com/datastax/driver/core/TokenRangeStmtFilter.java index 9b1a0b234..ad7d46689 100644 --- a/driver-cqld4/src/main/java/com/datastax/driver/core/TokenRangeStmtFilter.java +++ b/driver-cqld4/src/main/java/com/datastax/driver/core/TokenRangeStmtFilter.java @@ -1,5 +1,15 @@ package com.datastax.driver.core; +import com.datastax.oss.driver.api.core.ProtocolVersion; +import com.datastax.oss.driver.api.core.cql.Statement; +import com.datastax.oss.driver.api.core.metadata.Metadata; +import com.datastax.oss.driver.api.core.metadata.token.Token; +import com.datastax.oss.driver.api.core.metadata.token.TokenRange; +import com.datastax.oss.driver.api.core.session.Session; +import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry; +import com.datastax.oss.driver.internal.core.metadata.token.Murmur3Token; +import com.datastax.oss.driver.internal.core.metadata.token.Murmur3TokenFactory; +import com.datastax.oss.driver.internal.core.metadata.token.Murmur3TokenRange; import io.nosqlbench.activitytype.cqld4.api.StatementFilter; import java.nio.ByteBuffer; @@ -13,41 +23,41 @@ public class TokenRangeStmtFilter implements StatementFilter { private final Metadata clusterMetadata; private final ProtocolVersion protocolVersion; private final CodecRegistry codecRegistry; - private final Token.Factory factory; +// private final Token.Factory factory; private 
TokenRange[] ranges; - public TokenRangeStmtFilter(Cluster cluster, String rangesSpec) { - clusterMetadata = cluster.getMetadata(); - protocolVersion = cluster.getConfiguration().getProtocolOptions().getProtocolVersion(); - codecRegistry = cluster.getConfiguration().getCodecRegistry(); - factory = Token.getFactory(clusterMetadata.partitioner); - ranges = parseRanges(factory, rangesSpec); + public TokenRangeStmtFilter(Session session, String rangesSpec) { + clusterMetadata = session.getMetadata(); + protocolVersion = session.getContext().getProtocolVersion(); + codecRegistry = session.getContext().getCodecRegistry(); + ranges = parseRanges(session, rangesSpec); } - private TokenRange[] parseRanges(Token.Factory factory, String rangesStr) { + private TokenRange[] parseRanges(Session session, String rangesStr) { String[] ranges = rangesStr.split(","); List tr = new ArrayList<>(); for (String range : ranges) { String[] interval = range.split(":"); - Token start = factory.fromString(interval[0]); - Token end = factory.fromString(interval[1]); - TokenRange tokenRange = new TokenRange(start, end, factory); + Murmur3TokenFactory m3f = new Murmur3TokenFactory(); + Token start = m3f.parse(interval[0]); + Token end = m3f.parse(interval[1]); + TokenRange tokenRange = m3f.range(start,end); tr.add(tokenRange); } - return tr.toArray(new TokenRange[tr.size()]); + return tr.toArray(new TokenRange[0]); } @Override - public boolean matches(Statement statement) { - ByteBuffer routingKey = statement.getRoutingKey(protocolVersion, codecRegistry); - Token token = factory.hash(routingKey); + public boolean matches(Statement statement) { + Token routingToken = statement.getRoutingToken(); for (TokenRange range : ranges) { - if (range.contains(token)) { + if (range.contains(routingToken)) { return true; } } return false; + } @Override diff --git a/driver-cqld4/src/main/java/com/datastax/driver/core/TokenRangeUtil.java b/driver-cqld4/src/main/java/com/datastax/driver/core/TokenRangeUtil.java 
deleted file mode 100644 index 3f4f23e0e..000000000 --- a/driver-cqld4/src/main/java/com/datastax/driver/core/TokenRangeUtil.java +++ /dev/null @@ -1,71 +0,0 @@ -package com.datastax.driver.core; - -import java.io.BufferedWriter; -import java.io.FileWriter; -import java.io.IOException; -import java.util.Comparator; -import java.util.Set; - -public class TokenRangeUtil { - - private final Metadata clusterMetadata; - private final ProtocolVersion protocolVersion; - private final CodecRegistry codecRegistry; - private final Token.Factory factory; - private final Cluster cluster; - - public TokenRangeUtil(Cluster cluster) { - this.cluster= cluster; - clusterMetadata = cluster.getMetadata(); - protocolVersion = cluster.getConfiguration().getProtocolOptions().getProtocolVersion(); - codecRegistry = cluster.getConfiguration().getCodecRegistry(); - factory = Token.getFactory(clusterMetadata.partitioner); - } - - public Set getTokenRangesFor(String keyspace, String hostaddress) { - Host host=null; - if (hostaddress.matches("\\d+")) { - int hostenum = Integer.parseInt(hostaddress); - host = clusterMetadata.getAllHosts().stream() - .sorted(Comparator.comparing(h -> h.getAddress().toString())) - .skip(hostenum) - .findFirst() - .orElseThrow(); - } else if (!hostaddress.isEmpty()) { - host = clusterMetadata.getAllHosts().stream() - .filter(h -> h.getAddress().toString().replaceAll("/","").equals(hostaddress)) - .findFirst() - .orElseThrow(); - } else { - throw new RuntimeException("You must specify a host enum in order or a host address."); - } - return clusterMetadata.getTokenRanges(keyspace,host); - } - - - public void printRanges(String tokensks) { - Set hosts = clusterMetadata.getAllHosts(); - - for (Host host : hosts) { - String address = host.getAddress().toString().substring(1); - BufferedWriter writer = null; - try { - writer = new BufferedWriter(new FileWriter("ranges-"+address)); - String ranges = getTokenRangesFor(tokensks, address).toString(); - 
writer.write(ranges); - - writer.close(); - } catch (IOException e) { - e.printStackTrace(); - throw new RuntimeException("Can't write token range files"); - } - } - - } - - - public M3PTokenFilter getFilterFor(Set ranges) { - return new M3PTokenFilter(ranges, this.cluster); - } - -} diff --git a/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/api/StatementFilter.java b/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/api/StatementFilter.java index 85e7b70cf..6beacd73c 100644 --- a/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/api/StatementFilter.java +++ b/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/api/StatementFilter.java @@ -3,5 +3,5 @@ package io.nosqlbench.activitytype.cqld4.api; import com.datastax.oss.driver.api.core.cql.Statement; public interface StatementFilter { - boolean matches(Statement statement); + boolean matches(Statement statement); } diff --git a/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/codecsupport/UDTCodecInjector.java b/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/codecsupport/UDTCodecInjector.java index b25aeeb1a..d5acdc7fa 100644 --- a/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/codecsupport/UDTCodecInjector.java +++ b/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/codecsupport/UDTCodecInjector.java @@ -13,11 +13,10 @@ public class UDTCodecInjector { private final static Logger logger = LoggerFactory.getLogger(UDTCodecInjector.class); private List codecProviders = new ArrayList<>(); - private List userTypes = new ArrayList<>(); public void injectUserProvidedCodecs(Session session, boolean allowAcrossKeyspaces) { - CodecRegistry registry = session.getCluster().getConfiguration().getCodecRegistry(); + CodecRegistry registry = session.getContext().getCodecRegistry(); ServiceLoader codecLoader = ServiceLoader.load(UserCodecProvider.class); diff --git 
a/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/config/CQLD4OptionsMapper.java b/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/config/CQLD4OptionsMapper.java new file mode 100644 index 000000000..8756050ff --- /dev/null +++ b/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/config/CQLD4OptionsMapper.java @@ -0,0 +1,92 @@ +package io.nosqlbench.activitytype.cqld4.config; + +import com.datastax.oss.driver.api.core.config.DriverOption; +import com.datastax.oss.driver.api.core.config.OptionsMap; +import com.datastax.oss.driver.api.core.config.TypedDriverOption; +import com.datastax.oss.driver.api.core.data.CqlDuration; + +import java.math.BigDecimal; +import java.math.BigInteger; +import java.net.InetAddress; +import java.net.UnknownHostException; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.time.*; +import java.util.UUID; + +public class CQLD4OptionsMapper { + + public static void apply(OptionsMap optionsMap, String name, String value) { + + for (TypedDriverOption builtin : TypedDriverOption.builtInValues()) { + DriverOption rawOption = builtin.getRawOption(); + String path = rawOption.getPath(); + if (name.equals(path)) { + Class rawType = builtin.getExpectedType().getRawType(); + Object convertedValue = adaptTypeValue(value, rawType, name); + TypedDriverOption option = (TypedDriverOption) builtin; + optionsMap.put(option, convertedValue); + return; + } + } + + throw new RuntimeException("Driver option " + name + " was not found in the available options."); + } + + private static Object adaptTypeValue(String value, Class rawOption, String optionName) { + switch (rawOption.getCanonicalName()) { + case "java.lang.Boolean": + return Boolean.parseBoolean(value); + case "java.lang.Byte": + return Byte.parseByte(value); + case "java.lang.Double": + return Double.parseDouble(value); + case "java.lang.Float": + return Float.parseFloat(value); + case "java.lang.Integer": + return 
Integer.parseInt(value); + case "java.lang.Long": + return Long.parseLong(value); + case "java.lang.Short": + return Short.parseShort(value); + case "java.time.Instant": + return Instant.parse(value); + case "java.time.ZonedDateTime": + return ZonedDateTime.parse(value); + case "java.time.LocalDate": + return LocalDate.parse(value); + case "java.time.LocalTime": + return LocalTime.parse(value); + case "java.nio.ByteBuffer": + return ByteBuffer.wrap(value.getBytes(StandardCharsets.UTF_8)); // What else to do here? + case "java.lang.String": + return value; + case "java.math.BigInteger": + return new BigInteger(value); + case "java.math.BigDecimal": + return new BigDecimal(value); + case "java.util.UUID": + return UUID.fromString(value); + case "java.net.InetAddress": + try { + return InetAddress.getByName(value); + } catch (UnknownHostException e) { + throw new RuntimeException(e); + } + case "com.datastax.oss.driver.api.core.data.CqlDuration": + return CqlDuration.from(value); + case "java.time.Duration:": + return Duration.parse(value); + default: +// These appear to be valid types, but there is no record of them used in driver configuration, +// nor a convenient way to convert them directly from known type and string value without invoking +// connected metadata machinery from an active session. +// case "com.datastax.oss.driver.api.core.data.TupleValue": +// case "com.datastax.oss.driver.api.core.data.UdtValue": + + throw new RuntimeException("The type converter for driver option named " + optionName + " was not " + + "found, or is unimplemented. 
Please file an issue at nosqlbench.io"); + } + } + +} diff --git a/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/core/CQLBindHelper.java b/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/core/CQLBindHelper.java index d77adc148..3b81b8f07 100644 --- a/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/core/CQLBindHelper.java +++ b/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/core/CQLBindHelper.java @@ -45,7 +45,7 @@ public class CQLBindHelper { for (ColumnDefinition def : defs) { ByteBuffer byteBuffer = row.getByteBuffer(def.getName()); - bound.setBytesUnsafe(def.getName(), byteBuffer); + bound=bound.setBytesUnsafe(def.getName(), byteBuffer); } return bound; } diff --git a/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/core/CQLOptions.java b/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/core/CQLOptions.java index e94f6a61a..9143026a8 100644 --- a/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/core/CQLOptions.java +++ b/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/core/CQLOptions.java @@ -1,226 +1,202 @@ package io.nosqlbench.activitytype.cqld4.core; -import com.datastax.oss.driver.api.core.connection.ReconnectionPolicy; -import com.datastax.oss.driver.api.core.context.DriverContext; -import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy; -import com.datastax.oss.driver.api.core.retry.RetryPolicy; -import com.datastax.oss.driver.api.core.session.Session; -import com.datastax.oss.driver.api.core.specex.SpeculativeExecutionPolicy; -import com.datastax.oss.driver.internal.core.connection.ConstantReconnectionPolicy; -import com.datastax.oss.driver.internal.core.connection.ExponentialReconnectionPolicy; -import com.datastax.oss.driver.internal.core.context.NettyOptions; -import com.datastax.oss.driver.internal.core.retry.DefaultRetryPolicy; -import com.datastax.oss.driver.internal.core.specex.ConstantSpeculativeExecutionPolicy; -import 
io.netty.util.HashedWheelTimer; -import io.nosqlbench.nb.api.errors.BasicError; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.net.InetSocketAddress; -import java.util.*; -import java.util.concurrent.ThreadFactory; -import java.util.concurrent.TimeUnit; -import java.util.regex.Matcher; -import java.util.regex.Pattern; -import java.util.stream.Collectors; - public class CQLOptions { - private final static Logger logger = LoggerFactory.getLogger(CQLOptions.class); - - private final static Pattern CORE_AND_MAX_RQ_PATTERN = Pattern.compile("(?\\d+)(:(?\\d+)(:(?\\d+))?)?(,(?\\d+)(:(?\\d+)(:(?\\d+))?)?)?(,?heartbeat_interval_s:(?\\d+))?(,?idle_timeout_s:(?\\d+))?(,?pool_timeout_ms:(?\\d+))?"); - private final static Pattern PERCENTILE_EAGER_PATTERN = Pattern.compile("^p(?[^:]+)(:(?\\d+))?(:(?\\d+)ms)?$"); - private final static Pattern CONSTANT_EAGER_PATTERN = Pattern.compile("^((?\\d++)ms)(:(?\\d+))?$"); - - private static ConstantSpeculativeExecutionPolicy constantPolicy(DriverContext context, int threshold, int executions) { - return new ConstantSpeculativeExecutionPolicy(threshold, executions); - } - - private static SpeculativeExecutionPolicy percentilePolicy(long tracked, double threshold, int executions) { - PerHostPercentileTracker tracker = newTracker(tracked); - return new PercentileSpeculativeExecutionPolicy(tracker, threshold, executions); - } - - private static PerHostPercentileTracker newTracker(long millis) { - return PerHostPercentileTracker.builder(millis).build(); - } - - public static PoolingOptions poolingOptionsFor(String spec) { - Matcher matcher = CORE_AND_MAX_RQ_PATTERN.matcher(spec); - if (matcher.matches()) { - PoolingOptions poolingOptions = new PoolingOptions(); - - Optional.ofNullable(matcher.group("core")).map(Integer::valueOf) - .ifPresent(core -> poolingOptions.setCoreConnectionsPerHost(HostDistance.LOCAL, core)); - Optional.ofNullable(matcher.group("max")).map(Integer::valueOf) - .ifPresent(max -> 
poolingOptions.setMaxConnectionsPerHost(HostDistance.LOCAL, max)); - Optional.ofNullable(matcher.group("rq")).map(Integer::valueOf) - .ifPresent(rq -> poolingOptions.setMaxRequestsPerConnection(HostDistance.LOCAL, rq)); - - Optional.ofNullable(matcher.group("rcore")).map(Integer::valueOf) - .ifPresent(rcore -> poolingOptions.setCoreConnectionsPerHost(HostDistance.REMOTE, rcore)); - Optional.ofNullable(matcher.group("rmax")).map(Integer::valueOf) - .ifPresent(rmax -> poolingOptions.setMaxConnectionsPerHost(HostDistance.REMOTE, rmax)); - Optional.ofNullable(matcher.group("rrq")).map(Integer::valueOf) - .ifPresent(rrq -> poolingOptions.setMaxRequestsPerConnection(HostDistance.REMOTE, rrq)); - - Optional.ofNullable(matcher.group("heartbeatinterval")).map(Integer::valueOf) - .ifPresent(poolingOptions::setHeartbeatIntervalSeconds); - - Optional.ofNullable(matcher.group("idletimeout")).map(Integer::valueOf) - .ifPresent(poolingOptions::setIdleTimeoutSeconds); - - Optional.ofNullable(matcher.group("pooltimeout")).map(Integer::valueOf) - .ifPresent(poolingOptions::setPoolTimeoutMillis); - - return poolingOptions; - } - throw new RuntimeException("No pooling options could be parsed from spec: " + spec); - - } - - public static RetryPolicy retryPolicyFor(String spec, Session session) { - Set retryBehaviors = Arrays.stream(spec.split(",")).map(String::toLowerCase).collect(Collectors.toSet()); - RetryPolicy retryPolicy = new DefaultRetryPolicy(session.getContext(),"default"); - - if (retryBehaviors.contains("default")) { - return retryPolicy; - } // add other mutually-exclusive behaviors here with checks, if we want to extend beyond "default" - - if (retryBehaviors.contains("logging")) { - retryPolicy = new LoggingRetryPolicy(retryPolicy); - } - - return retryPolicy; - } - - public static ReconnectionPolicy reconnectPolicyFor(String spec, Session session) { - if(spec.startsWith("exponential(")){ - String argsString = spec.substring(12); - String[] args = argsString.substring(0, 
argsString.length() - 1).split("[,;]"); - if (args.length != 2){ - throw new BasicError("Invalid reconnectionpolicy, try reconnectionpolicy=exponential(, )"); - } - long baseDelay = Long.parseLong(args[0]); - long maxDelay = Long.parseLong(args[1]); - ExponentialReconnectionPolicy exponentialReconnectionPolicy = new ExponentialReconnectionPolicy(session.getContext()); - }else if(spec.startsWith("constant(")){ - String argsString = spec.substring(9); - long constantDelayMs= Long.parseLong(argsString.substring(0, argsString.length() - 1)); - return new ConstantReconnectionPolicy(constantDelayMs); - } - throw new BasicError("Invalid reconnectionpolicy, try reconnectionpolicy=exponential(, ) or constant()"); - } - - public static SocketOptions socketOptionsFor(String spec) { - String[] assignments = spec.split("[,;]"); - Map values = new HashMap<>(); - for (String assignment : assignments) { - String[] namevalue = assignment.split("[:=]", 2); - String name = namevalue[0]; - String value = namevalue[1]; - values.put(name, value); - } - - SocketOptions options = new SocketOptions(); - Optional.ofNullable(values.get("read_timeout_ms")).map(Integer::parseInt).ifPresent( - options::setReadTimeoutMillis - ); - Optional.ofNullable(values.get("connect_timeout_ms")).map(Integer::parseInt).ifPresent( - options::setConnectTimeoutMillis - ); - Optional.ofNullable(values.get("keep_alive")).map(Boolean::parseBoolean).ifPresent( - options::setKeepAlive - ); - Optional.ofNullable(values.get("reuse_address")).map(Boolean::parseBoolean).ifPresent( - options::setReuseAddress - ); - Optional.ofNullable(values.get("so_linger")).map(Integer::parseInt).ifPresent( - options::setSoLinger - ); - Optional.ofNullable(values.get("tcp_no_delay")).map(Boolean::parseBoolean).ifPresent( - options::setTcpNoDelay - ); - Optional.ofNullable(values.get("receive_buffer_size")).map(Integer::parseInt).ifPresent( - options::setReceiveBufferSize - ); - 
Optional.ofNullable(values.get("send_buffer_size")).map(Integer::parseInt).ifPresent( - options::setSendBufferSize - ); - - return options; - } - - public static SpeculativeExecutionPolicy defaultSpeculativePolicy() { - PerHostPercentileTracker tracker = PerHostPercentileTracker - .builder(15000) - .build(); - PercentileSpeculativeExecutionPolicy defaultSpecPolicy = - new PercentileSpeculativeExecutionPolicy(tracker, 99.0, 5); - return defaultSpecPolicy; - } - - public static SpeculativeExecutionPolicy speculativeFor(String spec) { - Matcher pctileMatcher = PERCENTILE_EAGER_PATTERN.matcher(spec); - Matcher constantMatcher = CONSTANT_EAGER_PATTERN.matcher(spec); - if (pctileMatcher.matches()) { - double pctile = Double.valueOf(pctileMatcher.group("pctile")); - if (pctile > 100.0 || pctile < 0.0) { - throw new RuntimeException("pctile must be between 0.0 and 100.0"); - } - String executionsSpec = pctileMatcher.group("executions"); - String trackedSpec = pctileMatcher.group("tracked"); - int executions = (executionsSpec != null && !executionsSpec.isEmpty()) ? Integer.valueOf(executionsSpec) : 5; - int tracked = (trackedSpec != null && !trackedSpec.isEmpty()) ? Integer.valueOf(trackedSpec) : 15000; - logger.debug("speculative: Creating new percentile tracker policy from spec '" + spec + "'"); - return percentilePolicy(tracked, pctile, executions); - } else if (constantMatcher.matches()) { - int threshold = Integer.valueOf(constantMatcher.group("msThreshold")); - String executionsSpec = constantMatcher.group("executions"); - int executions = (executionsSpec != null && !executionsSpec.isEmpty()) ? 
Integer.valueOf(executionsSpec) : 5; - logger.debug("speculative: Creating new constant policy from spec '" + spec + "'"); - return constantPolicy(threshold, executions); - } else { - throw new RuntimeException("Unable to parse pattern for speculative option: " + spec + ", it must be in " + - "an accepted form, like p99.0:5:15000, or p99.0:5, or 5000ms:5"); - } - - } - - public static LoadBalancingPolicy whitelistFor(String s, LoadBalancingPolicy innerPolicy) { - String[] addrSpecs = s.split(","); - List sockAddrs = Arrays.stream(addrSpecs) - .map(CQLOptions::toSocketAddr) - .collect(Collectors.toList()); - if (innerPolicy == null) { - innerPolicy = new RoundRobinPolicy(); - } - return new WhiteListPolicy(innerPolicy, sockAddrs); - } - - public static NettyOptions withTickDuration(String tick) { - logger.info("Cluster builder using custom tick duration value for HashedWheelTimer: " + tick + " milliseconds"); - int tickDuration = Integer.valueOf(tick); - return new NettyOptions() { - public io.netty.util.Timer timer(ThreadFactory threadFactory) { - return new HashedWheelTimer( - threadFactory, tickDuration, TimeUnit.MILLISECONDS); - } - }; - } - - private static InetSocketAddress toSocketAddr(String addr) { - String[] addrs = addr.split(":", 2); - String inetHost = addrs[0]; - String inetPort = (addrs.length == 2) ? 
addrs[1] : "9042"; - return new InetSocketAddress(inetHost, Integer.valueOf(inetPort)); - } - - public static ProtocolOptions.Compression withCompression(String compspec) { - try { - return ProtocolOptions.Compression.valueOf(compspec); - } catch (IllegalArgumentException iae) { - throw new RuntimeException("Compression option '" + compspec + "' was specified, but only " + - Arrays.toString(ProtocolOptions.Compression.values()) + " are available."); - } - } +// private final static Logger logger = LoggerFactory.getLogger(CQLOptions.class); +// +// private final static Pattern CORE_AND_MAX_RQ_PATTERN = Pattern.compile("(?\\d+)(:(?\\d+)(:(?\\d+))?)?(,(?\\d+)(:(?\\d+)(:(?\\d+))?)?)?(,?heartbeat_interval_s:(?\\d+))?(,?idle_timeout_s:(?\\d+))?(,?pool_timeout_ms:(?\\d+))?"); +// private final static Pattern PERCENTILE_EAGER_PATTERN = Pattern.compile("^p(?[^:]+)(:(?\\d+))?(:(?\\d+)ms)?$"); +// private final static Pattern CONSTANT_EAGER_PATTERN = Pattern.compile("^((?\\d++)ms)(:(?\\d+))?$"); +// +// private static ConstantSpeculativeExecutionPolicy constantPolicy(DriverContext context, int threshold, int executions) { +// return new ConstantSpeculativeExecutionPolicy(threshold, executions); +// } +// +// private static SpeculativeExecutionPolicy percentilePolicy(long tracked, double threshold, int executions) { +// PerHostPercentileTracker tracker = newTracker(tracked); +// return new PercentileSpeculativeExecutionPolicy(tracker, threshold, executions); +// } +// +// private static PerHostPercentileTracker newTracker(long millis) { +// return PerHostPercentileTracker.builder(millis).build(); +// } +// +// public static PoolingOptions poolingOptionsFor(String spec) { +// Matcher matcher = CORE_AND_MAX_RQ_PATTERN.matcher(spec); +// if (matcher.matches()) { +// PoolingOptions poolingOptions = new PoolingOptions(); +// +// Optional.ofNullable(matcher.group("core")).map(Integer::valueOf) +// .ifPresent(core -> poolingOptions.setCoreConnectionsPerHost(HostDistance.LOCAL, 
core)); +// Optional.ofNullable(matcher.group("max")).map(Integer::valueOf) +// .ifPresent(max -> poolingOptions.setMaxConnectionsPerHost(HostDistance.LOCAL, max)); +// Optional.ofNullable(matcher.group("rq")).map(Integer::valueOf) +// .ifPresent(rq -> poolingOptions.setMaxRequestsPerConnection(HostDistance.LOCAL, rq)); +// +// Optional.ofNullable(matcher.group("rcore")).map(Integer::valueOf) +// .ifPresent(rcore -> poolingOptions.setCoreConnectionsPerHost(HostDistance.REMOTE, rcore)); +// Optional.ofNullable(matcher.group("rmax")).map(Integer::valueOf) +// .ifPresent(rmax -> poolingOptions.setMaxConnectionsPerHost(HostDistance.REMOTE, rmax)); +// Optional.ofNullable(matcher.group("rrq")).map(Integer::valueOf) +// .ifPresent(rrq -> poolingOptions.setMaxRequestsPerConnection(HostDistance.REMOTE, rrq)); +// +// Optional.ofNullable(matcher.group("heartbeatinterval")).map(Integer::valueOf) +// .ifPresent(poolingOptions::setHeartbeatIntervalSeconds); +// +// Optional.ofNullable(matcher.group("idletimeout")).map(Integer::valueOf) +// .ifPresent(poolingOptions::setIdleTimeoutSeconds); +// +// Optional.ofNullable(matcher.group("pooltimeout")).map(Integer::valueOf) +// .ifPresent(poolingOptions::setPoolTimeoutMillis); +// +// return poolingOptions; +// } +// throw new RuntimeException("No pooling options could be parsed from spec: " + spec); +// +// } +// +// public static RetryPolicy retryPolicyFor(String spec, Session session) { +// Set retryBehaviors = Arrays.stream(spec.split(",")).map(String::toLowerCase).collect(Collectors.toSet()); +// RetryPolicy retryPolicy = new DefaultRetryPolicy(session.getContext(),"default"); +// +// if (retryBehaviors.contains("default")) { +// return retryPolicy; +// } // add other mutually-exclusive behaviors here with checks, if we want to extend beyond "default" +// +// if (retryBehaviors.contains("logging")) { +// retryPolicy = new LoggingRetryPolicy(retryPolicy); +// } +// +// return retryPolicy; +// } +// +// public static 
ReconnectionPolicy reconnectPolicyFor(String spec, Session session) { +// if(spec.startsWith("exponential(")){ +// String argsString = spec.substring(12); +// String[] args = argsString.substring(0, argsString.length() - 1).split("[,;]"); +// if (args.length != 2){ +// throw new BasicError("Invalid reconnectionpolicy, try reconnectionpolicy=exponential(, )"); +// } +// long baseDelay = Long.parseLong(args[0]); +// long maxDelay = Long.parseLong(args[1]); +// ExponentialReconnectionPolicy exponentialReconnectionPolicy = new ExponentialReconnectionPolicy(session.getContext()); +// }else if(spec.startsWith("constant(")){ +// String argsString = spec.substring(9); +// long constantDelayMs= Long.parseLong(argsString.substring(0, argsString.length() - 1)); +// return new ConstantReconnectionPolicy(constantDelayMs); +// } +// throw new BasicError("Invalid reconnectionpolicy, try reconnectionpolicy=exponential(, ) or constant()"); +// } +// +// public static SocketOptions socketOptionsFor(String spec) { +// String[] assignments = spec.split("[,;]"); +// Map values = new HashMap<>(); +// for (String assignment : assignments) { +// String[] namevalue = assignment.split("[:=]", 2); +// String name = namevalue[0]; +// String value = namevalue[1]; +// values.put(name, value); +// } +// +// SocketOptions options = new SocketOptions(); +// Optional.ofNullable(values.get("read_timeout_ms")).map(Integer::parseInt).ifPresent( +// options::setReadTimeoutMillis +// ); +// Optional.ofNullable(values.get("connect_timeout_ms")).map(Integer::parseInt).ifPresent( +// options::setConnectTimeoutMillis +// ); +// Optional.ofNullable(values.get("keep_alive")).map(Boolean::parseBoolean).ifPresent( +// options::setKeepAlive +// ); +// Optional.ofNullable(values.get("reuse_address")).map(Boolean::parseBoolean).ifPresent( +// options::setReuseAddress +// ); +// Optional.ofNullable(values.get("so_linger")).map(Integer::parseInt).ifPresent( +// options::setSoLinger +// ); +// 
Optional.ofNullable(values.get("tcp_no_delay")).map(Boolean::parseBoolean).ifPresent( +// options::setTcpNoDelay +// ); +// Optional.ofNullable(values.get("receive_buffer_size")).map(Integer::parseInt).ifPresent( +// options::setReceiveBufferSize +// ); +// Optional.ofNullable(values.get("send_buffer_size")).map(Integer::parseInt).ifPresent( +// options::setSendBufferSize +// ); +// +// return options; +// } +// +// public static SpeculativeExecutionPolicy defaultSpeculativePolicy() { +// PerHostPercentileTracker tracker = PerHostPercentileTracker +// .builder(15000) +// .build(); +// PercentileSpeculativeExecutionPolicy defaultSpecPolicy = +// new PercentileSpeculativeExecutionPolicy(tracker, 99.0, 5); +// return defaultSpecPolicy; +// } +// +// public static SpeculativeExecutionPolicy speculativeFor(String spec) { +// Matcher pctileMatcher = PERCENTILE_EAGER_PATTERN.matcher(spec); +// Matcher constantMatcher = CONSTANT_EAGER_PATTERN.matcher(spec); +// if (pctileMatcher.matches()) { +// double pctile = Double.valueOf(pctileMatcher.group("pctile")); +// if (pctile > 100.0 || pctile < 0.0) { +// throw new RuntimeException("pctile must be between 0.0 and 100.0"); +// } +// String executionsSpec = pctileMatcher.group("executions"); +// String trackedSpec = pctileMatcher.group("tracked"); +// int executions = (executionsSpec != null && !executionsSpec.isEmpty()) ? Integer.valueOf(executionsSpec) : 5; +// int tracked = (trackedSpec != null && !trackedSpec.isEmpty()) ? Integer.valueOf(trackedSpec) : 15000; +// logger.debug("speculative: Creating new percentile tracker policy from spec '" + spec + "'"); +// return percentilePolicy(tracked, pctile, executions); +// } else if (constantMatcher.matches()) { +// int threshold = Integer.valueOf(constantMatcher.group("msThreshold")); +// String executionsSpec = constantMatcher.group("executions"); +// int executions = (executionsSpec != null && !executionsSpec.isEmpty()) ? 
Integer.valueOf(executionsSpec) : 5; +// logger.debug("speculative: Creating new constant policy from spec '" + spec + "'"); +// return constantPolicy(threshold, executions); +// } else { +// throw new RuntimeException("Unable to parse pattern for speculative option: " + spec + ", it must be in " + +// "an accepted form, like p99.0:5:15000, or p99.0:5, or 5000ms:5"); +// } +// +// } +// +// public static LoadBalancingPolicy whitelistFor(String s, LoadBalancingPolicy innerPolicy) { +// String[] addrSpecs = s.split(","); +// List sockAddrs = Arrays.stream(addrSpecs) +// .map(CQLOptions::toSocketAddr) +// .collect(Collectors.toList()); +// if (innerPolicy == null) { +// innerPolicy = new RoundRobinPolicy(); +// } +// return new WhiteListPolicy(innerPolicy, sockAddrs); +// } +// +// public static NettyOptions withTickDuration(String tick) { +// logger.info("Cluster builder using custom tick duration value for HashedWheelTimer: " + tick + " milliseconds"); +// int tickDuration = Integer.valueOf(tick); +// return new NettyOptions() { +// public io.netty.util.Timer timer(ThreadFactory threadFactory) { +// return new HashedWheelTimer( +// threadFactory, tickDuration, TimeUnit.MILLISECONDS); +// } +// }; +// } +// +// private static InetSocketAddress toSocketAddr(String addr) { +// String[] addrs = addr.split(":", 2); +// String inetHost = addrs[0]; +// String inetPort = (addrs.length == 2) ? 
addrs[1] : "9042"; +// return new InetSocketAddress(inetHost, Integer.valueOf(inetPort)); +// } +// +// public static ProtocolOptions.Compression withCompression(String compspec) { +// try { +// return ProtocolOptions.Compression.valueOf(compspec); +// } catch (IllegalArgumentException iae) { +// throw new RuntimeException("Compression option '" + compspec + "' was specified, but only " + +// Arrays.toString(ProtocolOptions.Compression.values()) + " are available."); +// } +// } } diff --git a/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/core/CqlAction.java b/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/core/CqlAction.java index 320363678..8fba3e419 100644 --- a/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/core/CqlAction.java +++ b/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/core/CqlAction.java @@ -1,8 +1,9 @@ package io.nosqlbench.activitytype.cqld4.core; import com.codahale.metrics.Timer; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.Statement; +import com.datastax.oss.driver.api.core.config.DefaultDriverOption; +import com.datastax.oss.driver.api.core.cql.*; +import com.datastax.oss.driver.api.core.session.Session; import io.nosqlbench.activitytype.cqld4.api.ResultSetCycleOperator; import io.nosqlbench.activitytype.cqld4.api.RowCycleOperator; import io.nosqlbench.activitytype.cqld4.api.StatementFilter; @@ -22,6 +23,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.List; +import java.util.concurrent.CompletionStage; import java.util.concurrent.TimeUnit; @SuppressWarnings("Duplicates") @@ -81,7 +83,7 @@ public class CqlAction implements SyncAction, MultiPhaseAction, ActivityDefObser totalRowsFetchedForQuery = 0L; Statement statement; - ResultSetFuture resultSetFuture; + CompletionStage resultSetFuture; ReadyCQLStatement readyCQLStatement; int tries = 0; @@ -124,7 +126,7 @@ public class CqlAction implements SyncAction, 
MultiPhaseAction, ActivityDefObser } try (Timer.Context executeTime = cqlActivity.executeTimer.time()) { - resultSetFuture = cqlActivity.getSession().executeAsync(statement); + CompletionStage completion = cqlActivity.getSession().executeAsync(statement); } Timer.Context resultTime = cqlActivity.resultTimer.time(); @@ -149,7 +151,8 @@ public class CqlAction implements SyncAction, MultiPhaseAction, ActivityDefObser Row row = resultSet.one(); ColumnDefinitions defs = row.getColumnDefinitions(); if (retryReplace) { - statement = CQLBindHelper.rebindUnappliedStatement(statement, defs, row); + statement = + new CQLBindHelper(getCqlActivity().getSession()).rebindUnappliedStatement(statement, defs,row); } logger.trace(readyCQLStatement.getQueryString(cycleValue)); @@ -212,7 +215,7 @@ public class CqlAction implements SyncAction, MultiPhaseAction, ActivityDefObser readyCQLStatement.getQueryString(cycleValue), 1, cqlActivity.maxpages, - cqlActivity.getSession().getCluster().getConfiguration().getQueryOptions().getFetchSize() + cqlActivity.getSession().getContext().getConfig().getDefaultProfile().getInt(DefaultDriverOption.REQUEST_PAGE_SIZE) ); } } @@ -302,7 +305,7 @@ public class CqlAction implements SyncAction, MultiPhaseAction, ActivityDefObser pagingReadyStatement.getQueryString(cycleValue), pagesFetched, cqlActivity.maxpages, - cqlActivity.getSession().getCluster().getConfiguration().getQueryOptions().getFetchSize() + cqlActivity.getSession().getContext().getConfig().getDefaultProfile().getInt(DefaultDriverOption.REQUEST_PAGE_SIZE) ); } pagingResultSet = resultSet; diff --git a/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/core/CqlActivity.java b/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/core/CqlActivity.java index c3c2b8cf0..8be641e5d 100644 --- a/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/core/CqlActivity.java +++ b/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/core/CqlActivity.java @@ -4,6 +4,10 @@ import 
com.codahale.metrics.Histogram; import com.codahale.metrics.Meter; import com.codahale.metrics.Timer; import com.datastax.driver.core.*; +import com.datastax.oss.driver.api.core.ConsistencyLevel; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.DefaultConsistencyLevel; +import com.datastax.oss.driver.api.core.cql.*; import com.datastax.oss.driver.api.core.session.Session; import io.nosqlbench.activitytype.cqld4.codecsupport.UDTCodecInjector; import com.datastax.driver.core.TokenRangeStmtFilter; @@ -73,7 +77,7 @@ public class CqlActivity extends SimpleActivity implements Activity, ActivityDef Meter rowsCounter; private HashedCQLErrorHandler errorHandler; private OpSequence opsequence; - private Session session; + private CqlSession session; private int maxTries; private StatementFilter statementFilter; private Boolean showcql; @@ -85,6 +89,7 @@ public class CqlActivity extends SimpleActivity implements Activity, ActivityDef private long maxRetryDelay; private boolean retryReplace; private String pooling; + private String profileName; public CqlActivity(ActivityDef activityDef) { @@ -103,7 +108,8 @@ public class CqlActivity extends SimpleActivity implements Activity, ActivityDef @Override public synchronized void initActivity() { logger.debug("initializing activity: " + this.activityDef.getAlias()); - session = getSession(); + profileName = getParams().getOptionalString("profile").orElse("default"); + session = getSession(profileName); if (getParams().getOptionalBoolean("usercodecs").orElse(false)) { registerCodecs(session); @@ -125,9 +131,9 @@ public class CqlActivity extends SimpleActivity implements Activity, ActivityDef logger.debug("activity fully initialized: " + this.activityDef.getAlias()); } - public synchronized Session getSession() { + public synchronized CqlSession getSession(String profileName) { if (session == null) { - session = CQLSessionCache.get().getSession(this.getActivityDef()); + session = 
CQLSessionCache.get().getSession(this.getActivityDef(), profileName); } return session; } @@ -135,10 +141,10 @@ public class CqlActivity extends SimpleActivity implements Activity, ActivityDef private void initSequencer() { Session session = getSession(); - Map fconfig = Map.of("cluster",session.getCluster()); + Map fconfig = Map.of("session", session); SequencerType sequencerType = SequencerType.valueOf( - getParams().getOptionalString("seq").orElse("bucket") + getParams().getOptionalString("seq").orElse("bucket") ); SequencePlanner planner = new SequencePlanner<>(sequencerType); @@ -162,97 +168,100 @@ public class CqlActivity extends SimpleActivity implements Activity, ActivityDef boolean parametrized = Boolean.valueOf(stmtDef.getParams().getOrDefault("parametrized", "false")); long ratio = Long.valueOf(stmtDef.getParams().getOrDefault("ratio", "1")); - Optional cl = Optional.ofNullable( - stmtDef.getParams().getOrDefault("cl", null)).map(ConsistencyLevel::valueOf); - - Optional serial_cl = Optional.ofNullable( - stmtDef.getParams().getOrDefault("serial_cl", null)).map(ConsistencyLevel::valueOf); - - Optional idempotent = Optional.ofNullable(stmtDef.getParams().getOrDefault("idempotent", null)) - .map(Boolean::valueOf); - StringBuilder psummary = new StringBuilder(); boolean instrument = Optional.ofNullable(stmtDef.getParams() - .get("instrument")).map(Boolean::valueOf) - .orElse(getParams().getOptionalBoolean("instrument").orElse(false)); + .get("instrument")).map(Boolean::valueOf) + .orElse(getParams().getOptionalBoolean("instrument").orElse(false)); - String logresultcsv = stmtDef.getParams().getOrDefault("logresultcsv",""); + String logresultcsv = stmtDef.getParams().getOrDefault("logresultcsv", ""); String logresultcsv_act = getParams().getOptionalString("logresultcsv").orElse(""); if (!logresultcsv_act.isEmpty() && !logresultcsv_act.toLowerCase().equals("true")) { throw new RuntimeException("At the activity level, only logresultcsv=true is allowed, no other 
values."); } logresultcsv = !logresultcsv.isEmpty() ? logresultcsv : logresultcsv_act; - logresultcsv = !logresultcsv.toLowerCase().equals("true") ? logresultcsv : stmtDef.getName()+"--results.csv"; + logresultcsv = !logresultcsv.toLowerCase().equals("true") ? logresultcsv : stmtDef.getName() + "--results.csv"; logger.debug("readying statement[" + (prepared ? "" : "un") + "prepared]:" + parsed.getStmt()); ReadyCQLStatementTemplate template; String stmtForDriver = parsed.getPositionalStatement(s -> "?"); - if (prepared) { - psummary.append(" prepared=>").append(prepared); - PreparedStatement prepare = getSession().prepare(stmtForDriver); - cl.ifPresent((conlvl) -> { - psummary.append(" consistency_level=>").append(conlvl); - prepare.setConsistencyLevel(conlvl); - }); - serial_cl.ifPresent((scl) -> { - psummary.append(" serial_consistency_level=>").append(serial_cl); - prepare.setSerialConsistencyLevel(scl); - }); - idempotent.ifPresent((i) -> { - psummary.append(" idempotent=").append(idempotent); - prepare.setIdempotent(i); - }); - CqlBinderTypes binderType = CqlBinderTypes.valueOf(stmtDef.getParams() - .getOrDefault("binder", CqlBinderTypes.DEFAULT.toString())); - template = new ReadyCQLStatementTemplate(fconfig, binderType, getSession(), prepare, ratio, - parsed.getName()); - } else { - SimpleStatement simpleStatement = new SimpleStatement(stmtForDriver); - cl.ifPresent((conlvl) -> { + SimpleStatementBuilder stmtBuilder = SimpleStatement.builder(stmtForDriver); + psummary.append(" statement=>").append(stmtForDriver); + + Optional.ofNullable(stmtDef.getParams().getOrDefault("cl", null)) + .map(DefaultConsistencyLevel::valueOf) + .map(conlvl -> { psummary.append(" consistency_level=>").append(conlvl); - simpleStatement.setConsistencyLevel(conlvl); - }); - serial_cl.ifPresent((scl) -> { - psummary.append(" serial_consistency_level=>").append(scl); - simpleStatement.setSerialConsistencyLevel(scl); - }); - idempotent.ifPresent((i) -> { - psummary.append(" 
idempotent=>").append(i); - simpleStatement.setIdempotent(i); - }); + return conlvl; + }) + .ifPresent(stmtBuilder::setConsistencyLevel); + + Optional.ofNullable(stmtDef.getParams().getOrDefault("serial_cl", null)) + .map(DefaultConsistencyLevel::valueOf) + .map(sconlvel -> { + psummary.append(" serial_consistency_level=>").append(sconlvel); + return sconlvel; + }) + .ifPresent(stmtBuilder::setSerialConsistencyLevel); + + Optional.ofNullable(stmtDef.getParams().getOrDefault("idempotent", null)) + .map(Boolean::valueOf) + .map(idempotent -> { + psummary.append(" idempotent=").append(idempotent); + return idempotent; + }) + .ifPresent(stmtBuilder::setIdempotence); + + + if (prepared) { + PreparedStatement preparedStatement = getSession().prepare(stmtBuilder.build()); + + CqlBinderTypes binderType = CqlBinderTypes.valueOf(stmtDef.getParams() + .getOrDefault("binder", CqlBinderTypes.DEFAULT.toString())); + + template = new ReadyCQLStatementTemplate( + fconfig, + binderType, + getSession(), + preparedStatement, + ratio, + parsed.getName() + ); + } else { + SimpleStatement simpleStatement = SimpleStatement.newInstance(stmtForDriver); template = new ReadyCQLStatementTemplate(fconfig, getSession(), simpleStatement, ratio, parsed.getName(), parametrized); } + Optional.ofNullable(stmtDef.getParams().getOrDefault("save", null)) - .map(s -> s.split("[,; ]")) - .map(Save::new) - .ifPresent(save_op -> { - psummary.append(" save=>").append(save_op.toString()); - template.addRowCycleOperators(save_op); - }); + .map(s -> s.split("[,; ]")) + .map(Save::new) + .ifPresent(save_op -> { + psummary.append(" save=>").append(save_op.toString()); + template.addRowCycleOperators(save_op); + }); Optional.ofNullable(stmtDef.getParams().getOrDefault("rsoperators", null)) - .map(s -> s.split(",")) - .stream().flatMap(Arrays::stream) - .map(ResultSetCycleOperators::newOperator) - .forEach(rso -> { - psummary.append(" rsop=>").append(rso.toString()); - template.addResultSetOperators(rso); - }); + 
.map(s -> s.split(",")) + .stream().flatMap(Arrays::stream) + .map(ResultSetCycleOperators::newOperator) + .forEach(rso -> { + psummary.append(" rsop=>").append(rso.toString()); + template.addResultSetOperators(rso); + }); Optional.ofNullable(stmtDef.getParams().getOrDefault("rowoperators", null)) - .map(s -> s.split(",")) - .stream().flatMap(Arrays::stream) - .map(RowCycleOperators::newOperator) - .forEach(ro -> { - psummary.append(" rowop=>").append(ro.toString()); - template.addRowCycleOperators(ro); - }); + .map(s -> s.split(",")) + .stream().flatMap(Arrays::stream) + .map(RowCycleOperators::newOperator) + .forEach(ro -> { + psummary.append(" rowop=>").append(ro.toString()); + template.addRowCycleOperators(ro); + }); if (instrument) { logger.info("Adding per-statement success and error and resultset-size timers to statement '" + parsed.getName() + "'"); @@ -262,7 +271,7 @@ public class CqlActivity extends SimpleActivity implements Activity, ActivityDef if (!logresultcsv.isEmpty()) { logger.info("Adding per-statement result CSV logging to statement '" + parsed.getName() + "'"); - template.logResultCsv(this,logresultcsv); + template.logResultCsv(this, logresultcsv); psummary.append(" logresultcsv=>").append(logresultcsv); } @@ -297,9 +306,9 @@ public class CqlActivity extends SimpleActivity implements Activity, ActivityDef case "1": doclist = getVersion1StmtsDoc(interp, yaml_loc); logger.warn("DEPRECATED-FORMAT: Loaded yaml " + yaml_loc + " with compatibility mode. 
" + - "This will be deprecated in a future release."); + "This will be deprecated in a future release."); logger.warn("DEPRECATED-FORMAT: Please refer to " + - "http://docs.engineblock.io/user-guide/standard_yaml/ for more details."); + "http://docs.engineblock.io/user-guide/standard_yaml/ for more details."); break; case "2": doclist = StatementsLoader.load(logger, yaml_loc, interp, "activities"); @@ -307,22 +316,22 @@ public class CqlActivity extends SimpleActivity implements Activity, ActivityDef case "unset": try { logger.debug("You can suffix your yaml filename or url with the " + - "format version, such as :1 or :2. Assuming version 2."); + "format version, such as :1 or :2. Assuming version 2."); doclist = StatementsLoader.load(null, yaml_loc, interp, "activities"); } catch (Exception ignored) { try { doclist = getVersion1StmtsDoc(interp, yaml_loc); logger.warn("DEPRECATED-FORMAT: Loaded yaml " + yaml_loc + - " with compatibility mode. This will be deprecated in a future release."); + " with compatibility mode. This will be deprecated in a future release."); logger.warn("DEPRECATED-FORMAT: Please refer to " + - "http://docs.engineblock.io/user-guide/standard_yaml/ for more details."); + "http://docs.engineblock.io/user-guide/standard_yaml/ for more details."); } catch (Exception compatError) { logger.warn("Tried to load yaml in compatibility mode, " + - "since it failed to load with the standard format, " + - "but found an error:" + compatError); + "since it failed to load with the standard format, " + + "but found an error:" + compatError); logger.warn("The following detailed errors are provided only " + - "for the standard format. To force loading version 1 with detailed logging, add" + - " a version qualifier to your yaml filename or url like ':1'"); + "for the standard format. 
To force loading version 1 with detailed logging, add" + + " a version qualifier to your yaml filename or url like ':1'"); // retrigger the error again, this time with logging enabled. doclist = StatementsLoader.load(logger, yaml_loc, interp, "activities"); } @@ -330,7 +339,7 @@ public class CqlActivity extends SimpleActivity implements Activity, ActivityDef break; default: throw new RuntimeException("Unrecognized yaml format version, expected :1 or :2 " + - "at end of yaml file, but got " + yamlVersion + " instead."); + "at end of yaml file, but got " + yamlVersion + " instead."); } return doclist; @@ -393,10 +402,10 @@ public class CqlActivity extends SimpleActivity implements Activity, ActivityDef @Override public String toString() { return "CQLActivity {" + - "activityDef=" + activityDef + - ", session=" + session + - ", opSequence=" + this.opsequence + - '}'; + "activityDef=" + activityDef + + ", session=" + session + + ", opSequence=" + this.opsequence + + '}'; } @Override @@ -409,10 +418,10 @@ public class CqlActivity extends SimpleActivity implements Activity, ActivityDef ParameterMap params = activityDef.getParams(); Optional fetchSizeOption = params.getOptionalString("fetchsize"); - Cluster cluster = getSession().getCluster(); + if (fetchSizeOption.isPresent()) { int fetchSize = fetchSizeOption.flatMap(Unit::bytesFor).map(Double::intValue).orElseThrow(() -> new RuntimeException( - "Unable to parse fetch size from " + fetchSizeOption.get() + "Unable to parse fetch size from " + fetchSizeOption.get() )); if (fetchSize > 10000000 && fetchSize < 1000000000) { logger.warn("Setting the fetchsize to " + fetchSize + " is unlikely to give good performance."); @@ -420,6 +429,7 @@ public class CqlActivity extends SimpleActivity implements Activity, ActivityDef throw new RuntimeException("Setting the fetch size to " + fetchSize + " is likely to cause instability."); } logger.trace("setting fetchSize to " + fetchSize); + 
cluster.getConfiguration().getQueryOptions().setFetchSize(fetchSize); } @@ -431,8 +441,8 @@ public class CqlActivity extends SimpleActivity implements Activity, ActivityDef this.maxpages = params.getOptionalInteger("maxpages").orElse(1); this.statementFilter = params.getOptionalString("tokens") - .map(s -> new TokenRangeStmtFilter(cluster, s)) - .orElse(null); + .map(s -> new TokenRangeStmtFilter(cluster, s)) + .orElse(null); if (statementFilter != null) { logger.info("filtering statements" + statementFilter); @@ -441,13 +451,13 @@ public class CqlActivity extends SimpleActivity implements Activity, ActivityDef errorHandler = configureErrorHandler(); params.getOptionalString("trace") - .map(SimpleConfig::new) - .map(TraceLogger::new) - .ifPresent( - tl -> { - addResultSetCycleOperator(tl); - addStatementModifier(tl); - }); + .map(SimpleConfig::new) + .map(TraceLogger::new) + .ifPresent( + tl -> { + addResultSetCycleOperator(tl); + addStatementModifier(tl); + }); this.maxTotalOpsInFlight = params.getOptionalLong("async").orElse(1L); @@ -504,8 +514,8 @@ public class CqlActivity extends SimpleActivity implements Activity, ActivityDef HashedCQLErrorHandler newerrorHandler = new HashedCQLErrorHandler(exceptionCountMetrics); String errors = activityDef.getParams() - .getOptionalString("errors") - .orElse("stop,retryable->retry,unverified->stop"); + .getOptionalString("errors") + .orElse("stop,retryable->retry,unverified->stop"); String[] handlerSpecs = errors.split(","); @@ -514,32 +524,32 @@ public class CqlActivity extends SimpleActivity implements Activity, ActivityDef if (keyval.length == 1) { String verb = keyval[0]; newerrorHandler.setDefaultHandler( - new NBCycleErrorHandler( - ErrorResponse.valueOf(verb), - exceptionCountMetrics, - exceptionHistoMetrics, - !getParams().getOptionalLong("async").isPresent() - ) + new NBCycleErrorHandler( + ErrorResponse.valueOf(verb), + exceptionCountMetrics, + exceptionHistoMetrics, + 
!getParams().getOptionalLong("async").isPresent() + ) ); } else { String pattern = keyval[0]; String verb = keyval[1]; if (newerrorHandler.getGroupNames().contains(pattern)) { NBCycleErrorHandler handler = - new NBCycleErrorHandler( - ErrorResponse.valueOf(verb), - exceptionCountMetrics, - exceptionHistoMetrics, - !getParams().getOptionalLong("async").isPresent() - ); + new NBCycleErrorHandler( + ErrorResponse.valueOf(verb), + exceptionCountMetrics, + exceptionHistoMetrics, + !getParams().getOptionalLong("async").isPresent() + ); logger.info("Handling error group '" + pattern + "' with handler:" + handler); newerrorHandler.setHandlerForGroup(pattern, handler); } else { NBCycleErrorHandler handler = new NBCycleErrorHandler( - ErrorResponse.valueOf(keyval[1]), - exceptionCountMetrics, - exceptionHistoMetrics, - !getParams().getOptionalLong("async").isPresent() + ErrorResponse.valueOf(keyval[1]), + exceptionCountMetrics, + exceptionHistoMetrics, + !getParams().getOptionalLong("async").isPresent() ); logger.info("Handling error pattern '" + pattern + "' with handler:" + handler); newerrorHandler.setHandlerForPattern(keyval[0], handler); diff --git a/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/core/CqlActivityType.java b/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/core/CqlActivityType.java index f564c85f0..28d117e52 100644 --- a/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/core/CqlActivityType.java +++ b/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/core/CqlActivityType.java @@ -1,9 +1,9 @@ package io.nosqlbench.activitytype.cqld4.core; -import com.datastax.driver.core.LocalDate; -import com.datastax.driver.core.TupleValue; -import com.datastax.driver.core.UDTValue; +import com.datastax.oss.driver.api.core.data.TupleValue; +import com.datastax.oss.driver.api.core.type.UserDefinedType; +import io.nosqlbench.activitytype.cqld4.codecsupport.UDTJavaType; import 
io.nosqlbench.engine.api.activityapi.core.ActionDispenser; import io.nosqlbench.engine.api.activityapi.core.ActivityType; import io.nosqlbench.engine.api.activityimpl.ActivityDef; @@ -14,6 +14,7 @@ import java.math.BigInteger; import java.net.InetAddress; import java.nio.ByteBuffer; import java.time.Instant; +import java.time.LocalDate; import java.time.LocalTime; import java.util.*; @@ -76,7 +77,7 @@ public class CqlActivityType implements ActivityType { typemap.put("timestamp", Instant.class); typemap.put("tinyint",byte.class); typemap.put("tuple", TupleValue.class); - typemap.put("", UDTValue.class); + typemap.put("", UserDefinedType.class); typemap.put("uuid",UUID.class); typemap.put("timeuuid",UUID.class); typemap.put("varchar",String.class); diff --git a/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/core/CqlAsyncAction.java b/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/core/CqlAsyncAction.java index 00681d3e7..beede72aa 100644 --- a/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/core/CqlAsyncAction.java +++ b/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/core/CqlAsyncAction.java @@ -2,7 +2,6 @@ package io.nosqlbench.activitytype.cqld4.core; import com.codahale.metrics.Timer; import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.driver.core.ResultSetFuture; import io.nosqlbench.activitytype.cqld4.api.ErrorResponse; import io.nosqlbench.activitytype.cqld4.api.ResultSetCycleOperator; import io.nosqlbench.activitytype.cqld4.api.RowCycleOperator; @@ -13,8 +12,6 @@ import io.nosqlbench.activitytype.cqld4.errorhandling.exceptions.CQLCycleWithSta import io.nosqlbench.activitytype.cqld4.errorhandling.exceptions.ChangeUnappliedCycleException; import io.nosqlbench.activitytype.cqld4.errorhandling.exceptions.UnexpectedPagingException; import io.nosqlbench.activitytype.cqld4.statements.core.ReadyCQLStatement; -import com.google.common.util.concurrent.Futures; -import 
com.google.common.util.concurrent.ListenableFuture; import io.nosqlbench.engine.api.activityapi.core.BaseAsyncAction; import io.nosqlbench.engine.api.activityapi.core.ops.fluent.opfacets.FailedOp; import io.nosqlbench.engine.api.activityapi.core.ops.fluent.opfacets.StartedOp; @@ -115,8 +112,8 @@ public class CqlAsyncAction extends BaseAsyncAction { // The execute timer covers only the point at which EB hands the op to the driver to be executed try (Timer.Context executeTime = activity.executeTimer.time()) { - cqlop.future = activity.getSession().executeAsync(cqlop.statement); - Futures.addCallback(cqlop.future, cqlop); + cqlop.completionStage = activity.getSession().executeAsync(cqlop.statement); + Futures.addCallback(cqlop.completionStage, cqlop); } } @@ -234,8 +231,8 @@ public class CqlAsyncAction extends BaseAsyncAction { if (errorStatus.isRetryable() && cqlop.triesAttempted < maxTries) { startedOp.retry(); try (Timer.Context executeTime = activity.executeTimer.time()) { - cqlop.future = activity.getSession().executeAsync(cqlop.statement); - Futures.addCallback(cqlop.future, cqlop); + cqlop.completionStage = activity.getSession().executeAsync(cqlop.statement); + Futures.addCallback(cqlop.completionStage, cqlop); return; } } diff --git a/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/core/CqlOpData.java b/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/core/CqlOpData.java index cc2647db1..7c4372d01 100644 --- a/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/core/CqlOpData.java +++ b/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/core/CqlOpData.java @@ -1,14 +1,17 @@ package io.nosqlbench.activitytype.cqld4.core; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.driver.core.ResultSetFuture; +import com.datastax.oss.driver.api.core.cql.AsyncResultSet; +import com.datastax.oss.driver.api.core.cql.Row; import com.datastax.oss.driver.api.core.cql.Statement; import 
io.nosqlbench.activitytype.cqld4.statements.core.ReadyCQLStatement; -import com.google.common.util.concurrent.FutureCallback; import io.nosqlbench.engine.api.activityapi.core.ops.fluent.opfacets.StartedOp; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CompletionStage; + +public class CqlOpData extends CompletableFuture { -public class CqlOpData implements FutureCallback { final long cycle; + public CompletionStage completionStage; // op state is managed via callbacks, we keep a ref here StartedOp startedOp; @@ -19,8 +22,6 @@ public class CqlOpData implements FutureCallback { ReadyCQLStatement readyCQLStatement; Statement statement; - ResultSetFuture future; - ResultSet resultSet; long totalRowsFetchedForQuery; long totalPagesFetchedForQuery; @@ -28,6 +29,7 @@ public class CqlOpData implements FutureCallback { public Throwable throwable; public long resultAt; private long errorAt; + private Iterable page; public CqlOpData(long cycle, CqlAsyncAction action) { this.cycle = cycle; @@ -35,18 +37,20 @@ public class CqlOpData implements FutureCallback { } @Override - public void onSuccess(ResultSet result) { - this.resultSet = result; - this.resultAt = System.nanoTime(); - action.onSuccess(startedOp); - + public boolean completeExceptionally(Throwable ex) { + this.throwable=ex; + this.errorAt = System.nanoTime(); + action.onFailure(startedOp); + return true; } @Override - public void onFailure(Throwable throwable) { - this.throwable=throwable; - this.errorAt = System.nanoTime(); - action.onFailure(startedOp); + public boolean complete(AsyncResultSet value) { + this.page = value.currentPage(); + this.resultAt = System.nanoTime(); + action.onSuccess(startedOp); + return true; + // ? 
return !value.hasMorePages(); } } diff --git a/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/errorhandling/ExceptionMap.java b/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/errorhandling/ExceptionMap.java index 437457184..edff4a117 100644 --- a/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/errorhandling/ExceptionMap.java +++ b/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/errorhandling/ExceptionMap.java @@ -33,6 +33,8 @@ public class ExceptionMap { + // DriverException subtypes + put(AllNodesFailedException.class, DriverException.class); put(NoNodeAvailableException.class, AllNodesFailedException.class); put(BusyConnectionException.class, DriverException.class); @@ -53,34 +55,22 @@ public class ExceptionMap { put(UnavailableException.class, QueryExecutionException.class); put(QueryValidationException.class, CoordinatorException.class); put(AlreadyExistsException.class, QueryValidationException.class); - put(InvalidConfigurationInQueryException.class, QueryValidationException.class); put(InvalidQueryException.class, QueryValidationException.class); + put(InvalidConfigurationInQueryException.class, QueryValidationException.class); put(SyntaxError.class, QueryValidationException.class); put(UnauthorizedException.class, QueryValidationException.class); put(ServerError.class,CoordinatorException.class); - put(UnfitClientException.class, CoordinatorException.class); - put(DriverExecutionException.class, DriverException.class); - put(DriverTimeoutException.class, DriverException.class); - put(FrameTooLongException.class, DriverException.class); - put(HeartbeatException.class,DriverException.class); - put(InvalidKeyspaceException.class,DriverException.class); - put(RequestThrottlingException.class,DriverException.class); - put(UnsupportedProtocolVersionException.class, DriverException.class); + put(UnfitClientException.class, CoordinatorException.class); + put(DriverExecutionException.class, DriverException.class); + 
put(DriverTimeoutException.class, DriverException.class); + put(FrameTooLongException.class, DriverException.class); + put(HeartbeatException.class,DriverException.class); + put(InvalidKeyspaceException.class,DriverException.class); + put(RequestThrottlingException.class,DriverException.class); + put(UnsupportedProtocolVersionException.class, DriverException.class); - - - - - put(UnpreparedException.class, QueryValidationException.class); - put(InvalidTypeException.class, DriverException.class); - put(FunctionExecutionException.class, QueryValidationException.class); - put(DriverInternalError.class, DriverException.class); + // package org.apache.tinkerpop.gremlin.driver.exception; put(ConnectionException.class, DriverException.class); - put(TransportException.class, ConnectionException.class); - put(OperationTimedOutException.class, ConnectionException.class); - put(PagingStateException.class, DriverException.class); - put(UnresolvedUserTypeException.class, DriverException.class); - put(UnsupportedFeatureException.class, DriverException.class); put(ChangeUnappliedCycleException.class, CqlGenericCycleException.class); put(ResultSetVerificationException.class, CqlGenericCycleException.class); diff --git a/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/errorhandling/NBCycleErrorHandler.java b/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/errorhandling/NBCycleErrorHandler.java index 2dc80b533..7c605252c 100644 --- a/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/errorhandling/NBCycleErrorHandler.java +++ b/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/errorhandling/NBCycleErrorHandler.java @@ -2,7 +2,6 @@ package io.nosqlbench.activitytype.cqld4.errorhandling; import io.nosqlbench.activitytype.cqld4.api.ErrorResponse; import io.nosqlbench.activitytype.cqld4.errorhandling.exceptions.CQLCycleWithStatementException; -import io.nosqlbench.activitytype.cqld4.errorhandling.exceptions.CQLExceptionDetailer; import 
io.nosqlbench.engine.api.activityapi.errorhandling.CycleErrorHandler; import io.nosqlbench.engine.api.metrics.ExceptionCountMetrics; import io.nosqlbench.engine.api.metrics.ExceptionHistoMetrics; diff --git a/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/errorhandling/exceptions/CQLExceptionDetailer.java b/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/errorhandling/exceptions/CQLExceptionDetailer.java deleted file mode 100644 index fe7d24787..000000000 --- a/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/errorhandling/exceptions/CQLExceptionDetailer.java +++ /dev/null @@ -1,25 +0,0 @@ -package io.nosqlbench.activitytype.cqld4.errorhandling.exceptions; - -import com.datastax.driver.core.exceptions.ReadTimeoutException; -import com.datastax.driver.core.exceptions.WriteTimeoutException; - -public class CQLExceptionDetailer { - - public static String messageFor(long cycle, Throwable e) { - - if (e instanceof ReadTimeoutException) { - ReadTimeoutException rte = (ReadTimeoutException) e; - return rte.getMessage() + - ", coordinator: " + rte.getHost() + - ", wasDataRetrieved: " + rte.wasDataRetrieved(); - } - - if (e instanceof WriteTimeoutException) { - WriteTimeoutException wte = (WriteTimeoutException) e; - return wte.getMessage() + - ", coordinator: " + wte.getHost(); - } - - return e.getMessage(); - } -} diff --git a/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/errorhandling/exceptions/CQLResultSetException.java b/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/errorhandling/exceptions/CQLResultSetException.java index e9395ae2b..82472cd30 100644 --- a/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/errorhandling/exceptions/CQLResultSetException.java +++ b/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/errorhandling/exceptions/CQLResultSetException.java @@ -1,8 +1,8 @@ package io.nosqlbench.activitytype.cqld4.errorhandling.exceptions; -import 
com.datastax.driver.core.BoundStatement; +import com.datastax.oss.driver.api.core.cql.BoundStatement; import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.driver.core.SimpleStatement; +import com.datastax.oss.driver.api.core.cql.SimpleStatement; import com.datastax.oss.driver.api.core.cql.Statement; public abstract class CQLResultSetException extends CqlGenericCycleException { diff --git a/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/errorhandling/exceptions/UnexpectedPagingException.java b/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/errorhandling/exceptions/UnexpectedPagingException.java index 1effe9fac..225aeec26 100644 --- a/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/errorhandling/exceptions/UnexpectedPagingException.java +++ b/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/errorhandling/exceptions/UnexpectedPagingException.java @@ -49,7 +49,9 @@ public class UnexpectedPagingException extends CqlGenericCycleException { sb.append("Additional paging would be required to read the results from this query fully" + ", but the user has not explicitly indicated that paging was expected.") .append(" fetched/allowed: ").append(fetchedPages).append("/").append(maxpages) - .append(" fetchSize(").append(fetchSize).append("): ").append(queryString); + .append(" fetchSize(").append(fetchSize).append("): ").append(queryString).append(", note this value " + + "is shown for reference from the default driver profile. 
If you are using a custom profile, it may be " + + "different."); return sb.toString(); } } diff --git a/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/statements/binders/CqlBinderTypes.java b/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/statements/binders/CqlBinderTypes.java index a4ddc37fe..cd3398f24 100644 --- a/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/statements/binders/CqlBinderTypes.java +++ b/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/statements/binders/CqlBinderTypes.java @@ -5,23 +5,23 @@ import com.datastax.oss.driver.api.core.cql.Statement; import com.datastax.oss.driver.api.core.session.Session; import io.nosqlbench.virtdata.core.bindings.ValuesArrayBinder; +import java.util.function.Function; + public enum CqlBinderTypes { - direct_array, - unset_aware, - diagnostic; + direct_array(s -> new DirectArrayValuesBinder()), + unset_aware(UnsettableValuesBinder::new), + diagnostic(s -> new DiagnosticPreparedBinder()); + + private final Function>> mapper; + + CqlBinderTypes(Function>> mapper) { + this.mapper = mapper; + } public final static CqlBinderTypes DEFAULT = unset_aware; - public ValuesArrayBinder get(Session session) { - if (this==direct_array) { - return new DirectArrayValuesBinder(); - } else if (this== unset_aware) { - return new UnsettableValuesBinder(session); - } else if (this==diagnostic) { - return new DiagnosticPreparedBinder(); - } else { - throw new RuntimeException("Impossible-ish statement branch"); - } + public ValuesArrayBinder> get(Session session) { + return mapper.apply(session); } } diff --git a/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/statements/binders/DiagnosticPreparedBinder.java b/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/statements/binders/DiagnosticPreparedBinder.java index 97dd6ca2c..15357cfce 100644 --- a/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/statements/binders/DiagnosticPreparedBinder.java +++ 
b/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/statements/binders/DiagnosticPreparedBinder.java @@ -16,10 +16,10 @@ import java.util.List; * Other binders will call to this one in an exception handler when needed in * order to explain in more detail what is happening for users. */ -public class DiagnosticPreparedBinder implements ValuesArrayBinder { +public class DiagnosticPreparedBinder implements ValuesArrayBinder> { public static final Logger logger = LoggerFactory.getLogger(DiagnosticPreparedBinder.class); @Override - public Statement bindValues(PreparedStatement prepared, Object[] values) { + public Statement bindValues(PreparedStatement prepared, Object[] values) { ColumnDefinitions columnDefinitions = prepared.getVariableDefinitions(); BoundStatement bound = prepared.bind(); diff --git a/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/statements/binders/DirectArrayValuesBinder.java b/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/statements/binders/DirectArrayValuesBinder.java index 236e768f0..dcc5f6c90 100644 --- a/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/statements/binders/DirectArrayValuesBinder.java +++ b/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/statements/binders/DirectArrayValuesBinder.java @@ -18,7 +18,7 @@ import java.util.Arrays; * to be modeled, and at no clear benefit. Thus the {@link CqlBinderTypes#unset_aware} one * will become the default. 
*/ -public class DirectArrayValuesBinder implements ValuesArrayBinder { +public class DirectArrayValuesBinder implements ValuesArrayBinder> { public final static Logger logger = LoggerFactory.getLogger(DirectArrayValuesBinder.class); @Override diff --git a/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/statements/binders/UnsettableValuesBinder.java b/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/statements/binders/UnsettableValuesBinder.java index 32aaeb94f..7d6ac27f3 100644 --- a/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/statements/binders/UnsettableValuesBinder.java +++ b/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/statements/binders/UnsettableValuesBinder.java @@ -17,7 +17,7 @@ import java.util.ArrayList; import java.util.Iterator; import java.util.List; -public class UnsettableValuesBinder implements ValuesArrayBinder { +public class UnsettableValuesBinder implements ValuesArrayBinder> { private final static Logger logger = LoggerFactory.getLogger(UnsettableValuesBinder.class); private final Session session; diff --git a/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/statements/core/CQLSessionCache.java b/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/statements/core/CQLSessionCache.java index 2770361ae..bd985958f 100644 --- a/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/statements/core/CQLSessionCache.java +++ b/driver-cqld4/src/main/java/io/nosqlbench/activitytype/cqld4/statements/core/CQLSessionCache.java @@ -1,10 +1,17 @@ package io.nosqlbench.activitytype.cqld4.statements.core; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.CqlSessionBuilder; +import com.datastax.oss.driver.api.core.config.*; import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy; +import com.datastax.oss.driver.api.core.metadata.EndPoint; import com.datastax.oss.driver.api.core.retry.RetryPolicy; import 
com.datastax.oss.driver.api.core.session.Session; import com.datastax.oss.driver.api.core.specex.SpeculativeExecutionPolicy; +import com.datastax.oss.driver.internal.core.config.map.MapBasedDriverConfigLoader; +import com.datastax.oss.driver.internal.core.config.typesafe.DefaultDriverConfigLoader; import com.datastax.oss.driver.internal.core.retry.DefaultRetryPolicy; +import com.typesafe.config.ConfigFactory; import io.nosqlbench.activitytype.cqld4.core.CQLOptions; import io.nosqlbench.activitytype.cqld4.core.ProxyTranslator; import io.nosqlbench.engine.api.activityapi.core.Shutdownable; @@ -13,6 +20,7 @@ import io.nosqlbench.engine.api.metrics.ActivityMetrics; import io.nosqlbench.engine.api.scripting.NashornEvaluator; import io.nosqlbench.engine.api.util.SSLKsFactory; import org.apache.tinkerpop.gremlin.driver.Cluster; +import org.graalvm.options.OptionMap; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -23,13 +31,24 @@ import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; import java.util.*; +import java.util.concurrent.ConcurrentHashMap; public class CQLSessionCache implements Shutdownable { private final static Logger logger = LoggerFactory.getLogger(CQLSessionCache.class); private final static String DEFAULT_SESSION_ID = "default"; private static CQLSessionCache instance = new CQLSessionCache(); - private Map sessionCache = new HashMap<>(); + private Map sessionCache = new HashMap<>(); + + + private final static class SessionConfig extends ConcurrentHashMap { + public CqlSession session; + public Map config = new ConcurrentHashMap<>(); + + public SessionConfig(CqlSession session) { + this.session = session; + } + } private CQLSessionCache() { } @@ -39,66 +58,83 @@ public class CQLSessionCache implements Shutdownable { } public void stopSession(ActivityDef activityDef) { - String key = activityDef.getParams().getOptionalString("clusterid").orElse(DEFAULT_SESSION_ID); - Session session = sessionCache.get(key); - 
session.close(); + String key = activityDef.getParams().getOptionalString("sessionid").orElse(DEFAULT_SESSION_ID); + SessionConfig sessionConfig = sessionCache.get(key); + sessionConfig.session.close(); } - public Session getSession(ActivityDef activityDef) { - String key = activityDef.getParams().getOptionalString("clusterid").orElse(DEFAULT_SESSION_ID); - return sessionCache.computeIfAbsent(key, (cid) -> createSession(activityDef, key)); + public CqlSession getSession(ActivityDef activityDef) { + String key = activityDef.getParams().getOptionalString("sessionid").orElse(DEFAULT_SESSION_ID); + String profileName = activityDef.getParams().getOptionalString("profile").orElse("default"); + SessionConfig sessionConfig = sessionCache.computeIfAbsent(key, (cid) -> createSession(activityDef, key, profileName)); + return sessionConfig.session; } // cbopts=\".withLoadBalancingPolicy(LatencyAwarePolicy.builder(new TokenAwarePolicy(new DCAwareRoundRobinPolicy(\"dc1-us-east\", 0, false))).build()).withRetryPolicy(new LoggingRetryPolicy(DefaultRetryPolicy.INSTANCE))\" - private Session createSession(ActivityDef activityDef, String sessid) { + private SessionConfig createSession(ActivityDef activityDef, String sessid, String profileName) { String host = activityDef.getParams().getOptionalString("host").orElse("localhost"); int port = activityDef.getParams().getOptionalInteger("port").orElse(9042); - String driverType = activityDef.getParams().getOptionalString("cqldriver").orElse("dse"); + activityDef.getParams().getOptionalString("cqldriver").ifPresent(v -> { + logger.warn("The cqldriver parameter is not needed in this version of the driver."); + }); - Cluster.Builder builder = - driverType.toLowerCase().equals("dse") ? DseCluster.builder() : - driverType.toLowerCase().equals("oss") ? 
Cluster.builder() : null; - if (builder==null) { - throw new RuntimeException("The driver type '" + driverType + "' is not recognized"); + // TODO: Figure out how to layer configs with the new TypeSafe Config layer in the Datastax Java Driver + // TODO: Or give up and bulk import options into the map, because the config API is a labyrinth + + CqlSessionBuilder builder = CqlSession.builder(); +// +// OptionsMap optionsMap = new OptionsMap(); +// +// OptionsMap defaults = OptionsMap.driverDefaults(); +// DriverConfigLoader cl = DriverConfigLoader.fromMap(defaults); +// DriverConfig cfg = cl.getInitialConfig(); + + OptionsMap optionsMap = OptionsMap.driverDefaults(); + + builder.withConfigLoader(new MapBasedDriverConfigLoader()) + builder.withConfigLoader(optionsMap); + + + Optional scb = activityDef.getParams().getOptionalString("secureconnectbundle") + .map(Path::of); + + Optional> hosts = activityDef.getParams().getOptionalString("host", "hosts") + .map(h -> h.split(",")).map(Arrays::asList); + + Optional port1 = activityDef.getParams().getOptionalInteger("port"); + + if (scb.isPresent()) { + scb.map(b -> { + logger.debug("adding secureconnectbundle: " + b.toString()); + return b; + }).ifPresent(builder::withCloudSecureConnectBundle); + + if (hosts.isPresent()) { + logger.warn("The host parameter is not valid when using secureconnectbundle="); + } + if (port1.isPresent()) { + logger.warn("the port parameter is not used with CQL when using secureconnectbundle="); + } + } else { + hosts.orElse(List.of("localhost")) + .stream() + .map(h -> InetSocketAddress.createUnresolved(h,port)) + .peek(h-> logger.debug("adding contact endpoint: " + h.getHostName()+":"+h.getPort())) + .forEachOrdered(builder::addContactPoint); } - logger.info("Using driver type '" + driverType.toUpperCase() + "'"); - - Optional scb = activityDef.getParams() - .getOptionalString("secureconnectbundle"); - scb.map(File::new) - .ifPresent(builder::withCloudSecureConnectBundle); - - 
activityDef.getParams() - .getOptionalString("insights").map(Boolean::parseBoolean) - .ifPresent(builder::withMonitorReporting); - - String[] contactPoints = activityDef.getParams().getOptionalString("host") - .map(h -> h.split(",")).orElse(null); - - if (contactPoints == null) { - contactPoints = activityDef.getParams().getOptionalString("hosts") - .map(h -> h.split(",")).orElse(null); - } - if (contactPoints == null && scb.isEmpty()) { - contactPoints = new String[]{"localhost"}; - } - - if (contactPoints != null) { - builder.addContactPoints(contactPoints); - } - - activityDef.getParams().getOptionalInteger("port").ifPresent(builder::withPort); - - builder.withCompression(ProtocolOptions.Compression.NONE); +// builder.withCompression(ProtocolOptions.Compression.NONE); + // TODO add map based configuration with compression defaults Optional usernameOpt = activityDef.getParams().getOptionalString("username"); Optional passwordOpt = activityDef.getParams().getOptionalString("password"); Optional passfileOpt = activityDef.getParams().getOptionalString("passfile"); + Optional authIdOpt = activityDef.getParams().getOptionalString("authid"); + if (usernameOpt.isPresent()) { String username = usernameOpt.get(); @@ -119,7 +155,11 @@ public class CQLSessionCache implements Shutdownable { logger.error(error); throw new RuntimeException(error); } - builder.withCredentials(username, password); + if (authIdOpt.isPresent()) { + builder.withAuthCredentials(username, password, authIdOpt.get()); + } else { + builder.withAuthCredentials(username, password); + } } Optional clusteropts = activityDef.getParams().getOptionalString("cbopts"); diff --git a/driver-cqld4/src/main/resources/cqld4.md b/driver-cqld4/src/main/resources/cqld4.md index 6dc73721e..2c90ed31d 100644 --- a/driver-cqld4/src/main/resources/cqld4.md +++ b/driver-cqld4/src/main/resources/cqld4.md @@ -1,70 +1,135 @@ -# cql driver +# cql-d4 driver This is the CQL version 4 driver for NoSQLBench. 
As it gets more use, we will make it the primary driver under the 'cql' name. For now, the 'cql' refers to the version 1.9 driver, while 'cqld4' refers to this one. The drivers will have identical features where possible, but new enhancements will be targeted at this one first. +In the alpha release of this NoSQLBench CQL driver, some of the options previously available on the CQL 1.9 driver will +not be supported. We are working to add these in an idiomatic way ASAP. + This is an driver which allows for the execution of CQL statements. This driver supports both sync and async modes, with detailed metrics provided for both. -### Example activity definitions +TEMPORARY EDITORS NOTE: This will use a more consistent layout as shown below. The topics are meant to be searchable in +the newer doc system scheme. -Run a cql activity named 'cql1', with definitions from activities/cqldefs.yaml -~~~ -... driver=cql alias=cql1 workload=cqldefs -~~~ +## Activity Params -Run a cql activity defined by cqldefs.yaml, but with shortcut naming -~~~ -... driver=cql workload=cqldefs -~~~ +There are the parameters that you can provide when starting an activity with this driver. -Only run statement groups which match a tag regex -~~~ -... driver=cql workload=cqldefs tags=group:'ddl.*' -~~~ +Any parameter that is marked as *required* must be provided or an error will be thrown at activity startup. All other +parameters are marked as *optional*. -Run the matching 'dml' statements, with 100 cycles, from [1000..1100) -~~~ -... driver=cql workload=cqldefs tags=group:'dml.*' cycles=1000..1100 -~~~ -This last example shows that the cycle range is [inclusive..exclusive), -to allow for stacking test intervals. This is standard across all -activity types. +Any parameter that is marked as *static* may not be changed while an activity is running. All other parameters are +marked as *dynamic*, meaning that they may be changed while an activity is running via scripting. 
+ +#### sessionid + +*optional*, *static* + +The `sessionid` parameter allows you to logically assign a named instance of a session and session configuration to each +activity that you run. This allows for different driver settings to be used within the same scenario. + +Default +: default + +Example: +: `sessionid=test43` + +#### profile + +*optional*, *static* + +Controls the configuration profile used by the driver. If you provide a value for this parameter, then a configuration +file under the name must exist, or an error will be thrown. This a driver configuration file, as documented in [DataStax +Java Driver - Configurat](https://docs.datastax.com/en/developer/java-driver/4.6/manual/core/configuration/). + +The profile is keyed to the sessionid, as each session id will be configured with the named profile just as you would +see with normal file-based driver configuration. Thus, changing the configuration within the profile will affect future +operations which share the same session. + +While the profile itself is not changeable after it has been set, the parameters that are in the profile may be +dynamically changed, depending on how they are annotated below. + +*All other driver settings are part of the named profile for an activity, and will override the values provided from the +named profile unless otherwise stated. These overrides do not affect the named file, only the runtime behavior of the +driver.* + +Default +: 'default' + +Examples +: `profile=experimental-settings` + +#### secureconnectbundle + +*optional*, *static* + +This parameter is used to connect to Astra Database as a Service. This option accepts a path to the secure connect +bundle that is downloaded from the Astra UI. + +Default +: undefined + +Examples +: `secureconnectbundle=/tmp/secure-connect-my_db.zip` +: `secureconnectbundle="/home/automaton/secure-connect-my_db.zip"` + + +#### hosts + +*optional*, *static* + +The host or hosts to use to connect to the cluster. 
If you specify multiple values here, use commas with no spaces. +*This option is not valid when the `secureconnectbundle` option is used.* + +Default +: localhost + +Examples +: `host=192.168.1.25` +: `host=192.168.1.25,testhost42` + +#### port + +*optional*, *static* + +The port to connect with. *This option is not valid when the `secureconnectbundle` option is used.* + +Default +: 9042 + +Examples: +- `port=9042` + +#### cl + +*optional*, *static* + +An override to consistency levels for the activity. If this option is used, then all consistency levels will be set to +this by default for the current activity, and a log line explaining the difference with respect to the yaml will be +emitted. This is not a dynamic parameter. It will only be applied at activity start. + + +#### whitelist + + +---- below this line needs to be curated for the new driver ---- -### CQL ActivityType Parameters -- **cqldriver** - default: dse - The type of driver to use, either dse, or oss. If you need DSE-specific features, use - the dse driver. If you are connecting to an OSS Apache Cassandra cluster, you must use the oss driver. The oss driver - option is only available in nosqlbench. -- **host** - The host or hosts to use for connection points to - the cluster. If you specify multiple values here, use commas - with no spaces. - Examples: - - `host=192.168.1.25` - - `host=`192.168.1.25,testhost42` -- **workload** - The workload definition which holds the schema and statement defs. - see workload yaml location for additional details - (no default, required) -- **port** - The port to connect with -- **cl** - An override to consistency levels for the activity. If - this option is used, then all consistency levels will be replaced - by this one for the current activity, and a log line explaining - the difference with respect to the yaml will be emitted. - This is not a dynamic parameter. It will only be applied at - activity start. 
-- **cbopts** - default: none - this is how you customize the cluster - settings for the client, including policies, compression, etc. This - is a string of *Java*-like method calls just as you would use them - in the Cluster.Builder fluent API. They are evaluated inline - with the default Cluster.Builder options not covered below. - Example: cbopts=".withCompression(ProtocolOptions.Compression.NONE)" - **whitelist** default: none - Applies a whitelist policy to the load balancing policy in the driver. If used, a WhitelistPolicy(RoundRobinPolicy()) will be created and added to the cluster builder on startup. Examples: - whitelist=127.0.0.1 - whitelist=127.0.0.1:9042,127.0.0.2:1234 + +- **cbopts** - default: none - this is how you customize the cluster + settings for the client, including policies, compression, etc. This + is a string of *Java*-like method calls just as you would use them + in the Cluster.Builder fluent API. They are evaluated inline + with the default Cluster.Builder options not covered below. + Example: cbopts=".withCompression(ProtocolOptions.Compression.NONE)" - **retrypolicy** default: none - Applies a retry policy in the driver The only option supported for this version is `retrypolicy=logging`, which uses the default retry policy, but with logging added. @@ -238,11 +303,6 @@ activity types. code base. This is for dynamic codec loading with user-provided codecs mapped via the internal UDT APIs. default: false -- **secureconnectbundle** - used to connect to CaaS, accepts a path to the secure connect bundle - that is downloaded from the CaaS UI. 
- Examples: - - `secureconnectbundle=/tmp/secure-connect-my_db.zip` - - `secureconnectbundle="/home/automaton/secure-connect-my_db.zip"` - **insights** - Set to false to disable the driver from sending insights monitoring information - `insights=false` - **tickduration** - sets the tickDuration (milliseconds) of HashedWheelTimer of the diff --git a/driver-cqlverify/pom.xml b/driver-cqlverify/pom.xml index 57aa3642c..e904a3c30 100644 --- a/driver-cqlverify/pom.xml +++ b/driver-cqlverify/pom.xml @@ -4,7 +4,7 @@ io.nosqlbench mvn-defaults - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT ../mvn-defaults @@ -24,7 +24,7 @@ io.nosqlbench driver-cql-shaded - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT diff --git a/driver-diag/pom.xml b/driver-diag/pom.xml index 332b2decf..099a39716 100644 --- a/driver-diag/pom.xml +++ b/driver-diag/pom.xml @@ -5,7 +5,7 @@ mvn-defaults io.nosqlbench - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT ../mvn-defaults @@ -20,7 +20,7 @@ io.nosqlbench engine-api - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT diff --git a/driver-http/pom.xml b/driver-http/pom.xml index a658fceb5..8410c120a 100644 --- a/driver-http/pom.xml +++ b/driver-http/pom.xml @@ -5,7 +5,7 @@ mvn-defaults io.nosqlbench - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT ../mvn-defaults @@ -18,10 +18,11 @@ + io.nosqlbench engine-api - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT diff --git a/driver-kafka/pom.xml b/driver-kafka/pom.xml new file mode 100644 index 000000000..b44eabcfb --- /dev/null +++ b/driver-kafka/pom.xml @@ -0,0 +1,85 @@ + + 4.0.0 + + + mvn-defaults + io.nosqlbench + 3.12.119-SNAPSHOT + ../mvn-defaults + + + driver-kafka + jar + ${project.artifactId} + + + A Kafka driver for nosqlbench. This provides the ability to inject synthetic data + into a kafka topic. 
+ + + + + + + + org.apache.kafka + kafka-clients + 2.0.0 + + + + io.nosqlbench + engine-api + 3.12.119-SNAPSHOT + + + + io.nosqlbench + driver-stdout + 3.12.119-SNAPSHOT + + + + org.slf4j + slf4j-api + 1.7.25 + + + + + + org.testng + testng + 6.13.1 + test + + + + org.assertj + assertj-core-java8 + 1.0.0m1 + test + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/driver-kafka/src/main/java/com/datastax/ebdrivers/kafkaproducer/KafkaProducerActivity.java b/driver-kafka/src/main/java/com/datastax/ebdrivers/kafkaproducer/KafkaProducerActivity.java new file mode 100644 index 000000000..907fa786a --- /dev/null +++ b/driver-kafka/src/main/java/com/datastax/ebdrivers/kafkaproducer/KafkaProducerActivity.java @@ -0,0 +1,68 @@ +package com.datastax.ebdrivers.kafkaproducer; + +import io.nosqlbench.activitytype.stdout.StdoutActivity; +import io.nosqlbench.engine.api.activityimpl.ActivityDef; +import org.apache.kafka.clients.producer.*; +import org.apache.kafka.common.serialization.LongSerializer; +import org.apache.kafka.common.serialization.StringSerializer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.Arrays; +import java.util.Properties; +import java.util.concurrent.Future; +import java.util.stream.Collectors; + +public class KafkaProducerActivity extends StdoutActivity { + private final static Logger logger = LoggerFactory.getLogger(KafkaProducerActivity.class); + private Producer producer = null; + private String topic; + + public KafkaProducerActivity(ActivityDef activityDef) { + super(activityDef); + } + + public synchronized Producer getKafkaProducer() { + if (producer!=null) { + return producer; + } + Properties props = new Properties(); + String servers = Arrays.stream(activityDef.getParams().getOptionalString("host","hosts") + .orElse("localhost" + ":9092") + .split(",")) + .map(x -> x.indexOf(':') == -1 ? 
x + ":9092" : x) + .collect(Collectors.joining(",")); + String clientId = activityDef.getParams().getOptionalString("clientid","client.id","client_id") + .orElse("TestProducerClientId"); + String key_serializer = + activityDef.getParams().getOptionalString("key_serializer").orElse(LongSerializer.class.getName()); + String value_serializer = + activityDef.getParams().getOptionalString("value_serializer").orElse(StringSerializer.class.getName()); + + props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, servers); + props.put(ProducerConfig.CLIENT_ID_CONFIG, clientId); + props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, key_serializer); + props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, value_serializer); + + producer = new KafkaProducer<>(props); + return producer; + } + + @Override + public synchronized void write(String statement) { + Producer kafkaProducer = getKafkaProducer(); + ProducerRecord record = new ProducerRecord<>(topic, statement); + Future send = kafkaProducer.send(record); + try { + RecordMetadata result = send.get(); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + @Override + public void onActivityDefUpdate(ActivityDef activityDef) { + this.topic = activityDef.getParams().getOptionalString("topic").orElse("default-topic"); + super.onActivityDefUpdate(activityDef); + } +} diff --git a/driver-kafka/src/main/java/com/datastax/ebdrivers/kafkaproducer/KafkaProducerActivityType.java b/driver-kafka/src/main/java/com/datastax/ebdrivers/kafkaproducer/KafkaProducerActivityType.java new file mode 100644 index 000000000..ad62a29a4 --- /dev/null +++ b/driver-kafka/src/main/java/com/datastax/ebdrivers/kafkaproducer/KafkaProducerActivityType.java @@ -0,0 +1,40 @@ +package com.datastax.ebdrivers.kafkaproducer; + +import io.nosqlbench.activitytype.stdout.StdoutAction; +import io.nosqlbench.activitytype.stdout.StdoutActivity; +import io.nosqlbench.engine.api.activityapi.core.Action; +import 
io.nosqlbench.engine.api.activityapi.core.ActionDispenser; +import io.nosqlbench.engine.api.activityapi.core.ActivityType; +import io.nosqlbench.engine.api.activityimpl.ActivityDef; +import io.nosqlbench.nb.annotations.Service; + +@Service(ActivityType.class) +public class KafkaProducerActivityType implements ActivityType { + @Override + public String getName() { + return "kafkaproducer"; + } + + @Override + public KafkaProducerActivity getActivity(ActivityDef activityDef) { + return new KafkaProducerActivity(activityDef); + } + + private static class Dispenser implements ActionDispenser { + private StdoutActivity activity; + + private Dispenser(StdoutActivity activity) { + this.activity = activity; + } + + @Override + public Action getAction(int slot) { + return new StdoutAction(slot,this.activity); + } + } + + @Override + public ActionDispenser getActionDispenser(KafkaProducerActivity activity) { + return new Dispenser(activity); + } +} diff --git a/driver-kafka/src/main/resources/kafkaproducer.md b/driver-kafka/src/main/resources/kafkaproducer.md new file mode 100644 index 000000000..158da885e --- /dev/null +++ b/driver-kafka/src/main/resources/kafkaproducer.md @@ -0,0 +1,32 @@ +# kafkaproducer + +This is an activity type which allows for a stream of data to be sent to a kafka topic. It is based on the stdout +activity statement format. + +## Parameters + +- **topic** - The topic to write to for this activity. + +### Examples + +Refer to the online standard YAML documentation for a detailed walk-through. 
+An example yaml is below for sending structured JSON to a kafka topic: + + bindings: + price: Normal(10.0D,2.0D) -> double; Save('price') -> double; + quantity: Normal(10000.0D,100.0D); Add(-10000.0D); Save('quantity') -> double; + total: Identity(); Expr('price * quantity') -> double; + client: WeightedStrings('ABC_TEST:3;DFG_TEST:3;STG_TEST:14'); + clientid: HashRange(0,1000000000) -> long; + + statements: + - | + \{ + "trade": \{ + "price": {price}, + "quantity": {quantity}, + "total": {total}, + "client": "{client}", + "clientid":"{clientid}" + \} + \} diff --git a/driver-mongodb/pom.xml b/driver-mongodb/pom.xml new file mode 100644 index 000000000..1a60093a8 --- /dev/null +++ b/driver-mongodb/pom.xml @@ -0,0 +1,49 @@ + + 4.0.0 + + driver-mongodb + jar + + + mvn-defaults + io.nosqlbench + 3.12.119-SNAPSHOT + ../mvn-defaults + + + ${project.artifactId} + + An nosqlbench ActivityType (AT) driver module; + MongoDB + + + + + io.nosqlbench + engine-api + 3.12.119-SNAPSHOT + + + + org.mongodb + mongodb-driver-sync + 4.0.3 + + + + + + org.assertj + assertj-core + test + + + + junit + junit + test + + + + + diff --git a/driver-mongodb/src/main/java/io/nosqlbench/driver/mongodb/MongoAction.java b/driver-mongodb/src/main/java/io/nosqlbench/driver/mongodb/MongoAction.java new file mode 100644 index 000000000..64c51813c --- /dev/null +++ b/driver-mongodb/src/main/java/io/nosqlbench/driver/mongodb/MongoAction.java @@ -0,0 +1,81 @@ +package io.nosqlbench.driver.mongodb; + +import java.util.concurrent.TimeUnit; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.codahale.metrics.Timer; +import com.mongodb.ReadPreference; +import com.mongodb.client.MongoDatabase; +import io.nosqlbench.engine.api.activityapi.core.SyncAction; +import io.nosqlbench.engine.api.activityapi.planning.OpSequence; +import org.bson.Document; +import org.bson.conversions.Bson; + +public class MongoAction implements SyncAction { + + private final static Logger logger = 
LoggerFactory.getLogger(MongoAction.class); + + private final MongoActivity activity; + private final int slot; + + private OpSequence sequencer; + + public MongoAction(MongoActivity activity, int slot) { + this.activity = activity; + this.slot = slot; + } + + @Override + public void init() { + this.sequencer = activity.getOpSequencer(); + } + + @Override + public int runCycle(long cycleValue) { + ReadyMongoStatement rms; + Bson queryBson; + try (Timer.Context bindTime = activity.bindTimer.time()) { + rms = sequencer.get(cycleValue); + queryBson = rms.bind(cycleValue); + + // Maybe show the query in log/console - only for diagnostic use + if (activity.isShowQuery()) { + logger.info("Query(cycle={}):\n{}", cycleValue, queryBson); + } + } + + long nanoStartTime = System.nanoTime(); + for (int i = 1; i <= activity.getMaxTries(); i++) { + activity.triesHisto.update(i); + + try (Timer.Context resultTime = activity.resultTimer.time()) { + MongoDatabase database = activity.getDatabase(); + ReadPreference readPreference = rms.getReadPreference(); + + // assuming the commands are one of these in the doc: + // https://docs.mongodb.com/manual/reference/command/nav-crud/ + Document resultDoc = database.runCommand(queryBson, readPreference); + + long resultNanos = System.nanoTime() - nanoStartTime; + + // TODO: perhaps collect the operationTime from the resultDoc if any + // https://docs.mongodb.com/manual/reference/method/db.runCommand/#command-response + int ok = Double.valueOf((double) resultDoc.getOrDefault("ok", 0.0d)).intValue(); + if (ok == 1) { + // success + activity.resultSuccessTimer.update(resultNanos, TimeUnit.NANOSECONDS); + } + activity.resultSetSizeHisto.update(resultDoc.getInteger("n", 0)); + + return ok == 1 ? 
0 : 1; + } catch (Exception e) { + logger.error("Failed to runCommand {} on cycle {}, tries {}", queryBson, cycleValue, i, e); + } + } + + throw new RuntimeException(String.format("Exhausted max tries (%s) on cycle %s", + cycleValue, activity.getMaxTries())); + } +} diff --git a/driver-mongodb/src/main/java/io/nosqlbench/driver/mongodb/MongoActivity.java b/driver-mongodb/src/main/java/io/nosqlbench/driver/mongodb/MongoActivity.java new file mode 100644 index 000000000..ad553cf79 --- /dev/null +++ b/driver-mongodb/src/main/java/io/nosqlbench/driver/mongodb/MongoActivity.java @@ -0,0 +1,139 @@ +package io.nosqlbench.driver.mongodb; + +import java.util.List; +import java.util.Objects; +import java.util.function.Function; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.codahale.metrics.Histogram; +import com.codahale.metrics.Timer; +import com.mongodb.client.MongoClient; +import com.mongodb.client.MongoClients; +import com.mongodb.client.MongoDatabase; +import io.nosqlbench.engine.api.activityapi.core.ActivityDefObserver; +import io.nosqlbench.engine.api.activityapi.planning.OpSequence; +import io.nosqlbench.engine.api.activityapi.planning.SequencePlanner; +import io.nosqlbench.engine.api.activityapi.planning.SequencerType; +import io.nosqlbench.engine.api.activityconfig.ParsedStmt; +import io.nosqlbench.engine.api.activityconfig.StatementsLoader; +import io.nosqlbench.engine.api.activityconfig.yaml.StmtDef; +import io.nosqlbench.engine.api.activityconfig.yaml.StmtsDocList; +import io.nosqlbench.engine.api.activityimpl.ActivityDef; +import io.nosqlbench.engine.api.activityimpl.SimpleActivity; +import io.nosqlbench.engine.api.metrics.ActivityMetrics; +import io.nosqlbench.engine.api.templating.StrInterpolator; +import io.nosqlbench.engine.api.util.TagFilter; + +public class MongoActivity extends SimpleActivity implements ActivityDefObserver { + + private final static Logger logger = LoggerFactory.getLogger(MongoActivity.class); + + private 
String yamlLoc; + private String connectionString; + private String databaseName; + + private MongoClient client; + private MongoDatabase mongoDatabase; + private boolean showQuery; + private int maxTries; + + private OpSequence opSequence; + + Timer bindTimer; + Timer resultTimer; + Timer resultSuccessTimer; + Histogram resultSetSizeHisto; + Histogram triesHisto; + + public MongoActivity(ActivityDef activityDef) { + super(activityDef); + } + + @Override + public synchronized void onActivityDefUpdate(ActivityDef activityDef) { + super.onActivityDefUpdate(activityDef); + + // sanity check + yamlLoc = activityDef.getParams().getOptionalString("yaml", "workload") + .orElseThrow(() -> new IllegalArgumentException("yaml is not defined")); + connectionString = activityDef.getParams().getOptionalString("connection") + .orElseThrow(() -> new IllegalArgumentException("connection is not defined")); + // TODO: support multiple databases + databaseName = activityDef.getParams().getOptionalString("database") + .orElseThrow(() -> new IllegalArgumentException("database is not defined")); + } + + @Override + public void initActivity() { + logger.debug("initializing activity: " + this.activityDef.getAlias()); + onActivityDefUpdate(activityDef); + + opSequence = initOpSequencer(); + setDefaultsFromOpSequence(opSequence); + + client = MongoClients.create(connectionString); + mongoDatabase = client.getDatabase(databaseName); + showQuery = activityDef.getParams().getOptionalBoolean("showquery") + .orElse(false); + maxTries = activityDef.getParams().getOptionalInteger("maxtries") + .orElse(10); + + bindTimer = ActivityMetrics.timer(activityDef, "bind"); + resultTimer = ActivityMetrics.timer(activityDef, "result"); + resultSuccessTimer = ActivityMetrics.timer(activityDef, "result-success"); + resultSetSizeHisto = ActivityMetrics.histogram(activityDef, "resultset-size"); + triesHisto = ActivityMetrics.histogram(activityDef, "tries"); + } + + @Override + public void shutdownActivity() { + 
logger.debug("shutting down activity: " + this.activityDef.getAlias()); + if (client != null) { + client.close(); + } + } + + OpSequence initOpSequencer() { + SequencerType sequencerType = SequencerType.valueOf( + activityDef.getParams().getOptionalString("seq").orElse("bucket") + ); + SequencePlanner sequencer = new SequencePlanner<>(sequencerType); + + StmtsDocList stmtsDocList = StatementsLoader.load(logger, yamlLoc, new StrInterpolator(activityDef), "activities"); + + String tagfilter = activityDef.getParams().getOptionalString("tags").orElse(""); + + TagFilter tagFilter = new TagFilter(tagfilter); + stmtsDocList.getStmts().stream().map(tagFilter::matchesTaggedResult).forEach(r -> logger.info(r.getLog())); + + List stmts = stmtsDocList.getStmts(tagfilter); + for (StmtDef stmt : stmts) { + ParsedStmt parsed = stmt.getParsed().orError(); + String statement = parsed.getPositionalStatement(Function.identity()); + Objects.requireNonNull(statement); + + sequencer.addOp(new ReadyMongoStatement(stmt), + Long.parseLong(stmt.getParams().getOrDefault("ratio","1"))); + } + + return sequencer.resolve(); + } + + protected MongoDatabase getDatabase() { + return mongoDatabase; + } + + protected OpSequence getOpSequencer() { + return opSequence; + } + + protected boolean isShowQuery() { + return showQuery; + } + + protected int getMaxTries() { + return maxTries; + } +} diff --git a/driver-mongodb/src/main/java/io/nosqlbench/driver/mongodb/MongoActivityType.java b/driver-mongodb/src/main/java/io/nosqlbench/driver/mongodb/MongoActivityType.java new file mode 100644 index 000000000..b37f60c02 --- /dev/null +++ b/driver-mongodb/src/main/java/io/nosqlbench/driver/mongodb/MongoActivityType.java @@ -0,0 +1,41 @@ +package io.nosqlbench.driver.mongodb; + +import io.nosqlbench.engine.api.activityapi.core.Action; +import io.nosqlbench.engine.api.activityapi.core.ActionDispenser; +import io.nosqlbench.engine.api.activityapi.core.ActivityType; +import 
io.nosqlbench.engine.api.activityimpl.ActivityDef; +import io.nosqlbench.nb.annotations.Service; + +@Service(ActivityType.class) +public class MongoActivityType implements ActivityType { + + @Override + public String getName() { + return "mongodb"; + } + + @Override + public MongoActivity getActivity(ActivityDef activityDef) { + return new MongoActivity(activityDef); + } + + @Override + public ActionDispenser getActionDispenser(MongoActivity activity) { + return new MongoActionDispenser(activity); + } + + private static class MongoActionDispenser implements ActionDispenser { + + private final MongoActivity activity; + + public MongoActionDispenser(MongoActivity activity) + { + this.activity = activity; + } + + @Override + public Action getAction(int slot) { + return new MongoAction(activity, slot); + } + } +} diff --git a/driver-mongodb/src/main/java/io/nosqlbench/driver/mongodb/ReadyMongoStatement.java b/driver-mongodb/src/main/java/io/nosqlbench/driver/mongodb/ReadyMongoStatement.java new file mode 100644 index 000000000..9c974d51e --- /dev/null +++ b/driver-mongodb/src/main/java/io/nosqlbench/driver/mongodb/ReadyMongoStatement.java @@ -0,0 +1,34 @@ +package io.nosqlbench.driver.mongodb; + +import com.mongodb.ReadPreference; +import io.nosqlbench.engine.api.activityconfig.yaml.StmtDef; +import io.nosqlbench.virtdata.core.bindings.BindingsTemplate; +import io.nosqlbench.virtdata.core.templates.ParsedTemplate; +import io.nosqlbench.virtdata.core.templates.StringBindings; +import io.nosqlbench.virtdata.core.templates.StringBindingsTemplate; +import org.bson.Document; +import org.bson.conversions.Bson; + +public class ReadyMongoStatement { + + private StringBindings bindings; + private ReadPreference readPreference; + + public ReadyMongoStatement(StmtDef stmtDef) { + ParsedTemplate paramTemplate = new ParsedTemplate(stmtDef.getStmt(), stmtDef.getBindings()); + BindingsTemplate paramBindings = new BindingsTemplate(paramTemplate.getBindPoints()); + 
StringBindingsTemplate template = new StringBindingsTemplate(stmtDef.getStmt(), paramBindings); + + this.bindings = template.resolve(); + this.readPreference = ReadPreference.valueOf(stmtDef.getParams() + .getOrDefault("readPreference","primary")); + } + + public ReadPreference getReadPreference() { + return readPreference; + } + + public Bson bind(long value) { + return Document.parse(bindings.bind(value)); + } +} diff --git a/driver-mongodb/src/main/resources/activities/mongodb-basic.yaml b/driver-mongodb/src/main/resources/activities/mongodb-basic.yaml new file mode 100644 index 000000000..f62872474 --- /dev/null +++ b/driver-mongodb/src/main/resources/activities/mongodb-basic.yaml @@ -0,0 +1,61 @@ +# nb -v run driver=mongodb yaml=mongodb-basic connection=mongodb://127.0.0.1 database=testdb tags=phase:rampup +description: An exmaple of a basic mongo insert and find. + +scenarios: + default: + - run driver=mongodb tags==phase:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto + - run driver=mongodb tags==phase:main cycles===TEMPLATE(main-cycles,10000000) threads=auto +bindings: + seq_key: Mod(<>); ToString() -> String + seq_value: Hash(); Mod(<>); ToString() -> String + rw_key: <int>>; ToString() -> String + rw_value: Hash(); <int>>; ToString() -> String + +blocks: + - name: rampup + tags: + phase: rampup + statements: + - rampup-insert: | + { + insert: "<>", + documents: [ { key: {seq_key}, + value: {seq_value} } ] + } + params: + readPreference: primary + tags: + name: rampup-insert + - name: main-read + tags: + phase: main + type: read + params: + ratio: <> + statements: + - main-find: | + { + find: "<>", + filter: { key: {rw_key} } + } + params: + readPreference: primary + tags: + name: main-find + - name: main-write + tags: + phase: main + type: write + params: + ratio: <> + statements: + - main-insert: | + { + insert: "<>", + documents: [ { key: {rw_key}, + value: {rw_value} } ] + } + params: + readPreference: primary + tags: + name: main-insert 
diff --git a/driver-mongodb/src/main/resources/mongodb.md b/driver-mongodb/src/main/resources/mongodb.md new file mode 100644 index 000000000..ff31d49dc --- /dev/null +++ b/driver-mongodb/src/main/resources/mongodb.md @@ -0,0 +1,20 @@ +# MongoDB Driver + +This is a driver for MongoDB. It supports the `db.runCommand` API described in [here](https://docs.mongodb.com/manual/reference/command/). + +### Example activity definitions + +Run a mongodb activity with definitions from activities/mongodb-basic.yaml +``` +... driver=mongodb yaml=activities/mongo-basic.yaml +``` + +### MongoDB ActivityType Parameters + +- **connection** (Mandatory) - connection string of the target MongoDB. + + Example: `mongodb://127.0.0.1` + +- **database** (Mandatory) - target database + + Example: `testdb` diff --git a/driver-mongodb/src/test/java/io/nosqlbench/driver/mongodb/MongoActivityTest.java b/driver-mongodb/src/test/java/io/nosqlbench/driver/mongodb/MongoActivityTest.java new file mode 100644 index 000000000..47a15adfd --- /dev/null +++ b/driver-mongodb/src/test/java/io/nosqlbench/driver/mongodb/MongoActivityTest.java @@ -0,0 +1,33 @@ +package io.nosqlbench.driver.mongodb; + +import org.junit.Before; +import org.junit.Test; + +import io.nosqlbench.engine.api.activityapi.planning.OpSequence; +import io.nosqlbench.engine.api.activityimpl.ActivityDef; + +import static org.assertj.core.api.Assertions.assertThat; + +public class MongoActivityTest { + + private ActivityDef activityDef; + + @Before + public void setup() { + String[] params = { + "yaml=activities/mongodb-basic.yaml", + "connection=mongodb://127.0.0.1", + "database=nosqlbench_testdb" + }; + activityDef = ActivityDef.parseActivityDef(String.join(";", params)); + } + + @Test + public void testInitOpSequencer() { + MongoActivity mongoActivity = new MongoActivity(activityDef); + mongoActivity.initActivity(); + + OpSequence sequence = mongoActivity.initOpSequencer(); + assertThat(sequence.getOps()).hasSize(3); + } +} diff --git 
a/driver-mongodb/src/test/java/io/nosqlbench/driver/mongodb/ReadyMongoStatementTest.java b/driver-mongodb/src/test/java/io/nosqlbench/driver/mongodb/ReadyMongoStatementTest.java new file mode 100644 index 000000000..7c1acde25 --- /dev/null +++ b/driver-mongodb/src/test/java/io/nosqlbench/driver/mongodb/ReadyMongoStatementTest.java @@ -0,0 +1,107 @@ +package io.nosqlbench.driver.mongodb; + +import java.util.List; +import java.util.Objects; +import java.util.function.Function; + +import org.junit.Before; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import io.nosqlbench.engine.api.activityconfig.ParsedStmt; +import io.nosqlbench.engine.api.activityconfig.StatementsLoader; +import io.nosqlbench.engine.api.activityconfig.yaml.StmtDef; +import io.nosqlbench.engine.api.activityconfig.yaml.StmtsDocList; +import io.nosqlbench.engine.api.activityimpl.ActivityDef; +import io.nosqlbench.engine.api.templating.StrInterpolator; +import io.nosqlbench.virtdata.core.templates.BindPoint; +import org.bson.conversions.Bson; + +import static org.assertj.core.api.Assertions.assertThat; + +public class ReadyMongoStatementTest { + private final static Logger logger = LoggerFactory.getLogger(ReadyMongoStatementTest.class); + + private ActivityDef activityDef; + private StmtsDocList stmtsDocList; + + @Before + public void setup() { + String[] params = { + "yaml=activities/mongodb-basic.yaml", + "database=nosqlbench_testdb", + }; + activityDef = ActivityDef.parseActivityDef(String.join(";", params)); + String yaml_loc = activityDef.getParams().getOptionalString("yaml", "workload").orElse("default"); + stmtsDocList = StatementsLoader.load(logger, yaml_loc, new StrInterpolator(activityDef), "activities"); + } + + @Test + public void testResolvePhaseRampup() { + String tagfilter = activityDef.getParams().getOptionalString("tags").orElse("phase:rampup"); + + List stmts = stmtsDocList.getStmts(tagfilter); + assertThat(stmts).hasSize(1); + for (StmtDef stmt 
: stmts) { + ParsedStmt parsed = stmt.getParsed().orError(); + assertThat(parsed.getBindPoints()).hasSize(2); + + BindPoint seqKey = new BindPoint("seq_key", "Mod(1000000000); ToString() -> String"); + BindPoint seqValue = new BindPoint("seq_value", "Hash(); Mod(1000000000); ToString() -> String"); + assertThat(parsed.getBindPoints()).containsExactly(seqKey, seqValue); + + String statement = parsed.getPositionalStatement(Function.identity()); + Objects.requireNonNull(statement); + + ReadyMongoStatement readyMongoStatement = new ReadyMongoStatement(stmt); + Bson bsonDoc = readyMongoStatement.bind(1L); + assertThat(bsonDoc).isNotNull(); + } + } + + @Test + public void testResolvePhaseMainRead() { + String tagfilter = activityDef.getParams().getOptionalString("tags").orElse("phase:main,name:main-find"); + + List stmts = stmtsDocList.getStmts(tagfilter); + assertThat(stmts).hasSize(1); + for (StmtDef stmt : stmts) { + ParsedStmt parsed = stmt.getParsed().orError(); + assertThat(parsed.getBindPoints()).hasSize(1); + + BindPoint rwKey = new BindPoint("rw_key", "Uniform(0,1000000000)->int; ToString() -> String"); + assertThat(parsed.getBindPoints()).containsExactly(rwKey); + + String statement = parsed.getPositionalStatement(Function.identity()); + Objects.requireNonNull(statement); + + ReadyMongoStatement readyMongoStatement = new ReadyMongoStatement(stmt); + Bson bsonDoc = readyMongoStatement.bind(1L); + assertThat(bsonDoc).isNotNull(); + } + } + + @Test + public void testResolvePhaseMainWrite() { + String tagfilter = activityDef.getParams().getOptionalString("tags").orElse("phase:main,name:main-insert"); + + List stmts = stmtsDocList.getStmts(tagfilter); + assertThat(stmts).hasSize(1); + for (StmtDef stmt : stmts) { + ParsedStmt parsed = stmt.getParsed().orError(); + assertThat(parsed.getBindPoints()).hasSize(2); + + BindPoint rwKey = new BindPoint("rw_key", "Uniform(0,1000000000)->int; ToString() -> String"); + BindPoint rwValue = new BindPoint("rw_value", "Hash(); 
Uniform(0,1000000000)->int; ToString() -> String"); + assertThat(parsed.getBindPoints()).containsExactly(rwKey, rwValue); + + String statement = parsed.getPositionalStatement(Function.identity()); + Objects.requireNonNull(statement); + + ReadyMongoStatement readyMongoStatement = new ReadyMongoStatement(stmt); + Bson bsonDoc = readyMongoStatement.bind(1L); + assertThat(bsonDoc).isNotNull(); + } + } +} diff --git a/driver-stdout/pom.xml b/driver-stdout/pom.xml index 2d95e7246..c9475c1a8 100644 --- a/driver-stdout/pom.xml +++ b/driver-stdout/pom.xml @@ -7,7 +7,7 @@ mvn-defaults io.nosqlbench - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT ../mvn-defaults @@ -21,7 +21,7 @@ io.nosqlbench engine-api - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT diff --git a/driver-tcp/pom.xml b/driver-tcp/pom.xml index 887ff63db..a993f589a 100644 --- a/driver-tcp/pom.xml +++ b/driver-tcp/pom.xml @@ -7,7 +7,7 @@ mvn-defaults io.nosqlbench - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT ../mvn-defaults @@ -23,13 +23,13 @@ io.nosqlbench engine-api - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT io.nosqlbench driver-stdout - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT diff --git a/driver-web/pom.xml b/driver-web/pom.xml index 6c9462013..5e21d4441 100644 --- a/driver-web/pom.xml +++ b/driver-web/pom.xml @@ -7,7 +7,7 @@ mvn-defaults io.nosqlbench - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT ../mvn-defaults @@ -22,7 +22,7 @@ io.nosqlbench engine-api - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT diff --git a/engine-api/pom.xml b/engine-api/pom.xml index 605f9bb56..4180e960d 100644 --- a/engine-api/pom.xml +++ b/engine-api/pom.xml @@ -5,7 +5,7 @@ mvn-defaults io.nosqlbench - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT ../mvn-defaults @@ -22,19 +22,19 @@ io.nosqlbench nb-api - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT io.nosqlbench nb-annotations - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT io.nosqlbench virtdata-userlibs - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT diff --git a/engine-api/src/main/java/io/nosqlbench/engine/api/activityimpl/SimpleActivity.java 
b/engine-api/src/main/java/io/nosqlbench/engine/api/activityimpl/SimpleActivity.java index 73d1e57c0..19a267c96 100644 --- a/engine-api/src/main/java/io/nosqlbench/engine/api/activityimpl/SimpleActivity.java +++ b/engine-api/src/main/java/io/nosqlbench/engine/api/activityimpl/SimpleActivity.java @@ -244,7 +244,7 @@ public class SimpleActivity implements Activity { * by the provided ratios. Also, modify the ActivityDef with reasonable defaults when requested. * @param seq - The {@link OpSequence} to derive the defaults from */ - public void setDefaultsFromOpSequence(OpSequence seq) { + public void setDefaultsFromOpSequence(OpSequence seq) { Optional strideOpt = getParams().getOptionalString("stride"); if (strideOpt.isEmpty()) { String stride = String.valueOf(seq.getSequence().length); diff --git a/engine-api/src/main/java/io/nosqlbench/engine/api/metrics/ActivityMetrics.java b/engine-api/src/main/java/io/nosqlbench/engine/api/metrics/ActivityMetrics.java index 90df7ce82..a6d3f55f8 100644 --- a/engine-api/src/main/java/io/nosqlbench/engine/api/metrics/ActivityMetrics.java +++ b/engine-api/src/main/java/io/nosqlbench/engine/api/metrics/ActivityMetrics.java @@ -36,6 +36,11 @@ import java.util.regex.Pattern; public class ActivityMetrics { private final static Logger logger = LoggerFactory.getLogger(ActivityMetrics.class); + + public static final String HDRDIGITS_PARAM = "hdr_digits"; + public static final int DEFAULT_HDRDIGITS= 4; + private static int _HDRDIGITS = DEFAULT_HDRDIGITS; + private static MetricRegistry registry; public static MetricFilter METRIC_FILTER = (name, metric) -> { @@ -43,6 +48,15 @@ public class ActivityMetrics { }; private static List metricsCloseables = new ArrayList<>(); + + public static int getHdrDigits() { + return _HDRDIGITS; + } + + public static void setHdrDigits(int hdrDigits) { + ActivityMetrics._HDRDIGITS = hdrDigits; + } + private ActivityMetrics() { } @@ -88,6 +102,10 @@ public class ActivityMetrics { } /** *

Create a timer associated with an activity.

+ * + *

If the provided ActivityDef contains a parameter "hdr_digits", then it will be used to set the number of + * significant digits on the histogram's precision.

+ * *

This method ensures that if multiple threads attempt to create the same-named metric on a given activity, * that only one of them succeeds.

* @@ -98,15 +116,25 @@ public class ActivityMetrics { public static Timer timer(ActivityDef activityDef, String name) { String fullMetricName = activityDef.getAlias() + "." + name; Timer registeredTimer = (Timer) register(activityDef, name, () -> - new NicerTimer(fullMetricName, new DeltaHdrHistogramReservoir(fullMetricName, 4))); + new NicerTimer(fullMetricName, + new DeltaHdrHistogramReservoir( + fullMetricName, + activityDef.getParams().getOptionalInteger(HDRDIGITS_PARAM).orElse(_HDRDIGITS) + ) + )); return registeredTimer; } /** - *

Create a histogram associated with an activity.

+ *

Create an HDR histogram associated with an activity.

+ * + *

If the provided ActivityDef contains a parameter "hdr_digits", then it will be used to set the number of + * significant digits on the histogram's precision.

+ * *

This method ensures that if multiple threads attempt to create the same-named metric on a given activity, * that only one of them succeeds.

* + * * @param activityDef an associated activity def * @param name a simple, descriptive name for the histogram * @return the histogram, perhaps a different one if it has already been registered @@ -114,7 +142,13 @@ public class ActivityMetrics { public static Histogram histogram(ActivityDef activityDef, String name) { String fullMetricName = activityDef.getAlias() + "." + name; return (Histogram) register(activityDef, name, () -> - new NicerHistogram(fullMetricName, new DeltaHdrHistogramReservoir(fullMetricName, 4))); + new NicerHistogram( + fullMetricName, + new DeltaHdrHistogramReservoir( + fullMetricName, + activityDef.getParams().getOptionalInteger(HDRDIGITS_PARAM).orElse(_HDRDIGITS) + ) + )); } /** diff --git a/engine-cli/pom.xml b/engine-cli/pom.xml index 341d8710c..4ed1e7e85 100644 --- a/engine-cli/pom.xml +++ b/engine-cli/pom.xml @@ -4,7 +4,7 @@ mvn-defaults io.nosqlbench - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT ../mvn-defaults @@ -23,7 +23,7 @@ io.nosqlbench engine-core - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT @@ -47,7 +47,7 @@ io.nosqlbench engine-docker - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT
diff --git a/engine-cli/src/main/java/io/nosqlbench/engine/cli/NBCLI.java b/engine-cli/src/main/java/io/nosqlbench/engine/cli/NBCLI.java index c839cc819..eb13436d9 100644 --- a/engine-cli/src/main/java/io/nosqlbench/engine/cli/NBCLI.java +++ b/engine-cli/src/main/java/io/nosqlbench/engine/cli/NBCLI.java @@ -81,6 +81,8 @@ public class NBCLI { ConsoleLogging.enableConsoleLogging(options.wantsConsoleLogLevel(), options.getConsoleLoggingPattern()); + ActivityMetrics.setHdrDigits(options.getHdrDigits()); + if (options.wantsBasicHelp()) { System.out.println(loadHelpFile("basic.md")); System.exit(0); diff --git a/engine-cli/src/main/java/io/nosqlbench/engine/cli/NBCLIOptions.java b/engine-cli/src/main/java/io/nosqlbench/engine/cli/NBCLIOptions.java index 4975bb95d..d99080f81 100644 --- a/engine-cli/src/main/java/io/nosqlbench/engine/cli/NBCLIOptions.java +++ b/engine-cli/src/main/java/io/nosqlbench/engine/cli/NBCLIOptions.java @@ -53,6 +53,7 @@ public class NBCLIOptions { private static final String WAIT_MILLIS = "waitmillis"; private static final String EXPORT_CYCLE_LOG = "--export-cycle-log"; private static final String IMPORT_CYCLE_LOG = "--import-cycle-log"; + private static final String HDR_DIGITS = "--hdr-digits"; // Execution Options @@ -127,7 +128,7 @@ public class NBCLIOptions { private final List wantsToIncludePaths = new ArrayList<>(); private Scenario.Engine engine = Scenario.Engine.Graalvm; private boolean graaljs_compat = false; - + private int hdr_digits = 4; public NBCLIOptions(String[] args) { parse(args); @@ -213,6 +214,10 @@ public class NBCLIOptions { arglist.removeFirst(); logsDirectory = readWordOrThrow(arglist, "a log directory"); break; + case HDR_DIGITS: + arglist.removeFirst(); + hdr_digits = Integer.parseInt(readWordOrThrow(arglist, "significant digits")); + break; case LOGS_MAX: arglist.removeFirst(); logsMax = Integer.parseInt(readWordOrThrow(arglist, "max logfiles to keep")); @@ -531,6 +536,10 @@ public class NBCLIOptions { // } + public int 
getHdrDigits() { + return hdr_digits; + } + public String getProgressSpec() { ProgressSpec spec = parseProgressSpec(this.progressSpec);// sanity check if (spec.indicatorMode == IndicatorMode.console diff --git a/engine-core/pom.xml b/engine-core/pom.xml index dd17e65f5..b7ca6eb6e 100644 --- a/engine-core/pom.xml +++ b/engine-core/pom.xml @@ -5,7 +5,7 @@ mvn-defaults io.nosqlbench - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT ../mvn-defaults @@ -28,7 +28,7 @@ io.nosqlbench engine-api - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT diff --git a/engine-core/src/main/java/io/nosqlbench/engine/core/script/Scenario.java b/engine-core/src/main/java/io/nosqlbench/engine/core/script/Scenario.java index 8dfde7b4b..95802df3f 100644 --- a/engine-core/src/main/java/io/nosqlbench/engine/core/script/Scenario.java +++ b/engine-core/src/main/java/io/nosqlbench/engine/core/script/Scenario.java @@ -154,6 +154,7 @@ public class Scenario implements Callable { scriptEngine.put("params", scenarioScriptParams); if (engine == Engine.Graalvm) { + // https://github.com/graalvm/graaljs/blob/master/docs/user/JavaInterop.md if (wantsGraaljsCompatMode) { scriptEngine.put("scenario", scenarioController); scriptEngine.put("metrics", new NashornMetricRegistryBindings(metricRegistry)); diff --git a/engine-core/src/test/java/io/nosqlbench/core/ActivityExecutorTest.java b/engine-core/src/test/java/io/nosqlbench/engine/core/ActivityExecutorTest.java similarity index 98% rename from engine-core/src/test/java/io/nosqlbench/core/ActivityExecutorTest.java rename to engine-core/src/test/java/io/nosqlbench/engine/core/ActivityExecutorTest.java index 79641fb66..daa6bed16 100644 --- a/engine-core/src/test/java/io/nosqlbench/core/ActivityExecutorTest.java +++ b/engine-core/src/test/java/io/nosqlbench/engine/core/ActivityExecutorTest.java @@ -1,4 +1,4 @@ -package io.nosqlbench.core; +package io.nosqlbench.engine.core; import io.nosqlbench.engine.api.activityapi.core.*; import 
io.nosqlbench.engine.api.activityapi.output.OutputDispenser; @@ -12,7 +12,6 @@ import io.nosqlbench.engine.api.activityimpl.input.CoreInputDispenser; import io.nosqlbench.engine.api.activityimpl.input.AtomicInput; import io.nosqlbench.engine.api.activityimpl.motor.CoreMotor; import io.nosqlbench.engine.api.activityimpl.motor.CoreMotorDispenser; -import io.nosqlbench.engine.core.ActivityExecutor; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.junit.Test; diff --git a/engine-core/src/test/java/io/nosqlbench/core/CoreMotorTest.java b/engine-core/src/test/java/io/nosqlbench/engine/core/CoreMotorTest.java similarity index 99% rename from engine-core/src/test/java/io/nosqlbench/core/CoreMotorTest.java rename to engine-core/src/test/java/io/nosqlbench/engine/core/CoreMotorTest.java index 7c88006b1..451af81e2 100644 --- a/engine-core/src/test/java/io/nosqlbench/core/CoreMotorTest.java +++ b/engine-core/src/test/java/io/nosqlbench/engine/core/CoreMotorTest.java @@ -1,4 +1,4 @@ -package io.nosqlbench.core; +package io.nosqlbench.engine.core; import io.nosqlbench.engine.api.activityapi.core.*; import io.nosqlbench.engine.core.fortesting.BlockingSegmentInput; diff --git a/engine-core/src/test/java/io/nosqlbench/core/ScenarioTest.java b/engine-core/src/test/java/io/nosqlbench/engine/core/ScenarioTest.java similarity index 97% rename from engine-core/src/test/java/io/nosqlbench/core/ScenarioTest.java rename to engine-core/src/test/java/io/nosqlbench/engine/core/ScenarioTest.java index 8fd07277c..7e3bb2484 100644 --- a/engine-core/src/test/java/io/nosqlbench/core/ScenarioTest.java +++ b/engine-core/src/test/java/io/nosqlbench/engine/core/ScenarioTest.java @@ -1,4 +1,4 @@ -package io.nosqlbench.core; +package io.nosqlbench.engine.core; import io.nosqlbench.engine.api.scripting.ScriptEnvBuffer; import io.nosqlbench.engine.core.script.Scenario; diff --git a/engine-core/src/test/java/io/nosqlbench/engine/core/experimental/CompletableTests.java 
b/engine-core/src/test/java/io/nosqlbench/engine/core/experimental/CompletableTests.java new file mode 100644 index 000000000..b187d63ff --- /dev/null +++ b/engine-core/src/test/java/io/nosqlbench/engine/core/experimental/CompletableTests.java @@ -0,0 +1,20 @@ +package io.nosqlbench.engine.core.experimental; + +import org.junit.Test; + +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CompletionStage; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; + +public class CompletableTests { + + @Test + public void testCompletionStages() { + CompletableFuture f = new CompletableFuture<>(); + ExecutorService executorService = Executors.newCachedThreadPool(); + CompletableFuture objectCompletableFuture = f.completeAsync(() -> "foo", executorService); + boolean bar = objectCompletableFuture.complete("bar"); + + } +} diff --git a/engine-docker/pom.xml b/engine-docker/pom.xml index c8ec031ce..b0282c36b 100644 --- a/engine-docker/pom.xml +++ b/engine-docker/pom.xml @@ -4,7 +4,7 @@ mvn-defaults io.nosqlbench - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT ../mvn-defaults @@ -65,7 +65,7 @@ io.nosqlbench engine-api - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT diff --git a/engine-docs/pom.xml b/engine-docs/pom.xml index 8d79bcbce..4cfbae01b 100644 --- a/engine-docs/pom.xml +++ b/engine-docs/pom.xml @@ -4,7 +4,7 @@ mvn-defaults io.nosqlbench - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT ../mvn-defaults @@ -28,7 +28,7 @@ io.nosqlbench docsys - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT diff --git a/engine-docs/src/main/resources/docs-for-nb/designing_workloads/10_named_scenarios.md b/engine-docs/src/main/resources/docs-for-nb/designing_workloads/10_named_scenarios.md index bf54490d7..302029b13 100644 --- a/engine-docs/src/main/resources/docs-for-nb/designing_workloads/10_named_scenarios.md +++ b/engine-docs/src/main/resources/docs-for-nb/designing_workloads/10_named_scenarios.md @@ -224,7 +224,7 @@ as any other parameter depending on the 
assignment operators as explained above. ### alias The `alias` parameter is, by default, set to the expanded name of WORKLOAD_SCENARIO_STEP, which means that each activity -within the scenario has a distinct and symoblic name. This is important for distinguishing metrics from one another +within the scenario has a distinct and symbolic name. This is important for distinguishing metrics from one another across workloads, named scenarios, and steps within a named scenario. The above words are interpolated into the alias as follows: diff --git a/engine-docs/src/main/resources/docs-for-nb/reference/activity_parameters.md b/engine-docs/src/main/resources/docs-for-nb/reference/activity_parameters.md index 473ec5002..d51de83d7 100644 --- a/engine-docs/src/main/resources/docs-for-nb/reference/activity_parameters.md +++ b/engine-docs/src/main/resources/docs-for-nb/reference/activity_parameters.md @@ -360,9 +360,21 @@ In detail, the rendering appears as `0.0(A), 0.0(B), 0.0(C), 0.25(A), 0.5(A), 0.5(B), 0.75(A)`, which yields `A B C A A B A` as the op sequence. -This sequencer is most useful when you want a stable ordering of -operation from a rich mix of statement types, where each operations is -spaced as evenly as possible over time, and where it is not important to -control the cycle-by-cycle sequencing of statements. +This sequencer is most useful when you want a stable ordering of operation from a rich mix of statement types, where +each operations is spaced as evenly as possible over time, and where it is not important to control the cycle-by-cycle +sequencing of statements. +## hdr_digits +- `hdr_digits=3` +- _default_: `4` +- _required_: no +- _dynamic_: no + +This parameter determines the number of significant digits used in all HDR histograms for metrics collected from this +activity. The default of 4 allows 4 significant digits, which means *up to* 10000 distinct histogram buckets per named +metric, per histogram interval. 
This does not mean that there _will be_ 10000 distinct buckets, but it means there could +be if there is significant volume and variety in the measurements. + +If you are running a scenario that creates many activities, then you can set `hdr_digits=1` on some of them to save +client resources. diff --git a/engine-docs/src/main/resources/docs-for-nb/reference/command_line.md b/engine-docs/src/main/resources/docs-for-nb/reference/command_line.md index 14c612463..8da6d8904 100644 --- a/engine-docs/src/main/resources/docs-for-nb/reference/command_line.md +++ b/engine-docs/src/main/resources/docs-for-nb/reference/command_line.md @@ -153,6 +153,17 @@ created for this name. --session-name +If you want to control the number of significant digits in all of the HDR metrics, including histograms and timers, then +you can do so this way: + + --hdr-digits 4 + +The default is 4 digits, which creates 10000 equisized histogram buckets for every named metric in every reporting +interval. For longer running tests or for tests which do not require this level of precision in metrics, you can set this +down to 3 or 2. Note that this only sets the global default. Each activity can also override this value with the +hdr_digits parameter.
+ + Enlist engineblock to stand up your metrics infrastructure using a local docker runtime: --docker-metrics diff --git a/engine-extensions/pom.xml b/engine-extensions/pom.xml index 99a888fd8..61578960e 100644 --- a/engine-extensions/pom.xml +++ b/engine-extensions/pom.xml @@ -4,7 +4,7 @@ mvn-defaults io.nosqlbench - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT ../mvn-defaults @@ -22,7 +22,7 @@ io.nosqlbench engine-api - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT diff --git a/engine-extensions/src/main/java/io/nosqlbench/engine/extensions/http/HttpPlugin.java b/engine-extensions/src/main/java/io/nosqlbench/engine/extensions/http/HttpPlugin.java new file mode 100644 index 000000000..5b8b13c18 --- /dev/null +++ b/engine-extensions/src/main/java/io/nosqlbench/engine/extensions/http/HttpPlugin.java @@ -0,0 +1,59 @@ +package io.nosqlbench.engine.extensions.http; + +import java.io.IOException; +import java.net.URI; +import java.net.http.HttpClient; +import java.net.http.HttpRequest; +import java.net.http.HttpResponse; + +public class HttpPlugin { + private HttpClient client = HttpClient.newHttpClient(); + + public HttpResponse get(String url) throws IOException, InterruptedException { + HttpRequest.Builder builder = HttpRequest.newBuilder(); + URI uri = URI.create(url); + HttpRequest request = builder + .uri(uri) + .build(); + + HttpResponse response = client.send(request, + HttpResponse.BodyHandlers.ofString()); + + return response; + } + + public HttpResponse post(String url) throws IOException, InterruptedException { + return post(url, null, null); + } + + public HttpResponse post(String url, String data, String contentType) throws IOException, InterruptedException { + HttpRequest.Builder builder = HttpRequest.newBuilder(); + URI uri = URI.create(url); + + HttpRequest request; + if (data == null && contentType == null || contentType == null){ + request = builder + .uri(uri) + .POST(HttpRequest.BodyPublishers.noBody()) + .build(); + } else if (data == null) { + request = builder 
+ .uri(uri) + .header("Content-Type", contentType) + .POST(HttpRequest.BodyPublishers.noBody()) + .build(); + } else { + request = builder + .uri(uri) + .header("Content-Type", contentType) + .POST(HttpRequest.BodyPublishers.ofString(data)) + .build(); + } + + HttpResponse response = client.send(request, + HttpResponse.BodyHandlers.ofString()); + + return response; + } + +} diff --git a/engine-extensions/src/main/java/io/nosqlbench/engine/extensions/http/HttpPluginData.java b/engine-extensions/src/main/java/io/nosqlbench/engine/extensions/http/HttpPluginData.java new file mode 100644 index 000000000..bfaf28171 --- /dev/null +++ b/engine-extensions/src/main/java/io/nosqlbench/engine/extensions/http/HttpPluginData.java @@ -0,0 +1,27 @@ +package io.nosqlbench.engine.extensions.http; + +import com.codahale.metrics.MetricRegistry; +import io.nosqlbench.engine.api.extensions.ScriptingPluginInfo; +import io.nosqlbench.engine.extensions.optimizers.BobyqaOptimizerPlugin; +import io.nosqlbench.nb.annotations.Service; +import org.slf4j.Logger; + +import javax.script.ScriptContext; + +@Service(ScriptingPluginInfo.class) +public class HttpPluginData implements ScriptingPluginInfo { + @Override + public String getDescription() { + return "use http get and post in scripts"; + } + + @Override + public HttpPlugin getExtensionObject(Logger logger, MetricRegistry metricRegistry, ScriptContext scriptContext) { + return new HttpPlugin(); + } + + @Override + public String getBaseVariableName() { + return "http"; + } +} diff --git a/mvn-defaults/pom.xml b/mvn-defaults/pom.xml index 687a9d4fc..cb530218e 100644 --- a/mvn-defaults/pom.xml +++ b/mvn-defaults/pom.xml @@ -3,7 +3,7 @@ io.nosqlbench mvn-defaults - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT pom diff --git a/nb-annotations/pom.xml b/nb-annotations/pom.xml index cb273d857..0a139d7ad 100644 --- a/nb-annotations/pom.xml +++ b/nb-annotations/pom.xml @@ -5,7 +5,7 @@ mvn-defaults io.nosqlbench - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT 
../mvn-defaults diff --git a/nb-api/pom.xml b/nb-api/pom.xml index b4b015e2f..200cacfd1 100644 --- a/nb-api/pom.xml +++ b/nb-api/pom.xml @@ -5,7 +5,7 @@ mvn-defaults io.nosqlbench - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT ../mvn-defaults @@ -31,7 +31,7 @@ io.nosqlbench nb-annotations - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT diff --git a/nb/Dockerfile b/nb/Dockerfile deleted file mode 100644 index 4c0c07e8d..000000000 --- a/nb/Dockerfile +++ /dev/null @@ -1,3 +0,0 @@ -FROM openjdk:13-alpine -COPY target/ target -ENTRYPOINT ["java","-jar", "/target/nb.jar"] diff --git a/nb/pom.xml b/nb/pom.xml index 916535ae3..82338994a 100644 --- a/nb/pom.xml +++ b/nb/pom.xml @@ -5,7 +5,7 @@ mvn-defaults io.nosqlbench - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT ../mvn-defaults @@ -24,31 +24,36 @@ io.nosqlbench driver-web - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT + + io.nosqlbench + driver-kafka + 3.12.119-SNAPSHOT + io.nosqlbench engine-cli - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT io.nosqlbench engine-docs - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT io.nosqlbench engine-core - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT io.nosqlbench engine-extensions - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT @@ -60,39 +65,44 @@ io.nosqlbench driver-stdout - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT io.nosqlbench driver-diag - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT io.nosqlbench driver-tcp - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT io.nosqlbench driver-http - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT io.nosqlbench driver-cql-shaded - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT io.nosqlbench driver-cqlverify - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT + + io.nosqlbench + driver-mongodb + 3.12.119-SNAPSHOT + @@ -240,6 +250,19 @@ + + with-mongodb + + true + + + + io.nosqlbench + driver-mongodb + 3.12.119-SNAPSHOT + + + build-nb-appimage diff --git a/nb/src/main/resources/examples/bindings-probcurves.yaml b/nb/src/main/resources/examples/bindings-probcurves.yaml index 62f48c3ee..b9d6d1694 100644 --- 
a/nb/src/main/resources/examples/bindings-probcurves.yaml +++ b/nb/src/main/resources/examples/bindings-probcurves.yaml @@ -4,7 +4,7 @@ description: | scenarios: default: - readout1: run driver===stdout format===readout cycles=1 + readout1: run driver===stdout format=readout cycles=1 bindings: cycle: Identity() diff --git a/pom.xml b/pom.xml index 9b39aab75..8a85a109a 100644 --- a/pom.xml +++ b/pom.xml @@ -7,7 +7,7 @@ mvn-defaults io.nosqlbench - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT mvn-defaults @@ -43,6 +43,8 @@ driver-cql-shaded driver-cqlverify driver-web + driver-kafka + driver-mongodb @@ -68,6 +70,16 @@ driver-cqld4 + + with-mongodb + + true + + + driver-mongodb + + + diff --git a/virtdata-api/pom.xml b/virtdata-api/pom.xml index 7f9872a6c..7c831be81 100644 --- a/virtdata-api/pom.xml +++ b/virtdata-api/pom.xml @@ -7,7 +7,7 @@ io.nosqlbench mvn-defaults - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT ../mvn-defaults @@ -23,14 +23,14 @@ io.nosqlbench - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT nb-api io.nosqlbench virtdata-lang - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT diff --git a/virtdata-api/src/main/java/io/nosqlbench/virtdata/core/bindings/VirtDataComposer.java b/virtdata-api/src/main/java/io/nosqlbench/virtdata/core/bindings/VirtDataComposer.java index 6375ef850..76ad8f45b 100644 --- a/virtdata-api/src/main/java/io/nosqlbench/virtdata/core/bindings/VirtDataComposer.java +++ b/virtdata-api/src/main/java/io/nosqlbench/virtdata/core/bindings/VirtDataComposer.java @@ -215,12 +215,15 @@ public class VirtDataComposer { } Object[][] combinations = new Object[modulo][]; + for (int row = 0; row < combinations.length; row++) { Object[] combination = new Object[allargs.length]; int number = row; for (int pos = 0; pos < combination.length; pos++) { - int selector = (int) (row / modulos[pos]); - combination[pos] = allargs[pos][selector]; + int selector = (int) (number / modulos[pos]); + Object[] allargspos = allargs[pos]; + Object objectatpos = allargspos[selector]; + 
combination[pos] = objectatpos; number %= modulos[pos]; } combinations[row] = combination; diff --git a/virtdata-lang/pom.xml b/virtdata-lang/pom.xml index aab957b0d..352a30d99 100644 --- a/virtdata-lang/pom.xml +++ b/virtdata-lang/pom.xml @@ -7,7 +7,7 @@ mvn-defaults io.nosqlbench - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT ../mvn-defaults diff --git a/virtdata-lib-basics/pom.xml b/virtdata-lib-basics/pom.xml index 4b5aa0fef..8fbe14030 100644 --- a/virtdata-lib-basics/pom.xml +++ b/virtdata-lib-basics/pom.xml @@ -7,7 +7,7 @@ mvn-defaults io.nosqlbench - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT ../mvn-defaults @@ -20,7 +20,7 @@ io.nosqlbench virtdata-api - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT diff --git a/virtdata-lib-basics/src/main/java/io/nosqlbench/virtdata/library/basics/shared/distributions/CSVFrequencySampler.java b/virtdata-lib-basics/src/main/java/io/nosqlbench/virtdata/library/basics/shared/distributions/CSVFrequencySampler.java index 0f1b2da8a..7fcd8e31e 100644 --- a/virtdata-lib-basics/src/main/java/io/nosqlbench/virtdata/library/basics/shared/distributions/CSVFrequencySampler.java +++ b/virtdata-lib-basics/src/main/java/io/nosqlbench/virtdata/library/basics/shared/distributions/CSVFrequencySampler.java @@ -49,12 +49,9 @@ import java.util.function.LongFunction; @ThreadSafeMapper public class CSVFrequencySampler implements LongFunction { - private final String filename; - private final String columnName; - private final String[] lines; private final AliasSamplerDoubleInt sampler; - private Hash hash; + private final Hash hash; /** * Create a sampler of strings from the given CSV file. 
The CSV file must have plain CSV headers @@ -64,8 +61,7 @@ public class CSVFrequencySampler implements LongFunction { */ @Example({"CSVFrequencySampler('values.csv','modelno')","Read values.csv, count the frequency of values in 'modelno' column, and sample from this column proportionally"}) public CSVFrequencySampler(String filename, String columnName) { - this.filename = filename; - this.columnName = columnName; + String filename1 = filename; this.hash=new Hash(); @@ -86,7 +82,7 @@ public class CSVFrequencySampler implements LongFunction { } int i = 0; for (String value : values) { - frequencies.add(new EvProbD(i++,Double.valueOf(freq.getCount(value)))); + frequencies.add(new EvProbD(i++, (double) freq.getCount(value))); } sampler = new AliasSamplerDoubleInt(frequencies); lines = values.toArray(new String[0]); diff --git a/virtdata-lib-curves4/pom.xml b/virtdata-lib-curves4/pom.xml index bd1018bb3..710145f28 100644 --- a/virtdata-lib-curves4/pom.xml +++ b/virtdata-lib-curves4/pom.xml @@ -4,7 +4,7 @@ mvn-defaults io.nosqlbench - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT ../mvn-defaults @@ -22,13 +22,13 @@ io.nosqlbench virtdata-api - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT io.nosqlbench virtdata-lib-basics - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT diff --git a/virtdata-lib-random/pom.xml b/virtdata-lib-random/pom.xml index 39a993654..9c9e0d752 100644 --- a/virtdata-lib-random/pom.xml +++ b/virtdata-lib-random/pom.xml @@ -7,7 +7,7 @@ mvn-defaults io.nosqlbench - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT ../mvn-defaults @@ -20,13 +20,13 @@ io.nosqlbench virtdata-api - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT io.nosqlbench virtdata-lib-basics - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT diff --git a/virtdata-lib-realer/pom.xml b/virtdata-lib-realer/pom.xml index a54c45690..2a781cfce 100644 --- a/virtdata-lib-realer/pom.xml +++ b/virtdata-lib-realer/pom.xml @@ -4,7 +4,7 @@ mvn-defaults io.nosqlbench - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT ../mvn-defaults @@ -24,7 +24,7 @@ io.nosqlbench 
virtdata-lib-basics - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT diff --git a/virtdata-realdata/pom.xml b/virtdata-realdata/pom.xml index 2e9dff3eb..c102dca97 100644 --- a/virtdata-realdata/pom.xml +++ b/virtdata-realdata/pom.xml @@ -7,7 +7,7 @@ mvn-defaults io.nosqlbench - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT ../mvn-defaults @@ -18,7 +18,7 @@ io.nosqlbench virtdata-api - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT diff --git a/virtdata-userlibs/pom.xml b/virtdata-userlibs/pom.xml index 6d14a38fd..a6c5dc8e1 100644 --- a/virtdata-userlibs/pom.xml +++ b/virtdata-userlibs/pom.xml @@ -4,7 +4,7 @@ mvn-defaults io.nosqlbench - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT ../mvn-defaults @@ -17,32 +17,32 @@ io.nosqlbench virtdata-realdata - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT io.nosqlbench virtdata-lib-realer - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT io.nosqlbench virtdata-api - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT io.nosqlbench virtdata-lib-random - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT io.nosqlbench - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT virtdata-lib-basics io.nosqlbench - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT virtdata-lib-curves4 @@ -50,7 +50,7 @@ io.nosqlbench docsys - 3.12.107-SNAPSHOT + 3.12.119-SNAPSHOT diff --git a/virtdata-userlibs/src/main/java/io/nosqlbench/virtdata/userlibs/apps/docsapp/VirtDataGenDocsApp.java b/virtdata-userlibs/src/main/java/io/nosqlbench/virtdata/userlibs/apps/docsapp/VirtDataGenDocsApp.java index 1d71a1a61..3658c8f36 100644 --- a/virtdata-userlibs/src/main/java/io/nosqlbench/virtdata/userlibs/apps/docsapp/VirtDataGenDocsApp.java +++ b/virtdata-userlibs/src/main/java/io/nosqlbench/virtdata/userlibs/apps/docsapp/VirtDataGenDocsApp.java @@ -116,7 +116,8 @@ public class VirtDataGenDocsApp implements Runnable { Gson gson = new GsonBuilder().setPrettyPrinting().create(); writer.append(gson.toJson(docsForFuncName)); } else if (format.equals(FORMAT_MARKDOWN)) { - writer.append(docsForFuncName.asMarkdown()); + String markdown = docsForFuncName.asMarkdown(); + 
writer.append(markdown); } } } @@ -165,10 +166,12 @@ public class VirtDataGenDocsApp implements Runnable { FDoc docsinfo = new FDoc(); List allDocs = VirtDataDocs.getAllDocs(); for (DocFuncData docFuncData : allDocs) { - FDocFunc FDocFunc = new FDocFunc(docFuncData); - for (Category categoryName : FDocFunc.getCategories()) { + FDocFunc fDocFunc = new FDocFunc(docFuncData); + Set categories = + fDocFunc.getCategories().size()==0 ? Set.of(Category.general) : fDocFunc.getCategories(); + for (Category categoryName : categories) { FDocCat fDocCat = docsinfo.addCategory(categoryName.toString()); - fDocCat.addFunctionDoc(FDocFunc); + fDocCat.addFunctionDoc(fDocFunc); } } return docsinfo; diff --git a/virtdata-userlibs/src/main/java/io/nosqlbench/virtdata/userlibs/apps/docsapp/fdocs/FDocCat.java b/virtdata-userlibs/src/main/java/io/nosqlbench/virtdata/userlibs/apps/docsapp/fdocs/FDocCat.java index 0f8e151a5..57430afb3 100644 --- a/virtdata-userlibs/src/main/java/io/nosqlbench/virtdata/userlibs/apps/docsapp/fdocs/FDocCat.java +++ b/virtdata-userlibs/src/main/java/io/nosqlbench/virtdata/userlibs/apps/docsapp/fdocs/FDocCat.java @@ -30,6 +30,6 @@ public class FDocCat implements Iterable { public Iterator iterator() { ArrayList fdocs = new ArrayList<>(docsByFuncName.values()); fdocs.sort(Comparator.comparing(FDocFuncs::getFunctionName)); - return docsByFuncName.values().iterator(); + return fdocs.iterator(); } } diff --git a/virtdata-userlibs/src/main/java/io/nosqlbench/virtdata/userlibs/apps/docsapp/fdocs/FDocFuncs.java b/virtdata-userlibs/src/main/java/io/nosqlbench/virtdata/userlibs/apps/docsapp/fdocs/FDocFuncs.java index dc12eafc0..4a3474a99 100644 --- a/virtdata-userlibs/src/main/java/io/nosqlbench/virtdata/userlibs/apps/docsapp/fdocs/FDocFuncs.java +++ b/virtdata-userlibs/src/main/java/io/nosqlbench/virtdata/userlibs/apps/docsapp/fdocs/FDocFuncs.java @@ -71,4 +71,12 @@ public class FDocFuncs implements Iterable { .replaceAll("java.net.","") .replaceAll("java.io.",""); } + 
+ @Override + public String toString() { + return "FDocFuncs{" + + "functionsByPackage=" + functionsByPackage + + ", functionName='" + functionName + '\'' + + '}'; + } } diff --git a/virtdata-userlibs/src/test/java/io/virtdata/IntegratedAliasMethodTests.java b/virtdata-userlibs/src/test/java/io/virtdata/IntegratedAliasMethodTests.java index 124703ddf..b785af536 100644 --- a/virtdata-userlibs/src/test/java/io/virtdata/IntegratedAliasMethodTests.java +++ b/virtdata-userlibs/src/test/java/io/virtdata/IntegratedAliasMethodTests.java @@ -20,7 +20,7 @@ public class IntegratedAliasMethodTests { public void testCSVFrequencySampler() { CSVFrequencySampler names= new CSVFrequencySampler("data/countries", "COUNTRY_CODE" ); String n = names.apply(23); - assertThat(n).isEqualTo("CZ"); + assertThat(n).isEqualTo("TK"); } @Test @@ -31,6 +31,6 @@ public class IntegratedAliasMethodTests { ',' ); String n = names.apply(23); - assertThat(n).isEqualTo("CZ"); + assertThat(n).isEqualTo("TK"); } }