Mirror of https://github.com/nosqlbench/nosqlbench.git, synced 2025-02-25 18:55:28 -06:00

Commit 666279decd: Merge branch 'main' into driver-api

Conflicts:
  RELEASENOTES.md
  driver-cql-shaded/pom.xml
  driver-cqld3-shaded/pom.xml
  driver-diag/pom.xml
  driver-dsegraph-shaded/pom.xml
  driver-http/pom.xml
  driver-jdbc/pom.xml
  driver-jms/pom.xml
  driver-jmx/pom.xml
  driver-kafka/pom.xml
  driver-mongodb/pom.xml
  driver-pulsar/pom.xml
  driver-stdout/pom.xml
  driver-tcp/pom.xml
  driver-web/pom.xml
  engine-api/src/test/java/io/nosqlbench/engine/api/activityapi/ratelimits/TestRateLimiterPerf1E8.java
  mvn-defaults/pom.xml
  nb-api/pom.xml
  nb/pom.xml
DOWNLOADS.md (16 lines changed)

@@ -4,29 +4,21 @@

The latest release of NoSQLBench is always available from github releases.

- download [the latest release of nb](https://github.com/nosqlbench/nosqlbench/releases/latest/download/nb), a linux binary
- To download it with curl, use `curl -L -O https://github.com/nosqlbench/nosqlbench/releases/latest/download/nb`.
- (be sure to `chmod +x nb` once you download it)
- download [the latest release of nb.jar](https://github.com/nosqlbench/nosqlbench/releases/latest/download/nb.jar), a single-jar application.
- This requires java 15 or later, make sure your `java -version` command says that you are on Java 15 or later.
- To download it with curl, use `curl -L -O https://github.com/nosqlbench/nosqlbench/releases/latest/download/nb.jar`.

## Docker

You can use a live docker image for the latest nosqlbench.

1. run `docker pull nosqlbench/nosqlbench`
2. docserver `docker run -p 12345:12345 --rm --name nb-docs nosqlbench/nosqlbench docserver http://0.0.0.0:12345`
3. Any other command can be run against your nosqlbench docker images using this form.

Links to docker images:
Dockerfile

@@ -1,4 +1,4 @@
- FROM openjdk:15-alpine
+ FROM adoptopenjdk/openjdk15:alpine-slim
  RUN apk --no-cache add curl

  COPY nb/target/nb.jar nb.jar
README.md (34 lines changed)

@@ -85,19 +85,27 @@ available, but more work is needed to support them fully. Here is what is supported

  ## Thanks

- <table cellspacing="0" cellpadding="0" align="left">
+ <table cellspacing="1" cellpadding="1" style="border: 0px" align="left">
  <tr>
- <td><a href="https://datastax.com" target="_blank"><img src="https://www.datastax.com/sites/default/files/2020-12/datastax-logotype-positive.png" alt="DataStax" width="250"/></a></td>
+ <td width="20%"><a href="https://datastax.com" target="_blank"><img src="https://www.datastax.com/sites/default/files/2020-12/datastax-logotype-positive.png" alt="DataStax" width="250"/></a></td>
+ <td>This project is sponsored by <a href="https://www.datastax.com">DataStax</a> -- The Open,
+ Multi-Cloud Stack for Modern Data Apps built on Apache Cassandra™, Kubernetes *Based*, Developer *Ready* &
+ Cloud *Delivered* and designed from the ground up to run anywhere, on any cloud, in any datacenter, and in
+ every possible combination. DataStax delivers the ultimate hybrid and multi-cloud database.
+ </td>
+ </tr>
+ <tr>
+ <td><a href="https://www.yourkit.com/"><img src="https://www.yourkit.com/images/yklogo.png" alt="YourKit Logo"></a></td>
+ <td>This project uses tools provided by YourKit, LLC. YourKit supports open source projects with its full-featured Java
+ Profiler. YourKit, LLC is the creator of <a href="https://www.yourkit.com/java/profiler/">YourKit Java Profiler</a> and
+ <a href="https://www.yourkit.com/.net/profiler/">YourKit .NET Profiler</a>, innovative and intelligent tools for
+ profiling Java and .NET applications.
+ </td>
+ </tr>
+ <tr>
+ <td><a href="https://www.netlify.com"
+ ><img src="https://www.netlify.com/img/global/badges/netlify-dark.svg" alt="Deploys by Netlify"/></a></td>
+ <td>This site (soon to be) deployed by Netlify!</td>
  </tr>
  </table>

- This project is sponsored by [DataStax](https://www.datastax.com) -- The Open, Multi-Cloud Stack for Modern Data Apps built on
- Apache Cassandra™, Kubernetes *Based*, Developer *Ready* & Cloud *Delivered* and designed from the ground up to run anywhere,
- on any cloud, in any datacenter, and in every possible combination. DataStax delivers the ultimate hybrid and multi-cloud database.
-
- 
-
- This project uses tools provided by YourKit, LLC. YourKit supports open source projects with its full-featured Java
- Profiler. YourKit, LLC is the creator of <a href="https://www.yourkit.com/java/profiler/">YourKit Java Profiler</a> and
- <a href="https://www.yourkit.com/.net/profiler/">YourKit .NET Profiler</a>, innovative and intelligent tools for
- profiling Java and .NET applications.
@@ -1,3 +1,9 @@
+ - 3b674983 (HEAD -> main, origin/main) Merge pull request #362 from yabinmeng/main
+ - bf98d644 Merge branch 'nosqlbench:main' into main
+ - 793af965 Ignore abnormal message processing error for Shared and Key_Shared subscription type.
+ - f32caf1e Merge pull request #361 from ivansenic/ise-mongo-update
+ - e8b32584 add writeConcern to the mongodb crud workflows
+ - retrigger release
  - 663e3010 (HEAD -> main) correct test for current functional APIs
  - e981f808 (origin/main) General improvements and bug-fixes. Thanks to @XN137
  - 5b4a695f update ANTLR to 4.2.9 in all places
@@ -12,7 +12,7 @@
  <parent>
    <artifactId>mvn-defaults</artifactId>
    <groupId>io.nosqlbench</groupId>
-   <version>4.15.52-SNAPSHOT</version>
+   <version>4.15.58-SNAPSHOT</version>
    <relativePath>../mvn-defaults</relativePath>
  </parent>

@@ -21,7 +21,7 @@
  <dependency>
    <groupId>io.nosqlbench</groupId>
    <artifactId>nb-api</artifactId>
-   <version>4.15.52-SNAPSHOT</version>
+   <version>4.15.58-SNAPSHOT</version>
  </dependency>

  <dependency>

@@ -77,7 +77,7 @@
  <dependency>
    <groupId>org.apache.commons</groupId>
    <artifactId>commons-compress</artifactId>
-   <version>1.20</version>
+   <version>1.21</version>
  </dependency>

  <dependency>

@@ -117,7 +117,7 @@
  <dependency>
    <groupId>io.nosqlbench</groupId>
    <artifactId>virtdata-api</artifactId>
-   <version>4.15.52-SNAPSHOT</version>
+   <version>4.15.58-SNAPSHOT</version>
  </dependency>

  </dependencies>
@@ -5,7 +5,7 @@
  <parent>
    <groupId>io.nosqlbench</groupId>
    <artifactId>mvn-defaults</artifactId>
-   <version>4.15.52-SNAPSHOT</version>
+   <version>4.15.58-SNAPSHOT</version>
    <relativePath>../mvn-defaults</relativePath>
  </parent>

@@ -21,7 +21,7 @@
  <dependency>
    <groupId>io.nosqlbench</groupId>
    <artifactId>driver-jdbc</artifactId>
-   <version>4.15.52-SNAPSHOT</version>
+   <version>4.15.58-SNAPSHOT</version>
  </dependency>
  <dependency>
    <groupId>org.postgresql</groupId>
@@ -4,7 +4,7 @@
  <parent>
    <groupId>io.nosqlbench</groupId>
    <artifactId>mvn-defaults</artifactId>
-   <version>4.15.52-SNAPSHOT</version>
+   <version>4.15.58-SNAPSHOT</version>
    <relativePath>../mvn-defaults</relativePath>
  </parent>

@@ -20,10 +20,16 @@

  <!-- core dependencies -->

+ <dependency>
+   <groupId>io.nosqlbench</groupId>
+   <artifactId>engine-api</artifactId>
+   <version>4.15.58-SNAPSHOT</version>
+ </dependency>
+
  <dependency>
    <groupId>io.nosqlbench</groupId>
    <artifactId>drivers-api</artifactId>
-   <version>4.15.52-SNAPSHOT</version>
+   <version>4.15.58-SNAPSHOT</version>
  </dependency>

  <dependency>

@@ -77,14 +83,8 @@
    <artifactId>netty-codec-haproxy</artifactId>
    <version>4.1.54.Final</version>
  </dependency>
- <dependency>
-   <groupId>io.nosqlbench</groupId>
-   <artifactId>engine-api</artifactId>
-   <version>4.15.52-SNAPSHOT</version>
-   <scope>compile</scope>
- </dependency>

  <!-- <dependency>-->
  <!--   <groupId>io.netty</groupId>-->
  <!--   <artifactId>netty-transport-native-epoll</artifactId>-->
  <!--   <version>4.1.47.Final</version>-->
@@ -4,7 +4,7 @@
  <parent>
    <groupId>io.nosqlbench</groupId>
    <artifactId>mvn-defaults</artifactId>
-   <version>4.15.52-SNAPSHOT</version>
+   <version>4.15.58-SNAPSHOT</version>
    <relativePath>../mvn-defaults</relativePath>
  </parent>

@@ -21,10 +21,16 @@

  <!-- core dependencies -->

+ <dependency>
+   <groupId>io.nosqlbench</groupId>
+   <artifactId>engine-api</artifactId>
+   <version>4.15.58-SNAPSHOT</version>
+ </dependency>
+
  <dependency>
    <groupId>io.nosqlbench</groupId>
    <artifactId>drivers-api</artifactId>
-   <version>4.15.52-SNAPSHOT</version>
+   <version>4.15.58-SNAPSHOT</version>
  </dependency>

  <dependency>

@@ -67,12 +73,6 @@
    <artifactId>netty-codec-haproxy</artifactId>
    <version>4.1.54.Final</version>
  </dependency>
- <dependency>
-   <groupId>io.nosqlbench</groupId>
-   <artifactId>engine-api</artifactId>
-   <version>4.15.52-SNAPSHOT</version>
-   <scope>compile</scope>
- </dependency>

  <!-- <dependency>-->
  <!--   <groupId>io.netty</groupId>-->
@@ -4,7 +4,7 @@
  <parent>
    <groupId>io.nosqlbench</groupId>
    <artifactId>mvn-defaults</artifactId>
-   <version>4.15.52-SNAPSHOT</version>
+   <version>4.15.58-SNAPSHOT</version>
    <relativePath>../mvn-defaults</relativePath>
  </parent>

@@ -24,13 +24,13 @@
  <dependency>
    <groupId>io.nosqlbench</groupId>
    <artifactId>driver-cql-shaded</artifactId>
-   <version>4.15.52-SNAPSHOT</version>
+   <version>4.15.58-SNAPSHOT</version>
  </dependency>

  <dependency>
    <groupId>io.nosqlbench</groupId>
    <artifactId>drivers-api</artifactId>
-   <version>4.15.52-SNAPSHOT</version>
+   <version>4.15.58-SNAPSHOT</version>
  </dependency>

  </dependencies>
@@ -5,7 +5,7 @@
  <parent>
    <artifactId>mvn-defaults</artifactId>
    <groupId>io.nosqlbench</groupId>
-   <version>4.15.52-SNAPSHOT</version>
+   <version>4.15.58-SNAPSHOT</version>
    <relativePath>../mvn-defaults</relativePath>
  </parent>

@@ -20,15 +20,17 @@

  <dependency>
    <groupId>io.nosqlbench</groupId>
-   <artifactId>drivers-api</artifactId>
-   <version>4.15.52-SNAPSHOT</version>
+   <artifactId>engine-api</artifactId>
+   <version>4.15.58-SNAPSHOT</version>
+   <scope>compile</scope>
+ </dependency>
+
+ <dependency>
+   <groupId>io.nosqlbench</groupId>
+   <artifactId>drivers-api</artifactId>
+   <version>4.15.58-SNAPSHOT</version>
+   <scope>compile</scope>
  </dependency>
- <dependency>
-   <groupId>io.nosqlbench</groupId>
-   <artifactId>engine-api</artifactId>
-   <version>4.15.52-SNAPSHOT</version>
-   <scope>compile</scope>
- </dependency>

  </dependencies>
@@ -4,7 +4,7 @@
  <parent>
    <groupId>io.nosqlbench</groupId>
    <artifactId>mvn-defaults</artifactId>
-   <version>4.15.52-SNAPSHOT</version>
+   <version>4.15.58-SNAPSHOT</version>
    <relativePath>../mvn-defaults</relativePath>
  </parent>

@@ -20,10 +20,16 @@

  <!-- core dependencies -->

+ <dependency>
+   <groupId>io.nosqlbench</groupId>
+   <artifactId>engine-api</artifactId>
+   <version>4.15.58-SNAPSHOT</version>
+ </dependency>
+
  <dependency>
    <groupId>io.nosqlbench</groupId>
    <artifactId>drivers-api</artifactId>
-   <version>4.15.52-SNAPSHOT</version>
+   <version>4.15.58-SNAPSHOT</version>
  </dependency>

  <dependency>

@@ -117,12 +123,6 @@
    <artifactId>snakeyaml</artifactId>
    <version>1.23</version>
  </dependency>
- <dependency>
-   <groupId>io.nosqlbench</groupId>
-   <artifactId>engine-api</artifactId>
-   <version>4.15.52-SNAPSHOT</version>
-   <scope>compile</scope>
- </dependency>


  </dependencies>
@@ -4,7 +4,7 @@
  <parent>
    <artifactId>mvn-defaults</artifactId>
    <groupId>io.nosqlbench</groupId>
-   <version>4.15.52-SNAPSHOT</version>
+   <version>4.15.58-SNAPSHOT</version>
    <relativePath>../mvn-defaults</relativePath>
  </parent>

@@ -21,15 +21,17 @@

  <dependency>
    <groupId>io.nosqlbench</groupId>
-   <artifactId>drivers-api</artifactId>
-   <version>4.15.52-SNAPSHOT</version>
+   <artifactId>engine-api</artifactId>
+   <version>4.15.58-SNAPSHOT</version>
+   <scope>compile</scope>
+ </dependency>
+
+ <dependency>
+   <groupId>io.nosqlbench</groupId>
+   <artifactId>drivers-api</artifactId>
+   <version>4.15.58-SNAPSHOT</version>
+   <scope>compile</scope>
  </dependency>
- <dependency>
-   <groupId>io.nosqlbench</groupId>
-   <artifactId>engine-api</artifactId>
-   <version>4.15.52-SNAPSHOT</version>
-   <scope>compile</scope>
- </dependency>

  </dependencies>
HttpConsoleFormats.java

@@ -1,5 +1,7 @@
  package io.nosqlbench.activitytype.http;

+ import io.nosqlbench.activitytype.http.statuscodes.HttpStatusCodes;
+
  import java.io.PrintStream;
  import java.net.http.HttpClient;
  import java.net.http.HttpHeaders;

@@ -39,9 +41,9 @@ public class HttpConsoleFormats {
  private final static long _DATA10 = 1L << 6;
  private final static long _DATA100 = 1L << 7;
  private final static long _DATA1000 = 1L << 8;
+ private final static long _CODES = 1L << 9;

  enum Diag {

    headers(_HEADERS),
    stats(_STATS),
    data(_DATA),

@@ -51,8 +53,9 @@ public class HttpConsoleFormats {
    redirects(_REDIRECTS),
    requests(_REQUESTS),
    responses(_RESPONSES),
+   codes(_CODES),
    brief(_HEADERS | _STATS | _REQUESTS | _RESPONSES | _DATA10),
-   all(_HEADERS | _STATS | _REDIRECTS | _REQUESTS | _RESPONSES | _DATA);
+   all(_HEADERS | _STATS | _REDIRECTS | _REQUESTS | _RESPONSES | _DATA | _CODES);

    private final long mask;

@@ -191,6 +194,10 @@ public class HttpConsoleFormats {
    out.println(RESPONSE_CUE + (caption != null ? caption : " RESPONSE") +
      " status=" + response.statusCode() + " took=" + (nanos / 1_000_000) + "ms");

+   if (Diag.codes.includedIn(mask)) {
+     out.println(DETAIL_CUE + "STATUS: " + HttpStatusCodes.lookup(response.statusCode()));
+   }
+
    if (e != null) {
      out.println(MESSAGE_CUE + " EXCEPTION: " + e.getMessage());
    }

@@ -218,7 +225,9 @@ public class HttpConsoleFormats {

    String contentLenStr = response.headers().map().getOrDefault("content-length", List.of("0")).get(0);
    Long contentLength = Long.parseLong(contentLenStr);
-   if (contentLength == 0L) {
+   String body = response.body();
+
+   if (contentLength == 0L && (body==null||body.length()==0)) {
      return;
    }

@@ -230,22 +239,24 @@ public class HttpConsoleFormats {
    } else {
      String contentType = contentTypeList.get(0).toLowerCase();
      if (isPrintableContentType(contentType)) {
-       toprint = response.body();
+       if (body!=null) {
+         toprint = body;
+       }
        if (toprint == null) {
          toprint = "content-length was " + contentLength + ", but body was null";
        }

        if (Diag.data1000.includedIn(mask)) {
          if (toprint.length() > 1000) {
-           toprint = toprint.substring(0, 1000) + "\n--truncated at 1000 characters--\n";
+           toprint = toprint.substring(0, 1000) + "\n^^--truncated at 1000 characters--^^\n";
          }
        } else if (Diag.data100.includedIn(mask)) {
          if (toprint.length() > 100) {
-           toprint = toprint.substring(0, 100) + "\n--truncated at 100 characters--\n";
+           toprint = toprint.substring(0, 100) + "\n^^--truncated at 100 characters--^^\n";
          }
        } else if (Diag.data10.includedIn(mask)) {
          if (toprint.length() > 10) {
-           toprint = toprint.substring(0, 10) + "\n--truncated at 10 characters--\n";
+           toprint = toprint.substring(0, 10) + "\n^^--truncated at 10 characters--^^\n";
          }
        }
      } else {
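The `codes` option added above is just another bit in the same `long` mask that the other `Diag` flags use, and the composite `all` option now ORs it in. A minimal, self-contained sketch of that bitmask pattern (hypothetical flag names, not the project's class; only the mechanics mirror the diff):

```java
// Standalone illustration of the bitmask-enum pattern used by HttpConsoleFormats.Diag.
// Flag names here are hypothetical; only the mechanics (1L << n masks, OR-composition,
// inclusion test) mirror the diff above.
public class DiagMaskSketch {
    private static final long HEADERS = 1L << 0;
    private static final long STATS   = 1L << 1;
    private static final long CODES   = 1L << 9;   // the newly added flag in the diff

    enum Diag {
        headers(HEADERS),
        stats(STATS),
        codes(CODES),
        brief(HEADERS | STATS),                    // composite option: OR of individual flags
        all(HEADERS | STATS | CODES);

        private final long mask;
        Diag(long mask) { this.mask = mask; }

        // True when every bit of this option is present in the active mask.
        boolean includedIn(long activeMask) { return (activeMask & mask) == mask; }
    }

    public static void main(String[] args) {
        System.out.println(Diag.codes.includedIn(Diag.all.mask));   // true: 'all' now includes CODES
        System.out.println(Diag.codes.includedIn(Diag.brief.mask)); // false: 'brief' does not
    }
}
```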
HttpStatusCodes.java (new file)

@@ -0,0 +1,64 @@
package io.nosqlbench.activitytype.http.statuscodes;

import io.nosqlbench.nb.api.content.Content;
import io.nosqlbench.nb.api.content.NBIO;
import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVParser;
import org.apache.commons.csv.CSVRecord;

import java.io.IOException;
import java.io.InputStreamReader;

public class HttpStatusCodes {

    private static final IetfStatusCode[] codes = loadMap();

    private static IetfStatusCode[] loadMap() {
        Content<?> csv = NBIO.local().name("ietf-http-status-codes").extension("csv").one();
        InputStreamReader isr = new InputStreamReader(csv.getInputStream());
        IetfStatusCode[] codes = new IetfStatusCode[600];

        try {
            CSVParser parser = new CSVParser(isr, CSVFormat.DEFAULT.withFirstRecordAsHeader());
            for (CSVRecord record : parser) {
                String values = record.get("Value");
                String description = record.get("Description");
                String reference = record.get("Reference");

                int min, max=0;
                if (values.contains("-")) {
                    min=Integer.parseInt(values.substring(0,values.indexOf('-')));
                    max=Integer.parseInt(values.substring(values.indexOf('-')));
                } else {
                    min = max = Integer.parseInt(values);
                }
                HttpStatusRanges category = HttpStatusRanges.valueOfCode(min);
                IetfStatusCode code = new IetfStatusCode(values,description,reference,category);
                for (int value = min; value <=max ; value++) {
                    codes[value]=code;
                }
            }

        } catch (IOException e) {
            throw new RuntimeException(e);
        }

        return codes;
    }

    public static IetfStatusCode lookup(int code) {
        if (code<1||code>codes.length-1) {
            return UNKNOWN(code);
        }
        IetfStatusCode found = codes[code];
        if (found!=null) {
            return found;
        } else {
            return UNKNOWN(code);
        }
    }

    private static IetfStatusCode UNKNOWN(int code) {
        return new IetfStatusCode(String.valueOf(code),null, "[check https://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml]", HttpStatusRanges.valueOfCode(code));
    }
}
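Assuming the driver-http module and its bundled `ietf-http-status-codes.csv` resource are on the classpath, the new lookup can be exercised directly; the printed text below is a sketch of the expected shape, not a verbatim transcript:

```java
import io.nosqlbench.activitytype.http.statuscodes.HttpStatusCodes;
import io.nosqlbench.activitytype.http.statuscodes.IetfStatusCode;

// Sketch: resolves a numeric status code against the IANA registry CSV loaded by loadMap().
// Assumes the driver-http module (with its ietf-http-status-codes.csv resource) is on the classpath.
public class StatusLookupExample {
    public static void main(String[] args) {
        IetfStatusCode notFound = HttpStatusCodes.lookup(404);
        // toString() renders values, description, a rewritten IANA/RFC reference link, and the
        // range category, e.g. something like
        // "404, Not Found, [https://www.iana.org/go/rfc7231#section-6.5.4], CLIENT_ERROR (...)"
        System.out.println(notFound);

        // Out-of-range or unregistered codes fall back to the UNKNOWN placeholder entry.
        System.out.println(HttpStatusCodes.lookup(799));
    }
}
```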
HttpStatusRanges.java (new file)

@@ -0,0 +1,35 @@
package io.nosqlbench.activitytype.http.statuscodes;

enum HttpStatusRanges {
    Informational("INFORMATIONAL", 100, 199, "Request received, continuing process"),
    Success("SUCCESS",200, 299, "Request successfully received, understood, and accepted"),
    Redirection("REDIRECTION", 300, 399, "Further action must be taken in order to complete the request."),
    Client_Error("CLIENT_ERROR",400, 499, "The request contains bad syntax or cannot be fulfilled."),
    Server_Error("SERVER_ERROR",500, 599, "The server failed to fulfill an apparently valid request."),
    Unknown("UNKNOWN_ERROR",0,0,"This error type is not known based on IANA registered HTTP status codes.");

    private final String name;
    private final String description;
    private final int min;
    private final int max;

    HttpStatusRanges(String name, int min, int max, String description) {
        this.name = name;
        this.min = min;
        this.max = max;
        this.description = description;
    }

    public static HttpStatusRanges valueOfCode(int code) {
        for (HttpStatusRanges value : HttpStatusRanges.values()) {
            if (code >= value.min && code <= value.max) {
                return value;
            }
        }
        return HttpStatusRanges.Unknown;
    }

    public String toString() {
        return this.name + " (" + this.description + ")";
    }
}
IetfStatusCode.java (new file)

@@ -0,0 +1,47 @@
package io.nosqlbench.activitytype.http.statuscodes;

public class IetfStatusCode {
    private final String values;
    private final String description;
    private final String reference;
    private final HttpStatusRanges category;

    public IetfStatusCode(String values, String description, String reference, HttpStatusRanges category) {
        this.values = values;
        this.description = description;
        this.reference = reference;
        this.category = category;
    }

    public String getValues() {
        return values;
    }

    public String getDescription() {
        return description;
    }

    public String getReference() {
        return reference;
    }

    public HttpStatusRanges getCategory() {
        return category;
    }

    public String toString(int code) {
        if (values.equals(String.valueOf(code))) {
            return toString();
        } else {
            return code + ": " + this;
        }
    }

    public String toString() {
        String ref = reference
            .replaceFirst("\\[RFC(\\d+), Section (.+?)]","[https://www.iana.org/go/rfc$1#section-$2]") // https://www.rfc-editor.org/rfc/rfc7231.html#section-6.3.1
            .replaceFirst("\\[RFC(\\d+)(.*)]","[https://www.iana.org/go/rfc$1$2]"); // https://www.iana.org/go/rfc7231

        return (values!=null ? values : "") + (description!=null ? ", "+description :"") + ", " + ref + ", " + category.toString();
    }
}
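The two `replaceFirst` calls in `toString()` turn the registry's `[RFCxxxx, Section y]` citations into iana.org links. A self-contained check of just that rewriting, applied to sample reference strings:

```java
// Applies the same two rewrites as IetfStatusCode.toString() to sample reference strings.
public class ReferenceRewriteExample {
    public static void main(String[] args) {
        String sectioned = "[RFC7231, Section 6.5.4]";   // sample with a section reference
        String plain = "[RFC7231]";                      // sample without one

        String rewrittenSectioned = sectioned
            .replaceFirst("\\[RFC(\\d+), Section (.+?)]", "[https://www.iana.org/go/rfc$1#section-$2]");
        String rewrittenPlain = plain
            .replaceFirst("\\[RFC(\\d+)(.*)]", "[https://www.iana.org/go/rfc$1$2]");

        System.out.println(rewrittenSectioned); // [https://www.iana.org/go/rfc7231#section-6.5.4]
        System.out.println(rewrittenPlain);     // [https://www.iana.org/go/rfc7231]
    }
}
```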
@@ -7,9 +7,9 @@ description: |

  scenarios:
    default:
-     - run driver=cql tags==phase:schema threads==1 cycles==UNDEF
-     - run driver=http tags==phase:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
-     - run driver=http tags==phase:main cycles===TEMPLATE(main-cycles,10000000) threads=auto
+     schema: run driver=cql tags==phase:schema threads==1 cycles==UNDEF
+     rampup: run driver=http tags==phase:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
+     main: run driver=http tags==phase:main cycles===TEMPLATE(main-cycles,10000000) threads=auto
  bindings:
    # To enable an optional weighted set of hosts in place of a load balancer
    # Examples

@@ -20,9 +20,9 @@ bindings:
    # http request id
    request_id: ToHashedUUID(); ToString();

-   seq_key: Mod(<<keycount:1000000000>>); ToString() -> String
+   seq_key: Mod(<<keycount:10000000>>); ToString() -> String
    seq_value: Hash(); Mod(<<valuecount:1000000000>>); ToString() -> String
-   rw_key: <<keydist:Uniform(0,1000000000)->int>>; ToString() -> String
+   rw_key: <<keydist:Uniform(0,10000000)->int>>; ToString() -> String
    rw_value: Hash(); <<valdist:Uniform(0,1000000000)->int>>; ToString() -> String

  blocks:

@@ -42,6 +42,14 @@ blocks:
          }
        tags:
          name: create-keyspace
+   - drop-table: DELETE <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/schemas/keyspaces/<<keyspace:baselines>>/tables/<<table:keyvalue>>
+     Accept: "application/json"
+     X-Cassandra-Request-Id: "{request_id}"
+     X-Cassandra-Token: "<<auth_token:my_auth_token>>"
+     Content-Type: "application/json"
+     tags:
+       name: drop-table
+     ok-status: "[2-4][0-9][0-9]"
    - create-table: POST <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/schemas/keyspaces/<<keyspace:baselines>>/tables
      Accept: "application/json"
      X-Cassandra-Request-Id: "{request_id}"

@@ -120,7 +128,7 @@ blocks:
        phase: main
        type: read
      params:
-       ratio: 5
+       ratio: <<read_ratio:5>>
      statements:
      - main-select: GET <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/keyspaces/<<keyspace:baselines>>/<<table:keyvalue>>/{rw_key}
        Accept: "application/json"

@@ -129,12 +137,13 @@ blocks:
        Content-Type: "application/json"
        tags:
          name: main-select
+         ok-status: "[2-4][0-9][0-9]"
    - name: main-write
      tags:
        phase: main
        type: write
      params:
-       ratio: 5
+       ratio: <<write_ratio:5>>
      statements:
      - main-write: POST <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/keyspaces/<<keyspace:baselines>>/<<table:keyvalue>>
        Accept: "application/json"
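The `ok-status: "[2-4][0-9][0-9]"` entries added above widen what counts as a successful response to anything in the 2xx-4xx range (presumably so that, for example, the schema phase's DELETE of a not-yet-existing table does not fail the run), while 5xx still registers as an error. A quick check of that pattern, assuming plain regex matching against the status code string:

```java
import java.util.regex.Pattern;

// Checks which status codes the ok-status pattern from the workload accepts.
public class OkStatusPatternExample {
    public static void main(String[] args) {
        Pattern okStatus = Pattern.compile("[2-4][0-9][0-9]");
        for (String code : new String[]{"200", "201", "404", "409", "500", "503"}) {
            System.out.println(code + " accepted: " + okStatus.matcher(code).matches());
        }
        // 200, 201, 404, 409 -> true; 500, 503 -> false
    }
}
```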
New doc: Documents API CRUD Basic

@@ -0,0 +1,67 @@
---
title: Documents API CRUD Basic
weight: 2
---

## Description

The Documents API CRUD Basic workflow targets Stargate's Documents API using generated JSON documents.
The documents used share the same structure and are approximately half a kilobyte in size each:

* each document has 13 leaf values, with a maximum depth of 3
* there is at least one `string`, `boolean`, `number` and `null` leaf
* there is one array with `double` values and one with `string` values
* there is one empty array and one empty map

The example JSON looks like:

```json
{
  "user_id": "56fd76f6-081d-401a-85eb-b1d9e5bba058",
  "created_on": 1476743286,
  "gender": "F",
  "full_name": "Andrew Daniels",
  "married": true,
  "address": {
    "primary": {
      "cc": "IO",
      "city": "Okmulgee"
    },
    "secondary": {}
  },
  "coordinates": [
    64.65964627052323,
    -122.35334535072856
  ],
  "children": [],
  "friends": [
    "3df498b1-9568-4584-96fd-76f6081da01a"
  ],
  "debt": null
}
```

In contrast to other workflows, this one is not split into ramp-up and main phases.
Instead, there is only the main phase with 4 different load types (write, read, update and delete).

## Named Scenarios

### default

The default scenario for http-docsapi-crud-basic.yaml runs each type of the main phase sequentially: write, read, update and delete.
This means that setting cycles for each of the phases should be done using `write-cycles`, `read-cycles`, `update-cycles` and `delete-cycles`.
The default value for all 4 cycle variables is the number of documents to process (see [Workload Parameters](#workload-parameters)).

Note that error handling is set to `errors=timer,warn`, which means that in case of HTTP errors the scenario is not stopped.

## Workload Parameters

- `docscount` - the number of documents to process in each step of a scenario (default: `10_000_000`)

Note that if the number of documents is higher than `read-cycles`, you will experience misses, which will result in `HTTP 404` responses and smaller latencies.
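For orientation, here is a hedged sketch of the single write operation this workload drives, expressed with `java.net.http` (the same HTTP client API the http driver builds on). The endpoint shape and headers come from the workload YAML that follows; the host, port, token, document id and body values are placeholder assumptions:

```java
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

// Sketch of one Documents API write, mirroring the write-document statement in the workload below.
// my-stargate-host, port 8082, the auth token, the document id and the JSON body are placeholders.
public class DocsApiWriteSketch {
    public static void main(String[] args) throws Exception {
        String host = "my-stargate-host";          // <<stargate_host>>
        String token = "my_auth_token";            // <<auth_token>>
        String keyspace = "docs_crud_basic";       // <<keyspace:docs_crud_basic>>
        String collection = "docs_collection";     // <<table:docs_collection>>
        String docId = "0";                        // {seq_key}
        String body = "{\"user_id\":\"56fd76f6-081d-401a-85eb-b1d9e5bba058\",\"married\":true,\"debt\":null}";

        HttpRequest request = HttpRequest.newBuilder()
            .uri(URI.create("http://" + host + ":8082/v2/namespaces/" + keyspace
                + "/collections/" + collection + "/" + docId))
            .header("Accept", "application/json")
            .header("X-Cassandra-Request-Id", java.util.UUID.randomUUID().toString())
            .header("X-Cassandra-Token", token)
            .header("Content-Type", "application/json")
            .PUT(HttpRequest.BodyPublishers.ofString(body))
            .build();

        HttpResponse<String> response = HttpClient.newHttpClient()
            .send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.statusCode() + " " + response.body());
    }
}
```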
New workload: http-docsapi-crud-basic.yaml

@@ -0,0 +1,175 @@
# nb -v run driver=http yaml=http-docsapi-crud-basic tags=phase:schema stargate_host=my_stargate_host auth_token=$AUTH_TOKEN

description: |
  This workload emulates CRUD operations for the Stargate Documents API.
  It generates a simple JSON document to be used for writes and updates.
  Note that stargate_port should reflect the port where the Docs API is exposed (defaults to 8082).

scenarios:
  default:
    schema: run driver=http tags==phase:schema threads==1 cycles==UNDEF
    write: run driver=http tags==phase:main,type:write cycles===TEMPLATE(write-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
    read: run driver=http tags==phase:main,type:read cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
    update: run driver=http tags==phase:main,type:update cycles===TEMPLATE(update-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
    delete: run driver=http tags==phase:main,type:delete cycles===TEMPLATE(delete-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn

bindings:
  # To enable an optional weighted set of hosts in place of a load balancer
  # Examples
  #   single host: stargate_host=host1
  #   multiple hosts: stargate_host=host1,host2,host3
  #   multiple weighted hosts: stargate_host=host1:3,host2:7
  weighted_hosts: WeightedStrings('<<stargate_host:stargate>>')
  # http request id
  request_id: ToHashedUUID(); ToString();

  seq_key: Mod(<<docscount:10000000>>); ToString() -> String
  random_key: Uniform(0,<<docscount:10000000>>); ToString() -> String

  user_id: ToHashedUUID(); ToString() -> String
  created_on: Uniform(1262304000,1577836800) -> long
  gender: WeightedStrings('M:10;F:10;O:1')
  full_name: FullNames()
  married: ModuloToBoolean()
  city: Cities()
  country_code: CountryCodes()
  lat: Uniform(-180d, 180d)
  lng: Hash() -> long; Uniform(-180d, 180d)
  friend_id: Add(-1); ToHashedUUID(); ToString() -> String

blocks:
  - tags:
      phase: schema
    statements:
      - create-keyspace: POST <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/schemas/keyspaces
        Accept: "application/json"
        X-Cassandra-Request-Id: "{request_id}"
        X-Cassandra-Token: "<<auth_token:my_auth_token>>"
        Content-Type: "application/json"
        body: |
          {
            "name": "<<keyspace:docs_crud_basic>>",
            "replicas": <<rf:1>>
          }
        tags:
          name: create-keyspace

      - delete-docs-collection: DELETE <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/namespaces/<<keyspace:docs_crud_basic>>/collections/<<table:docs_collection>>
        Accept: "application/json"
        X-Cassandra-Request-Id: "{request_id}"
        X-Cassandra-Token: "<<auth_token:my_auth_token>>"
        tags:
          name: delete-table
        ok-status: "[2-4][0-9][0-9]"

      - create-docs-collection: POST <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/namespaces/<<keyspace:docs_crud_basic>>/collections
        Accept: "application/json"
        X-Cassandra-Request-Id: "{request_id}"
        X-Cassandra-Token: "<<auth_token:my_auth_token>>"
        Content-Type: "application/json"
        body: |
          {
            "name": "<<table:docs_collection>>"
          }
        tags:
          name: create-table

  - name: main-write
    tags:
      phase: main
      type: write
    statements:
      - write-document: PUT <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/namespaces/<<keyspace:docs_crud_basic>>/collections/<<table:docs_collection>>/{seq_key}
        Accept: "application/json"
        X-Cassandra-Request-Id: "{request_id}"
        X-Cassandra-Token: "<<auth_token:my_auth_token>>"
        Content-Type: "application/json"
        body: |
          {
            "user_id": "{user_id}",
            "created_on": {created_on},
            "gender": "{gender}",
            "full_name": "{full_name}",
            "married": {married},
            "address": {
              "primary": {
                "city": "{city}",
                "cc": "{country_code}"
              },
              "secondary": {}
            },
            "coordinates": [
              {lat},
              {lng}
            ],
            "children": [],
            "friends": [
              "{friend_id}"
            ],
            "debt": null
          }
        tags:
          name: write-document

  - name: main-read
    tags:
      phase: main
      type: read
    statements:
      - read-document: GET <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/namespaces/<<keyspace:docs_crud_basic>>/collections/<<table:docs_collection>>/{random_key}
        Accept: "application/json"
        X-Cassandra-Request-Id: "{request_id}"
        X-Cassandra-Token: "<<auth_token:my_auth_token>>"
        ok-status: "[2-4][0-9][0-9]"
        tags:
          name: read-document

  - name: main-update
    tags:
      phase: main
      type: update
    statements:
      - update-document: PUT <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/namespaces/<<keyspace:docs_crud_basic>>/collections/<<table:docs_collection>>/{random_key}
        Accept: "application/json"
        X-Cassandra-Request-Id: "{request_id}"
        X-Cassandra-Token: "<<auth_token:my_auth_token>>"
        Content-Type: "application/json"
        body: |
          {
            "user_id": "{user_id}",
            "created_on": {created_on},
            "gender": "{gender}",
            "full_name": "{full_name}",
            "married": {married},
            "address": {
              "primary": {
                "city": "{city}",
                "cc": "{country_code}"
              },
              "secondary": {}
            },
            "coordinates": [
              {lat},
              {lng}
            ],
            "children": [],
            "friends": [
              "{friend_id}"
            ],
            "debt": null
          }
        tags:
          name: update-document

  - name: main-delete
    tags:
      phase: main
      type: delete
    statements:
      - delete-document: DELETE <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/namespaces/<<keyspace:docs_crud_basic>>/collections/<<table:docs_collection>>/{seq_key}
        Accept: "application/json"
        X-Cassandra-Request-Id: "{request_id}"
        X-Cassandra-Token: "<<auth_token:my_auth_token>>"
        ok-status: "[2-4][0-9][0-9]"
        tags:
          name: delete-document
New doc: Documents API CRUD using an external Dataset

@@ -0,0 +1,44 @@
---
title: Documents API CRUD using an external Dataset
weight: 3
---

## Description

The Documents API CRUD Dataset workflow targets Stargate's Documents API using JSON documents from an external dataset.
The [dataset](#dataset) is mandatory and should contain one JSON document per row, to be used as the input for write and update operations.
This workflow is perfect for testing Stargate performance using your own JSON dataset or any other realistic dataset.

In contrast to other workflows, this one is not split into ramp-up and main phases.
Instead, there is only the main phase with 4 different load types (write, read, update and delete).

## Named Scenarios

### default

The default scenario for http-docsapi-crud-dataset.yaml runs each type of the main phase sequentially: write, read, update and delete.
This means that setting cycles for each of the phases should be done using `write-cycles`, `read-cycles`, `update-cycles` and `delete-cycles`.
The default value for all 4 cycle variables is the number of documents to process (see [Workload Parameters](#workload-parameters)).

Note that error handling is set to `errors=timer,warn`, which means that in case of HTTP errors the scenario is not stopped.

## Dataset

### JSON Documents

As explained above, in order to run the workflow a file containing JSON documents is needed.
If you don't have a dataset at hand, please have a look at [awesome-json-datasets](https://github.com/jdorfman/awesome-json-datasets).
You can use exposed public APIs to create a realistic dataset of your choice.

For example, you can easily create a dataset containing [Bitcoin unconfirmed transactions](https://gist.github.com/ivansenic/e280a89aba6420acb4f587d3779af774).

```bash
curl 'https://blockchain.info/unconfirmed-transactions?format=json&limit=5000' | jq -c '.txs | .[]' > blockchain-unconfirmed-transactions.json
```

The above command creates a dataset with the 5,000 latest unconfirmed transactions.

## Workload Parameters

- `docscount` - the number of documents to process in each step of a scenario (default: `10_000_000`)
- `dataset_file` - the file to read the JSON documents from (note that if the number of documents in the file is smaller than the `docscount` parameter, the documents will be reused)
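The `ModuloLineToString('<<dataset_file>>')` binding in the YAML below picks one line of the dataset per cycle and wraps around when the cycle count exceeds the number of lines, which is why the parameter note above says documents are reused. A rough illustration of that wrap-around selection (not the virtdata implementation; the file name is just the example dataset from this page):

```java
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;

// Illustrates line reuse: cycle numbers beyond the file length wrap around via modulo.
// "blockchain-unconfirmed-transactions.json" is the example dataset built in the doc above.
public class DatasetLineReuseSketch {
    public static void main(String[] args) throws IOException {
        List<String> lines = Files.readAllLines(Path.of("blockchain-unconfirmed-transactions.json"));

        long[] cycles = {0, 1, 4_999, 5_000, 12_345};   // with the doc's 5,000-line example, 5_000 maps back to line 0
        for (long cycle : cycles) {
            String doc = lines.get((int) (cycle % lines.size()));
            System.out.printf("cycle %,d -> line %,d (%d chars)%n",
                cycle, cycle % lines.size(), doc.length());
        }
    }
}
```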
New workload: http-docsapi-crud-dataset.yaml

@@ -0,0 +1,122 @@
# nb -v run driver=http yaml=http-docsapi-crud-dataset tags=phase:schema stargate_host=my_stargate_host auth_token=$AUTH_TOKEN dataset_file=path/to/data.json

description: |
  This workload emulates CRUD operations for the Stargate Documents API.
  It requires a data set file, where each line is a single JSON document to be used for writes and updates.
  Note that stargate_port should reflect the port where the Docs API is exposed (defaults to 8082).

scenarios:
  default:
    schema: run driver=http tags==phase:schema threads==1 cycles==UNDEF
    write: run driver=http tags==phase:main,type:write cycles===TEMPLATE(write-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
    read: run driver=http tags==phase:main,type:read cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
    update: run driver=http tags==phase:main,type:update cycles===TEMPLATE(update-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
    delete: run driver=http tags==phase:main,type:delete cycles===TEMPLATE(delete-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn

bindings:
  # To enable an optional weighted set of hosts in place of a load balancer
  # Examples
  #   single host: stargate_host=host1
  #   multiple hosts: stargate_host=host1,host2,host3
  #   multiple weighted hosts: stargate_host=host1:3,host2:7
  weighted_hosts: WeightedStrings('<<stargate_host:stargate>>')
  # http request id
  request_id: ToHashedUUID(); ToString();

  seq_key: Mod(<<docscount:10000000>>); ToString() -> String
  random_key: Uniform(0,<<docscount:10000000>>); ToString() -> String

blocks:
  - tags:
      phase: schema
    statements:
      - create-keyspace: POST <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/schemas/keyspaces
        Accept: "application/json"
        X-Cassandra-Request-Id: "{request_id}"
        X-Cassandra-Token: "<<auth_token:my_auth_token>>"
        Content-Type: "application/json"
        body: |
          {
            "name": "<<keyspace:docs_crud_dataset>>",
            "replicas": <<rf:1>>
          }
        tags:
          name: create-keyspace

      - delete-docs-collection: DELETE <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/namespaces/<<keyspace:docs_crud_dataset>>/collections/<<table:docs_collection>>
        Accept: "application/json"
        X-Cassandra-Request-Id: "{request_id}"
        X-Cassandra-Token: "<<auth_token:my_auth_token>>"
        tags:
          name: delete-table
        ok-status: "[2-4][0-9][0-9]"

      - create-docs-collection: POST <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/namespaces/<<keyspace:docs_crud_dataset>>/collections
        Accept: "application/json"
        X-Cassandra-Request-Id: "{request_id}"
        X-Cassandra-Token: "<<auth_token:my_auth_token>>"
        Content-Type: "application/json"
        body: |
          {
            "name": "<<table:docs_collection>>"
          }
        tags:
          name: create-table

  - name: main-write
    tags:
      phase: main
      type: write
    statements:
      - write-document: PUT <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/namespaces/<<keyspace:docs_crud_dataset>>/collections/<<table:docs_collection>>/{seq_key}
        Accept: "application/json"
        X-Cassandra-Request-Id: "{request_id}"
        X-Cassandra-Token: "<<auth_token:my_auth_token>>"
        Content-Type: "application/json"
        body: "{document_json}"
        tags:
          name: write-document
        bindings:
          document_json: ModuloLineToString('<<dataset_file>>');

  - name: main-read
    tags:
      phase: main
      type: read
    statements:
      - read-document: GET <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/namespaces/<<keyspace:docs_crud_dataset>>/collections/<<table:docs_collection>>/{random_key}
        Accept: "application/json"
        X-Cassandra-Request-Id: "{request_id}"
        X-Cassandra-Token: "<<auth_token:my_auth_token>>"
        ok-status: "[2-4][0-9][0-9]"
        tags:
          name: read-document

  - name: main-update
    tags:
      phase: main
      type: update
    statements:
      - update-document: PUT <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/namespaces/<<keyspace:docs_crud_dataset>>/collections/<<table:docs_collection>>/{random_key}
        Accept: "application/json"
        X-Cassandra-Request-Id: "{request_id}"
        X-Cassandra-Token: "<<auth_token:my_auth_token>>"
        Content-Type: "application/json"
        body: "{document_json}"
        tags:
          name: update-document
        bindings:
          document_json: ModuloLineToString('<<dataset_file>>');

  - name: main-delete
    tags:
      phase: main
      type: delete
    statements:
      - update-document: DELETE <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/namespaces/<<keyspace:docs_crud_dataset>>/collections/<<table:docs_collection>>/{seq_key}
        Accept: "application/json"
        X-Cassandra-Request-Id: "{request_id}"
        X-Cassandra-Token: "<<auth_token:my_auth_token>>"
        ok-status: "[2-4][0-9][0-9]"
        tags:
          name: delete-document
@@ -1,4 +1,4 @@
- # nb -v run driver=http yaml=http-docsapi-keyvalue tags=phase:schema host=my_stargate_host stargate_host=my_stargate_host auth_token=$AUTH_TOKEN
+ # nb -v run driver=http yaml=http-docsapi-keyvalue tags=phase:schema stargate_host=my_stargate_host auth_token=$AUTH_TOKEN

  description: |
    This workload emulates a key-value data model and access patterns.

@@ -10,9 +10,9 @@ description: |

  scenarios:
    default:
-     - run driver=http tags==phase:schema threads==1 cycles==UNDEF
-     - run driver=http tags==phase:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
-     - run driver=http tags==phase:main cycles===TEMPLATE(main-cycles,10000000) threads=auto
+     schema: run driver=http tags==phase:schema threads==1 cycles==UNDEF
+     rampup: run driver=http tags==phase:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
+     main: run driver=http tags==phase:main cycles===TEMPLATE(main-cycles,10000000) threads=auto
  bindings:
    # To enable an optional weighted set of hosts in place of a load balancer
    # Examples

@@ -23,10 +23,10 @@ bindings:
    # http request id
    request_id: ToHashedUUID(); ToString();

-   seq_key: Mod(<<keycount:1000000000>>); ToString() -> String
-   seq_value: Hash(); Mod(<<valuecount:1000000000>>); ToString() -> String
-   rw_key: <<keydist:Uniform(0,1000000000)->int>>; ToString() -> String
-   rw_value: Hash(); <<valdist:Uniform(0,1000000000)->int>>; ToString() -> String
+   seq_key: Mod(<<keycount:10000000>>); ToString() -> String
+   seq_value: Hash(); Mod(<<valuecount:10000000>>); ToString() -> String
+   rw_key: <<keydist:Uniform(0,<<keycount:10000000>>)->int>>; ToString() -> String
+   rw_value: Hash(); <<valdist:Uniform(0,<<keycount:10000000>>)->int>>; ToString() -> String

  blocks:
    - tags:

@@ -44,6 +44,13 @@ blocks:
          }
        tags:
          name: create-keyspace
+   - delete-docs-collection : DELETE <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/namespaces/<<keyspace:docs_keyvalue>>/collections/<<table:docs_collection>>
+     Accept: "application/json"
+     X-Cassandra-Request-Id: "{request_id}"
+     X-Cassandra-Token: "<<auth_token:my_auth_token>>"
+     tags:
+       name: delete-table
+     ok-status: "[2-4][0-9][0-9]"
    - create-docs-collection : POST <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/namespaces/<<keyspace:docs_keyvalue>>/collections
      Accept: "application/json"
      X-Cassandra-Request-Id: "{request_id}"

@@ -77,7 +84,7 @@ blocks:
        phase: main
        type: read
      params:
-       ratio: <<read_ratio:1>>
+       ratio: <<read_ratio:5>>
      statements:
      - main-select: GET <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/namespaces/<<keyspace:docs_keyvalue>>/collections/<<table:docs_collection>>/{rw_key}
        Accept: "application/json"

@@ -85,13 +92,14 @@ blocks:
        X-Cassandra-Token: "<<auth_token:my_auth_token>>"
        tags:
          name: main-select
+         ok-status: "[2-4][0-9][0-9]"

    - name: main-write
      tags:
        phase: main
        type: write
      params:
-       ratio: <<write_ratio:9>>
+       ratio: <<write_ratio:5>>
      statements:
      - main-write: PUT <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/namespaces/<<keyspace:docs_keyvalue>>/collections/<<table:docs_collection>>/{rw_key}
        Accept: "application/json"
@ -8,9 +8,9 @@ description: |
|
|||||||
|
|
||||||
scenarios:
|
scenarios:
|
||||||
default:
|
default:
|
||||||
- run driver=cql tags==phase:schema threads==1 cycles==UNDEF
|
schema: run driver=cql tags==phase:schema threads==1 cycles==UNDEF
|
||||||
- run driver=http tags==phase:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
|
rampup: run driver=http tags==phase:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
|
||||||
- run driver=http tags==phase:main cycles===TEMPLATE(main-cycles,10000000) threads=auto
|
main: run driver=http tags==phase:main cycles===TEMPLATE(main-cycles,10000000) threads=auto
|
||||||
bindings:
|
bindings:
|
||||||
# To enable an optional weighted set of hosts in place of a load balancer
|
# To enable an optional weighted set of hosts in place of a load balancer
|
||||||
# Examples
|
# Examples
|
||||||
@ -21,9 +21,9 @@ bindings:
|
|||||||
# http request id
|
# http request id
|
||||||
request_id: ToHashedUUID(); ToString();
|
request_id: ToHashedUUID(); ToString();
|
||||||
|
|
||||||
seq_key: Mod(<<keycount:1000000000>>); ToString() -> String
|
seq_key: Mod(<<keycount:10000000>>); ToString() -> String
|
||||||
seq_value: Hash(); Mod(<<valuecount:1000000000>>); ToString() -> String
|
seq_value: Hash(); Mod(<<valuecount:1000000000>>); ToString() -> String
|
||||||
rw_key: <<keydist:Uniform(0,1000000000)->int>>; ToString() -> String
|
rw_key: <<keydist:Uniform(0,10000000)->int>>; ToString() -> String
|
||||||
rw_value: Hash(); <<valdist:Uniform(0,1000000000)->int>>; ToString() -> String
|
rw_value: Hash(); <<valdist:Uniform(0,1000000000)->int>>; ToString() -> String
|
||||||
|
|
||||||
blocks:
|
blocks:
|
||||||
@ -79,7 +79,7 @@ blocks:
|
|||||||
phase: main
|
phase: main
|
||||||
type: read
|
type: read
|
||||||
params:
|
params:
|
||||||
ratio: 5
|
ratio: <<read_ratio:5>>
|
||||||
statements:
|
statements:
|
||||||
- main-select: POST <<protocol:http>>://{weighted_hosts}:<<stargate_port:8080>><<path_prefix:>>/graphql/<<keyspace:gqlcf_keyvalue>>
|
- main-select: POST <<protocol:http>>://{weighted_hosts}:<<stargate_port:8080>><<path_prefix:>>/graphql/<<keyspace:gqlcf_keyvalue>>
|
||||||
Accept: "application/json"
|
Accept: "application/json"
|
||||||
@ -95,7 +95,7 @@ blocks:
|
|||||||
phase: main
|
phase: main
|
||||||
type: write
|
type: write
|
||||||
params:
|
params:
|
||||||
ratio: 5
|
ratio: <<write_ratio:5>>
|
||||||
statements:
|
statements:
|
||||||
- main-write: POST <<protocol:http>>://{weighted_hosts}:<<stargate_port:8080>><<path_prefix:>>/graphql/<<keyspace:gqlcf_keyvalue>>
|
- main-write: POST <<protocol:http>>://{weighted_hosts}:<<stargate_port:8080>><<path_prefix:>>/graphql/<<keyspace:gqlcf_keyvalue>>
|
||||||
Accept: "application/json"
|
Accept: "application/json"
|
||||||
|
@ -13,9 +13,9 @@ description: |
|
|||||||
|
|
||||||
scenarios:
|
scenarios:
|
||||||
default:
|
default:
|
||||||
- run driver=http tags==phase:schema threads==1 cycles==UNDEF
|
schema: run driver=http tags==phase:schema threads==1 cycles==UNDEF
|
||||||
- run driver=http tags==phase:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
|
rampup: run driver=http tags==phase:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
|
||||||
- run driver=http tags==phase:main cycles===TEMPLATE(main-cycles,10000000) threads=auto
|
main: run driver=http tags==phase:main cycles===TEMPLATE(main-cycles,10000000) threads=auto
|
||||||
bindings:
|
bindings:
|
||||||
# To enable an optional weighted set of hosts in place of a load balancer
|
# To enable an optional weighted set of hosts in place of a load balancer
|
||||||
# Examples
|
# Examples
|
||||||
@ -26,9 +26,9 @@ bindings:
|
|||||||
# http request id
|
# http request id
|
||||||
request_id: ToHashedUUID(); ToString();
|
request_id: ToHashedUUID(); ToString();
|
||||||
|
|
||||||
seq_key: Mod(<<keycount:1000000000>>); ToString() -> String
|
seq_key: Mod(<<keycount:10000000>>); ToString() -> String
|
||||||
seq_value: Hash(); Mod(<<valuecount:1000000000>>); ToString() -> String
|
seq_value: Hash(); Mod(<<valuecount:1000000000>>); ToString() -> String
|
||||||
rw_key: <<keydist:Uniform(0,1000000000)->int>>; ToString() -> String
|
rw_key: <<keydist:Uniform(0,10000000)->int>>; ToString() -> String
|
||||||
rw_value: Hash(); <<valdist:Uniform(0,1000000000)->int>>; ToString() -> String
|
rw_value: Hash(); <<valdist:Uniform(0,1000000000)->int>>; ToString() -> String
|
||||||
|
|
||||||
blocks:
|
blocks:
|
||||||
|
@ -106,7 +106,7 @@ statements:

The above two examples are semantically identical, only the format is
different. Notice that the expansion of the URI is still captured in a
field called uri, with all of the dynamic pieces stitched together in the
field called uri, with all the dynamic pieces stitched together in the
value. You can't use arbitrary request fields. Every request field must
be from (method, uri, version, body, ok-status, ok-body) or otherwise be
capitalized to signify an HTTP header.
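For reference, the inline statement form used by the workloads in this changeset puts the method and the templated URI on one line, with capitalized fields acting as HTTP headers and `ok-status` constraining which response codes count as success. A trimmed sketch, assembled from the keyvalue workload above (the host, port, keyspace, and token values are template placeholders, not fixed values):

```yaml
statements:
  - main-select: GET <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/namespaces/<<keyspace:docs_keyvalue>>/collections/<<table:docs_collection>>/{rw_key}
    Accept: "application/json"
    X-Cassandra-Token: "<<auth_token:my_auth_token>>"
    ok-status: "[2-4][0-9][0-9]"
```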
@ -122,7 +122,7 @@ cached at startup.

## Request Fields

At a minimum, a **URI** must be provided. These are enough to build a
At a minimum, a **URI** must be provided. This is enough to build a
request with. All other request fields are optional and have reasonable
defaults:

@ -172,7 +172,7 @@ By default, a request which encounters an exception is retried up to 10
times. If you want to change this, set the
`retries=` activity parameter to another value.

Presently, no determination is made about whether or not an errored
Presently, no determination is made about whether an errored
response *should* be retryable, but it is possible to configure this if
you have a specific exception type that indicates a retryable operation.

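As a sketch only, `retries=` sits alongside the other activity parameters on a scenario step; the step below is hypothetical and is shown just to illustrate where the parameter goes:

```yaml
scenarios:
  default:
    # hypothetical step: lower the retry budget from the default of 10 to 3
    main: run driver=http tags==phase:main cycles=10000 threads=auto retries=3
```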
@ -200,7 +200,10 @@ Presently, this driver only does basic request-response style requests.
Thus, adding headers which take TCP socket control away from the
HttpClient will likely yield inconsistent (or undefined)
results. Support may be added for long-lived connections in a future
release.
release. However, chunked encoding responses are supported, although they
will be received fully before being processed further. Connecting to a long-lived
connection that streams chunked encoding responses indefinitely will have
undefined results.

## HTTP Activity Parameters

@ -217,11 +220,11 @@ release.
including only brief details as explained below.

This setting is a selector for what level of verbosity you will get on
the console. If you set this to true, you'll get every request and
the console. If you set this to `diag=all`, you'll get every request and
response logged to console. This is only for verifying that a test is
configured and to spot check services before running higher scale tests.

All of the data shown in diagnostics is post-hoc, directly from the
All the data shown in diagnostics is post-hoc, directly from the
response provided by the internal HTTP client in the Java runtime.

If you want finer control over how much information diagnostics
@ -229,18 +232,19 @@ release.

- headers - show headers
- stats - show basic stats of each request
- data - show all of each response body
- data10 - show only the first 10 characters of each response body
  this setting supersedes `data`
- data100 - show only the first 100 characters of each response body
  this setting supersedes `data10`
- data1000 - show only the first 1000 characters of each response body
  this setting supersedes `data100`
- data - show all of each response body
  this setting supersedes `data1000`
- redirects - show details for interstitial requests which are made
  when the client follows a redirect directive like a `location`
  header.
  header
- requests - show details for requests
- responses - show details for responses
- codes - shows explanatory details (high-level) of http response status codes
- brief - Show headers, stats, requests, responses, and 10 characters
- all - Show everything, including full payloads and redirects
- a modulo - any number, like 3000 - causes the diagnostics to be
@ -248,12 +252,10 @@ release.
then you will get the brief diagnostic output for every 300th
response.

The requests, responses, and redirects setting work intersectionally.
The requests, responses, and redirects settings work in combination.
For example, if you specify responses and redirects, but not requests,
then you will only see the response portion of all calls made by the
client.
client. All available filters layer together in this way.

All of the diagnostic filters are incrementally added.

- **timeout** - default: forever - Sets the timeout of each request in
  milliseconds.
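To make the `diag=` filters concrete, the hypothetical step below layers a few of them together with a sampling modulo; the comma-separated form and the specific values are illustrative assumptions, not part of this changeset:

```yaml
scenarios:
  default:
    # hypothetical step: headers + stats + first 100 body characters, sampled every 3000th response
    main: run driver=http tags==phase:main cycles=10000 threads=auto diag=headers,stats,data100,3000
```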
|
74
driver-http/src/main/resources/ietf-http-status-codes.csv
Normal file
74
driver-http/src/main/resources/ietf-http-status-codes.csv
Normal file
@ -0,0 +1,74 @@
|
|||||||
|
Value,Description,Reference
|
||||||
|
100,Continue,"[RFC7231, Section 6.2.1]"
|
||||||
|
101,Switching Protocols,"[RFC7231, Section 6.2.2]"
|
||||||
|
102,Processing,[RFC2518]
|
||||||
|
103,Early Hints,[RFC8297]
|
||||||
|
104-199,Unassigned,
|
||||||
|
200,OK,"[RFC7231, Section 6.3.1]"
|
||||||
|
201,Created,"[RFC7231, Section 6.3.2]"
|
||||||
|
202,Accepted,"[RFC7231, Section 6.3.3]"
|
||||||
|
203,Non-Authoritative Information,"[RFC7231, Section 6.3.4]"
|
||||||
|
204,No Content,"[RFC7231, Section 6.3.5]"
|
||||||
|
205,Reset Content,"[RFC7231, Section 6.3.6]"
|
||||||
|
206,Partial Content,"[RFC7233, Section 4.1]"
|
||||||
|
207,Multi-Status,[RFC4918]
|
||||||
|
208,Already Reported,[RFC5842]
|
||||||
|
209-225,Unassigned,
|
||||||
|
226,IM Used,[RFC3229]
|
||||||
|
227-299,Unassigned,
|
||||||
|
300,Multiple Choices,"[RFC7231, Section 6.4.1]"
|
||||||
|
301,Moved Permanently,"[RFC7231, Section 6.4.2]"
|
||||||
|
302,Found,"[RFC7231, Section 6.4.3]"
|
||||||
|
303,See Other,"[RFC7231, Section 6.4.4]"
|
||||||
|
304,Not Modified,"[RFC7232, Section 4.1]"
|
||||||
|
305,Use Proxy,"[RFC7231, Section 6.4.5]"
|
||||||
|
306,(Unused),"[RFC7231, Section 6.4.6]"
|
||||||
|
307,Temporary Redirect,"[RFC7231, Section 6.4.7]"
|
||||||
|
308,Permanent Redirect,[RFC7538]
|
||||||
|
309-399,Unassigned,
|
||||||
|
400,Bad Request,"[RFC7231, Section 6.5.1]"
|
||||||
|
401,Unauthorized,"[RFC7235, Section 3.1]"
|
||||||
|
402,Payment Required,"[RFC7231, Section 6.5.2]"
|
||||||
|
403,Forbidden,"[RFC7231, Section 6.5.3]"
|
||||||
|
404,Not Found,"[RFC7231, Section 6.5.4]"
|
||||||
|
405,Method Not Allowed,"[RFC7231, Section 6.5.5]"
|
||||||
|
406,Not Acceptable,"[RFC7231, Section 6.5.6]"
|
||||||
|
407,Proxy Authentication Required,"[RFC7235, Section 3.2]"
|
||||||
|
408,Request Timeout,"[RFC7231, Section 6.5.7]"
|
||||||
|
409,Conflict,"[RFC7231, Section 6.5.8]"
|
||||||
|
410,Gone,"[RFC7231, Section 6.5.9]"
|
||||||
|
411,Length Required,"[RFC7231, Section 6.5.10]"
|
||||||
|
412,Precondition Failed,"[RFC7232, Section 4.2][RFC8144, Section 3.2]"
|
||||||
|
413,Payload Too Large,"[RFC7231, Section 6.5.11]"
|
||||||
|
414,URI Too Long,"[RFC7231, Section 6.5.12]"
|
||||||
|
415,Unsupported Media Type,"[RFC7231, Section 6.5.13][RFC7694, Section 3]"
|
||||||
|
416,Range Not Satisfiable,"[RFC7233, Section 4.4]"
|
||||||
|
417,Expectation Failed,"[RFC7231, Section 6.5.14]"
|
||||||
|
418-420,Unassigned,
|
||||||
|
421,Misdirected Request,"[RFC7540, Section 9.1.2]"
|
||||||
|
422,Unprocessable Entity,[RFC4918]
|
||||||
|
423,Locked,[RFC4918]
|
||||||
|
424,Failed Dependency,[RFC4918]
|
||||||
|
425,Too Early,[RFC8470]
|
||||||
|
426,Upgrade Required,"[RFC7231, Section 6.5.15]"
|
||||||
|
427,Unassigned,
|
||||||
|
428,Precondition Required,[RFC6585]
|
||||||
|
429,Too Many Requests,[RFC6585]
|
||||||
|
430,Unassigned,
|
||||||
|
431,Request Header Fields Too Large,[RFC6585]
|
||||||
|
432-450,Unassigned,
|
||||||
|
451,Unavailable For Legal Reasons,[RFC7725]
|
||||||
|
452-499,Unassigned,
|
||||||
|
500,Internal Server Error,"[RFC7231, Section 6.6.1]"
|
||||||
|
501,Not Implemented,"[RFC7231, Section 6.6.2]"
|
||||||
|
502,Bad Gateway,"[RFC7231, Section 6.6.3]"
|
||||||
|
503,Service Unavailable,"[RFC7231, Section 6.6.4]"
|
||||||
|
504,Gateway Timeout,"[RFC7231, Section 6.6.5]"
|
||||||
|
505,HTTP Version Not Supported,"[RFC7231, Section 6.6.6]"
|
||||||
|
506,Variant Also Negotiates,[RFC2295]
|
||||||
|
507,Insufficient Storage,[RFC4918]
|
||||||
|
508,Loop Detected,[RFC5842]
|
||||||
|
509,Unassigned,
|
||||||
|
510,Not Extended,[RFC2774]
|
||||||
|
511,Network Authentication Required,[RFC6585]
|
||||||
|
512-599,Unassigned,
|
|
@ -0,0 +1,43 @@
|
|||||||
|
package io.nosqlbench.activitytype.http.statuscodes;
|
||||||
|
|
||||||
|
import org.junit.jupiter.api.Test;
|
||||||
|
|
||||||
|
import static org.assertj.core.api.Assertions.assertThat;
|
||||||
|
|
||||||
|
public class HttpStatusCodesTest {
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testLookup() {
|
||||||
|
IetfStatusCode result = HttpStatusCodes.lookup(404);
|
||||||
|
assertThat(result.getCategory()).isSameAs(HttpStatusRanges.Client_Error);
|
||||||
|
assertThat(result.getReference()).isEqualTo("[RFC7231, Section 6.5.4]");
|
||||||
|
assertThat(result.getValues()).isEqualTo("404");
|
||||||
|
assertThat(result.getDescription()).isEqualTo("Not Found");
|
||||||
|
System.out.println(result.toString(404));
|
||||||
|
assertThat(result.toString(404)).isEqualTo("404, Not Found, [https://www.iana.org/go/rfc7231#section-6.5.4], CLIENT_ERROR (The request contains bad syntax or cannot be fulfilled.)");
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testUnknownCodeLookupGap() {
|
||||||
|
IetfStatusCode result = HttpStatusCodes.lookup(496);
|
||||||
|
assertThat(result.getCategory()).isSameAs(HttpStatusRanges.Client_Error);
|
||||||
|
assertThat(result.getReference()).isEqualTo("[check https://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml]");
|
||||||
|
assertThat(result.getValues()).isEqualTo("496");
|
||||||
|
assertThat(result.getDescription()).isNullOrEmpty();
|
||||||
|
System.out.println(result.toString(496));
|
||||||
|
assertThat(result.toString(496)).isEqualTo("496, [check https://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml], CLIENT_ERROR (The request contains bad syntax or cannot be fulfilled.)");
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testUnknownCodeLookupRange() {
|
||||||
|
IetfStatusCode result = HttpStatusCodes.lookup(747);
|
||||||
|
assertThat(result.getCategory()).isSameAs(HttpStatusRanges.Unknown);
|
||||||
|
assertThat(result.getReference()).isEqualTo("[check https://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml]");
|
||||||
|
assertThat(result.getValues()).isEqualTo("747");
|
||||||
|
assertThat(result.getDescription()).isNullOrEmpty();
|
||||||
|
System.out.println(result.toString(747));
|
||||||
|
assertThat(result.toString(747)).isEqualTo("747, [check https://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml], UNKNOWN_ERROR (This error type is not known based on IANA registered HTTP status codes.)");
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
@ -3,7 +3,7 @@
|
|||||||
<parent>
|
<parent>
|
||||||
<artifactId>nosqlbench</artifactId>
|
<artifactId>nosqlbench</artifactId>
|
||||||
<groupId>io.nosqlbench</groupId>
|
<groupId>io.nosqlbench</groupId>
|
||||||
<version>4.15.52-SNAPSHOT</version>
|
<version>4.15.58-SNAPSHOT</version>
|
||||||
</parent>
|
</parent>
|
||||||
<modelVersion>4.0.0</modelVersion>
|
<modelVersion>4.0.0</modelVersion>
|
||||||
|
|
||||||
@ -19,7 +19,8 @@
|
|||||||
<dependency>
|
<dependency>
|
||||||
<groupId>io.nosqlbench</groupId>
|
<groupId>io.nosqlbench</groupId>
|
||||||
<artifactId>engine-api</artifactId>
|
<artifactId>engine-api</artifactId>
|
||||||
<version>4.15.52-SNAPSHOT</version>
|
<version>4.15.58-SNAPSHOT</version>
|
||||||
|
<scope>compile</scope>
|
||||||
</dependency>
|
</dependency>
|
||||||
|
|
||||||
</dependencies>
|
</dependencies>
|
||||||
|
@ -4,7 +4,7 @@
|
|||||||
<parent>
|
<parent>
|
||||||
<artifactId>mvn-defaults</artifactId>
|
<artifactId>mvn-defaults</artifactId>
|
||||||
<groupId>io.nosqlbench</groupId>
|
<groupId>io.nosqlbench</groupId>
|
||||||
<version>4.15.52-SNAPSHOT</version>
|
<version>4.15.58-SNAPSHOT</version>
|
||||||
<relativePath>../mvn-defaults</relativePath>
|
<relativePath>../mvn-defaults</relativePath>
|
||||||
</parent>
|
</parent>
|
||||||
|
|
||||||
@ -39,14 +39,14 @@
|
|||||||
<!-- core dependencies -->
|
<!-- core dependencies -->
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>io.nosqlbench</groupId>
|
<groupId>io.nosqlbench</groupId>
|
||||||
<artifactId>drivers-api</artifactId>
|
<artifactId>engine-api</artifactId>
|
||||||
<version>4.15.52-SNAPSHOT</version>
|
<version>4.15.58-SNAPSHOT</version>
|
||||||
</dependency>
|
</dependency>
|
||||||
|
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>io.nosqlbench</groupId>
|
<groupId>io.nosqlbench</groupId>
|
||||||
<artifactId>driver-stdout</artifactId>
|
<artifactId>driver-stdout</artifactId>
|
||||||
<version>4.15.52-SNAPSHOT</version>
|
<version>4.15.58-SNAPSHOT</version>
|
||||||
</dependency>
|
</dependency>
|
||||||
|
|
||||||
<!-- https://mvnrepository.com/artifact/org.apache.commons/commons-lang3 -->
|
<!-- https://mvnrepository.com/artifact/org.apache.commons/commons-lang3 -->
|
||||||
|
@ -5,7 +5,7 @@
|
|||||||
<parent>
|
<parent>
|
||||||
<artifactId>mvn-defaults</artifactId>
|
<artifactId>mvn-defaults</artifactId>
|
||||||
<groupId>io.nosqlbench</groupId>
|
<groupId>io.nosqlbench</groupId>
|
||||||
<version>4.15.52-SNAPSHOT</version>
|
<version>4.15.58-SNAPSHOT</version>
|
||||||
<relativePath>../mvn-defaults</relativePath>
|
<relativePath>../mvn-defaults</relativePath>
|
||||||
</parent>
|
</parent>
|
||||||
|
|
||||||
@ -22,12 +22,13 @@
|
|||||||
<dependency>
|
<dependency>
|
||||||
<groupId>io.nosqlbench</groupId>
|
<groupId>io.nosqlbench</groupId>
|
||||||
<artifactId>drivers-api</artifactId>
|
<artifactId>drivers-api</artifactId>
|
||||||
<version>4.15.52-SNAPSHOT</version>
|
<version>4.15.58-SNAPSHOT</version>
|
||||||
|
<scope>compile</scope>
|
||||||
</dependency>
|
</dependency>
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>io.nosqlbench</groupId>
|
<groupId>io.nosqlbench</groupId>
|
||||||
<artifactId>engine-api</artifactId>
|
<artifactId>engine-api</artifactId>
|
||||||
<version>4.15.52-SNAPSHOT</version>
|
<version>4.15.58-SNAPSHOT</version>
|
||||||
<scope>compile</scope>
|
<scope>compile</scope>
|
||||||
</dependency>
|
</dependency>
|
||||||
|
|
||||||
|
@ -4,7 +4,7 @@
|
|||||||
<parent>
|
<parent>
|
||||||
<artifactId>mvn-defaults</artifactId>
|
<artifactId>mvn-defaults</artifactId>
|
||||||
<groupId>io.nosqlbench</groupId>
|
<groupId>io.nosqlbench</groupId>
|
||||||
<version>4.15.52-SNAPSHOT</version>
|
<version>4.15.58-SNAPSHOT</version>
|
||||||
<relativePath>../mvn-defaults</relativePath>
|
<relativePath>../mvn-defaults</relativePath>
|
||||||
</parent>
|
</parent>
|
||||||
|
|
||||||
@ -41,10 +41,16 @@
|
|||||||
<version>5.5.1</version>
|
<version>5.5.1</version>
|
||||||
</dependency>
|
</dependency>
|
||||||
|
|
||||||
|
<dependency>
|
||||||
|
<groupId>io.nosqlbench</groupId>
|
||||||
|
<artifactId>engine-api</artifactId>
|
||||||
|
<version>4.15.58-SNAPSHOT</version>
|
||||||
|
</dependency>
|
||||||
|
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>io.nosqlbench</groupId>
|
<groupId>io.nosqlbench</groupId>
|
||||||
<artifactId>driver-stdout</artifactId>
|
<artifactId>driver-stdout</artifactId>
|
||||||
<version>4.15.52-SNAPSHOT</version>
|
<version>4.15.58-SNAPSHOT</version>
|
||||||
</dependency>
|
</dependency>
|
||||||
|
|
||||||
<!-- <dependency>-->
|
<!-- <dependency>-->
|
||||||
|
@ -7,7 +7,7 @@
|
|||||||
<parent>
|
<parent>
|
||||||
<artifactId>mvn-defaults</artifactId>
|
<artifactId>mvn-defaults</artifactId>
|
||||||
<groupId>io.nosqlbench</groupId>
|
<groupId>io.nosqlbench</groupId>
|
||||||
<version>4.15.52-SNAPSHOT</version>
|
<version>4.15.58-SNAPSHOT</version>
|
||||||
<relativePath>../mvn-defaults</relativePath>
|
<relativePath>../mvn-defaults</relativePath>
|
||||||
</parent>
|
</parent>
|
||||||
|
|
||||||
@ -18,11 +18,16 @@
|
|||||||
</description>
|
</description>
|
||||||
|
|
||||||
<dependencies>
|
<dependencies>
|
||||||
|
<dependency>
|
||||||
|
<groupId>io.nosqlbench</groupId>
|
||||||
|
<artifactId>engine-api</artifactId>
|
||||||
|
<version>4.15.58-SNAPSHOT</version>
|
||||||
|
</dependency>
|
||||||
|
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>io.nosqlbench</groupId>
|
<groupId>io.nosqlbench</groupId>
|
||||||
<artifactId>drivers-api</artifactId>
|
<artifactId>drivers-api</artifactId>
|
||||||
<version>4.15.52-SNAPSHOT</version>
|
<version>4.15.58-SNAPSHOT</version>
|
||||||
</dependency>
|
</dependency>
|
||||||
|
|
||||||
<dependency>
|
<dependency>
|
||||||
@ -30,12 +35,6 @@
|
|||||||
<artifactId>mongodb-driver-sync</artifactId>
|
<artifactId>mongodb-driver-sync</artifactId>
|
||||||
<version>4.0.3</version>
|
<version>4.0.3</version>
|
||||||
</dependency>
|
</dependency>
|
||||||
<dependency>
|
|
||||||
<groupId>io.nosqlbench</groupId>
|
|
||||||
<artifactId>engine-api</artifactId>
|
|
||||||
<version>4.15.52-SNAPSHOT</version>
|
|
||||||
<scope>compile</scope>
|
|
||||||
</dependency>
|
|
||||||
|
|
||||||
</dependencies>
|
</dependencies>
|
||||||
|
|
||||||
|
@ -0,0 +1,14 @@
---
title: mongoDB CRUD Basic
weight: 1
---

## Description

The mongoDB CRUD Basic workflow emulates CRUD operations for mongoDB using generated JSON documents.
It's a counterpart of the Stargate Documents API CRUD Basic workflow.
Please refer to [http-docsapi-crud-basic.md](../../../../../driver-http/src/main/resources/activities/documents-api/http-docsapi-crud-basic.md) for the general workflow design details.

## Indexing

To simulate a realistic situation as much as possible, this workflow creates 3 additional indexes (apart from `_id`) on the collection where documents are stored.
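The workload yaml that follows documents its own invocation in its header comment; for example, the schema phase can be run as shown there (the connection and database values below are just the example values from that comment):

```yaml
# nb -v run driver=mongodb yaml=mongodb-crud-basic tags=phase:schema connection=mongodb://127.0.0.1 database=testdb
```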
@ -0,0 +1,185 @@
|
|||||||
|
# nb -v run driver=mongodb yaml=mongodb-crud-basic tags=phase:schema connection=mongodb://127.0.0.1 database=testdb
|
||||||
|
|
||||||
|
description: |
|
||||||
|
This workload emulates CRUD operations for the mongoDB.
|
||||||
|
It generates a simple JSON document to be used for writes and updates.
|
||||||
|
It's a counterpart of the Stargate's Documents API CRUD Basic workflow.
|
||||||
|
|
||||||
|
scenarios:
|
||||||
|
default:
|
||||||
|
schema: run driver=mongodb tags==phase:schema threads==1 cycles==UNDEF
|
||||||
|
write: run driver=mongodb tags==phase:main,type:write cycles===TEMPLATE(write-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
|
||||||
|
read: run driver=mongodb tags==phase:main,type:read cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
|
||||||
|
update: run driver=mongodb tags==phase:main,type:update cycles===TEMPLATE(update-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
|
||||||
|
delete: run driver=mongodb tags==phase:main,type:delete cycles===TEMPLATE(delete-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
|
||||||
|
|
||||||
|
bindings:
|
||||||
|
seq_key: Mod(<<docscount:10000000>>); ToString() -> String
|
||||||
|
random_key: Uniform(0,<<docscount:10000000>>); ToString() -> String
|
||||||
|
|
||||||
|
user_id: ToHashedUUID(); ToString() -> String
|
||||||
|
created_on: Uniform(1262304000,1577836800) -> long
|
||||||
|
gender: WeightedStrings('M:10;F:10;O:1')
|
||||||
|
full_name: FullNames()
|
||||||
|
married: ModuloToBoolean()
|
||||||
|
city: Cities()
|
||||||
|
country_code: CountryCodes()
|
||||||
|
lat: Uniform(-180d, 180d)
|
||||||
|
lng: Hash() -> long; Uniform(-180d, 180d)
|
||||||
|
friend_id: Add(-1); ToHashedUUID(); ToString() -> String
|
||||||
|
|
||||||
|
blocks:
|
||||||
|
- tags:
|
||||||
|
phase: schema
|
||||||
|
statements:
|
||||||
|
- dummy-insert: |
|
||||||
|
{
|
||||||
|
insert: "<<collection:crud_basic>>",
|
||||||
|
documents: [ { _id: "dummyyyy" } ]
|
||||||
|
}
|
||||||
|
|
||||||
|
- drop-collection: |
|
||||||
|
{
|
||||||
|
drop: "<<collection:crud_basic>>"
|
||||||
|
}
|
||||||
|
tags:
|
||||||
|
name: drop-collection
|
||||||
|
|
||||||
|
- create-collection: |
|
||||||
|
{
|
||||||
|
create: "<<collection:crud_basic>>"
|
||||||
|
}
|
||||||
|
tags:
|
||||||
|
name: create-collection
|
||||||
|
|
||||||
|
- create-indexes: |
|
||||||
|
{
|
||||||
|
createIndexes: "<<collection:crud_basic>>",
|
||||||
|
indexes: [
|
||||||
|
{
|
||||||
|
key: { user_id: 1 },
|
||||||
|
name: "user_id_idx",
|
||||||
|
unique: true
|
||||||
|
},
|
||||||
|
{
|
||||||
|
key: { created_on: 1 },
|
||||||
|
name: "created_on_idx"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
key: { gender: 1 },
|
||||||
|
name: "gender_idx"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
tags:
|
||||||
|
name: create-indexes
|
||||||
|
|
||||||
|
- name: main-write
|
||||||
|
tags:
|
||||||
|
phase: main
|
||||||
|
type: write
|
||||||
|
statements:
|
||||||
|
- write-document: |
|
||||||
|
{
|
||||||
|
insert: "<<collection:crud_basic>>",
|
||||||
|
writeConcern: { w: "majority" },
|
||||||
|
documents: [
|
||||||
|
{
|
||||||
|
"_id": "{seq_key}",
|
||||||
|
"user_id": "{user_id}",
|
||||||
|
"created_on": {created_on},
|
||||||
|
"gender": "{gender}",
|
||||||
|
"full_name": "{full_name}",
|
||||||
|
"married": {married},
|
||||||
|
"address": {
|
||||||
|
"primary": {
|
||||||
|
"city": "{city}",
|
||||||
|
"cc": "{country_code}"
|
||||||
|
},
|
||||||
|
"secondary": {}
|
||||||
|
},
|
||||||
|
"coordinates": [
|
||||||
|
{lat},
|
||||||
|
{lng}
|
||||||
|
],
|
||||||
|
"children": [],
|
||||||
|
"friends": [
|
||||||
|
"{friend_id}"
|
||||||
|
],
|
||||||
|
"debt": null
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
tags:
|
||||||
|
name: write-document
|
||||||
|
|
||||||
|
- name: main-read
|
||||||
|
tags:
|
||||||
|
phase: main
|
||||||
|
type: read
|
||||||
|
statements:
|
||||||
|
- read-document: |
|
||||||
|
{
|
||||||
|
find: "<<collection:crud_basic>>",
|
||||||
|
filter: { _id: "{random_key}" }
|
||||||
|
}
|
||||||
|
tags:
|
||||||
|
name: read-document
|
||||||
|
|
||||||
|
- name: main-update
|
||||||
|
tags:
|
||||||
|
phase: main
|
||||||
|
type: update
|
||||||
|
statements:
|
||||||
|
- update-document: |
|
||||||
|
{
|
||||||
|
update: "<<collection:crud_basic>>",
|
||||||
|
writeConcern: { w: "majority" },
|
||||||
|
updates: [
|
||||||
|
{
|
||||||
|
q: { _id: "{random_key}" },
|
||||||
|
u: {
|
||||||
|
"_id": "{seq_key}",
|
||||||
|
"user_id": "{user_id}",
|
||||||
|
"created_on": {created_on},
|
||||||
|
"gender": "{gender}",
|
||||||
|
"full_name": "{full_name}",
|
||||||
|
"married": {married},
|
||||||
|
"address": {
|
||||||
|
"primary": {
|
||||||
|
"city": "{city}",
|
||||||
|
"cc": "{country_code}"
|
||||||
|
},
|
||||||
|
"secondary": {}
|
||||||
|
},
|
||||||
|
"coordinates": [
|
||||||
|
{lat},
|
||||||
|
{lng}
|
||||||
|
],
|
||||||
|
"children": [],
|
||||||
|
"friends": [
|
||||||
|
"{friend_id}"
|
||||||
|
],
|
||||||
|
"debt": null
|
||||||
|
}
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
tags:
|
||||||
|
name: update-document
|
||||||
|
|
||||||
|
- name: main-delete
|
||||||
|
tags:
|
||||||
|
phase: main
|
||||||
|
type: delete
|
||||||
|
statements:
|
||||||
|
- delete-document: |
|
||||||
|
{
|
||||||
|
delete: "<<collection:crud_basic>>",
|
||||||
|
deletes: [
|
||||||
|
{
|
||||||
|
q: { _id: "{seq_key}" },
|
||||||
|
limit: 1
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
@ -0,0 +1,18 @@
---
title: mongoDB CRUD Dataset
weight: 2
---

## Description

The mongoDB CRUD Dataset workflow emulates CRUD operations for mongoDB using JSON documents from an external dataset.
It's a counterpart of the Stargate Documents API CRUD Dataset workflow.
Please refer to [http-docsapi-crud-dataset.md](../../../../../driver-http/src/main/resources/activities/documents-api/http-docsapi-crud-dataset.md) for the general workflow design details.

## Indexing

To simulate a realistic situation as much as possible, this workflow allows creation of the indexes using the parameter:

* `indexes` - Specifies the indexes to create. Each document in the array specifies a separate index. Corresponds to the `indexes` field in the [mongoDB *createIndexes* command](https://docs.mongodb.com/manual/reference/command/createIndexes/#mongodb-dbcommand-dbcmd.createIndexes).

If the `indexes` parameter is not specified, a dummy sparse index will be created.
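In the workload yaml that follows, the `indexes` parameter is consumed by a template in the schema block, with a sparse dummy index as its default. The snippet below is copied from that block; the override shown in the comment is only an illustrative value, not part of this changeset:

```yaml
- create-indexes: |
    {
      createIndexes: "<<collection:crud_dataset>>",
      indexes: <<indexes:[ { key: { dummy : 1 }, name: "dummy_idx", sparse: true } ]>>
    }
  # e.g. passing indexes=[ { key: { user_id: 1 }, name: "user_id_idx", unique: true } ] replaces the dummy index
```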
@ -0,0 +1,116 @@
|
|||||||
|
# nb -v run driver=mongodb yaml=mongodb-crud-dataset tags=phase:schema connection=mongodb://127.0.0.1 database=testdb dataset_file=path/to/data.json
|
||||||
|
|
||||||
|
description: |
|
||||||
|
This workload emulates CRUD operations for the mongoDB.
|
||||||
|
It requires a data set file, where each line is a single JSON document to be used for writes and updates.
|
||||||
|
It's a counterpart of the Stargate's Documents API CRUD Dataset workflow.
|
||||||
|
|
||||||
|
scenarios:
|
||||||
|
default:
|
||||||
|
schema: run driver=mongodb tags==phase:schema threads==1 cycles==UNDEF
|
||||||
|
write: run driver=mongodb tags==phase:main,type:write cycles===TEMPLATE(write-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
|
||||||
|
read: run driver=mongodb tags==phase:main,type:read cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
|
||||||
|
update: run driver=mongodb tags==phase:main,type:update cycles===TEMPLATE(update-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
|
||||||
|
delete: run driver=mongodb tags==phase:main,type:delete cycles===TEMPLATE(delete-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
|
||||||
|
|
||||||
|
bindings:
|
||||||
|
seq_key: Mod(<<docscount:10000000>>); ToString() -> String
|
||||||
|
random_key: Uniform(0,<<docscount:10000000>>); ToString() -> String
|
||||||
|
|
||||||
|
blocks:
|
||||||
|
- tags:
|
||||||
|
phase: schema
|
||||||
|
statements:
|
||||||
|
- dummy-insert: |
|
||||||
|
{
|
||||||
|
insert: "<<collection:crud_dataset>>",
|
||||||
|
documents: [ { _id: "dummyyyy" } ]
|
||||||
|
}
|
||||||
|
|
||||||
|
- drop-collection: |
|
||||||
|
{
|
||||||
|
drop: "<<collection:crud_dataset>>"
|
||||||
|
}
|
||||||
|
tags:
|
||||||
|
name: drop-collection
|
||||||
|
|
||||||
|
- create-collection: |
|
||||||
|
{
|
||||||
|
create: "<<collection:crud_dataset>>"
|
||||||
|
}
|
||||||
|
tags:
|
||||||
|
name: create-collection
|
||||||
|
|
||||||
|
- create-indexes: |
|
||||||
|
{
|
||||||
|
createIndexes: "<<collection:crud_dataset>>",
|
||||||
|
indexes: <<indexes:[ { key: { dummy : 1 }, name: "dummy_idx", sparse: true } ]>>
|
||||||
|
}
|
||||||
|
tags:
|
||||||
|
name: create-indexes
|
||||||
|
|
||||||
|
- name: main-write
|
||||||
|
tags:
|
||||||
|
phase: main
|
||||||
|
type: write
|
||||||
|
statements:
|
||||||
|
- write-document: |
|
||||||
|
{
|
||||||
|
insert: "<<collection:crud_dataset>>",
|
||||||
|
writeConcern: { w: "majority" },
|
||||||
|
documents: [ { "_id": "{seq_key}", {document_json_without_id} ]
|
||||||
|
}
|
||||||
|
tags:
|
||||||
|
name: write-document
|
||||||
|
bindings:
|
||||||
|
document_json_without_id: ModuloLineToString('<<dataset_file>>'); ReplaceRegex('^\{', '')
|
||||||
|
|
||||||
|
- name: main-read
|
||||||
|
tags:
|
||||||
|
phase: main
|
||||||
|
type: read
|
||||||
|
statements:
|
||||||
|
- read-document: |
|
||||||
|
{
|
||||||
|
find: "<<collection:crud_dataset>>",
|
||||||
|
filter: { _id: "{random_key}" }
|
||||||
|
}
|
||||||
|
tags:
|
||||||
|
name: read-document
|
||||||
|
|
||||||
|
- name: main-update
|
||||||
|
tags:
|
||||||
|
phase: main
|
||||||
|
type: update
|
||||||
|
statements:
|
||||||
|
- update-document: |
|
||||||
|
{
|
||||||
|
update: "<<collection:crud_dataset>>",
|
||||||
|
writeConcern: { w: "majority" },
|
||||||
|
updates: [
|
||||||
|
{
|
||||||
|
q: { _id: "{random_key}" },
|
||||||
|
u: { "_id": "{random_key}", {document_json_without_id}
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
tags:
|
||||||
|
name: update-document
|
||||||
|
bindings:
|
||||||
|
document_json_without_id: ModuloLineToString('<<dataset_file>>'); ReplaceRegex('^\{', '')
|
||||||
|
|
||||||
|
- name: main-delete
|
||||||
|
tags:
|
||||||
|
phase: main
|
||||||
|
type: delete
|
||||||
|
statements:
|
||||||
|
- delete-document: |
|
||||||
|
{
|
||||||
|
delete: "<<collection:crud_dataset>>",
|
||||||
|
deletes: [
|
||||||
|
{
|
||||||
|
q: { _id: "{seq_key}" },
|
||||||
|
limit: 1
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
@ -4,7 +4,7 @@
|
|||||||
<parent>
|
<parent>
|
||||||
<artifactId>mvn-defaults</artifactId>
|
<artifactId>mvn-defaults</artifactId>
|
||||||
<groupId>io.nosqlbench</groupId>
|
<groupId>io.nosqlbench</groupId>
|
||||||
<version>4.15.52-SNAPSHOT</version>
|
<version>4.15.58-SNAPSHOT</version>
|
||||||
<relativePath>../mvn-defaults</relativePath>
|
<relativePath>../mvn-defaults</relativePath>
|
||||||
</parent>
|
</parent>
|
||||||
|
|
||||||
@ -37,10 +37,16 @@
|
|||||||
<version>${pulsar.version}</version>
|
<version>${pulsar.version}</version>
|
||||||
</dependency>
|
</dependency>
|
||||||
|
|
||||||
|
<dependency>
|
||||||
|
<groupId>io.nosqlbench</groupId>
|
||||||
|
<artifactId>engine-api</artifactId>
|
||||||
|
<version>4.15.58-SNAPSHOT</version>
|
||||||
|
</dependency>
|
||||||
|
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>io.nosqlbench</groupId>
|
<groupId>io.nosqlbench</groupId>
|
||||||
<artifactId>driver-stdout</artifactId>
|
<artifactId>driver-stdout</artifactId>
|
||||||
<version>4.15.52-SNAPSHOT</version>
|
<version>4.15.58-SNAPSHOT</version>
|
||||||
</dependency>
|
</dependency>
|
||||||
|
|
||||||
<!-- https://mvnrepository.com/artifact/commons-beanutils/commons-beanutils -->
|
<!-- https://mvnrepository.com/artifact/commons-beanutils/commons-beanutils -->
|
||||||
|
@ -41,7 +41,7 @@ public class PulsarAction implements SyncAction {
|
|||||||
pulsarOp = readyPulsarOp.apply(cycle);
|
pulsarOp = readyPulsarOp.apply(cycle);
|
||||||
} catch (Exception bindException) {
|
} catch (Exception bindException) {
|
||||||
// if diagnostic mode ...
|
// if diagnostic mode ...
|
||||||
activity.getErrorhandler().handleError(bindException, cycle, 0);
|
activity.getErrorHandler().handleError(bindException, cycle, 0);
|
||||||
throw new RuntimeException(
|
throw new RuntimeException(
|
||||||
"while binding request in cycle " + cycle + ": " + bindException.getMessage(), bindException
|
"while binding request in cycle " + cycle + ": " + bindException.getMessage(), bindException
|
||||||
);
|
);
|
||||||
@ -56,7 +56,7 @@ public class PulsarAction implements SyncAction {
|
|||||||
break;
|
break;
|
||||||
} catch (RuntimeException err) {
|
} catch (RuntimeException err) {
|
||||||
ErrorDetail errorDetail = activity
|
ErrorDetail errorDetail = activity
|
||||||
.getErrorhandler()
|
.getErrorHandler()
|
||||||
.handleError(err, cycle, System.nanoTime() - start);
|
.handleError(err, cycle, System.nanoTime() - start);
|
||||||
if (!errorDetail.isRetryable()) {
|
if (!errorDetail.isRetryable()) {
|
||||||
break;
|
break;
|
||||||
|
@ -20,117 +20,74 @@ import org.apache.logging.log4j.LogManager;
|
|||||||
import org.apache.logging.log4j.Logger;
|
import org.apache.logging.log4j.Logger;
|
||||||
import org.apache.pulsar.client.admin.PulsarAdmin;
|
import org.apache.pulsar.client.admin.PulsarAdmin;
|
||||||
import org.apache.pulsar.client.admin.PulsarAdminBuilder;
|
import org.apache.pulsar.client.admin.PulsarAdminBuilder;
|
||||||
import org.apache.pulsar.client.admin.internal.PulsarAdminImpl;
|
import org.apache.pulsar.client.api.*;
|
||||||
import org.apache.pulsar.client.api.PulsarClientException;
|
|
||||||
import org.apache.pulsar.client.impl.conf.ClientConfigurationData;
|
import java.util.Map;
|
||||||
|
|
||||||
public class PulsarActivity extends SimpleActivity implements ActivityDefObserver {
|
public class PulsarActivity extends SimpleActivity implements ActivityDefObserver {
|
||||||
|
|
||||||
private final static Logger logger = LogManager.getLogger(PulsarActivity.class);
|
private final static Logger logger = LogManager.getLogger(PulsarActivity.class);
|
||||||
|
|
||||||
public Timer bindTimer;
|
private Counter bytesCounter;
|
||||||
public Timer executeTimer;
|
private Histogram messageSizeHistogram;
|
||||||
public Counter bytesCounter;
|
private Timer bindTimer;
|
||||||
public Histogram messagesizeHistogram;
|
private Timer executeTimer;
|
||||||
public Timer createTransactionTimer;
|
private Timer createTransactionTimer;
|
||||||
public Timer commitTransactionTimer;
|
private Timer commitTransactionTimer;
|
||||||
|
|
||||||
|
// Metrics for NB Pulsar driver milestone: https://github.com/nosqlbench/nosqlbench/milestone/11
|
||||||
|
// - end-to-end latency
|
||||||
|
private Histogram e2eMsgProcLatencyHistogram;
|
||||||
|
|
||||||
private PulsarSpaceCache pulsarCache;
|
private PulsarSpaceCache pulsarCache;
|
||||||
private PulsarAdmin pulsarAdmin;
|
|
||||||
|
|
||||||
private PulsarNBClientConf clientConf;
|
private PulsarNBClientConf pulsarNBClientConf;
|
||||||
// e.g. pulsar://localhost:6650
|
|
||||||
private String pulsarSvcUrl;
|
private String pulsarSvcUrl;
|
||||||
// e.g. http://localhost:8080
|
|
||||||
private String webSvcUrl;
|
private String webSvcUrl;
|
||||||
|
private PulsarAdmin pulsarAdmin;
|
||||||
|
private PulsarClient pulsarClient;
|
||||||
|
private Schema<?> pulsarSchema;
|
||||||
|
|
||||||
private NBErrorHandler errorhandler;
|
private NBErrorHandler errorHandler;
|
||||||
private OpSequence<OpDispenser<PulsarOp>> sequencer;
|
private OpSequence<OpDispenser<PulsarOp>> sequencer;
|
||||||
private volatile Throwable asyncOperationFailure;
|
private volatile Throwable asyncOperationFailure;
|
||||||
|
|
||||||
// private Supplier<PulsarSpace> clientSupplier;
|
|
||||||
// private ThreadLocal<Supplier<PulsarClient>> tlClientSupplier;
|
|
||||||
|
|
||||||
public PulsarActivity(ActivityDef activityDef) {
|
public PulsarActivity(ActivityDef activityDef) {
|
||||||
super(activityDef);
|
super(activityDef);
|
||||||
}
|
}
|
||||||
|
|
||||||
private void initPulsarAdmin() {
|
@Override
|
||||||
|
public void shutdownActivity() {
|
||||||
|
super.shutdownActivity();
|
||||||
|
|
||||||
PulsarAdminBuilder adminBuilder =
|
for (PulsarSpace pulsarSpace : pulsarCache.getAssociatedPulsarSpace()) {
|
||||||
PulsarAdmin.builder()
|
pulsarSpace.shutdownPulsarSpace();
|
||||||
.serviceHttpUrl(webSvcUrl);
|
|
||||||
|
|
||||||
try {
|
|
||||||
String authPluginClassName =
|
|
||||||
(String) clientConf.getClientConfValue(PulsarActivityUtil.CLNT_CONF_KEY.authPulginClassName.label);
|
|
||||||
String authParams =
|
|
||||||
(String) clientConf.getClientConfValue(PulsarActivityUtil.CLNT_CONF_KEY.authParams.label);
|
|
||||||
|
|
||||||
String useTlsStr =
|
|
||||||
(String) clientConf.getClientConfValue(PulsarActivityUtil.CLNT_CONF_KEY.useTls.label);
|
|
||||||
boolean useTls = BooleanUtils.toBoolean(useTlsStr);
|
|
||||||
|
|
||||||
String tlsTrustCertsFilePath =
|
|
||||||
(String) clientConf.getClientConfValue(PulsarActivityUtil.CLNT_CONF_KEY.tlsTrustCertsFilePath.label);
|
|
||||||
|
|
||||||
String tlsAllowInsecureConnectionStr =
|
|
||||||
(String) clientConf.getClientConfValue(PulsarActivityUtil.CLNT_CONF_KEY.tlsAllowInsecureConnection.label);
|
|
||||||
boolean tlsAllowInsecureConnection = BooleanUtils.toBoolean(tlsAllowInsecureConnectionStr);
|
|
||||||
|
|
||||||
String tlsHostnameVerificationEnableStr =
|
|
||||||
(String) clientConf.getClientConfValue(PulsarActivityUtil.CLNT_CONF_KEY.tlsHostnameVerificationEnable.label);
|
|
||||||
boolean tlsHostnameVerificationEnable = BooleanUtils.toBoolean(tlsHostnameVerificationEnableStr);
|
|
||||||
|
|
||||||
if ( !StringUtils.isAnyBlank(authPluginClassName, authParams) ) {
|
|
||||||
adminBuilder.authentication(authPluginClassName, authParams);
|
|
||||||
}
|
|
||||||
|
|
||||||
if ( useTls ) {
|
|
||||||
adminBuilder
|
|
||||||
.useKeyStoreTls(true)
|
|
||||||
.enableTlsHostnameVerification(tlsHostnameVerificationEnable);
|
|
||||||
|
|
||||||
if (!StringUtils.isBlank(tlsTrustCertsFilePath))
|
|
||||||
adminBuilder.tlsTrustCertsFilePath(tlsTrustCertsFilePath);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Put this outside "if (useTls)" block for easier handling of "tlsAllowInsecureConnection"
|
|
||||||
adminBuilder.allowTlsInsecureConnection(tlsAllowInsecureConnection);
|
|
||||||
pulsarAdmin = adminBuilder.build();
|
|
||||||
|
|
||||||
// Not supported in Pulsar 2.8.0
|
|
||||||
// ClientConfigurationData configurationData = pulsarAdmin.getClientConfigData();
|
|
||||||
// logger.debug(configurationData.toString());
|
|
||||||
|
|
||||||
} catch (PulsarClientException e) {
|
|
||||||
logger.error("Fail to create PulsarAdmin from global configuration!");
|
|
||||||
throw new RuntimeException("Fail to create PulsarAdmin from global configuration!");
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void initActivity() {
|
public void initActivity() {
|
||||||
super.initActivity();
|
super.initActivity();
|
||||||
|
bytesCounter = ActivityMetrics.counter(activityDef, "bytes");
|
||||||
|
messageSizeHistogram = ActivityMetrics.histogram(activityDef, "message_size");
|
||||||
bindTimer = ActivityMetrics.timer(activityDef, "bind");
|
bindTimer = ActivityMetrics.timer(activityDef, "bind");
|
||||||
executeTimer = ActivityMetrics.timer(activityDef, "execute");
|
executeTimer = ActivityMetrics.timer(activityDef, "execute");
|
||||||
createTransactionTimer = ActivityMetrics.timer(activityDef, "createtransaction");
|
createTransactionTimer = ActivityMetrics.timer(activityDef, "create_transaction");
|
||||||
commitTransactionTimer = ActivityMetrics.timer(activityDef, "committransaction");
|
commitTransactionTimer = ActivityMetrics.timer(activityDef, "commit_transaction");
|
||||||
|
|
||||||
bytesCounter = ActivityMetrics.counter(activityDef, "bytes");
|
e2eMsgProcLatencyHistogram = ActivityMetrics.histogram(activityDef, "e2e_msg_latency");
|
||||||
messagesizeHistogram = ActivityMetrics.histogram(activityDef, "messagesize");
|
|
||||||
|
|
||||||
String pulsarClntConfFile =
|
String pulsarClntConfFile =
|
||||||
activityDef.getParams().getOptionalString("config").orElse("config.properties");
|
activityDef.getParams().getOptionalString("config").orElse("config.properties");
|
||||||
clientConf = new PulsarNBClientConf(pulsarClntConfFile);
|
pulsarNBClientConf = new PulsarNBClientConf(pulsarClntConfFile);
|
||||||
|
|
||||||
pulsarSvcUrl =
|
pulsarSvcUrl =
|
||||||
activityDef.getParams().getOptionalString("service_url").orElse("pulsar://localhost:6650");
|
activityDef.getParams().getOptionalString("service_url").orElse("pulsar://localhost:6650");
|
||||||
webSvcUrl =
|
webSvcUrl =
|
||||||
activityDef.getParams().getOptionalString("web_url").orElse("http://localhost:8080");
|
activityDef.getParams().getOptionalString("web_url").orElse("http://localhost:8080");
|
||||||
|
|
||||||
initPulsarAdmin();
|
initPulsarAdminAndClientObj();
|
||||||
|
createPulsarSchemaFromConf();
|
||||||
|
|
||||||
pulsarCache = new PulsarSpaceCache(this);
|
pulsarCache = new PulsarSpaceCache(this);
|
||||||
|
|
||||||
@ -138,60 +95,20 @@ public class PulsarActivity extends SimpleActivity implements ActivityDefObserve
|
|||||||
setDefaultsFromOpSequence(sequencer);
|
setDefaultsFromOpSequence(sequencer);
|
||||||
onActivityDefUpdate(activityDef);
|
onActivityDefUpdate(activityDef);
|
||||||
|
|
||||||
this.errorhandler = new NBErrorHandler(
|
this.errorHandler = new NBErrorHandler(
|
||||||
() -> activityDef.getParams().getOptionalString("errors").orElse("stop"),
|
() -> activityDef.getParams().getOptionalString("errors").orElse("stop"),
|
||||||
this::getExceptionMetrics
|
this::getExceptionMetrics
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
public NBErrorHandler getErrorhandler() {
|
|
||||||
return errorhandler;
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public synchronized void onActivityDefUpdate(ActivityDef activityDef) {
|
public synchronized void onActivityDefUpdate(ActivityDef activityDef) {
|
||||||
super.onActivityDefUpdate(activityDef);
|
super.onActivityDefUpdate(activityDef);
|
||||||
}
|
}
|
||||||
|
|
||||||
public OpSequence<OpDispenser<PulsarOp>> getSequencer() {
|
public NBErrorHandler getErrorHandler() { return errorHandler; }
|
||||||
return sequencer;
|
|
||||||
}
|
|
||||||
|
|
||||||
public PulsarNBClientConf getPulsarConf() {
|
public OpSequence<OpDispenser<PulsarOp>> getSequencer() { return sequencer; }
|
||||||
return clientConf;
|
|
||||||
}
|
|
||||||
|
|
||||||
public String getPulsarSvcUrl() {
|
|
||||||
return pulsarSvcUrl;
|
|
||||||
}
|
|
||||||
|
|
||||||
public String getWebSvcUrl() { return webSvcUrl; }
|
|
||||||
|
|
||||||
public PulsarAdmin getPulsarAdmin() { return pulsarAdmin; }
|
|
||||||
|
|
||||||
public Timer getBindTimer() {
|
|
||||||
return bindTimer;
|
|
||||||
}
|
|
||||||
|
|
||||||
public Timer getExecuteTimer() {
|
|
||||||
return this.executeTimer;
|
|
||||||
}
|
|
||||||
|
|
||||||
public Counter getBytesCounter() {
|
|
||||||
return bytesCounter;
|
|
||||||
}
|
|
||||||
|
|
||||||
public Timer getCreateTransactionTimer() {
|
|
||||||
return createTransactionTimer;
|
|
||||||
}
|
|
||||||
|
|
||||||
public Timer getCommitTransactionTimer() {
|
|
||||||
return commitTransactionTimer;
|
|
||||||
}
|
|
||||||
|
|
||||||
public Histogram getMessagesizeHistogram() {
|
|
||||||
return messagesizeHistogram;
|
|
||||||
}
|
|
||||||
|
|
||||||
public void failOnAsyncOperationFailure() {
|
public void failOnAsyncOperationFailure() {
|
||||||
if (asyncOperationFailure != null) {
|
if (asyncOperationFailure != null) {
|
||||||
@ -202,4 +119,116 @@ public class PulsarActivity extends SimpleActivity implements ActivityDefObserve
|
|||||||
public void asyncOperationFailed(Throwable ex) {
|
public void asyncOperationFailed(Throwable ex) {
|
||||||
this.asyncOperationFailure = ex;
|
this.asyncOperationFailure = ex;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Initialize
|
||||||
|
* - PulsarAdmin object for adding/deleting tenant, namespace, and topic
|
||||||
|
* - PulsarClient object for message publishing and consuming
|
||||||
|
*/
|
||||||
|
private void initPulsarAdminAndClientObj() {
|
||||||
|
PulsarAdminBuilder adminBuilder =
|
||||||
|
PulsarAdmin.builder()
|
||||||
|
.serviceHttpUrl(webSvcUrl);
|
||||||
|
|
||||||
|
ClientBuilder clientBuilder = PulsarClient.builder();
|
||||||
|
|
||||||
|
try {
|
||||||
|
Map<String, Object> clientConfMap = pulsarNBClientConf.getClientConfMap();
|
||||||
|
|
||||||
|
// Override "client.serviceUrl" setting in config.properties
|
||||||
|
clientConfMap.remove("serviceUrl");
|
||||||
|
clientBuilder.loadConf(clientConfMap).serviceUrl(pulsarSvcUrl);
|
||||||
|
|
||||||
|
// Pulsar Authentication
|
||||||
|
String authPluginClassName =
|
||||||
|
(String) pulsarNBClientConf.getClientConfValue(PulsarActivityUtil.CLNT_CONF_KEY.authPulginClassName.label);
|
||||||
|
String authParams =
|
||||||
|
(String) pulsarNBClientConf.getClientConfValue(PulsarActivityUtil.CLNT_CONF_KEY.authParams.label);
|
||||||
|
|
||||||
|
if ( !StringUtils.isAnyBlank(authPluginClassName, authParams) ) {
|
||||||
|
adminBuilder.authentication(authPluginClassName, authParams);
|
||||||
|
clientBuilder.authentication(authPluginClassName, authParams);
|
||||||
|
}
|
||||||
|
|
||||||
|
String useTlsStr =
|
||||||
|
(String) pulsarNBClientConf.getClientConfValue(PulsarActivityUtil.CLNT_CONF_KEY.useTls.label);
|
||||||
|
boolean useTls = BooleanUtils.toBoolean(useTlsStr);
|
||||||
|
|
||||||
|
String tlsTrustCertsFilePath =
|
||||||
|
(String) pulsarNBClientConf.getClientConfValue(PulsarActivityUtil.CLNT_CONF_KEY.tlsTrustCertsFilePath.label);
|
||||||
|
|
||||||
|
String tlsAllowInsecureConnectionStr =
|
||||||
|
(String) pulsarNBClientConf.getClientConfValue(PulsarActivityUtil.CLNT_CONF_KEY.tlsAllowInsecureConnection.label);
|
||||||
|
boolean tlsAllowInsecureConnection = BooleanUtils.toBoolean(tlsAllowInsecureConnectionStr);
|
||||||
|
|
||||||
|
String tlsHostnameVerificationEnableStr =
|
||||||
|
(String) pulsarNBClientConf.getClientConfValue(PulsarActivityUtil.CLNT_CONF_KEY.tlsHostnameVerificationEnable.label);
|
||||||
|
boolean tlsHostnameVerificationEnable = BooleanUtils.toBoolean(tlsHostnameVerificationEnableStr);
|
||||||
|
|
||||||
|
if ( useTls ) {
|
||||||
|
adminBuilder
|
||||||
|
.enableTlsHostnameVerification(tlsHostnameVerificationEnable);
|
||||||
|
|
||||||
|
clientBuilder
|
||||||
|
.enableTlsHostnameVerification(tlsHostnameVerificationEnable);
|
||||||
|
|
||||||
|
if (!StringUtils.isBlank(tlsTrustCertsFilePath)) {
|
||||||
|
adminBuilder.tlsTrustCertsFilePath(tlsTrustCertsFilePath);
|
||||||
|
clientBuilder.tlsTrustCertsFilePath(tlsTrustCertsFilePath);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Put this outside "if (useTls)" block for easier handling of "tlsAllowInsecureConnection"
|
||||||
|
adminBuilder.allowTlsInsecureConnection(tlsAllowInsecureConnection);
|
||||||
|
clientBuilder.allowTlsInsecureConnection(tlsAllowInsecureConnection);
|
||||||
|
|
||||||
|
pulsarAdmin = adminBuilder.build();
|
||||||
|
pulsarClient = clientBuilder.build();
|
||||||
|
|
||||||
|
////////////////
|
||||||
|
// Not supported in Pulsar 2.8.0
|
||||||
|
//
|
||||||
|
// ClientConfigurationData configurationData = pulsarAdmin.getClientConfigData();
|
||||||
|
// logger.debug(configurationData.toString());
|
||||||
|
|
||||||
|
} catch (PulsarClientException e) {
|
||||||
|
logger.error("Fail to create PulsarAdmin and/or PulsarClient object from the global configuration!");
|
||||||
|
throw new RuntimeException("Fail to create PulsarAdmin and/or PulsarClient object from global configuration!");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get Pulsar schema from the definition string
|
||||||
|
*/
|
||||||
|
private void createPulsarSchemaFromConf() {
|
||||||
|
Object value = pulsarNBClientConf.getSchemaConfValue("schema.type");
|
||||||
|
String schemaType = (value != null) ? value.toString() : "";
|
||||||
|
|
||||||
|
if (PulsarActivityUtil.isAvroSchemaTypeStr(schemaType)) {
|
||||||
|
value = pulsarNBClientConf.getSchemaConfValue("schema.definition");
|
||||||
|
String schemaDefStr = (value != null) ? value.toString() : "";
|
||||||
|
pulsarSchema = PulsarActivityUtil.getAvroSchema(schemaType, schemaDefStr);
|
||||||
|
} else if (PulsarActivityUtil.isPrimitiveSchemaTypeStr(schemaType)) {
|
||||||
|
pulsarSchema = PulsarActivityUtil.getPrimitiveTypeSchema((schemaType));
|
||||||
|
} else {
|
||||||
|
throw new RuntimeException("Unsupported schema type string: " + schemaType + "; " +
|
||||||
|
"Only primitive type and Avro type are supported at the moment!");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
public PulsarNBClientConf getPulsarConf() { return this.pulsarNBClientConf;}
|
||||||
|
public String getPulsarSvcUrl() { return this.pulsarSvcUrl;}
|
||||||
|
public String getWebSvcUrl() { return this.webSvcUrl; }
|
||||||
|
public PulsarAdmin getPulsarAdmin() { return this.pulsarAdmin; }
|
||||||
|
public PulsarClient getPulsarClient() { return this.pulsarClient; }
|
||||||
|
public Schema<?> getPulsarSchema() { return pulsarSchema; }
|
||||||
|
|
||||||
|
public Counter getBytesCounter() { return bytesCounter; }
|
||||||
|
public Histogram getMessageSizeHistogram() { return messageSizeHistogram; }
|
||||||
|
public Timer getBindTimer() { return bindTimer; }
|
||||||
|
public Timer getExecuteTimer() { return this.executeTimer; }
|
||||||
|
public Timer getCreateTransactionTimer() { return createTransactionTimer; }
|
||||||
|
public Timer getCommitTransactionTimer() { return commitTransactionTimer; }
|
||||||
|
|
||||||
|
public Histogram getE2eMsgProcLatencyHistogram() { return e2eMsgProcLatencyHistogram; }
|
||||||
}
|
}
|
||||||
|
@ -7,7 +7,6 @@ import io.nosqlbench.driver.pulsar.util.PulsarNBClientConf;
 import io.nosqlbench.engine.api.activityimpl.ActivityDef;
 import io.nosqlbench.engine.api.metrics.ActivityMetrics;
 import org.apache.commons.collections.CollectionUtils;
-import org.apache.commons.lang3.BooleanUtils;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
@ -17,6 +16,7 @@ import org.apache.pulsar.client.admin.PulsarAdminException;
 import org.apache.pulsar.client.api.*;
 import org.apache.pulsar.client.api.transaction.Transaction;

+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.HashSet;
@ -39,40 +39,36 @@ public class PulsarSpace {

     private final static Logger logger = LogManager.getLogger(PulsarSpace.class);

+    private final String spaceName;
+
     private final ConcurrentHashMap<String, Producer<?>> producers = new ConcurrentHashMap<>();
     private final ConcurrentHashMap<String, Consumer<?>> consumers = new ConcurrentHashMap<>();
     private final ConcurrentHashMap<String, Reader<?>> readers = new ConcurrentHashMap<>();

-    private final String spaceName;
+    private final PulsarActivity pulsarActivity;
+    private final ActivityDef activityDef;

     private final PulsarNBClientConf pulsarNBClientConf;
     private final String pulsarSvcUrl;
     private final String webSvcUrl;
     private final PulsarAdmin pulsarAdmin;
+    private final PulsarClient pulsarClient;
+    private final Schema<?> pulsarSchema;
+    private final Set<String> pulsarClusterMetadata = new HashSet<>();
     private final Timer createTransactionTimer;

-    private final Set<String> pulsarClusterMetadata = new HashSet<>();
-
-    private PulsarClient pulsarClient = null;
-    private Schema<?> pulsarSchema = null;
-    private final ActivityDef activityDef;
-
-    public PulsarSpace(String name,
-                       PulsarNBClientConf pulsarClientConf,
-                       String pulsarSvcUrl,
-                       String webSvcUrl,
-                       PulsarAdmin pulsarAdmin,
-                       ActivityDef activityDef,
-                       Timer createTransactionTimer) {
+    public PulsarSpace(String name, PulsarActivity pulsarActivity) {
         this.spaceName = name;
-        this.pulsarNBClientConf = pulsarClientConf;
-        this.pulsarSvcUrl = pulsarSvcUrl;
-        this.webSvcUrl = webSvcUrl;
-        this.pulsarAdmin = pulsarAdmin;
-        this.activityDef = activityDef;
-        this.createTransactionTimer = createTransactionTimer;
-
-        createPulsarClientFromConf();
-        createPulsarSchemaFromConf();
+        this.pulsarActivity = pulsarActivity;
+
+        this.pulsarNBClientConf = pulsarActivity.getPulsarConf();
+        this.pulsarSvcUrl = pulsarActivity.getPulsarSvcUrl();
+        this.webSvcUrl = pulsarActivity.getWebSvcUrl();
+        this.pulsarAdmin = pulsarActivity.getPulsarAdmin();
+        this.pulsarClient = pulsarActivity.getPulsarClient();
+        this.pulsarSchema = pulsarActivity.getPulsarSchema();
+        this.activityDef = pulsarActivity.getActivityDef();
+        this.createTransactionTimer = pulsarActivity.getCreateTransactionTimer();

         try {
             Clusters clusters = pulsarAdmin.clusters();
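The try-block that the hunk ends on reads cluster metadata through the shared admin client. As a point of reference, the sketch below shows roughly what such a lookup looks like against the Pulsar admin API; it is an assumption-laden illustration, not the driver's code.

    import java.util.HashSet;
    import java.util.Set;
    import org.apache.pulsar.client.admin.PulsarAdmin;
    import org.apache.pulsar.client.admin.PulsarAdminException;

    public class ClusterMetadataSketch {
        // Collect the broker-side cluster names; treat failures as "no metadata".
        static Set<String> fetchClusterNames(PulsarAdmin pulsarAdmin) {
            Set<String> names = new HashSet<>();
            try {
                names.addAll(pulsarAdmin.clusters().getClusters());
            } catch (PulsarAdminException pae) {
                // Cluster metadata is best-effort here; an empty set is acceptable.
            }
            return names;
        }
    }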
@ -86,114 +82,111 @@ public class PulsarSpace {
         }
     }

-    private void createPulsarClientFromConf() {
-        ClientBuilder clientBuilder = PulsarClient.builder();
-
-        try {
-            Map<String, Object> clientConf = pulsarNBClientConf.getClientConfMap();
-
-            // Override "client.serviceUrl" setting in config.properties
-            clientConf.remove("serviceUrl");
-            clientBuilder.loadConf(clientConf).serviceUrl(pulsarSvcUrl);
-
-            String authPluginClassName =
-                (String) pulsarNBClientConf.getClientConfValue(PulsarActivityUtil.CLNT_CONF_KEY.authPulginClassName.label);
-            String authParams =
-                (String) pulsarNBClientConf.getClientConfValue(PulsarActivityUtil.CLNT_CONF_KEY.authParams.label);
-
-            String useTlsStr =
-                (String) pulsarNBClientConf.getClientConfValue(PulsarActivityUtil.CLNT_CONF_KEY.useTls.label);
-            boolean useTls = BooleanUtils.toBoolean(useTlsStr);
-
-            String tlsTrustCertsFilePath =
-                (String) pulsarNBClientConf.getClientConfValue(PulsarActivityUtil.CLNT_CONF_KEY.tlsTrustCertsFilePath.label);
-
-            String tlsAllowInsecureConnectionStr =
-                (String) pulsarNBClientConf.getClientConfValue(PulsarActivityUtil.CLNT_CONF_KEY.tlsAllowInsecureConnection.label);
-            boolean tlsAllowInsecureConnection = BooleanUtils.toBoolean(tlsAllowInsecureConnectionStr);
-
-            String tlsHostnameVerificationEnableStr =
-                (String) pulsarNBClientConf.getClientConfValue(PulsarActivityUtil.CLNT_CONF_KEY.tlsHostnameVerificationEnable.label);
-            boolean tlsHostnameVerificationEnable = BooleanUtils.toBoolean(tlsHostnameVerificationEnableStr);
-
-            if ( !StringUtils.isAnyBlank(authPluginClassName, authParams) ) {
-                clientBuilder.authentication(authPluginClassName, authParams);
-            }
-
-            if ( useTls ) {
-                clientBuilder
-                    .useKeyStoreTls(useTls)
-                    .enableTlsHostnameVerification(tlsHostnameVerificationEnable);
-
-                if (!StringUtils.isBlank(tlsTrustCertsFilePath))
-                    clientBuilder.tlsTrustCertsFilePath(tlsTrustCertsFilePath);
-            }
-
-            // Put this outside "if (useTls)" block for easier handling of "tlsAllowInsecureConnection"
-            clientBuilder.allowTlsInsecureConnection(tlsAllowInsecureConnection);
-
-            pulsarClient = clientBuilder.build();
-        }
-        catch (PulsarClientException pce) {
-            String errMsg = "Fail to create PulsarClient from global configuration: " + pce.getMessage();
-            logger.error(errMsg);
-            throw new RuntimeException(errMsg);
-        }
-    }
-
-    private void createPulsarSchemaFromConf() {
-        Object value = pulsarNBClientConf.getSchemaConfValue("schema.type");
-        String schemaType = (value != null) ? value.toString() : "";
-
-        if (PulsarActivityUtil.isAvroSchemaTypeStr(schemaType)) {
-            value = pulsarNBClientConf.getSchemaConfValue("schema.definition");
-            String schemaDefStr = (value != null) ? value.toString() : "";
-            pulsarSchema = PulsarActivityUtil.getAvroSchema(schemaType, schemaDefStr);
-        } else if (PulsarActivityUtil.isPrimitiveSchemaTypeStr(schemaType)) {
-            pulsarSchema = PulsarActivityUtil.getPrimitiveTypeSchema((schemaType));
-        } else {
-            throw new RuntimeException("Unsupported schema type string: " + schemaType + "; " +
-                "Only primitive type and Avro type are supported at the moment!");
-        }
-    }
-
-    public PulsarClient getPulsarClient() { return pulsarClient; }
-
-    public PulsarNBClientConf getPulsarClientConf() {
-        return pulsarNBClientConf;
-    }
-
-    public Schema<?> getPulsarSchema() {
-        return pulsarSchema;
-    }
-
+    public PulsarNBClientConf getPulsarClientConf() { return pulsarNBClientConf; }
     public PulsarAdmin getPulsarAdmin() { return pulsarAdmin; }
-
-    public String getPulsarSvcUrl() {
-        return pulsarSvcUrl;
-    }
-
-    public String getWebSvcUrl() { return webSvcUrl; }
-
-    public Set<String> getPulsarClusterMetadata() { return pulsarClusterMetadata; }
+    public PulsarClient getPulsarClient() { return pulsarClient; }
+    public Schema<?> getPulsarSchema() { return pulsarSchema; }
+    public String getPulsarSvcUrl() { return pulsarSvcUrl; }
+    public String getWebSvcUrl() { return webSvcUrl; }
+    public Set<String> getPulsarClusterMetadata() { return pulsarClusterMetadata; }
+
+    // Properly shut down all Pulsar objects (producers, consumers, etc.) that are associated with this space
+    public void shutdownPulsarSpace() {
+        try {
+            for (Producer<?> producer : producers.values()) {
+                if (producer != null) producer.close();
+            }
+
+            for (Consumer<?> consumer : consumers.values()) {
+                if (consumer != null) consumer.close();
+            }
+
+            for (Reader<?> reader : readers.values()) {
+                if (reader != null) reader.close();
+            }
+
+            if (pulsarAdmin != null) pulsarAdmin.close();
+
+            if (pulsarClient != null) pulsarClient.close();
+        }
+        catch (Exception e) {
+            throw new RuntimeException("Unexpected error when closing Pulsar objects!");
+        }
+    }
+
+    /**
+     * Get a proper Pulsar API metrics prefix depending on the API type
+     *
+     * @param apiType - Pulsar API type: producer, consumer, reader, etc.
+     * @param apiObjName - actual name of a producer, a consumer, a reader, etc.
+     * @param topicName - topic name
+     * @return String
+     */
+    private String getPulsarAPIMetricsPrefix(String apiType, String apiObjName, String topicName) {
+        String apiMetricsPrefix;
+
+        if (!PulsarActivityUtil.isValidPulsarApiType(apiType)) {
+            throw new RuntimeException(
+                "Incorrect Pulsar API type. Valid type list: " + PulsarActivityUtil.getValidPulsarApiTypeList());
+        }
+
+        if (!StringUtils.isBlank(apiObjName)) {
+            apiMetricsPrefix = apiObjName + "_";
+        }
+        else {
+            // we want a meaningful name for the API object (producer, consumer, reader, etc.)
+            // we are not appending the topic name
+            apiMetricsPrefix = apiType;
+
+            if (apiType.equalsIgnoreCase(PulsarActivityUtil.PULSAR_API_TYPE.PRODUCER.label))
+                apiMetricsPrefix += producers.size();
+            else if (apiType.equalsIgnoreCase(PulsarActivityUtil.PULSAR_API_TYPE.CONSUMER.label))
+                apiMetricsPrefix += consumers.size();
+            else if (apiType.equalsIgnoreCase(PulsarActivityUtil.PULSAR_API_TYPE.READER.label))
+                apiMetricsPrefix += readers.size();
+
+            apiMetricsPrefix += "_";
+        }
+
+        apiMetricsPrefix += topicName + "_";
+        apiMetricsPrefix = apiMetricsPrefix
+            // default name for tests/demos (in all Pulsar examples) is persistent://public/default/test -> use just the topic name test
+            .replace("persistent://public/default/", "")
+            // always remove topic type
+            .replace("non-persistent://", "")
+            .replace("persistent://", "")
+            // persistent://tenant/namespace/topicname -> tenant_namespace_topicname
+            .replace("/", "_");
+
+        return apiMetricsPrefix;
+    }

     //////////////////////////////////////
     // Producer Processing --> start
     //////////////////////////////////////
-    // Topic name IS mandatory
-    // - It must be set at either global level or cycle level
-    // - If set at both levels, cycle level setting takes precedence
-    private String getEffectiveProducerTopicName(String cycleTopicName) {
-        if (!StringUtils.isBlank(cycleTopicName)) {
-            return cycleTopicName;
-        }
-
-        String globalTopicName = pulsarNBClientConf.getProducerTopicName();
-        if (!StringUtils.isBlank(globalTopicName)) {
-            return globalTopicName;
-        }
-
-        throw new RuntimeException(" topic name must be set at either global level or cycle level!");
+    //
+    private static class ProducerGaugeImpl implements Gauge<Object> {
+        private final Producer<?> producer;
+        private final Function<ProducerStats, Object> valueExtractor;
+
+        ProducerGaugeImpl(Producer<?> producer, Function<ProducerStats, Object> valueExtractor) {
+            this.producer = producer;
+            this.valueExtractor = valueExtractor;
+        }
+
+        @Override
+        public Object getValue() {
+            // see Pulsar bug https://github.com/apache/pulsar/issues/10100
+            // we need to synchronize on producer otherwise we could receive corrupted data
+            synchronized(producer) {
+                return valueExtractor.apply(producer.getStats());
+            }
+        }
+    }
+
+    static Gauge<Object> producerSafeExtractMetric(Producer<?> producer, Function<ProducerStats, Object> valueExtractor) {
+        return new ProducerGaugeImpl(producer, valueExtractor);
     }

     // Producer name is NOT mandatory
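To make the prefix mangling in getPulsarAPIMetricsPrefix() concrete, here is a tiny worked example with hypothetical values (a blank producer name, three producers already cached, and a fully qualified topic). It only exercises the string replacements shown above.

    public class MetricsPrefixExample {
        public static void main(String[] args) {
            String topicName = "persistent://tenant1/ns1/topic1";
            String prefix = ("producer" + 3 + "_" + topicName + "_")
                .replace("persistent://public/default/", "")
                .replace("non-persistent://", "")
                .replace("persistent://", "")
                .replace("/", "_");
            System.out.println(prefix);   // prints producer3_tenant1_ns1_topic1_
        }
    }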
@ -212,7 +205,6 @@ public class PulsarSpace {
         return "";
     }

-
     public Supplier<Transaction> getTransactionSupplier() {
         PulsarClient pulsarClient = getPulsarClient();
         return () -> {
@ -233,8 +225,20 @@ public class PulsarSpace {
         };
     }

-    private static String buildCacheKey(String... keyParts) {
-        return String.join("::", keyParts);
+    // Topic name IS mandatory
+    // - It must be set at either global level or cycle level
+    // - If set at both levels, cycle level setting takes precedence
+    private String getEffectiveProducerTopicName(String cycleTopicName) {
+        if (!StringUtils.isBlank(cycleTopicName)) {
+            return cycleTopicName;
+        }
+
+        String globalTopicName = pulsarNBClientConf.getProducerTopicName();
+        if (!StringUtils.isBlank(globalTopicName)) {
+            return globalTopicName;
+        }
+
+        throw new RuntimeException("Producer topic name must be set at either global level or cycle level!");
     }

     public Producer<?> getProducer(String cycleTopicName, String cycleProducerName) {
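For context on the transaction supplier that the first hunk above touches, the following sketch shows what such a supplier typically does with the Pulsar client API. It assumes the client was built with .enableTransaction(true), that the broker's transaction coordinator is enabled, and a 30-second timeout; none of these specifics come from this diff.

    import java.util.concurrent.TimeUnit;
    import java.util.function.Supplier;
    import org.apache.pulsar.client.api.PulsarClient;
    import org.apache.pulsar.client.api.transaction.Transaction;

    public class TransactionSupplierSketch {
        static Supplier<Transaction> transactionSupplier(PulsarClient pulsarClient) {
            return () -> {
                try {
                    // Open a new transaction lazily each time the supplier is invoked
                    return pulsarClient.newTransaction()
                        .withTransactionTimeout(30, TimeUnit.SECONDS)
                        .build()
                        .get();
                } catch (Exception e) {
                    throw new RuntimeException("Unable to start a Pulsar transaction", e);
                }
            };
        }
    }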
@ -242,10 +246,10 @@ public class PulsarSpace {
         String producerName = getEffectiveProducerName(cycleProducerName);

         if (StringUtils.isBlank(topicName)) {
-            throw new RuntimeException("Producer:: must specify a topic name either at the global level or the cycle level");
+            throw new RuntimeException("Producer:: must specify a topic name");
         }

-        String producerCacheKey = buildCacheKey(producerName, topicName);
+        String producerCacheKey = PulsarActivityUtil.buildCacheKey(producerName, topicName);
         Producer<?> producer = producers.get(producerCacheKey);

         if (producer == null) {
@ -253,37 +257,47 @@ public class PulsarSpace {

             // Get other possible producer settings that are set at global level
             Map<String, Object> producerConf = pulsarNBClientConf.getProducerConfMap();
-            producerConf.put(PulsarActivityUtil.PRODUCER_CONF_STD_KEY.topicName.label, topicName);
-
-            String producerMetricsPrefix;
-            if (!StringUtils.isBlank(producerName)) {
-                producerConf.put(PulsarActivityUtil.PRODUCER_CONF_STD_KEY.producerName.label, producerName);
-                producerMetricsPrefix = producerName + "_";
-            } else {
-                // we want a meaningful name for the producer
-                // we are not appending the topic name
-                producerMetricsPrefix = "producer" + producers.size() + "_" ;
-            }
-
-            producerMetricsPrefix += topicName + "_";
-            producerMetricsPrefix = producerMetricsPrefix
-                .replace("persistent://public/default/", "") // default name for tests/demos (in all Pulsar examples) is persistent://public/default/test -> use just the topic name test
-                .replace("non-persistent://", "") // always remove topic type
-                .replace("persistent://", "")
-                .replace("/","_"); // persistent://tenant/namespace/topicname -> tenant_namespace_topicname
+
+            // Remove global level settings: "topicName" and "producerName"
+            producerConf.remove(PulsarActivityUtil.PRODUCER_CONF_STD_KEY.topicName.label);
+            producerConf.remove(PulsarActivityUtil.PRODUCER_CONF_STD_KEY.producerName.label);
+
+            String producerMetricsPrefix = getPulsarAPIMetricsPrefix(
+                PulsarActivityUtil.PULSAR_API_TYPE.PRODUCER.label,
+                producerName,
+                topicName);

             try {
-                ProducerBuilder<?> producerBuilder = pulsarClient.newProducer(pulsarSchema);
-                producerBuilder.loadConf(producerConf);
+                ProducerBuilder<?> producerBuilder = pulsarClient.
+                    newProducer(pulsarSchema).
+                    loadConf(producerConf).
+                    topic(topicName);
+
+                if (!StringUtils.isAnyBlank(producerName)) {
+                    producerBuilder = producerBuilder.producerName(producerName);
+                }

                 producer = producerBuilder.create();
                 producers.put(producerCacheKey, producer);

-                ActivityMetrics.gauge(activityDef, producerMetricsPrefix + "totalbytessent", safeExtractMetric(producer, (s -> s.getTotalBytesSent() + s.getNumBytesSent())));
-                ActivityMetrics.gauge(activityDef, producerMetricsPrefix + "totalmsgssent", safeExtractMetric(producer, (s -> s.getTotalMsgsSent() + s.getNumMsgsSent())));
-                ActivityMetrics.gauge(activityDef, producerMetricsPrefix + "totalsendfailed", safeExtractMetric(producer, (s -> s.getTotalSendFailed() + s.getNumSendFailed())));
-                ActivityMetrics.gauge(activityDef, producerMetricsPrefix + "totalacksreceived", safeExtractMetric(producer,(s -> s.getTotalAcksReceived() + s.getNumAcksReceived())));
-                ActivityMetrics.gauge(activityDef, producerMetricsPrefix + "sendbytesrate", safeExtractMetric(producer, ProducerStats::getSendBytesRate));
-                ActivityMetrics.gauge(activityDef, producerMetricsPrefix + "sendmsgsrate", safeExtractMetric(producer, ProducerStats::getSendMsgsRate));
+                ActivityMetrics.gauge(activityDef,
+                    producerMetricsPrefix + "total_bytes_sent",
+                    producerSafeExtractMetric(producer, (s -> s.getTotalBytesSent() + s.getNumBytesSent())));
+                ActivityMetrics.gauge(activityDef,
+                    producerMetricsPrefix + "total_msg_sent",
+                    producerSafeExtractMetric(producer, (s -> s.getTotalMsgsSent() + s.getNumMsgsSent())));
+                ActivityMetrics.gauge(activityDef,
+                    producerMetricsPrefix + "total_send_failed",
+                    producerSafeExtractMetric(producer, (s -> s.getTotalSendFailed() + s.getNumSendFailed())));
+                ActivityMetrics.gauge(activityDef,
+                    producerMetricsPrefix + "total_ack_received",
+                    producerSafeExtractMetric(producer,(s -> s.getTotalAcksReceived() + s.getNumAcksReceived())));
+                ActivityMetrics.gauge(activityDef,
+                    producerMetricsPrefix + "send_bytes_rate",
+                    producerSafeExtractMetric(producer, ProducerStats::getSendBytesRate));
+                ActivityMetrics.gauge(activityDef,
+                    producerMetricsPrefix + "send_msg_rate",
+                    producerSafeExtractMetric(producer, ProducerStats::getSendMsgsRate));
             }
             catch (PulsarClientException ple) {
                 throw new RuntimeException("Unable to create a Pulsar producer!", ple);
@ -292,30 +306,7 @@ public class PulsarSpace {

         return producer;
     }
-
-    static Gauge<Object> safeExtractMetric(Producer<?> producer, Function<ProducerStats, Object> valueExtractor) {
-        return new GaugeImpl(producer, valueExtractor);
-    }
-
-    private static class GaugeImpl implements Gauge<Object> {
-        private final Producer<?> producer;
-        private final Function<ProducerStats, Object> valueExtractor;
-
-        GaugeImpl(Producer<?> producer, Function<ProducerStats, Object> valueExtractor) {
-            this.producer = producer;
-            this.valueExtractor = valueExtractor;
-        }
-
-        @Override
-        public Object getValue() {
-            // see Pulsar bug https://github.com/apache/pulsar/issues/10100
-            // we need to synchronize on producer otherwise we could receive corrupted data
-            synchronized(producer) {
-                return valueExtractor.apply(producer.getStats());
-            }
-        }
-    }
-
+    //
     //////////////////////////////////////
     // Producer Processing <-- end
     //////////////////////////////////////
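From a caller's point of view, the producer builder chain above reduces to a few lines of the standard Pulsar client API. The sketch below is illustrative only; the service URL, topic, producer name, and conf entry are assumed values rather than anything defined by this commit.

    import java.util.Map;
    import org.apache.pulsar.client.api.Producer;
    import org.apache.pulsar.client.api.PulsarClient;
    import org.apache.pulsar.client.api.PulsarClientException;

    public class ProducerSketch {
        public static void main(String[] args) throws PulsarClientException {
            PulsarClient client = PulsarClient.builder()
                .serviceUrl("pulsar://localhost:6650")            // assumed URL
                .build();
            Map<String, Object> producerConf = Map.of("sendTimeoutMs", 30000); // assumed setting
            Producer<byte[]> producer = client.newProducer()
                .loadConf(producerConf)
                .topic("persistent://public/default/test")        // assumed topic
                .producerName("nb-producer-0")                    // optional, as in the hunk above
                .create();
            producer.send("hello".getBytes());
            producer.close();
            client.close();
        }
    }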
@ -324,59 +315,28 @@ public class PulsarSpace {
     //////////////////////////////////////
     // Consumer Processing --> start
     //////////////////////////////////////
-    private String getEffectiveTopicNamesStr(String cycleTopicNames) {
-        if (!StringUtils.isBlank(cycleTopicNames)) {
-            return cycleTopicNames;
-        }
-
-        String globalTopicNames = pulsarNBClientConf.getConsumerTopicNames();
-        if (!StringUtils.isBlank(globalTopicNames)) {
-            return globalTopicNames;
-        }
-
-        return "";
-    }
-
-    private List<String> getEffectiveTopicNames(String cycleTopicNames) {
-        String effectiveTopicNamesStr = getEffectiveTopicNamesStr(cycleTopicNames);
-
-        String[] names = effectiveTopicNamesStr.split("[;,]");
-        ArrayList<String> effectiveTopicNameList = new ArrayList<>();
-
-        for (String name : names) {
-            if (!StringUtils.isBlank(name))
-                effectiveTopicNameList.add(name.trim());
-        }
-
-        return effectiveTopicNameList;
-    }
-
-    private String getEffectiveTopicPatternStr(String cycleTopicsPattern) {
-        if (!StringUtils.isBlank(cycleTopicsPattern)) {
-            return cycleTopicsPattern;
-        }
-
-        String globalTopicsPattern = pulsarNBClientConf.getConsumerTopicPattern();
-        if (!StringUtils.isBlank(globalTopicsPattern)) {
-            return globalTopicsPattern;
-        }
-
-        return "";
-    }
-
-    private Pattern getEffectiveTopicPattern(String cycleTopicsPattern) {
-        String effectiveTopicsPatternStr = getEffectiveTopicPatternStr(cycleTopicsPattern);
-        Pattern topicsPattern;
-        try {
-            if (!StringUtils.isBlank(effectiveTopicsPatternStr))
-                topicsPattern = Pattern.compile(effectiveTopicsPatternStr);
-            else
-                topicsPattern = null;
-        } catch (PatternSyntaxException pse) {
-            topicsPattern = null;
-        }
-        return topicsPattern;
+    //
+    private static class ConsumerGaugeImpl implements Gauge<Object> {
+        private final Consumer<?> consumer;
+        private final Function<ConsumerStats, Object> valueExtractor;
+
+        ConsumerGaugeImpl(Consumer<?> consumer, Function<ConsumerStats, Object> valueExtractor) {
+            this.consumer = consumer;
+            this.valueExtractor = valueExtractor;
+        }
+
+        @Override
+        public Object getValue() {
+            // see Pulsar bug https://github.com/apache/pulsar/issues/10100
+            // - this is a bug report for producer stats.
+            // - assume this also applies to consumer stats.
+            synchronized(consumer) {
+                return valueExtractor.apply(consumer.getStats());
+            }
+        }
+    }
+
+    static Gauge<Object> consumerSafeExtractMetric(Consumer<?> consumer, Function<ConsumerStats, Object> valueExtractor) {
+        return new ConsumerGaugeImpl(consumer, valueExtractor);
     }

     private String getEffectiveSubscriptionName(String cycleSubscriptionName) {
@ -404,7 +364,6 @@ public class PulsarSpace {
         return "";
     }

-
     private SubscriptionType getEffectiveSubscriptionType(String cycleSubscriptionType) {
         String effectiveSubscriptionStr = getEffectiveSubscriptionTypeStr(cycleSubscriptionType);
         SubscriptionType subscriptionType = SubscriptionType.Exclusive;
@ -434,78 +393,74 @@ public class PulsarSpace {
         return "";
     }

-    public Consumer<?> getConsumer(String cycleTopicUri,
-                                   String cycleTopicNames,
-                                   String cycleTopicsPattern,
+    public Consumer<?> getConsumer(String cycleTopicName,
                                    String cycleSubscriptionName,
                                    String cycleSubscriptionType,
                                    String cycleConsumerName) {

-        List<String> topicNames = getEffectiveTopicNames(cycleTopicNames);
-        String topicsPatternStr = getEffectiveTopicPatternStr(cycleTopicsPattern);
-        Pattern topicsPattern = getEffectiveTopicPattern(cycleTopicsPattern);
         String subscriptionName = getEffectiveSubscriptionName(cycleSubscriptionName);
         SubscriptionType subscriptionType = getEffectiveSubscriptionType(cycleSubscriptionType);
         String consumerName = getEffectiveConsumerName(cycleConsumerName);

-        if (StringUtils.isBlank(cycleTopicUri) && topicNames.isEmpty() && (topicsPattern == null)) {
-            throw new RuntimeException("Consumer:: \"topic_uri\", \"topic_names\" and \"topics_pattern\" parameters can't be all empty/invalid!");
-        }
-
-        String consumerCacheKey;
-        // precedence sequence:
-        //    topic_names (consumer statement param) >
-        //      topics_pattern (consumer statement param) >
-        //        topic_uri (document level param)
-        if (!topicNames.isEmpty()) {
-            consumerCacheKey = buildCacheKey(
-                consumerName,
-                subscriptionName,
-                String.join("|", topicNames));
-        } else if (topicsPattern != null) {
-            consumerCacheKey = buildCacheKey(
-                consumerName,
-                subscriptionName,
-                topicsPatternStr);
-        } else {
-            consumerCacheKey = buildCacheKey(
-                consumerName,
-                subscriptionName,
-                cycleTopicUri);
-        }
-
+        if (StringUtils.isAnyBlank(cycleTopicName, subscriptionName)) {
+            throw new RuntimeException("Consumer:: must specify a topic name and a subscription name");
+        }
+
+        String consumerCacheKey = PulsarActivityUtil.buildCacheKey(consumerName, subscriptionName, cycleTopicName);
         Consumer<?> consumer = consumers.get(consumerCacheKey);

         if (consumer == null) {
             PulsarClient pulsarClient = getPulsarClient();

-            // Get other possible producer settings that are set at global level
+            // Get other possible consumer settings that are set at global level
             Map<String, Object> consumerConf = new HashMap<>(pulsarNBClientConf.getConsumerConfMap());
-            consumerConf.remove("timeout");
-
-            // Explicit topic names will take precedence over topics pattern
-            if (!topicNames.isEmpty()) {
-                consumerConf.remove(PulsarActivityUtil.CONSUMER_CONF_STD_KEY.topicsPattern.label);
-                consumerConf.put(PulsarActivityUtil.CONSUMER_CONF_STD_KEY.topicNames.label, topicNames);
-            } else if (topicsPattern != null) {
-                consumerConf.remove(PulsarActivityUtil.CONSUMER_CONF_STD_KEY.topicNames.label);
-                consumerConf.put(
-                    PulsarActivityUtil.CONSUMER_CONF_STD_KEY.topicsPattern.label,
-                    getEffectiveTopicPattern(cycleTopicsPattern));
-            } else {
-                topicNames.add(cycleTopicUri);
-                consumerConf.remove(PulsarActivityUtil.CONSUMER_CONF_STD_KEY.topicsPattern.label);
-                consumerConf.put(PulsarActivityUtil.CONSUMER_CONF_STD_KEY.topicNames.label, topicNames);
-            }
-
-            consumerConf.put(PulsarActivityUtil.CONSUMER_CONF_STD_KEY.subscriptionName.label, subscriptionName);
-            consumerConf.put(PulsarActivityUtil.CONSUMER_CONF_STD_KEY.subscriptionType.label, subscriptionType);
-            if (!StringUtils.isBlank(consumerName)) {
-                consumerConf.put(PulsarActivityUtil.CONSUMER_CONF_STD_KEY.consumerName.label, consumerName);
-            }
+
+            // Remove global level settings:
+            // - "topicNames", "topicsPattern", "subscriptionName", "subscriptionType", "consumerName"
+            consumerConf.remove(PulsarActivityUtil.CONSUMER_CONF_STD_KEY.topicNames.label);
+            consumerConf.remove(PulsarActivityUtil.CONSUMER_CONF_STD_KEY.topicsPattern.label);
+            consumerConf.remove(PulsarActivityUtil.CONSUMER_CONF_STD_KEY.subscriptionName.label);
+            consumerConf.remove(PulsarActivityUtil.CONSUMER_CONF_STD_KEY.subscriptionType.label);
+            consumerConf.remove(PulsarActivityUtil.CONSUMER_CONF_STD_KEY.consumerName.label);
+            // Remove non-standard consumer configuration properties
+            consumerConf.remove(PulsarActivityUtil.CONSUMER_CONF_CUSTOM_KEY.timeout.label);

             try {
-                consumer = pulsarClient.newConsumer(pulsarSchema).loadConf(consumerConf).subscribe();
+                ConsumerBuilder<?> consumerBuilder = pulsarClient.
+                    newConsumer(pulsarSchema).
+                    loadConf(consumerConf).
+                    topic(cycleTopicName).
+                    subscriptionName(subscriptionName).
+                    subscriptionType(subscriptionType);
+
+                if (!StringUtils.isBlank(consumerName)) {
+                    consumerBuilder = consumerBuilder.consumerName(consumerName);
+                }
+
+                consumer = consumerBuilder.subscribe();
+
+                String consumerMetricsPrefix = getPulsarAPIMetricsPrefix(
+                    PulsarActivityUtil.PULSAR_API_TYPE.CONSUMER.label,
+                    consumerName,
+                    cycleTopicName);
+
+                ActivityMetrics.gauge(activityDef,
+                    consumerMetricsPrefix + "total_bytes_recv",
+                    consumerSafeExtractMetric(consumer, (s -> s.getTotalBytesReceived() + s.getNumBytesReceived())));
+                ActivityMetrics.gauge(activityDef,
+                    consumerMetricsPrefix + "total_msg_recv",
+                    consumerSafeExtractMetric(consumer, (s -> s.getTotalMsgsReceived() + s.getNumMsgsReceived())));
+                ActivityMetrics.gauge(activityDef,
+                    consumerMetricsPrefix + "total_recv_failed",
+                    consumerSafeExtractMetric(consumer, (s -> s.getTotalReceivedFailed() + s.getNumReceiveFailed())));
+                ActivityMetrics.gauge(activityDef,
+                    consumerMetricsPrefix + "total_acks_sent",
+                    consumerSafeExtractMetric(consumer,(s -> s.getTotalAcksSent() + s.getNumAcksSent())));
+                ActivityMetrics.gauge(activityDef,
+                    consumerMetricsPrefix + "recv_bytes_rate",
+                    consumerSafeExtractMetric(consumer, ConsumerStats::getRateBytesReceived));
+                ActivityMetrics.gauge(activityDef,
+                    consumerMetricsPrefix + "recv_msg_rate",
+                    consumerSafeExtractMetric(consumer, ConsumerStats::getRateMsgsReceived));
             } catch (PulsarClientException ple) {
                 ple.printStackTrace();
                 throw new RuntimeException("Unable to create a Pulsar consumer!");
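Stripped of the driver's config plumbing, the single-topic consumer path above is the standard Pulsar subscribe-and-receive flow. The following minimal sketch uses assumed URL, topic, and subscription values and is not part of this commit.

    import org.apache.pulsar.client.api.*;

    public class ConsumerSketch {
        public static void main(String[] args) throws PulsarClientException {
            try (PulsarClient client = PulsarClient.builder()
                    .serviceUrl("pulsar://localhost:6650")             // assumed URL
                    .build()) {
                Consumer<byte[]> consumer = client.newConsumer()
                    .topic("persistent://public/default/test")         // assumed topic
                    .subscriptionName("nb-sub")                        // assumed subscription
                    .subscriptionType(SubscriptionType.Exclusive)
                    .subscribe();
                Message<byte[]> msg = consumer.receive();              // block for one message
                consumer.acknowledge(msg);
            }
        }
    }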
@ -516,11 +471,186 @@ public class PulsarSpace {

         return consumer;
     }
+    //
     //////////////////////////////////////
     // Consumer Processing <-- end
     //////////////////////////////////////

+
+    //////////////////////////////////////
+    // Multi-topic Consumer Processing --> start
+    //////////////////////////////////////
+    //
+    private String getEffectiveConsumerTopicNameListStr(String cycleTopicNames) {
+        if (!StringUtils.isBlank(cycleTopicNames)) {
+            return cycleTopicNames;
+        }
+
+        String globalTopicNames = pulsarNBClientConf.getConsumerTopicNames();
+        if (!StringUtils.isBlank(globalTopicNames)) {
+            return globalTopicNames;
+        }
+
+        return "";
+    }
+
+    private List<String> getEffectiveConsumerTopicNameList(String cycleTopicNames) {
+        String effectiveTopicNamesStr = getEffectiveConsumerTopicNameListStr(cycleTopicNames);
+
+        String[] names = effectiveTopicNamesStr.split("[;,]");
+        ArrayList<String> effectiveTopicNameList = new ArrayList<>();
+
+        for (String name : names) {
+            if (!StringUtils.isBlank(name))
+                effectiveTopicNameList.add(name.trim());
+        }
+
+        return effectiveTopicNameList;
+    }
+
+    private String getEffectiveConsumerTopicPatternStr(String cycleTopicsPattern) {
+        if (!StringUtils.isBlank(cycleTopicsPattern)) {
+            return cycleTopicsPattern;
+        }
+
+        String globalTopicsPattern = pulsarNBClientConf.getConsumerTopicPattern();
+        if (!StringUtils.isBlank(globalTopicsPattern)) {
+            return globalTopicsPattern;
+        }
+
+        return "";
+    }
+
+    private Pattern getEffectiveConsumerTopicPattern(String cycleTopicsPattern) {
+        String effectiveTopicsPatternStr = getEffectiveConsumerTopicPatternStr(cycleTopicsPattern);
+        Pattern topicsPattern;
+        try {
+            if (!StringUtils.isBlank(effectiveTopicsPatternStr))
+                topicsPattern = Pattern.compile(effectiveTopicsPatternStr);
+            else
+                topicsPattern = null;
+        } catch (PatternSyntaxException pse) {
+            topicsPattern = null;
+        }
+        return topicsPattern;
+    }
+
+    public Consumer<?> getMultiTopicConsumer(
+        String cycleTopicUri,
+        String cycleTopicNameList,
+        String cycleTopicsPattern,
+        String cycleSubscriptionName,
+        String cycleSubscriptionType,
+        String cycleConsumerName) {
+
+        List<String> topicNameList = getEffectiveConsumerTopicNameList(cycleTopicNameList);
+        String topicsPatternStr = getEffectiveConsumerTopicPatternStr(cycleTopicsPattern);
+        Pattern topicsPattern = getEffectiveConsumerTopicPattern(cycleTopicsPattern);
+        String subscriptionName = getEffectiveSubscriptionName(cycleSubscriptionName);
+        SubscriptionType subscriptionType = getEffectiveSubscriptionType(cycleSubscriptionType);
+        String consumerName = getEffectiveConsumerName(cycleConsumerName);
+
+        if ( subscriptionType.equals(SubscriptionType.Exclusive) && (activityDef.getThreads() > 1) ) {
+            throw new RuntimeException("Consumer:: trying to create multiple consumers of " +
+                "\"Exclusive\" subscription type under the same subscription name to the same topic!");
+        }
+
+        if (StringUtils.isBlank(cycleTopicUri) && topicNameList.isEmpty() && (topicsPattern == null)) {
+            throw new RuntimeException("Consumer:: \"topic_uri\", \"topic_names\" and \"topics_pattern\" parameters can't be all empty/invalid!");
+        }
+
+        // precedence sequence:
+        //    topic_names (consumer statement param) >
+        //      topics_pattern (consumer statement param) >
+        //        topic_uri (document level param)
+        String consumerTopicListString;
+        if (!topicNameList.isEmpty()) {
+            consumerTopicListString = String.join("|", topicNameList);
+        } else if (topicsPattern != null) {
+            consumerTopicListString = topicsPatternStr;
+        } else {
+            consumerTopicListString = cycleTopicUri;
+        }
+        String consumerCacheKey = PulsarActivityUtil.buildCacheKey(
+            consumerName,
+            subscriptionName,
+            consumerTopicListString);
+
+        Consumer<?> consumer = consumers.get(consumerCacheKey);
+
+        if (consumer == null) {
+            PulsarClient pulsarClient = getPulsarClient();
+
+            // Get other possible producer settings that are set at global level
+            Map<String, Object> consumerConf = new HashMap<>(pulsarNBClientConf.getConsumerConfMap());
+
+            // Remove global level settings:
+            // - "topicNameList", "topicsPattern", "subscriptionName", "subscriptionType", "consumerName"
+            consumerConf.remove(PulsarActivityUtil.CONSUMER_CONF_STD_KEY.topicNames.label);
+            consumerConf.remove(PulsarActivityUtil.CONSUMER_CONF_STD_KEY.topicsPattern.label);
+            consumerConf.remove(PulsarActivityUtil.CONSUMER_CONF_STD_KEY.subscriptionName.label);
+            consumerConf.remove(PulsarActivityUtil.CONSUMER_CONF_STD_KEY.subscriptionType.label);
+            consumerConf.remove(PulsarActivityUtil.CONSUMER_CONF_STD_KEY.consumerName.label);
+            // Remove non-standard consumer configuration properties
+            consumerConf.remove(PulsarActivityUtil.CONSUMER_CONF_CUSTOM_KEY.timeout.label);
+
+            try {
+                ConsumerBuilder<?> consumerBuilder = pulsarClient.newConsumer(pulsarSchema).
+                    loadConf(consumerConf).
+                    subscriptionName(subscriptionName).
+                    subscriptionType(subscriptionType).
+                    consumerName(consumerName);
+
+                if (!topicNameList.isEmpty()) {
+                    consumerBuilder = consumerBuilder.topics(topicNameList);
+                } else if (topicsPattern != null) {
+                    consumerBuilder = consumerBuilder.topicsPattern(topicsPattern);
+                } else {
+                    consumerBuilder = consumerBuilder.topic(cycleTopicUri);
+                }
+
+                consumer = consumerBuilder.subscribe();
+
+                String consumerMetricsPrefix = getPulsarAPIMetricsPrefix(
+                    PulsarActivityUtil.PULSAR_API_TYPE.PRODUCER.label,
+                    consumerName,
+                    consumerTopicListString);
+
+                ActivityMetrics.gauge(activityDef,
+                    consumerMetricsPrefix + "totalBytesRecvd",
+                    consumerSafeExtractMetric(consumer, (s -> s.getTotalBytesReceived() + s.getNumBytesReceived())));
+                ActivityMetrics.gauge(activityDef,
+                    consumerMetricsPrefix + "totalMsgsRecvd",
+                    consumerSafeExtractMetric(consumer, (s -> s.getTotalMsgsReceived() + s.getNumMsgsReceived())));
+                ActivityMetrics.gauge(activityDef,
+                    consumerMetricsPrefix + "totalRecvdFailed",
+                    consumerSafeExtractMetric(consumer, (s -> s.getTotalReceivedFailed() + s.getNumReceiveFailed())));
+                ActivityMetrics.gauge(activityDef,
+                    consumerMetricsPrefix + "totalAcksSent",
+                    consumerSafeExtractMetric(consumer,(s -> s.getTotalAcksSent() + s.getNumAcksSent())));
+                ActivityMetrics.gauge(activityDef,
+                    consumerMetricsPrefix + "recvdBytesRate",
+                    consumerSafeExtractMetric(consumer, ConsumerStats::getRateBytesReceived));
+                ActivityMetrics.gauge(activityDef,
+                    consumerMetricsPrefix + "recvdMsgsRate",
+                    consumerSafeExtractMetric(consumer, ConsumerStats::getRateMsgsReceived));
+
+            } catch (PulsarClientException ple) {
+                ple.printStackTrace();
+                throw new RuntimeException("Unable to create a Pulsar consumer!");
+            }
+
+            consumers.put(consumerCacheKey, consumer);
+        }
+
+        return consumer;
+    }
+    //
+    //////////////////////////////////////
+    // Multi-topic Consumer Processing <-- end
+    //////////////////////////////////////
+
+
     //////////////////////////////////////
     // Reader Processing --> Start
     //////////////////////////////////////
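The precedence the multi-topic path applies (explicit topic list, then topics pattern, then the single document-level topic URI) maps directly onto the three ConsumerBuilder options. The sketch below illustrates that mapping with assumed subscription settings; it is not the driver's code.

    import java.util.List;
    import java.util.regex.Pattern;
    import org.apache.pulsar.client.api.*;

    public class MultiTopicConsumerSketch {
        static Consumer<byte[]> subscribe(PulsarClient client, List<String> topicNames,
                                          Pattern topicsPattern, String topicUri) throws PulsarClientException {
            ConsumerBuilder<byte[]> builder = client.newConsumer()
                .subscriptionName("nb-multi-sub")               // assumed subscription name
                .subscriptionType(SubscriptionType.Shared);     // Shared tolerates multiple consumers/threads
            if (topicNames != null && !topicNames.isEmpty()) {
                builder = builder.topics(topicNames);           // explicit names win
            } else if (topicsPattern != null) {
                builder = builder.topicsPattern(topicsPattern); // otherwise a regex over topic names
            } else {
                builder = builder.topic(topicUri);              // otherwise the single document-level topic
            }
            return builder.subscribe();
        }
    }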
@ -534,7 +664,7 @@ public class PulsarSpace {
             return globalReaderTopicName;
         }

-        throw new RuntimeException("Reader topic name must be set at either global level or cycle level!");
+        throw new RuntimeException("Reader:: Reader topic name must be set at either global level or cycle level!");
     }

     private String getEffectiveReaderName(String cycleReaderName) {
@ -568,35 +698,32 @@ public class PulsarSpace {
                              String cycleStartMsgPos) {

         String topicName = getEffectiveReaderTopicName(cycleTopicName);
-        if (StringUtils.isBlank(topicName)) {
-            throw new RuntimeException("Reader:: must specify a topic name either at the global level or the cycle level");
-        }
-
         String readerName = getEffectiveReaderName(cycleReaderName);

         String startMsgPosStr = getEffectiveStartMsgPosStr(cycleStartMsgPos);
         if (!PulsarActivityUtil.isValideReaderStartPosition(startMsgPosStr)) {
-            throw new RuntimeException("Reader:: Invalid value for Reader start message position!");
+            throw new RuntimeException("Reader:: Invalid value for reader start message position!");
         }

-        String readerCacheKey = buildCacheKey(topicName, readerName, startMsgPosStr);
+        String readerCacheKey = PulsarActivityUtil.buildCacheKey(topicName, readerName, startMsgPosStr);
         Reader<?> reader = readers.get(readerCacheKey);

         if (reader == null) {
             PulsarClient pulsarClient = getPulsarClient();

             Map<String, Object> readerConf = pulsarNBClientConf.getReaderConfMap();
-            readerConf.put(PulsarActivityUtil.READER_CONF_STD_KEY.topicName.toString(), topicName);
-
-            if (!StringUtils.isBlank(readerName)) {
-                readerConf.put(PulsarActivityUtil.READER_CONF_STD_KEY.readerName.toString(), readerName);
-            }
-            // "reader.startMessagePos" is NOT a standard Pulsar reader conf
+
+            // Remove global level settings: "topicName" and "readerName"
+            readerConf.remove(PulsarActivityUtil.READER_CONF_STD_KEY.topicName.label);
+            readerConf.remove(PulsarActivityUtil.READER_CONF_STD_KEY.readerName.label);
+            // Remove non-standard reader configuration properties
             readerConf.remove(PulsarActivityUtil.READER_CONF_CUSTOM_KEY.startMessagePos.label);

             try {
-                ReaderBuilder<?> readerBuilder = pulsarClient.newReader(pulsarSchema).loadConf(readerConf);
+                ReaderBuilder<?> readerBuilder = pulsarClient.
+                    newReader(pulsarSchema).
+                    loadConf(readerConf).
+                    topic(topicName).
+                    readerName(readerName);

                 MessageId startMsgId = MessageId.latest;
                 if (startMsgPosStr.equalsIgnoreCase(PulsarActivityUtil.READER_MSG_POSITION_TYPE.earliest.label)) {
@ -607,11 +734,8 @@ public class PulsarSpace {
                 //    startMsgId = MessageId.latest;
                 //}

-                if (startMsgId != null) {
-                    readerBuilder = readerBuilder.startMessageId(startMsgId);
-                }
-
-                reader = readerBuilder.create();
+                reader = readerBuilder.startMessageId(startMsgId).create();
             } catch (PulsarClientException ple) {
                 ple.printStackTrace();
                 throw new RuntimeException("Unable to create a Pulsar reader!");
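For reference, a reader built with the chain above behaves like the stand-alone sketch below: it is positioned at a start message id and reads forward without a subscription. The URL, topic, and reader name are assumed values; this is an illustration, not the commit's code.

    import org.apache.pulsar.client.api.*;

    public class ReaderSketch {
        public static void main(String[] args) throws PulsarClientException {
            try (PulsarClient client = PulsarClient.builder()
                    .serviceUrl("pulsar://localhost:6650")             // assumed URL
                    .build()) {
                Reader<byte[]> reader = client.newReader()
                    .topic("persistent://public/default/test")         // assumed topic
                    .readerName("nb-reader-0")                         // assumed name
                    .startMessageId(MessageId.earliest)                // or MessageId.latest, as above
                    .create();
                while (reader.hasMessageAvailable()) {
                    Message<byte[]> msg = reader.readNext();
                    // process msg ...
                }
            }
        }
    }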
@ -20,20 +20,17 @@ public class PulsarSpaceCache {
         this.activity = pulsarActivity;
     }

-    public PulsarSpace getPulsarSpace(String name) {
-        return clientScopes.computeIfAbsent(name, spaceName ->
-            new PulsarSpace(
-                spaceName,
-                activity.getPulsarConf(),
-                activity.getPulsarSvcUrl(),
-                activity.getWebSvcUrl(),
-                activity.getPulsarAdmin(),
-                activity.getActivityDef(),
-                activity.getCreateTransactionTimer()
-            ));
+    public Iterable<PulsarSpace> getAssociatedPulsarSpace() {
+        return clientScopes.values();
     }

-    public PulsarActivity getActivity() {
+    public PulsarActivity getAssociatedPulsarActivity() {
         return activity;
     }

+    public PulsarSpace getPulsarSpace(String name) {
+        return clientScopes.computeIfAbsent(name, spaceName -> new PulsarSpace(spaceName, activity));
+    }
+
+    public PulsarActivity getActivity() { return activity; }
 }
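A plausible reason for the new Iterable accessor (the actual call site is not shown in this hunk) is to let the activity walk every cached space at shutdown and release its Pulsar objects. The sketch below is a hypothetical usage, assuming PulsarSpaceCache lives in the driver's io.nosqlbench.driver.pulsar package.

    import io.nosqlbench.driver.pulsar.PulsarSpace;
    import io.nosqlbench.driver.pulsar.PulsarSpaceCache;

    public class SpaceCacheShutdownSketch {
        static void shutdownAll(PulsarSpaceCache cache) {
            for (PulsarSpace space : cache.getAssociatedPulsarSpace()) {
                space.shutdownPulsarSpace();   // closes producers, consumers, readers, admin, client
            }
        }
    }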
@ -0,0 +1,8 @@
+package io.nosqlbench.driver.pulsar.exception;
+
+public class PulsarDriverParamException extends RuntimeException {
+
+    public PulsarDriverParamException(String message) {
+        super(message);
+    }
+}
@ -0,0 +1,9 @@
+package io.nosqlbench.driver.pulsar.exception;
+
+public class PulsarDriverUnexpectedException extends RuntimeException {
+
+    public PulsarDriverUnexpectedException(String message) {
+        super(message);
+    }
+    public PulsarDriverUnexpectedException(Exception e) { super(e); }
+}
@ -0,0 +1,7 @@
+package io.nosqlbench.driver.pulsar.exception;
+
+public class PulsarDriverUnsupportedOpException extends RuntimeException {
+
+    public PulsarDriverUnsupportedOpException() { super("Unsupported Pulsar driver operation type"); }
+
+}
@ -0,0 +1,10 @@
+package io.nosqlbench.driver.pulsar.exception;
+
+public class PulsarMsgDuplicateException extends RuntimeException {
+
+    public PulsarMsgDuplicateException(boolean asyncPulsarOp, long nbCycleNum, long curMsgSeqId, long prevMsgSeqId) {
+        super("" + (asyncPulsarOp ? "[AsyncAPI]" : "[SyncAPI]") +
+            " Detected duplicate message when message deduplication is enabled (curCycleNum=" + nbCycleNum +
+            ", curMsgSeqId=" + curMsgSeqId + ", prevMsgSeqId=" + prevMsgSeqId + ").");
+    }
+}
@ -0,0 +1,11 @@
+package io.nosqlbench.driver.pulsar.exception;
+
+public class PulsarMsgLossException extends RuntimeException {
+
+    public PulsarMsgLossException(boolean asyncPulsarOp, long nbCycleNum, long curMsgSeqId, long prevMsgSeqId) {
+        super("" + (asyncPulsarOp ? "[AsyncAPI]" : "[SyncAPI]") +
+            " Detected message sequence id gap (curCycleNum=" + nbCycleNum +
+            ", curMsgSeqId=" + curMsgSeqId + ", prevMsgSeqId=" + prevMsgSeqId + "). " +
+            "Some published messages are not received!");
+    }
+}
@ -0,0 +1,11 @@
+package io.nosqlbench.driver.pulsar.exception;
+
+public class PulsarMsgOutOfOrderException extends RuntimeException {
+
+    public PulsarMsgOutOfOrderException(boolean asyncPulsarOp, long nbCycleNum, long curMsgSeqId, long prevMsgSeqId) {
+        super("" + (asyncPulsarOp ? "[AsyncAPI]" : "[SyncAPI]" ) +
+            " Detected message ordering is not guaranteed (curCycleNum=" + nbCycleNum +
+            ", curMsgSeqId=" + curMsgSeqId + ", prevMsgSeqId=" + prevMsgSeqId + "). " +
+            "Older messages are received earlier!");
+    }
+}
@ -1,5 +1,6 @@
 package io.nosqlbench.driver.pulsar.ops;

+import io.nosqlbench.driver.pulsar.PulsarActivity;
 import io.nosqlbench.driver.pulsar.PulsarSpace;
 import io.nosqlbench.engine.api.templating.CommandTemplate;

@ -20,9 +21,10 @@ public abstract class PulsarAdminMapper extends PulsarOpMapper {

     protected PulsarAdminMapper(CommandTemplate cmdTpl,
                                 PulsarSpace clientSpace,
+                                PulsarActivity pulsarActivity,
                                 LongFunction<Boolean> asyncApiFunc,
                                 LongFunction<Boolean> adminDelOpFunc) {
-        super(cmdTpl, clientSpace, asyncApiFunc);
+        super(cmdTpl, clientSpace, pulsarActivity, asyncApiFunc);
         this.adminDelOpFunc = adminDelOpFunc;
     }
 }
@ -1,5 +1,6 @@
 package io.nosqlbench.driver.pulsar.ops;

+import io.nosqlbench.driver.pulsar.PulsarActivity;
 import io.nosqlbench.driver.pulsar.PulsarSpace;
 import io.nosqlbench.engine.api.templating.CommandTemplate;

@ -21,11 +22,12 @@ public class PulsarAdminNamespaceMapper extends PulsarAdminMapper {

     public PulsarAdminNamespaceMapper(CommandTemplate cmdTpl,
                                       PulsarSpace clientSpace,
+                                      PulsarActivity pulsarActivity,
                                       LongFunction<Boolean> asyncApiFunc,
                                       LongFunction<Boolean> adminDelOpFunc,
                                       LongFunction<String> namespaceFunc)
     {
-        super(cmdTpl, clientSpace, asyncApiFunc, adminDelOpFunc);
+        super(cmdTpl, clientSpace, pulsarActivity, asyncApiFunc, adminDelOpFunc);
         this.namespaceFunc = namespaceFunc;
     }

@ -1,17 +1,8 @@
 package io.nosqlbench.driver.pulsar.ops;

 import io.nosqlbench.driver.pulsar.PulsarSpace;
-import org.apache.commons.lang3.StringUtils;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
-import org.apache.pulsar.client.admin.Namespaces;
-import org.apache.pulsar.client.admin.PulsarAdmin;
-import org.apache.pulsar.client.admin.PulsarAdminException;
-import org.apache.pulsar.client.admin.Tenants;
-import org.apache.pulsar.common.policies.data.TenantInfo;
-
-import java.util.Set;
-import java.util.concurrent.CompletableFuture;

 public abstract class PulsarAdminOp extends SyncPulsarOp {

@ -1,5 +1,6 @@
 package io.nosqlbench.driver.pulsar.ops;

+import io.nosqlbench.driver.pulsar.PulsarActivity;
 import io.nosqlbench.driver.pulsar.PulsarSpace;
 import io.nosqlbench.engine.api.templating.CommandTemplate;

@ -23,13 +24,14 @@ public class PulsarAdminTenantMapper extends PulsarAdminMapper {

     public PulsarAdminTenantMapper(CommandTemplate cmdTpl,
                                    PulsarSpace clientSpace,
+                                   PulsarActivity pulsarActivity,
                                    LongFunction<Boolean> asyncApiFunc,
                                    LongFunction<Boolean> adminDelOpFunc,
                                    LongFunction<Set<String>> adminRolesFunc,
                                    LongFunction<Set<String>> allowedClustersFunc,
                                    LongFunction<String> tenantFunc)
     {
-        super(cmdTpl, clientSpace, asyncApiFunc, adminDelOpFunc);
+        super(cmdTpl, clientSpace, pulsarActivity, asyncApiFunc, adminDelOpFunc);
         this.adminRolesFunc = adminRolesFunc;
         this.allowedClustersFunc = allowedClustersFunc;
         this.tenantFunc = tenantFunc;
@@ -4,11 +4,8 @@ import io.nosqlbench.driver.pulsar.PulsarSpace;
import org.apache.commons.lang3.StringUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
-import org.apache.pulsar.client.admin.Namespaces;
-import org.apache.pulsar.client.admin.PulsarAdmin;
-import org.apache.pulsar.client.admin.PulsarAdminException;
-import org.apache.pulsar.client.admin.Tenants;
-import org.apache.pulsar.common.policies.data.TenantInfoImpl;
+import org.apache.pulsar.client.admin.*;
+import org.apache.pulsar.common.policies.data.TenantInfo;

import java.util.Set;
import java.util.concurrent.CompletableFuture;
@@ -45,26 +42,25 @@ public class PulsarAdminTenantOp {

// Admin API - create tenants and namespaces
if (!adminDelOp) {
-TenantInfoImpl tenantInfo = TenantInfoImpl.builder().build();
-tenantInfo.setAdminRoles(adminRoleSet);
-if ( !allowedClusterSet.isEmpty() ) {
-tenantInfo.setAllowedClusters(allowedClusterSet);
-} else {
-tenantInfo.setAllowedClusters(clientSpace.getPulsarClusterMetadata());
-}
+TenantInfo tenantInfo = TenantInfo.builder()
+.adminRoles(adminRoleSet)
+.allowedClusters(!allowedClusterSet.isEmpty() ? allowedClusterSet : clientSpace.getPulsarClusterMetadata())
+.build();

try {
if (!asyncApi) {
tenants.createTenant(tenant, tenantInfo);
-logger.trace("Successfully created tenant \"" + tenant + "\" synchronously!");
+if (logger.isDebugEnabled()) {
+logger.debug("Successful sync creation of tenant {}", tenant);
+}
} else {
CompletableFuture<Void> future = tenants.createTenantAsync(tenant, tenantInfo);
-future.whenComplete((unused, throwable) ->
-logger.trace("Successfully created tenant \"" + tenant + "\" asynchronously!"))
-.exceptionally(ex -> {
-logger.error("Failed to create tenant \"" + tenant + "\" asynchronously!");
+future.whenComplete((unused, throwable) -> {
+if (logger.isDebugEnabled()) {
+logger.debug("Successful async creation of tenant {}", tenant);
+}
+}).exceptionally(ex -> {
+logger.error("Failed async creation of tenant {}", tenant);
return null;
});
}
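For reference, the builder-style TenantInfo used above matches the current Pulsar admin API. Below is a minimal, self-contained sketch of creating a tenant this way; the admin URL, tenant name, role, and cluster names are placeholders rather than values taken from the workload.

// Illustrative sketch only; endpoint, tenant, role, and cluster names are placeholders.
import org.apache.pulsar.client.admin.PulsarAdmin;
import org.apache.pulsar.client.admin.PulsarAdminException;
import org.apache.pulsar.common.policies.data.TenantInfo;

import java.util.Set;

public class CreateTenantSketch {
    public static void main(String[] args) throws Exception {
        try (PulsarAdmin admin = PulsarAdmin.builder()
                .serviceHttpUrl("http://localhost:8080")      // placeholder admin endpoint
                .build()) {
            TenantInfo tenantInfo = TenantInfo.builder()
                .adminRoles(Set.of("admin-role"))             // roles allowed to administer the tenant
                .allowedClusters(Set.of("standalone"))        // clusters the tenant may use
                .build();
            try {
                admin.tenants().createTenant("example-tenant", tenantInfo);
            } catch (PulsarAdminException e) {
                // the tenant may already exist, or the admin endpoint may be unreachable
                e.printStackTrace();
            }
        }
    }
}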
@@ -86,13 +82,19 @@ public class PulsarAdminTenantOp {
if ( nsNum == 0 ) {
if (!asyncApi) {
tenants.deleteTenant(tenant);
-logger.trace("Successfully deleted tenant \"" + tenant + "\" synchronously!");
+if (logger.isDebugEnabled()) {
+logger.debug("Successful sync deletion of tenant {}", tenant);
+}
} else {
CompletableFuture<Void> future = tenants.deleteTenantAsync(tenant);
-future.whenComplete((unused, throwable)
--> logger.trace("Successfully deleted tenant \"" + tenant + "\" asynchronously!"))
-.exceptionally(ex -> {
-logger.error("Failed to delete tenant \"" + tenant + "\" asynchronously!");
+future.whenComplete((unused, throwable) -> {
+if (logger.isDebugEnabled()) {
+logger.debug("Successful async deletion of tenant {}", tenant);
+}
+}).exceptionally(ex -> {
+if (logger.isDebugEnabled()) {
+logger.error("Failed async deletion of tenant {}", tenant);
+}
return null;
});
}
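The async deletion path above relies on the standard CompletableFuture completion callbacks. A generic sketch of that whenComplete/exceptionally pattern, using a plain CompletableFuture in place of the Pulsar admin client:

// Self-contained illustration of the callback chaining used by the async admin calls.
import java.util.concurrent.CompletableFuture;

public class AsyncCompletionSketch {
    public static void main(String[] args) {
        CompletableFuture<Void> operation = CompletableFuture.runAsync(() -> {
            // stand-in for tenants.deleteTenantAsync(tenant)
        });

        CompletableFuture<Void> done = operation.whenComplete((unused, throwable) -> {
            if (throwable == null) {
                System.out.println("operation completed");
            }
        }).exceptionally(ex -> {
            System.err.println("operation failed: " + ex);
            return null;
        });

        done.join();   // wait so the JVM does not exit before the callbacks run
    }
}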
@@ -1,5 +1,6 @@
package io.nosqlbench.driver.pulsar.ops;

+import io.nosqlbench.driver.pulsar.PulsarActivity;
import io.nosqlbench.driver.pulsar.PulsarSpace;
import io.nosqlbench.engine.api.templating.CommandTemplate;
import org.apache.commons.lang3.BooleanUtils;
@@ -24,13 +25,14 @@ public class PulsarAdminTopicMapper extends PulsarAdminMapper {

public PulsarAdminTopicMapper(CommandTemplate cmdTpl,
PulsarSpace clientSpace,
+PulsarActivity pulsarActivity,
LongFunction<Boolean> asyncApiFunc,
LongFunction<Boolean> adminDelOpFunc,
LongFunction<String> topicUriFunc,
LongFunction<String> enablePartionFunc,
LongFunction<String> partitionNumFunc)
{
-super(cmdTpl, clientSpace, asyncApiFunc, adminDelOpFunc);
+super(cmdTpl, clientSpace, pulsarActivity, asyncApiFunc, adminDelOpFunc);
this.topicUriFunc = topicUriFunc;
this.enablePartionFunc = enablePartionFunc;
this.partitionNumFunc = partitionNumFunc;
@@ -1,6 +1,7 @@
package io.nosqlbench.driver.pulsar.ops;

import io.nosqlbench.driver.pulsar.PulsarSpace;
+import io.nosqlbench.driver.pulsar.util.PulsarActivityUtil;
import org.apache.commons.lang3.StringUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
@@ -33,13 +34,7 @@ public class PulsarAdminTopicOp {
this.topicUri = topicUri;
this.partitionTopic = partitionTopic;
this.partitionNum = partitionNum;
+this.fullNsName = PulsarActivityUtil.getFullNamespaceName(this.topicUri);
-// Get tenant/namespace string
-// - topicUri : persistent://<tenant>/<namespace>/<topic>
-// - tmpStr : <tenant>/<namespace>/<topic>
-// - fullNsName : <tenant>/<namespace>
-String tmpStr = StringUtils.substringAfter(this.topicUri,"://");
-this.fullNsName = StringUtils.substringBeforeLast(tmpStr, "/");
}

// Check whether the specified topic already exists
@@ -1,5 +1,6 @@
package io.nosqlbench.driver.pulsar.ops;

+import io.nosqlbench.driver.pulsar.PulsarActivity;
import io.nosqlbench.driver.pulsar.PulsarSpace;
import io.nosqlbench.engine.api.templating.CommandTemplate;

@@ -9,9 +10,10 @@ public class PulsarBatchProducerEndMapper extends PulsarOpMapper {

public PulsarBatchProducerEndMapper(CommandTemplate cmdTpl,
PulsarSpace clientSpace,
+PulsarActivity pulsarActivity,
LongFunction<Boolean> asyncApiFunc)
{
-super(cmdTpl, clientSpace, asyncApiFunc);
+super(cmdTpl, clientSpace, pulsarActivity, asyncApiFunc);
}

@Override
@@ -24,7 +24,8 @@ public class PulsarBatchProducerEndOp extends SyncPulsarOp {

container.clear();
PulsarBatchProducerStartOp.threadLocalBatchMsgContainer.set(null);
-} else {
+}
+else {
throw new BasicError("You tried to end an empty batch message container. This means you" +
" did initiate the batch container properly, or there is an error in your" +
" pulsar op sequencing and ratios.");
@@ -1,22 +1,35 @@
package io.nosqlbench.driver.pulsar.ops;

+import io.nosqlbench.driver.pulsar.PulsarActivity;
import io.nosqlbench.driver.pulsar.PulsarSpace;
+import io.nosqlbench.driver.pulsar.util.PulsarActivityUtil;
import io.nosqlbench.engine.api.templating.CommandTemplate;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
import org.apache.pulsar.client.api.Producer;

+import java.util.HashMap;
+import java.util.Map;
import java.util.function.LongFunction;

public class PulsarBatchProducerMapper extends PulsarOpMapper {

+private final static Logger logger = LogManager.getLogger(PulsarBatchProducerMapper.class);

private final LongFunction<String> keyFunc;
+private final LongFunction<String> propFunc;
private final LongFunction<String> payloadFunc;

public PulsarBatchProducerMapper(CommandTemplate cmdTpl,
PulsarSpace clientSpace,
+PulsarActivity pulsarActivity,
LongFunction<Boolean> asyncApiFunc,
LongFunction<String> keyFunc,
+LongFunction<String> propFunc,
LongFunction<String> payloadFunc) {
-super(cmdTpl, clientSpace, asyncApiFunc);
+super(cmdTpl, clientSpace, pulsarActivity, asyncApiFunc);
this.keyFunc = keyFunc;
+this.propFunc = propFunc;
this.payloadFunc = payloadFunc;
}

@@ -25,9 +38,24 @@ public class PulsarBatchProducerMapper extends PulsarOpMapper {
String msgKey = keyFunc.apply(value);
String msgPayload = payloadFunc.apply(value);

+// Check if msgPropJonStr is valid JSON string with a collection of key/value pairs
+// - if Yes, convert it to a map
+// - otherwise, log an error message and ignore message properties without throwing a runtime exception
+Map<String, String> msgProperties = new HashMap<>();
+String msgPropJsonStr = propFunc.apply(value);
+try {
+msgProperties = PulsarActivityUtil.convertJsonToMap(msgPropJsonStr);
+}
+catch (Exception e) {
+logger.error(
+"PulsarProducerMapper:: Error parsing message property JSON string {}, ignore message properties!",
+msgPropJsonStr);
+}

return new PulsarBatchProducerOp(
clientSpace.getPulsarSchema(),
msgKey,
+msgProperties,
msgPayload
);
}
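The property string is parsed by the driver's own PulsarActivityUtil.convertJsonToMap() helper. As a rough illustration of what such a helper can look like (Jackson is used here purely as an example dependency, not something this module necessarily uses):

// Hypothetical stand-in for a JSON-to-map property parser; falls back to an empty map
// when the input is not valid JSON, mirroring the "log and ignore" behavior above.
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;

import java.util.Collections;
import java.util.Map;

public class MsgPropertiesSketch {
    private static final ObjectMapper MAPPER = new ObjectMapper();

    static Map<String, String> parseProperties(String msgPropJsonStr) {
        try {
            return MAPPER.readValue(msgPropJsonStr, new TypeReference<Map<String, String>>() {});
        } catch (Exception e) {
            // ignore bad property strings instead of failing the cycle
            return Collections.emptyMap();
        }
    }

    public static void main(String[] args) {
        System.out.println(parseProperties("{\"k1\":\"v1\",\"k2\":\"v2\"}")); // {k1=v1, k2=v2}
        System.out.println(parseProperties("not-json"));                       // {}
    }
}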
@@ -12,23 +12,26 @@ import org.apache.pulsar.common.schema.SchemaType;

import java.nio.charset.StandardCharsets;
import java.util.List;
+import java.util.Map;
import java.util.concurrent.CompletableFuture;

public class PulsarBatchProducerOp extends SyncPulsarOp {

private final Schema<?> pulsarSchema;
private final String msgKey;
+private final Map<String, String> msgProperties;
private final String msgPayload;

public PulsarBatchProducerOp(Schema<?> schema,
String key,
+Map<String, String> msgProperties,
String payload) {
this.pulsarSchema = schema;
this.msgKey = key;
+this.msgProperties = msgProperties;
this.msgPayload = payload;
}


@Override
public void run() {
if ((msgPayload == null) || msgPayload.isEmpty()) {
@@ -43,6 +46,9 @@ public class PulsarBatchProducerOp extends SyncPulsarOp {
if ((msgKey != null) && (!msgKey.isEmpty())) {
typedMessageBuilder = typedMessageBuilder.key(msgKey);
}
+if (!msgProperties.isEmpty()) {
+typedMessageBuilder = typedMessageBuilder.properties(msgProperties);
+}

SchemaType schemaType = pulsarSchema.getSchemaInfo().getType();
if (PulsarActivityUtil.isAvroSchemaTypeStr(schemaType.name())) {
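Attaching the key and properties uses the standard TypedMessageBuilder chain from the Pulsar client API. A minimal standalone sketch, with placeholder service URL, topic, key, and property values:

// Illustrative only: builds and sends one keyed message carrying properties.
import org.apache.pulsar.client.api.Producer;
import org.apache.pulsar.client.api.PulsarClient;
import org.apache.pulsar.client.api.Schema;

import java.nio.charset.StandardCharsets;
import java.util.Map;

public class KeyedMessageSketch {
    public static void main(String[] args) throws Exception {
        try (PulsarClient client = PulsarClient.builder()
                .serviceUrl("pulsar://localhost:6650")          // placeholder broker URL
                .build();
             Producer<byte[]> producer = client.newProducer(Schema.BYTES)
                .topic("persistent://public/default/example")   // placeholder topic
                .create()) {
            producer.newMessage()
                .key("example-key")
                .properties(Map.of("MsgSeqId", "42"))           // arbitrary example property
                .value("hello".getBytes(StandardCharsets.UTF_8))
                .send();
        }
    }
}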
@@ -1,5 +1,6 @@
package io.nosqlbench.driver.pulsar.ops;

+import io.nosqlbench.driver.pulsar.PulsarActivity;
import io.nosqlbench.driver.pulsar.PulsarSpace;
import io.nosqlbench.engine.api.templating.CommandTemplate;
import org.apache.pulsar.client.api.Producer;
@@ -12,9 +13,10 @@ public class PulsarBatchProducerStartMapper extends PulsarOpMapper {

public PulsarBatchProducerStartMapper(CommandTemplate cmdTpl,
PulsarSpace clientSpace,
+PulsarActivity pulsarActivity,
LongFunction<Boolean> asyncApiFunc,
LongFunction<Producer<?>> batchProducerFunc) {
-super(cmdTpl, clientSpace, asyncApiFunc);
+super(cmdTpl, clientSpace, pulsarActivity, asyncApiFunc);
this.batchProducerFunc = batchProducerFunc;
}

@@ -3,8 +3,11 @@ package io.nosqlbench.driver.pulsar.ops;
import com.codahale.metrics.Counter;
import com.codahale.metrics.Histogram;
import com.codahale.metrics.Timer;
+import io.nosqlbench.driver.pulsar.PulsarActivity;
import io.nosqlbench.driver.pulsar.PulsarSpace;
import io.nosqlbench.engine.api.templating.CommandTemplate;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
import org.apache.pulsar.client.api.Consumer;
import org.apache.pulsar.client.api.Schema;
import org.apache.pulsar.client.api.transaction.Transaction;
@@ -22,30 +25,31 @@ import java.util.function.Supplier;
*
* For additional parameterization, the command template is also provided.
*/
-public class PulsarConsumerMapper extends PulsarOpMapper {
+public class PulsarConsumerMapper extends PulsarTransactOpMapper {

+private final static Logger logger = LogManager.getLogger(PulsarProducerMapper.class);

private final LongFunction<Consumer<?>> consumerFunc;
-private final Counter bytesCounter;
+private final LongFunction<Boolean> topicMsgDedupFunc;
-private final Histogram messagesizeHistogram;
+private final LongFunction<String> subscriptionTypeFunc;
-private final LongFunction<Boolean> useTransactionFunc;
+private final boolean e2eMsProc;
-private final LongFunction<Supplier<Transaction>> transactionSupplierFunc;
-private final Timer transactionCommitTimer;

public PulsarConsumerMapper(CommandTemplate cmdTpl,
PulsarSpace clientSpace,
+PulsarActivity pulsarActivity,
LongFunction<Boolean> asyncApiFunc,
-LongFunction<Consumer<?>> consumerFunc,
-Counter bytesCounter,
-Histogram messagesizeHistogram,
-Timer transactionCommitTimer,
LongFunction<Boolean> useTransactionFunc,
-LongFunction<Supplier<Transaction>> transactionSupplierFunc) {
+LongFunction<Boolean> seqTrackingFunc,
-super(cmdTpl, clientSpace, asyncApiFunc);
+LongFunction<Supplier<Transaction>> transactionSupplierFunc,
+LongFunction<Boolean> topicMsgDedupFunc,
+LongFunction<Consumer<?>> consumerFunc,
+LongFunction<String> subscriptionTypeFunc,
+boolean e2eMsgProc) {
+super(cmdTpl, clientSpace, pulsarActivity, asyncApiFunc, useTransactionFunc, seqTrackingFunc, transactionSupplierFunc);
this.consumerFunc = consumerFunc;
-this.bytesCounter = bytesCounter;
+this.topicMsgDedupFunc = topicMsgDedupFunc;
-this.messagesizeHistogram = messagesizeHistogram;
+this.subscriptionTypeFunc = subscriptionTypeFunc;
-this.transactionCommitTimer = transactionCommitTimer;
+this.e2eMsProc = e2eMsgProc;
-this.useTransactionFunc = useTransactionFunc;
-this.transactionSupplierFunc = transactionSupplierFunc;
}

@Override
@@ -53,18 +57,23 @@ public class PulsarConsumerMapper extends PulsarOpMapper {
Consumer<?> consumer = consumerFunc.apply(value);
boolean asyncApi = asyncApiFunc.apply(value);
boolean useTransaction = useTransactionFunc.apply(value);
+boolean seqTracking = seqTrackingFunc.apply(value);
Supplier<Transaction> transactionSupplier = transactionSupplierFunc.apply(value);
+boolean topicMsgDedup = topicMsgDedupFunc.apply(value);
+String subscriptionType = subscriptionTypeFunc.apply(value);

return new PulsarConsumerOp(
-consumer,
+pulsarActivity,
-clientSpace.getPulsarSchema(),
asyncApi,
-clientSpace.getPulsarClientConf().getConsumerTimeoutSeconds(),
-bytesCounter,
-messagesizeHistogram,
useTransaction,
+seqTracking,
transactionSupplier,
-transactionCommitTimer
+topicMsgDedup,
-);
+consumer,
+subscriptionType,
+clientSpace.getPulsarSchema(),
+clientSpace.getPulsarClientConf().getConsumerTimeoutSeconds(),
+value,
+e2eMsProc);
}
}
@@ -3,114 +3,309 @@ package io.nosqlbench.driver.pulsar.ops;
import com.codahale.metrics.Counter;
import com.codahale.metrics.Histogram;
import com.codahale.metrics.Timer;
+import io.nosqlbench.driver.pulsar.PulsarActivity;
+import io.nosqlbench.driver.pulsar.exception.*;
import io.nosqlbench.driver.pulsar.util.AvroUtil;
import io.nosqlbench.driver.pulsar.util.PulsarActivityUtil;
+import org.apache.commons.lang3.StringUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.pulsar.client.api.*;
import org.apache.pulsar.client.api.transaction.Transaction;
import org.apache.pulsar.common.schema.SchemaType;

+import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;
-import java.util.function.LongFunction;
import java.util.function.Supplier;

-public class PulsarConsumerOp extends SyncPulsarOp {
+public class PulsarConsumerOp implements PulsarOp {

private final static Logger logger = LogManager.getLogger(PulsarConsumerOp.class);

-private final Consumer<?> consumer;
+private final PulsarActivity pulsarActivity;
-private final Schema<?> pulsarSchema;
private final boolean asyncPulsarOp;
-private final int timeoutSeconds;
-private final Counter bytesCounter;
-private final Histogram messagesizeHistogram;
private final boolean useTransaction;
+private final boolean seqTracking;
private final Supplier<Transaction> transactionSupplier;

+private final boolean topicMsgDedup;
+private final Consumer<?> consumer;
+private final String subscriptionType;
+private final Schema<?> pulsarSchema;
+private final int timeoutSeconds;
+private final boolean e2eMsgProc;
+private final long curCycleNum;

+private long curMsgSeqId;
+private long prevMsgSeqId;

+private final Counter bytesCounter;
+private final Histogram messageSizeHistogram;
private final Timer transactionCommitTimer;

-public PulsarConsumerOp(Consumer<?> consumer, Schema<?> schema, boolean asyncPulsarOp, int timeoutSeconds,
+// keep track of end-to-end message latency
-Counter bytesCounter,
+private final Histogram e2eMsgProcLatencyHistogram;
-Histogram messagesizeHistogram,
-boolean useTransaction,
+public PulsarConsumerOp(
-Supplier<Transaction> transactionSupplier,
+PulsarActivity pulsarActivity,
-Timer transactionCommitTimer) {
+boolean asyncPulsarOp,
-this.consumer = consumer;
+boolean useTransaction,
-this.pulsarSchema = schema;
+boolean seqTracking,
+Supplier<Transaction> transactionSupplier,
+boolean topicMsgDedup,
+Consumer<?> consumer,
+String subscriptionType,
+Schema<?> schema,
+int timeoutSeconds,
+long curCycleNum,
+boolean e2eMsgProc)
+{
+this.pulsarActivity = pulsarActivity;

this.asyncPulsarOp = asyncPulsarOp;
-this.timeoutSeconds = timeoutSeconds;
-this.bytesCounter = bytesCounter;
-this.messagesizeHistogram = messagesizeHistogram;
this.useTransaction = useTransaction;
+this.seqTracking = seqTracking;
this.transactionSupplier = transactionSupplier;
-this.transactionCommitTimer = transactionCommitTimer;
-}

-public void syncConsume() {
+this.topicMsgDedup = topicMsgDedup;
-try {
+this.consumer = consumer;
-Message<?> message;
+this.subscriptionType = subscriptionType;
-if (timeoutSeconds <= 0) {
+this.pulsarSchema = schema;
-// wait forever
+this.timeoutSeconds = timeoutSeconds;
-message = consumer.receive();
+this.curCycleNum = curCycleNum;
-} else {
+this.e2eMsgProc = e2eMsgProc;
-// we cannot use Consumer#receive(timeout, timeunit) due to
-// https://github.com/apache/pulsar/issues/9921
-message = consumer
-.receiveAsync()
-.get(timeoutSeconds, TimeUnit.SECONDS);
-}

-SchemaType schemaType = pulsarSchema.getSchemaInfo().getType();
+this.curMsgSeqId = 0;
-if (PulsarActivityUtil.isAvroSchemaTypeStr(schemaType.name())) {
+this.prevMsgSeqId = (curCycleNum - 1);
-if (logger.isDebugEnabled()) {
-String avroDefStr = pulsarSchema.getSchemaInfo().getSchemaDefinition();

-org.apache.avro.Schema avroSchema =
+this.bytesCounter = pulsarActivity.getBytesCounter();
-AvroUtil.GetSchema_ApacheAvro(avroDefStr);
+this.messageSizeHistogram = pulsarActivity.getMessageSizeHistogram();
+this.transactionCommitTimer = pulsarActivity.getCommitTransactionTimer();

-org.apache.avro.generic.GenericRecord avroGenericRecord =
+this.e2eMsgProcLatencyHistogram = pulsarActivity.getE2eMsgProcLatencyHistogram();
-AvroUtil.GetGenericRecord_ApacheAvro(avroSchema, message.getData());

-logger.debug("msg-key={} msg-payload={}", message.getKey(), avroGenericRecord.toString());
-}
-} else {
-if (logger.isDebugEnabled()) {
-logger.debug("msg-key={} msg-payload={}", message.getKey(), new String(message.getData()));
-}
-}
-int messagesize = message.getData().length;
-bytesCounter.inc(messagesize);
-messagesizeHistogram.update(messagesize);

-if (useTransaction) {
-Transaction transaction = transactionSupplier.get();
-consumer.acknowledgeAsync(message.getMessageId(), transaction).get();

-// little problem: here we are counting the "commit" time
-// inside the overall time spent for the execution of the consume operation
-// we should refactor this operation as for PulsarProducerOp, and use the passed callback
-// to track with precision the time spent for the operation and for the commit
-try (Timer.Context ctx = transactionCommitTimer.time()) {
-transaction.commit().get();
-}
-} else{
-consumer.acknowledge(message.getMessageId());
-}
-} catch (Exception e) {
-throw new RuntimeException(e);
-}
-}

-public void asyncConsume() {
-//TODO: add support for async consume
}

@Override
-public void run() {
+public void run(Runnable timeTracker) {
-if (!asyncPulsarOp)
-syncConsume();
+final Transaction transaction;
-else
+if (useTransaction) {
-asyncConsume();
+// if you are in a transaction you cannot set the schema per-message
+transaction = transactionSupplier.get();
+}
+else {
+transaction = null;
+}

+if (!asyncPulsarOp) {
+Message<?> message;

+try {
+if (timeoutSeconds <= 0) {
+// wait forever
+message = consumer.receive();
+}
+else {
+// we cannot use Consumer#receive(timeout, timeunit) due to
+// https://github.com/apache/pulsar/issues/9921
+message = consumer
+.receiveAsync()
+.get(timeoutSeconds, TimeUnit.SECONDS);
+}

+if (logger.isDebugEnabled()) {
+SchemaType schemaType = pulsarSchema.getSchemaInfo().getType();

+if (PulsarActivityUtil.isAvroSchemaTypeStr(schemaType.name())) {
+String avroDefStr = pulsarSchema.getSchemaInfo().getSchemaDefinition();
+org.apache.avro.Schema avroSchema =
+AvroUtil.GetSchema_ApacheAvro(avroDefStr);
+org.apache.avro.generic.GenericRecord avroGenericRecord =
+AvroUtil.GetGenericRecord_ApacheAvro(avroSchema, message.getData());

+logger.debug("Sync message received: msg-key={}; msg-properties={}; msg-payload={}",
+message.getKey(),
+message.getProperties(),
+avroGenericRecord.toString());
+}
+else {
+logger.debug("Sync message received: msg-key={}; msg-properties={}; msg-payload={}",
+message.getKey(),
+message.getProperties(),
+new String(message.getData()));
+}
+}

+// keep track end-to-end message processing latency
+long e2eMsgLatency = System.currentTimeMillis() - message.getPublishTime();
+if (e2eMsgProc) {
+e2eMsgProcLatencyHistogram.update(e2eMsgLatency);
+}

+// keep track of message ordering and message loss
+String msgSeqIdStr = message.getProperties().get(PulsarActivityUtil.MSG_SEQUENCE_ID);
+if ( (seqTracking) && !StringUtils.isBlank(msgSeqIdStr) ) {
+curMsgSeqId = Long.parseLong(msgSeqIdStr);

+if ( prevMsgSeqId > -1) {
+// normal case: message sequence id is monotonically increasing by 1
+if ((curMsgSeqId - prevMsgSeqId) != 1) {
+// abnormal case: out of ordering
+// - for any subscription type, this check should always hold
+if (curMsgSeqId < prevMsgSeqId) {
+throw new PulsarMsgOutOfOrderException(
+false, curCycleNum, curMsgSeqId, prevMsgSeqId);
+}
+// - this sequence based message loss and message duplicate check can't be used for
+// "Shared" subscription (ignore this check)
+// - TODO: for Key_Shared subscription type, this logic needs to be improved on
+// per-key basis
+else {
+if ( !StringUtils.equalsAnyIgnoreCase(subscriptionType,
+PulsarActivityUtil.SUBSCRIPTION_TYPE.Shared.label,
+PulsarActivityUtil.SUBSCRIPTION_TYPE.Key_Shared.label)) {
+// abnormal case: message loss
+if ((curMsgSeqId - prevMsgSeqId) > 1) {
+throw new PulsarMsgLossException(
+false, curCycleNum, curMsgSeqId, prevMsgSeqId);
+} else if (topicMsgDedup && (curMsgSeqId == prevMsgSeqId)) {
+throw new PulsarMsgDuplicateException(
+false, curCycleNum, curMsgSeqId, prevMsgSeqId);
+}
+}
+}
+}
+}
+}

+int messageSize = message.getData().length;
+bytesCounter.inc(messageSize);
+messageSizeHistogram.update(messageSize);

+if (useTransaction) {
+consumer.acknowledgeAsync(message.getMessageId(), transaction).get();

+// little problem: here we are counting the "commit" time
+// inside the overall time spent for the execution of the consume operation
+// we should refactor this operation as for PulsarProducerOp, and use the passed callback
+// to track with precision the time spent for the operation and for the commit
+try (Timer.Context ctx = transactionCommitTimer.time()) {
+transaction.commit().get();
+}
+}
+else {
+consumer.acknowledge(message.getMessageId());
+}

+}
+catch (Exception e) {
+logger.error(
+"Sync message receiving failed - timeout value: {} seconds ", timeoutSeconds);
+throw new PulsarDriverUnexpectedException("" +
+"Sync message receiving failed - timeout value: " + timeoutSeconds + " seconds ");
+}
+}
+else {
+try {
+CompletableFuture<? extends Message<?>> msgRecvFuture = consumer.receiveAsync();
+if (useTransaction) {
+// add commit step
+msgRecvFuture = msgRecvFuture.thenCompose(msg -> {
+Timer.Context ctx = transactionCommitTimer.time();
+return transaction
+.commit()
+.whenComplete((m,e) -> ctx.close())
+.thenApply(v-> msg);
+}
+);
+}

+msgRecvFuture.whenComplete((message, error) -> {
+int messageSize = message.getData().length;
+bytesCounter.inc(messageSize);
+messageSizeHistogram.update(messageSize);

+if (logger.isDebugEnabled()) {
+SchemaType schemaType = pulsarSchema.getSchemaInfo().getType();

+if (PulsarActivityUtil.isAvroSchemaTypeStr(schemaType.name())) {
+String avroDefStr = pulsarSchema.getSchemaInfo().getSchemaDefinition();
+org.apache.avro.Schema avroSchema =
+AvroUtil.GetSchema_ApacheAvro(avroDefStr);
+org.apache.avro.generic.GenericRecord avroGenericRecord =
+AvroUtil.GetGenericRecord_ApacheAvro(avroSchema, message.getData());

+logger.debug("Async message received: msg-key={}; msg-properties={}; msg-payload={})",
+message.getKey(),
+message.getProperties(),
+avroGenericRecord.toString());
+}
+else {
+logger.debug("Async message received: msg-key={}; msg-properties={}; msg-payload={})",
+message.getKey(),
+message.getProperties(),
+new String(message.getData()));
+}
+}

+long e2eMsgLatency = System.currentTimeMillis() - message.getPublishTime();
+if (e2eMsgProc) {
+e2eMsgProcLatencyHistogram.update(e2eMsgLatency);
+}

+// keep track of message ordering, message loss, and message duplication
+String msgSeqIdStr = message.getProperties().get(PulsarActivityUtil.MSG_SEQUENCE_ID);
+if ( (seqTracking) && !StringUtils.isBlank(msgSeqIdStr) ) {
+curMsgSeqId = Long.parseLong(msgSeqIdStr);

+if (prevMsgSeqId > -1) {
+// normal case: message sequence id is monotonically increasing by 1
+if ((curMsgSeqId - prevMsgSeqId) != 1) {
+// abnormal case: out of ordering
+// - for any subscription type, this check should always hold
+if (curMsgSeqId < prevMsgSeqId) {
+throw new PulsarMsgOutOfOrderException(
+false, curCycleNum, curMsgSeqId, prevMsgSeqId);
+}
+// - this sequence based message loss and message duplicate check can't be used for
+// "Shared" subscription (ignore this check)
+// - TODO: for Key_Shared subscription type, this logic needs to be improved on
+// per-key basis
+else {
+if ( !StringUtils.equalsAnyIgnoreCase(subscriptionType,
+PulsarActivityUtil.SUBSCRIPTION_TYPE.Shared.label,
+PulsarActivityUtil.SUBSCRIPTION_TYPE.Key_Shared.label)) {
+// abnormal case: message loss
+if ((curMsgSeqId - prevMsgSeqId) > 1) {
+throw new PulsarMsgLossException(
+false, curCycleNum, curMsgSeqId, prevMsgSeqId);
+} else if (topicMsgDedup && (curMsgSeqId == prevMsgSeqId)) {
+throw new PulsarMsgDuplicateException(
+false, curCycleNum, curMsgSeqId, prevMsgSeqId);
+}
+}
+}
+}
+}
+}

+if (useTransaction) {
+consumer.acknowledgeAsync(message.getMessageId(), transaction);
+}
+else {
+consumer.acknowledgeAsync(message);
+}

+timeTracker.run();
+}).exceptionally(ex -> {
+pulsarActivity.asyncOperationFailed(ex);
+return null;
+});
+}
+catch (Exception e) {
+throw new PulsarDriverUnexpectedException("Async message receiving failed");
+}
+}
}
}
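The sequence bookkeeping above can be summarized independently of Pulsar. The sketch below classifies each received sequence id as in-order, out-of-order, lost, or duplicated; it is simplified and ignores the Shared/Key_Shared carve-outs and the dedup flag handled in the real op:

// Self-contained sketch of sequence-id tracking over ids that should increase by exactly 1.
public class SeqTrackerSketch {

    enum SeqCheck { OK, OUT_OF_ORDER, LOSS, DUPLICATE }

    private long prevMsgSeqId;

    SeqTrackerSketch(long startingSeqId) {
        this.prevMsgSeqId = startingSeqId;   // analogous to (curCycleNum - 1) above
    }

    SeqCheck onReceive(long curMsgSeqId) {
        SeqCheck result;
        if (curMsgSeqId - prevMsgSeqId == 1) {
            result = SeqCheck.OK;
        } else if (curMsgSeqId < prevMsgSeqId) {
            result = SeqCheck.OUT_OF_ORDER;      // went backwards
        } else if (curMsgSeqId == prevMsgSeqId) {
            result = SeqCheck.DUPLICATE;         // same id seen twice
        } else {
            result = SeqCheck.LOSS;              // a gap larger than 1
        }
        prevMsgSeqId = curMsgSeqId;
        return result;
    }

    public static void main(String[] args) {
        SeqTrackerSketch tracker = new SeqTrackerSketch(-1);
        long[] received = {0, 1, 2, 4, 4, 3};
        for (long id : received) {
            System.out.println(id + " -> " + tracker.onReceive(id));
        }
        // 0 -> OK, 1 -> OK, 2 -> OK, 4 -> LOSS, 4 -> DUPLICATE, 3 -> OUT_OF_ORDER
    }
}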
@@ -1,23 +1,29 @@
package io.nosqlbench.driver.pulsar.ops;

+import io.nosqlbench.driver.pulsar.PulsarActivity;
import io.nosqlbench.driver.pulsar.PulsarSpace;
import io.nosqlbench.engine.api.templating.CommandTemplate;
import org.apache.pulsar.client.api.Producer;
import org.apache.pulsar.client.api.Schema;
+import org.apache.pulsar.client.api.transaction.Transaction;

import java.util.function.LongFunction;
+import java.util.function.Supplier;

public abstract class PulsarOpMapper implements LongFunction<PulsarOp> {
protected final CommandTemplate cmdTpl;
protected final PulsarSpace clientSpace;
+protected final PulsarActivity pulsarActivity;
protected final LongFunction<Boolean> asyncApiFunc;

public PulsarOpMapper(CommandTemplate cmdTpl,
PulsarSpace clientSpace,
+PulsarActivity pulsarActivity,
LongFunction<Boolean> asyncApiFunc)
{
this.cmdTpl = cmdTpl;
this.clientSpace = clientSpace;
+this.pulsarActivity = pulsarActivity;
this.asyncApiFunc = asyncApiFunc;
}
}
@@ -1,14 +1,18 @@
package io.nosqlbench.driver.pulsar.ops;

-import com.codahale.metrics.Counter;
-import com.codahale.metrics.Histogram;
import io.nosqlbench.driver.pulsar.PulsarActivity;
import io.nosqlbench.driver.pulsar.PulsarSpace;
+import io.nosqlbench.driver.pulsar.util.PulsarActivityUtil;
import io.nosqlbench.engine.api.templating.CommandTemplate;
+import org.apache.commons.lang3.RandomUtils;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
import org.apache.pulsar.client.api.Producer;
-import org.apache.pulsar.client.api.Schema;
import org.apache.pulsar.client.api.transaction.Transaction;

+import java.util.HashMap;
+import java.util.Map;
import java.util.function.LongFunction;
import java.util.function.Supplier;

|
|||||||
*
|
*
|
||||||
* For additional parameterization, the command template is also provided.
|
* For additional parameterization, the command template is also provided.
|
||||||
*/
|
*/
|
||||||
public class PulsarProducerMapper extends PulsarOpMapper {
|
public class PulsarProducerMapper extends PulsarTransactOpMapper {
|
||||||
|
|
||||||
|
private final static Logger logger = LogManager.getLogger(PulsarProducerMapper.class);
|
||||||
|
|
||||||
private final LongFunction<Producer<?>> producerFunc;
|
private final LongFunction<Producer<?>> producerFunc;
|
||||||
|
private final LongFunction<String> seqErrSimuTypeFunc;
|
||||||
private final LongFunction<String> keyFunc;
|
private final LongFunction<String> keyFunc;
|
||||||
|
private final LongFunction<String> propFunc;
|
||||||
private final LongFunction<String> payloadFunc;
|
private final LongFunction<String> payloadFunc;
|
||||||
private final PulsarActivity pulsarActivity;
|
|
||||||
private final LongFunction<Boolean> useTransactionFunc;
|
|
||||||
private final LongFunction<Supplier<Transaction>> transactionSupplierFunc;
|
|
||||||
|
|
||||||
public PulsarProducerMapper(CommandTemplate cmdTpl,
|
public PulsarProducerMapper(CommandTemplate cmdTpl,
|
||||||
PulsarSpace clientSpace,
|
PulsarSpace clientSpace,
|
||||||
|
PulsarActivity pulsarActivity,
|
||||||
LongFunction<Boolean> asyncApiFunc,
|
LongFunction<Boolean> asyncApiFunc,
|
||||||
LongFunction<Producer<?>> producerFunc,
|
|
||||||
LongFunction<String> keyFunc,
|
|
||||||
LongFunction<String> payloadFunc,
|
|
||||||
LongFunction<Boolean> useTransactionFunc,
|
LongFunction<Boolean> useTransactionFunc,
|
||||||
|
LongFunction<Boolean> seqTrackingFunc,
|
||||||
LongFunction<Supplier<Transaction>> transactionSupplierFunc,
|
LongFunction<Supplier<Transaction>> transactionSupplierFunc,
|
||||||
PulsarActivity pulsarActivity) {
|
LongFunction<Producer<?>> producerFunc,
|
||||||
super(cmdTpl, clientSpace, asyncApiFunc);
|
LongFunction<String> seqErrSimuTypeFunc,
|
||||||
|
LongFunction<String> keyFunc,
|
||||||
|
LongFunction<String> propFunc,
|
||||||
|
LongFunction<String> payloadFunc) {
|
||||||
|
super(cmdTpl, clientSpace, pulsarActivity, asyncApiFunc, useTransactionFunc, seqTrackingFunc, transactionSupplierFunc);
|
||||||
|
|
||||||
this.producerFunc = producerFunc;
|
this.producerFunc = producerFunc;
|
||||||
|
this.seqErrSimuTypeFunc = seqErrSimuTypeFunc;
|
||||||
this.keyFunc = keyFunc;
|
this.keyFunc = keyFunc;
|
||||||
|
this.propFunc = propFunc;
|
||||||
this.payloadFunc = payloadFunc;
|
this.payloadFunc = payloadFunc;
|
||||||
this.pulsarActivity = pulsarActivity;
|
|
||||||
this.useTransactionFunc = useTransactionFunc;
|
|
||||||
this.transactionSupplierFunc = transactionSupplierFunc;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public PulsarOp apply(long value) {
|
public PulsarOp apply(long value) {
|
||||||
Producer<?> producer = producerFunc.apply(value);
|
|
||||||
boolean asyncApi = asyncApiFunc.apply(value);
|
boolean asyncApi = asyncApiFunc.apply(value);
|
||||||
|
boolean useTransaction = useTransactionFunc.apply(value);
|
||||||
|
boolean seqTracking = seqTrackingFunc.apply(value);
|
||||||
|
Supplier<Transaction> transactionSupplier = transactionSupplierFunc.apply(value);
|
||||||
|
|
||||||
|
Producer<?> producer = producerFunc.apply(value);
|
||||||
|
|
||||||
|
// Simulate error 10% of the time
|
||||||
|
float rndVal = RandomUtils.nextFloat(0, 1.0f);
|
||||||
|
boolean simulationError = (rndVal >= 0) && (rndVal < 0.1f);
|
||||||
|
String seqErrSimuType = seqErrSimuTypeFunc.apply(value);
|
||||||
|
boolean simulateMsgOutofOrder = simulationError &&
|
||||||
|
!StringUtils.isBlank(seqErrSimuType) &&
|
||||||
|
StringUtils.equalsIgnoreCase(seqErrSimuType, PulsarActivityUtil.SEQ_ERROR_SIMU_TYPE.OutOfOrder.label);
|
||||||
|
boolean simulateMsgLoss = simulationError &&
|
||||||
|
!StringUtils.isBlank(seqErrSimuType) &&
|
||||||
|
StringUtils.equalsIgnoreCase(seqErrSimuType, PulsarActivityUtil.SEQ_ERROR_SIMU_TYPE.MsgLoss.label);
|
||||||
|
boolean simulateMsgDup = simulationError &&
|
||||||
|
!StringUtils.isBlank(seqErrSimuType) &&
|
||||||
|
StringUtils.equalsIgnoreCase(seqErrSimuType, PulsarActivityUtil.SEQ_ERROR_SIMU_TYPE.MsgDup.label);
|
||||||
|
|
||||||
String msgKey = keyFunc.apply(value);
|
String msgKey = keyFunc.apply(value);
|
||||||
String msgPayload = payloadFunc.apply(value);
|
String msgPayload = payloadFunc.apply(value);
|
||||||
boolean useTransaction = useTransactionFunc.apply(value);
|
|
||||||
Supplier<Transaction> transactionSupplier = transactionSupplierFunc.apply(value);
|
// Check if msgPropJonStr is valid JSON string with a collection of key/value pairs
|
||||||
|
// - if Yes, convert it to a map
|
||||||
|
// - otherwise, log an error message and ignore message properties without throwing a runtime exception
|
||||||
|
Map<String, String> msgProperties = new HashMap<>();
|
||||||
|
String msgPropJsonStr = propFunc.apply(value);
|
||||||
|
if (!StringUtils.isBlank(msgPropJsonStr)) {
|
||||||
|
try {
|
||||||
|
msgProperties = PulsarActivityUtil.convertJsonToMap(msgPropJsonStr);
|
||||||
|
|
||||||
|
} catch (Exception e) {
|
||||||
|
logger.error(
|
||||||
|
"Error parsing message property JSON string {}, ignore message properties!",
|
||||||
|
msgPropJsonStr);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set message sequence tracking property
|
||||||
|
if (seqTracking) {
|
||||||
|
// normal case
|
||||||
|
if (!simulateMsgOutofOrder && !simulateMsgDup) {
|
||||||
|
msgProperties.put(PulsarActivityUtil.MSG_SEQUENCE_ID, String.valueOf(value));
|
||||||
|
}
|
||||||
|
// simulate message out of order
|
||||||
|
else if ( simulateMsgOutofOrder ) {
|
||||||
|
int rndmOffset = 2;
|
||||||
|
msgProperties.put(PulsarActivityUtil.MSG_SEQUENCE_ID,
|
||||||
|
String.valueOf((value > rndmOffset) ? (value-rndmOffset) : value));
|
||||||
|
}
|
||||||
|
// simulate message duplication
|
||||||
|
else {
|
||||||
|
msgProperties.put(PulsarActivityUtil.MSG_SEQUENCE_ID, String.valueOf(value-1));
|
||||||
|
}
|
||||||
|
// message loss simulation is not done by message property
|
||||||
|
// we simply skip sending message in the current NB cycle
|
||||||
|
}
|
||||||
|
|
||||||
return new PulsarProducerOp(
|
return new PulsarProducerOp(
|
||||||
producer,
|
pulsarActivity,
|
||||||
clientSpace.getPulsarSchema(),
|
|
||||||
asyncApi,
|
asyncApi,
|
||||||
useTransaction,
|
useTransaction,
|
||||||
transactionSupplier,
|
transactionSupplier,
|
||||||
|
producer,
|
||||||
|
clientSpace.getPulsarSchema(),
|
||||||
msgKey,
|
msgKey,
|
||||||
|
msgProperties,
|
||||||
msgPayload,
|
msgPayload,
|
||||||
pulsarActivity
|
simulateMsgLoss);
|
||||||
);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
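The error-simulation branches above only differ in how the sequence-id property is derived from the cycle number. A compact sketch of that derivation (the mode names and the fixed offset are illustrative, not taken from the driver's configuration surface):

// Sketch of deriving the sequence-id property for normal, out-of-order, and duplicate modes.
public class SeqErrorSimulationSketch {

    enum SimuMode { NONE, OUT_OF_ORDER, DUPLICATE }

    static String seqIdPropertyFor(long cycle, SimuMode mode) {
        switch (mode) {
            case OUT_OF_ORDER:
                long offset = 2;                              // same fixed offset as above
                return String.valueOf(cycle > offset ? cycle - offset : cycle);
            case DUPLICATE:
                return String.valueOf(cycle - 1);             // re-use the previous cycle's id
            default:
                return String.valueOf(cycle);                 // normal: property == cycle number
        }
    }

    public static void main(String[] args) {
        System.out.println(seqIdPropertyFor(10, SimuMode.NONE));         // 10
        System.out.println(seqIdPropertyFor(10, SimuMode.OUT_OF_ORDER)); // 8
        System.out.println(seqIdPropertyFor(10, SimuMode.DUPLICATE));    // 9
    }
}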
@@ -4,8 +4,11 @@ import com.codahale.metrics.Counter;
import com.codahale.metrics.Histogram;
import com.codahale.metrics.Timer;
import io.nosqlbench.driver.pulsar.PulsarActivity;
+import io.nosqlbench.driver.pulsar.exception.PulsarDriverParamException;
+import io.nosqlbench.driver.pulsar.exception.PulsarDriverUnexpectedException;
import io.nosqlbench.driver.pulsar.util.AvroUtil;
import io.nosqlbench.driver.pulsar.util.PulsarActivityUtil;
+import org.apache.commons.lang3.StringUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.pulsar.client.api.*;
@@ -15,6 +18,7 @@ import org.apache.pulsar.client.impl.schema.generic.GenericAvroSchema;
import org.apache.pulsar.common.schema.SchemaType;

import java.nio.charset.StandardCharsets;
+import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.function.Supplier;
@@ -23,57 +27,87 @@ public class PulsarProducerOp implements PulsarOp {

private final static Logger logger = LogManager.getLogger(PulsarProducerOp.class);

-private final Producer<?> producer;
-private final Schema<?> pulsarSchema;
-private final String msgKey;
-private final String msgPayload;
-private final boolean asyncPulsarOp;
-private final Counter bytesCounter;
-private final Histogram messagesizeHistogram;
private final PulsarActivity pulsarActivity;

+private final boolean asyncPulsarOp;
private final boolean useTransaction;
private final Supplier<Transaction> transactionSupplier;

-public PulsarProducerOp(Producer<?> producer,
+private final Producer<?> producer;
-Schema<?> schema,
+private final Schema<?> pulsarSchema;
-boolean asyncPulsarOp,
+private final String msgKey;
-boolean useTransaction,
+private final Map<String, String> msgProperties;
-Supplier<Transaction> transactionSupplier,
+private final String msgPayload;
-String key,
+private final boolean simulateMsgLoss;
-String payload,
-PulsarActivity pulsarActivity) {
+private final Counter bytesCounter;
+private final Histogram messageSizeHistogram;
+private final Timer transactionCommitTimer;

+public PulsarProducerOp( PulsarActivity pulsarActivity,
+boolean asyncPulsarOp,
+boolean useTransaction,
+Supplier<Transaction> transactionSupplier,
+Producer<?> producer,
+Schema<?> schema,
+String key,
+Map<String, String> msgProperties,
+String payload,
+boolean simulateMsgLoss) {
+this.pulsarActivity = pulsarActivity;

+this.asyncPulsarOp = asyncPulsarOp;
+this.useTransaction = useTransaction;
+this.transactionSupplier = transactionSupplier;

this.producer = producer;
this.pulsarSchema = schema;
this.msgKey = key;
+this.msgProperties = msgProperties;
this.msgPayload = payload;
-this.asyncPulsarOp = asyncPulsarOp;
+this.simulateMsgLoss = simulateMsgLoss;
-this.pulsarActivity = pulsarActivity;
this.bytesCounter = pulsarActivity.getBytesCounter();
-this.messagesizeHistogram = pulsarActivity.getMessagesizeHistogram();
+this.messageSizeHistogram = pulsarActivity.getMessageSizeHistogram();
-this.useTransaction = useTransaction;
+this.transactionCommitTimer = pulsarActivity.getCommitTransactionTimer();
-this.transactionSupplier = transactionSupplier;
}

@Override
public void run(Runnable timeTracker) {
-if ((msgPayload == null) || msgPayload.isEmpty()) {
+// Skip this cycle (without sending messages) if we're doing message loss simulation
-throw new RuntimeException("Message payload (\"msg-value\") can't be empty!");
+if (simulateMsgLoss) {
+return;
}

+if ( StringUtils.isBlank(msgPayload)) {
+throw new PulsarDriverParamException("Message payload (\"msg-value\") can't be empty!");
+}

TypedMessageBuilder typedMessageBuilder;

final Transaction transaction;
if (useTransaction) {
// if you are in a transaction you cannot set the schema per-message
transaction = transactionSupplier.get();
typedMessageBuilder = producer.newMessage(transaction);
-} else {
+}
+else {
transaction = null;
typedMessageBuilder = producer.newMessage(pulsarSchema);
}
-if ((msgKey != null) && (!msgKey.isEmpty())) {
+// set message key
+if (!StringUtils.isBlank(msgKey)) {
typedMessageBuilder = typedMessageBuilder.key(msgKey);
}

-int messagesize;
+// set message properties
+if ( !msgProperties.isEmpty() ) {
+typedMessageBuilder = typedMessageBuilder.properties(msgProperties);
+}

+// set message payload
+int messageSize;
SchemaType schemaType = pulsarSchema.getSchemaInfo().getType();
if (PulsarActivityUtil.isAvroSchemaTypeStr(schemaType.name())) {
GenericRecord payload = AvroUtil.GetGenericRecord_PulsarAvro(
|
|||||||
);
|
);
|
||||||
typedMessageBuilder = typedMessageBuilder.value(payload);
|
typedMessageBuilder = typedMessageBuilder.value(payload);
|
||||||
// TODO: add a way to calculate the message size for AVRO messages
|
// TODO: add a way to calculate the message size for AVRO messages
|
||||||
messagesize = msgPayload.length();
|
messageSize = msgPayload.length();
|
||||||
} else {
|
} else {
|
||||||
byte[] array = msgPayload.getBytes(StandardCharsets.UTF_8);
|
byte[] array = msgPayload.getBytes(StandardCharsets.UTF_8);
|
||||||
typedMessageBuilder = typedMessageBuilder.value(array);
|
typedMessageBuilder = typedMessageBuilder.value(array);
|
||||||
messagesize = array.length;
|
messageSize = array.length;
|
||||||
}
|
}
|
||||||
messagesizeHistogram.update(messagesize);
|
messageSizeHistogram.update(messageSize);
|
||||||
bytesCounter.inc(messagesize);
|
bytesCounter.inc(messageSize);
|
||||||
|
|
||||||
//TODO: add error handling with failed message production
|
//TODO: add error handling with failed message production
|
||||||
if (!asyncPulsarOp) {
|
if (!asyncPulsarOp) {
|
||||||
try {
|
try {
|
||||||
logger.trace("sending message");
|
logger.trace("Sending message");
|
||||||
typedMessageBuilder.send();
|
typedMessageBuilder.send();
|
||||||
|
|
||||||
if (useTransaction) {
|
if (useTransaction) {
|
||||||
try (Timer.Context ctx = pulsarActivity.getCommitTransactionTimer().time();) {
|
try (Timer.Context ctx = transactionCommitTimer.time()) {
|
||||||
transaction.commit().get();
|
transaction.commit().get();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
} catch (PulsarClientException | ExecutionException | InterruptedException pce) {
|
|
||||||
logger.trace("failed sending message");
|
if (logger.isDebugEnabled()) {
|
||||||
throw new RuntimeException(pce);
|
if (PulsarActivityUtil.isAvroSchemaTypeStr(schemaType.name())) {
|
||||||
|
String avroDefStr = pulsarSchema.getSchemaInfo().getSchemaDefinition();
|
||||||
|
org.apache.avro.Schema avroSchema =
|
||||||
|
AvroUtil.GetSchema_ApacheAvro(avroDefStr);
|
||||||
|
org.apache.avro.generic.GenericRecord avroGenericRecord =
|
||||||
|
AvroUtil.GetGenericRecord_ApacheAvro(avroSchema, msgPayload);
|
||||||
|
|
||||||
|
logger.debug("Sync message sent: msg-key={}; msg-properties={}; msg-payload={})",
|
||||||
|
msgKey,
|
||||||
|
msgProperties,
|
||||||
|
avroGenericRecord.toString());
|
||||||
|
}
|
||||||
|
else {
|
||||||
|
logger.debug("Sync message sent: msg-key={}; msg-properties={}; msg-payload={}",
|
||||||
|
msgKey,
|
||||||
|
msgProperties,
|
||||||
|
msgPayload);
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
catch (PulsarClientException | ExecutionException | InterruptedException pce) {
|
||||||
|
String errMsg =
|
||||||
|
"Sync message sending failed: " +
|
||||||
|
"key - " + msgKey + "; " +
|
||||||
|
"properties - " + msgProperties + "; " +
|
||||||
|
"payload - " + msgPayload;
|
||||||
|
|
||||||
|
logger.trace(errMsg);
|
||||||
|
|
||||||
|
throw new PulsarDriverUnexpectedException(errMsg);
|
||||||
|
}
|
||||||
|
|
||||||
timeTracker.run();
|
timeTracker.run();
|
||||||
} else {
|
}
|
||||||
|
else {
|
||||||
try {
|
try {
|
||||||
// we rely on blockIfQueueIsFull in order to throttle the request in this case
|
// we rely on blockIfQueueIsFull in order to throttle the request in this case
|
||||||
CompletableFuture<?> future = typedMessageBuilder.sendAsync();
|
CompletableFuture<?> future = typedMessageBuilder.sendAsync();
|
||||||
|
|
||||||
if (useTransaction) {
|
if (useTransaction) {
|
||||||
// add commit step
|
// add commit step
|
||||||
future = future.thenCompose(msg -> {
|
future = future.thenCompose(msg -> {
|
||||||
Timer.Context ctx = pulsarActivity.getCommitTransactionTimer().time();;
|
Timer.Context ctx = transactionCommitTimer.time();
|
||||||
return transaction
|
return transaction
|
||||||
.commit()
|
.commit()
|
||||||
.whenComplete((m,e) -> {
|
.whenComplete((m,e) -> ctx.close())
|
||||||
ctx.close();
|
|
||||||
})
|
|
||||||
.thenApply(v-> msg);
|
.thenApply(v-> msg);
|
||||||
}
|
}
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
future.whenComplete((messageId, error) -> {
|
future.whenComplete((messageId, error) -> {
|
||||||
|
if (logger.isDebugEnabled()) {
|
||||||
|
if (PulsarActivityUtil.isAvroSchemaTypeStr(schemaType.name())) {
|
||||||
|
String avroDefStr = pulsarSchema.getSchemaInfo().getSchemaDefinition();
|
||||||
|
org.apache.avro.Schema avroSchema =
|
||||||
|
AvroUtil.GetSchema_ApacheAvro(avroDefStr);
|
||||||
|
org.apache.avro.generic.GenericRecord avroGenericRecord =
|
||||||
|
AvroUtil.GetGenericRecord_ApacheAvro(avroSchema, msgPayload);
|
||||||
|
|
||||||
|
logger.debug("Aysnc message sent: msg-key={}; msg-properties={}; msg-payload={})",
|
||||||
|
msgKey,
|
||||||
|
msgProperties,
|
||||||
|
avroGenericRecord.toString());
|
||||||
|
}
|
||||||
|
else {
|
||||||
|
logger.debug("Aysnc message sent: msg-key={}; msg-properties={}; msg-payload={}",
|
||||||
|
msgKey,
|
||||||
|
msgProperties,
|
||||||
|
msgPayload);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
timeTracker.run();
|
timeTracker.run();
|
||||||
}).exceptionally(ex -> {
|
}).exceptionally(ex -> {
|
||||||
logger.error("Producing message failed: key - " + msgKey + "; payload - " + msgPayload);
|
logger.error("Async message sending failed: " +
|
||||||
|
"key - " + msgKey + "; " +
|
||||||
|
"properties - " + msgProperties + "; " +
|
||||||
|
"payload - " + msgPayload);
|
||||||
|
|
||||||
pulsarActivity.asyncOperationFailed(ex);
|
pulsarActivity.asyncOperationFailed(ex);
|
||||||
return null;
|
return null;
|
||||||
});
|
});
|
||||||
} catch (Exception e) {
|
}
|
||||||
throw new RuntimeException(e);
|
catch (Exception e) {
|
||||||
|
throw new PulsarDriverUnexpectedException(e);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
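The reworked send paths above only rebuild the Avro record for logging when debug output is actually enabled. A minimal sketch of that guard pattern, with hypothetical names standing in for the driver's own fields and the Avro decode:

```java
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

public class DebugGuardSketch {
    private static final Logger logger = LogManager.getLogger(DebugGuardSketch.class);

    // Decoding the payload just to log it is wasteful, so the expensive work
    // only happens when debug logging is active.
    void logSent(String msgKey, String msgPayload) {
        if (logger.isDebugEnabled()) {
            String decoded = expensiveDecode(msgPayload); // hypothetical stand-in for the Avro decode
            logger.debug("Sync message sent: msg-key={}; msg-payload={}", msgKey, decoded);
        }
    }

    private String expensiveDecode(String payload) {
        return payload; // placeholder; the real op rebuilds an Avro GenericRecord here
    }
}
```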
@@ -1,5 +1,6 @@
package io.nosqlbench.driver.pulsar.ops;

+import io.nosqlbench.driver.pulsar.PulsarActivity;
import io.nosqlbench.driver.pulsar.PulsarSpace;
import io.nosqlbench.engine.api.templating.CommandTemplate;
import org.apache.pulsar.client.api.Reader;
@@ -13,10 +14,11 @@ public class PulsarReaderMapper extends PulsarOpMapper {

public PulsarReaderMapper(CommandTemplate cmdTpl,
PulsarSpace clientSpace,
+PulsarActivity pulsarActivity,
LongFunction<Boolean> asyncApiFunc,
LongFunction<Reader<?>> readerFunc)
{
-super(cmdTpl, clientSpace, asyncApiFunc);
+super(cmdTpl, clientSpace, pulsarActivity, asyncApiFunc);
this.readerFunc = readerFunc;
}
@@ -0,0 +1,29 @@
+package io.nosqlbench.driver.pulsar.ops;
+
+import io.nosqlbench.driver.pulsar.PulsarActivity;
+import io.nosqlbench.driver.pulsar.PulsarSpace;
+import io.nosqlbench.engine.api.templating.CommandTemplate;
+import org.apache.pulsar.client.api.transaction.Transaction;
+
+import java.util.function.LongFunction;
+import java.util.function.Supplier;
+
+public abstract class PulsarTransactOpMapper extends PulsarOpMapper {
+protected final LongFunction<Boolean> useTransactionFunc;
+protected final LongFunction<Boolean> seqTrackingFunc;
+protected final LongFunction<Supplier<Transaction>> transactionSupplierFunc;
+
+public PulsarTransactOpMapper(CommandTemplate cmdTpl,
+PulsarSpace clientSpace,
+PulsarActivity pulsarActivity,
+LongFunction<Boolean> asyncApiFunc,
+LongFunction<Boolean> useTransactionFunc,
+LongFunction<Boolean> seqTrackingFunc,
+LongFunction<Supplier<Transaction>> transactionSupplierFunc)
+{
+super(cmdTpl, clientSpace, pulsarActivity, asyncApiFunc);
+this.useTransactionFunc = useTransactionFunc;
+this.seqTrackingFunc = seqTrackingFunc;
+this.transactionSupplierFunc = transactionSupplierFunc;
+}
+}
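The new abstract mapper above carries every knob as a `LongFunction`, so values are resolved per cycle rather than once at startup. A tiny, self-contained illustration of that binding style using only JDK types (the topic naming here is made up for the example):

```java
import java.util.function.LongFunction;

public class PerCycleBindingSketch {
    public static void main(String[] args) {
        // Doc-level switches are captured once and then evaluated per cycle number.
        LongFunction<Boolean> useTransactionFunc = (cycle) -> true;
        LongFunction<String> topicUriFunc =
            (cycle) -> "persistent://public/default/topic-" + (cycle % 4); // hypothetical topic scheme

        for (long cycle = 0; cycle < 3; cycle++) {
            System.out.printf("cycle=%d topic=%s txn=%b%n",
                cycle, topicUriFunc.apply(cycle), useTransactionFunc.apply(cycle));
        }
    }
}
```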
@@ -1,6 +1,8 @@
package io.nosqlbench.driver.pulsar.ops;

import io.nosqlbench.driver.pulsar.*;
+import io.nosqlbench.driver.pulsar.exception.PulsarDriverParamException;
+import io.nosqlbench.driver.pulsar.exception.PulsarDriverUnsupportedOpException;
import io.nosqlbench.driver.pulsar.util.PulsarActivityUtil;
import io.nosqlbench.engine.api.activityconfig.yaml.OpTemplate;
import io.nosqlbench.engine.api.activityimpl.OpDispenser;
@@ -9,6 +11,8 @@ import org.apache.commons.lang3.BooleanUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
+import org.apache.pulsar.client.admin.PulsarAdmin;
+import org.apache.pulsar.client.admin.PulsarAdminException;
import org.apache.pulsar.client.api.Producer;
import org.apache.pulsar.client.api.Consumer;
import org.apache.pulsar.client.api.Reader;
@@ -21,7 +25,9 @@ import java.util.function.LongFunction;
import java.util.function.Supplier;

public class ReadyPulsarOp implements OpDispenser<PulsarOp> {

private final static Logger logger = LogManager.getLogger(ReadyPulsarOp.class);

private final OpTemplate opTpl;
private final CommandTemplate cmdTpl;
private final PulsarSpace clientSpace;
@@ -37,13 +43,13 @@ public class ReadyPulsarOp implements OpDispenser<PulsarOp> {
this.cmdTpl = new CommandTemplate(optpl);

if (cmdTpl.isDynamic("op_scope")) {
-throw new RuntimeException("op_scope must be static");
+throw new PulsarDriverParamException("\"op_scope\" parameter must be static");
}

// TODO: At the moment, only supports static "client"
if (cmdTpl.containsKey("client")) {
if (cmdTpl.isDynamic("client")) {
-throw new RuntimeException("\"client\" can't be made dynamic!");
+throw new PulsarDriverParamException("\"client\" parameter can't be made dynamic!");
} else {
String client_name = cmdTpl.getStatic("client");
this.clientSpace = pcache.getPulsarSpace(client_name);
@@ -63,15 +69,15 @@ public class ReadyPulsarOp implements OpDispenser<PulsarOp> {
private LongFunction<PulsarOp> resolve() {

if (!cmdTpl.containsKey("optype") || !cmdTpl.isStatic("optype")) {
-throw new RuntimeException("Statement parameter \"optype\" must be static and have a valid value!");
+throw new PulsarDriverParamException("[resolve()] \"optype\" parameter must be static and have a valid value!");
}
String stmtOpType = cmdTpl.getStatic("optype");

if (cmdTpl.containsKey("topic_url")) {
-throw new RuntimeException("topic_url is not valid. Perhaps you mean topic_uri ?");
+throw new PulsarDriverParamException("[resolve()] \"topic_url\" parameter is not valid. Perhaps you mean \"topic_uri\"?");
}

-// Global parameter: topic_uri
+// Doc-level parameter: topic_uri
LongFunction<String> topicUriFunc = (l) -> null;
if (cmdTpl.containsKey(PulsarActivityUtil.DOC_LEVEL_PARAMS.TOPIC_URI.label)) {
if (cmdTpl.isStatic(PulsarActivityUtil.DOC_LEVEL_PARAMS.TOPIC_URI.label)) {
@@ -80,60 +86,135 @@ public class ReadyPulsarOp implements OpDispenser<PulsarOp> {
topicUriFunc = (l) -> cmdTpl.getDynamic(PulsarActivityUtil.DOC_LEVEL_PARAMS.TOPIC_URI.label, l);
}
}
+logger.info("topic_uri: {}", topicUriFunc.apply(0));

-// Global parameter: async_api
+// Doc-level parameter: async_api
LongFunction<Boolean> asyncApiFunc = (l) -> false;
if (cmdTpl.containsKey(PulsarActivityUtil.DOC_LEVEL_PARAMS.ASYNC_API.label)) {
if (cmdTpl.isStatic(PulsarActivityUtil.DOC_LEVEL_PARAMS.ASYNC_API.label)) {
boolean value = BooleanUtils.toBoolean(cmdTpl.getStatic(PulsarActivityUtil.DOC_LEVEL_PARAMS.ASYNC_API.label));
asyncApiFunc = (l) -> value;
} else {
-throw new RuntimeException("\"" + PulsarActivityUtil.DOC_LEVEL_PARAMS.ASYNC_API.label + "\" parameter cannot be dynamic!");
+throw new PulsarDriverParamException("[resolve()] \"" + PulsarActivityUtil.DOC_LEVEL_PARAMS.ASYNC_API.label + "\" parameter cannot be dynamic!");
}
}
logger.info("async_api: {}", asyncApiFunc.apply(0));

+// Doc-level parameter: async_api
LongFunction<Boolean> useTransactionFunc = (l) -> false;
if (cmdTpl.containsKey(PulsarActivityUtil.DOC_LEVEL_PARAMS.USE_TRANSACTION.label)) {
if (cmdTpl.isStatic(PulsarActivityUtil.DOC_LEVEL_PARAMS.USE_TRANSACTION.label)) {
boolean value = BooleanUtils.toBoolean(cmdTpl.getStatic(PulsarActivityUtil.DOC_LEVEL_PARAMS.USE_TRANSACTION.label));
useTransactionFunc = (l) -> value;
} else {
-throw new RuntimeException("\"" + PulsarActivityUtil.DOC_LEVEL_PARAMS.USE_TRANSACTION.label + "\" parameter cannot be dynamic!");
+throw new PulsarDriverParamException("[resolve()] \"" + PulsarActivityUtil.DOC_LEVEL_PARAMS.USE_TRANSACTION.label + "\" parameter cannot be dynamic!");
}
}
logger.info("use_transaction: {}", useTransactionFunc.apply(0));

-// Global parameter: admin_delop
+// Doc-level parameter: admin_delop
LongFunction<Boolean> adminDelOpFunc = (l) -> false;
if (cmdTpl.containsKey(PulsarActivityUtil.DOC_LEVEL_PARAMS.ADMIN_DELOP.label)) {
if (cmdTpl.isStatic(PulsarActivityUtil.DOC_LEVEL_PARAMS.ADMIN_DELOP.label))
adminDelOpFunc = (l) -> BooleanUtils.toBoolean(cmdTpl.getStatic(PulsarActivityUtil.DOC_LEVEL_PARAMS.ADMIN_DELOP.label));
else
-throw new RuntimeException("\"" + PulsarActivityUtil.DOC_LEVEL_PARAMS.ADMIN_DELOP.label + "\" parameter cannot be dynamic!");
+throw new PulsarDriverParamException("[resolve()] \"" + PulsarActivityUtil.DOC_LEVEL_PARAMS.ADMIN_DELOP.label + "\" parameter cannot be dynamic!");
}
+logger.info("admin_delop: {}", adminDelOpFunc.apply(0));

+// Doc-level parameter: seq_tracking
+LongFunction<Boolean> seqTrackingFunc = (l) -> false;
+if (cmdTpl.containsKey(PulsarActivityUtil.DOC_LEVEL_PARAMS.SEQ_TRACKING.label)) {
+if (cmdTpl.isStatic(PulsarActivityUtil.DOC_LEVEL_PARAMS.SEQ_TRACKING.label))
+seqTrackingFunc = (l) -> BooleanUtils.toBoolean(cmdTpl.getStatic(PulsarActivityUtil.DOC_LEVEL_PARAMS.SEQ_TRACKING.label));
+else
+throw new PulsarDriverParamException("[resolve()] \"" + PulsarActivityUtil.DOC_LEVEL_PARAMS.SEQ_TRACKING.label + "\" parameter cannot be dynamic!");
+}
+logger.info("seq_tracking: {}", seqTrackingFunc.apply(0));

+// Doc-level parameter: msg_dedup_broker
+LongFunction<Boolean> brokerMsgDedupFunc = (l) -> false;
+if (cmdTpl.containsKey(PulsarActivityUtil.DOC_LEVEL_PARAMS.MSG_DEDUP_BROKER.label)) {
+if (cmdTpl.isStatic(PulsarActivityUtil.DOC_LEVEL_PARAMS.MSG_DEDUP_BROKER.label))
+brokerMsgDedupFunc = (l) -> BooleanUtils.toBoolean(cmdTpl.getStatic(PulsarActivityUtil.DOC_LEVEL_PARAMS.MSG_DEDUP_BROKER.label));
+else
+throw new PulsarDriverParamException("[resolve()] \"" + PulsarActivityUtil.DOC_LEVEL_PARAMS.MSG_DEDUP_BROKER.label + "\" parameter cannot be dynamic!");
+}
+logger.info("msg_dedup_broker: {}", seqTrackingFunc.apply(0));
// TODO: Complete implementation for websocket-producer and managed-ledger
+// Admin operation: create/delete tenant
if ( StringUtils.equalsIgnoreCase(stmtOpType, PulsarActivityUtil.OP_TYPES.ADMIN_TENANT.label) ) {
return resolveAdminTenant(clientSpace, asyncApiFunc, adminDelOpFunc);
-} else if (StringUtils.equalsIgnoreCase(stmtOpType, PulsarActivityUtil.OP_TYPES.ADMIN_NAMESPACE.label)) {
+}
+// Admin operation: create/delete namespace
+else if (StringUtils.equalsIgnoreCase(stmtOpType, PulsarActivityUtil.OP_TYPES.ADMIN_NAMESPACE.label)) {
return resolveAdminNamespace(clientSpace, asyncApiFunc, adminDelOpFunc);
-} else if (StringUtils.equalsIgnoreCase(stmtOpType, PulsarActivityUtil.OP_TYPES.ADMIN_TOPIC.label)) {
+}
+// Admin operation: create/delete topic
+else if (StringUtils.equalsIgnoreCase(stmtOpType, PulsarActivityUtil.OP_TYPES.ADMIN_TOPIC.label)) {
return resolveAdminTopic(clientSpace, topicUriFunc, asyncApiFunc, adminDelOpFunc);
-} else if (StringUtils.equalsIgnoreCase(stmtOpType, PulsarActivityUtil.OP_TYPES.MSG_SEND.label)) {
-return resolveMsgSend(clientSpace, topicUriFunc, asyncApiFunc, useTransactionFunc);
-} else if (StringUtils.equalsIgnoreCase(stmtOpType, PulsarActivityUtil.OP_TYPES.MSG_CONSUME.label)) {
-return resolveMsgConsume(clientSpace, topicUriFunc, asyncApiFunc, useTransactionFunc);
-} else if (StringUtils.equalsIgnoreCase(stmtOpType, PulsarActivityUtil.OP_TYPES.MSG_READ.label)) {
+}
+// Regular/non-admin operation: single message sending (producer)
+else if (StringUtils.equalsIgnoreCase(stmtOpType, PulsarActivityUtil.OP_TYPES.MSG_SEND.label)) {
+return resolveMsgSend(clientSpace, topicUriFunc, asyncApiFunc, useTransactionFunc, seqTrackingFunc);
+}
+// Regular/non-admin operation: single message consuming from a single topic (consumer)
+else if (StringUtils.equalsIgnoreCase(stmtOpType, PulsarActivityUtil.OP_TYPES.MSG_CONSUME.label)) {
+return resolveMsgConsume(
+clientSpace,
+topicUriFunc,
+asyncApiFunc,
+useTransactionFunc,
+seqTrackingFunc,
+brokerMsgDedupFunc,
+false);
+}
+// Regular/non-admin operation: single message consuming from multiple-topics (consumer)
+else if (StringUtils.equalsIgnoreCase(stmtOpType, PulsarActivityUtil.OP_TYPES.MSG_MULTI_CONSUME.label)) {
+return resolveMultiTopicMsgConsume(
+clientSpace,
+topicUriFunc,
+asyncApiFunc,
+useTransactionFunc,
+seqTrackingFunc,
+brokerMsgDedupFunc);
+}
+// Regular/non-admin operation: single message consuming a single topic (reader)
+else if (StringUtils.equalsIgnoreCase(stmtOpType, PulsarActivityUtil.OP_TYPES.MSG_READ.label)) {
return resolveMsgRead(clientSpace, topicUriFunc, asyncApiFunc);
-} else if (StringUtils.equalsIgnoreCase(stmtOpType, PulsarActivityUtil.OP_TYPES.BATCH_MSG_SEND_START.label)) {
+}
+// Regular/non-admin operation: batch message processing - batch start
+else if (StringUtils.equalsIgnoreCase(stmtOpType, PulsarActivityUtil.OP_TYPES.BATCH_MSG_SEND_START.label)) {
return resolveMsgBatchSendStart(clientSpace, topicUriFunc, asyncApiFunc);
-} else if (StringUtils.equalsIgnoreCase(stmtOpType, PulsarActivityUtil.OP_TYPES.BATCH_MSG_SEND.label)) {
+}
+// Regular/non-admin operation: batch message processing - message sending (producer)
+else if (StringUtils.equalsIgnoreCase(stmtOpType, PulsarActivityUtil.OP_TYPES.BATCH_MSG_SEND.label)) {
return resolveMsgBatchSend(clientSpace, asyncApiFunc);
-} else if (StringUtils.equalsIgnoreCase(stmtOpType, PulsarActivityUtil.OP_TYPES.BATCH_MSG_SEND_END.label)) {
+}
+// Regular/non-admin operation: batch message processing - batch send
+else if (StringUtils.equalsIgnoreCase(stmtOpType, PulsarActivityUtil.OP_TYPES.BATCH_MSG_SEND_END.label)) {
return resolveMsgBatchSendEnd(clientSpace, asyncApiFunc);
-} else {
-throw new RuntimeException("Unsupported Pulsar operation type");
+}
+// Regular/non-admin operation: end-to-end message processing - sending message
+else if (StringUtils.equalsIgnoreCase(stmtOpType, PulsarActivityUtil.OP_TYPES.E2E_MSG_PROC_SEND.label)) {
+return resolveMsgSend(clientSpace, topicUriFunc, asyncApiFunc, useTransactionFunc, seqTrackingFunc);
+}
+// Regular/non-admin operation: end-to-end message processing - consuming message
+else if (StringUtils.equalsIgnoreCase(stmtOpType, PulsarActivityUtil.OP_TYPES.E2E_MSG_PROC_CONSUME.label)) {
+return resolveMsgConsume(
+clientSpace,
+topicUriFunc,
+asyncApiFunc,
+useTransactionFunc,
+seqTrackingFunc,
+brokerMsgDedupFunc,
+true);
+}
+// Invalid operation type
+else {
+throw new PulsarDriverUnsupportedOpException();
}
}

@@ -145,7 +226,7 @@ public class ReadyPulsarOp implements OpDispenser<PulsarOp> {
{
if ( cmdTpl.isDynamic("admin_roles") ||
cmdTpl.isDynamic("allowed_clusters") ) {
-throw new RuntimeException("\"admin_roles\" or \"allowed_clusters\" parameter must NOT be dynamic!");
+throw new PulsarDriverParamException("\"admin_roles\" or \"allowed_clusters\" parameter must NOT be dynamic!");
}

LongFunction<Set<String>> adminRolesFunc;
@@ -184,6 +265,7 @@ public class ReadyPulsarOp implements OpDispenser<PulsarOp> {
return new PulsarAdminTenantMapper(
cmdTpl,
clientSpace,
+pulsarActivity,
asyncApiFunc,
adminDelOpFunc,
adminRolesFunc,
@@ -209,6 +291,7 @@ public class ReadyPulsarOp implements OpDispenser<PulsarOp> {
return new PulsarAdminNamespaceMapper(
cmdTpl,
clientSpace,
+pulsarActivity,
asyncApiFunc,
adminDelOpFunc,
namespaceFunc);
@@ -238,6 +321,7 @@ public class ReadyPulsarOp implements OpDispenser<PulsarOp> {
return new PulsarAdminTopicMapper(
cmdTpl,
clientSpace,
+pulsarActivity,
asyncApiFunc,
adminDelOpFunc,
topic_uri_fun,
@@ -249,8 +333,12 @@ public class ReadyPulsarOp implements OpDispenser<PulsarOp> {
PulsarSpace clientSpace,
LongFunction<String> topic_uri_func,
LongFunction<Boolean> async_api_func,
-LongFunction<Boolean> useTransactionFunc
+LongFunction<Boolean> useTransactionFunc,
+LongFunction<Boolean> seqTrackingFunc
) {
+LongFunction<Supplier<Transaction>> transactionSupplierFunc =
+(l) -> clientSpace.getTransactionSupplier(); //TODO make it dependant on current cycle?

LongFunction<String> cycle_producer_name_func;
if (cmdTpl.isStatic("producer_name")) {
cycle_producer_name_func = (l) -> cmdTpl.getStatic("producer_name");
@@ -263,9 +351,19 @@ public class ReadyPulsarOp implements OpDispenser<PulsarOp> {
LongFunction<Producer<?>> producerFunc =
(l) -> clientSpace.getProducer(topic_uri_func.apply(l), cycle_producer_name_func.apply(l));

-LongFunction<Supplier<Transaction>> transactionSupplierFunc =
-(l) -> clientSpace.getTransactionSupplier(); //TODO make it dependant on current cycle?
+// check if we're going to simulate producer message out-of-sequence error
+// - message ordering
+// - message loss
+LongFunction<String> seqErrSimuTypeFunc = (l) -> null;
+if (cmdTpl.containsKey("seqerr_simu")) {
+if (cmdTpl.isStatic("seqerr_simu")) {
+seqErrSimuTypeFunc = (l) -> cmdTpl.getStatic("seqerr_simu");
+} else {
+throw new PulsarDriverParamException("[resolveMsgSend()] \"seqerr_simu\" parameter cannot be dynamic!");
+}
+}

+// message key
LongFunction<String> keyFunc;
if (cmdTpl.isStatic("msg_key")) {
keyFunc = (l) -> cmdTpl.getStatic("msg_key");
@@ -275,6 +373,16 @@ public class ReadyPulsarOp implements OpDispenser<PulsarOp> {
keyFunc = (l) -> null;
}

+// message property
+LongFunction<String> propFunc;
+if (cmdTpl.isStatic("msg_property")) {
+propFunc = (l) -> cmdTpl.getStatic("msg_property");
+} else if (cmdTpl.isDynamic("msg_property")) {
+propFunc = (l) -> cmdTpl.getDynamic("msg_property", l);
+} else {
+propFunc = (l) -> null;
+}

LongFunction<String> valueFunc;
if (cmdTpl.containsKey("msg_value")) {
if (cmdTpl.isStatic("msg_value")) {
@@ -285,26 +393,121 @@ public class ReadyPulsarOp implements OpDispenser<PulsarOp> {
valueFunc = (l) -> null;
}
} else {
-throw new RuntimeException("Producer:: \"msg_value\" field must be specified!");
+throw new PulsarDriverParamException("[resolveMsgSend()] \"msg_value\" field must be specified!");
}

return new PulsarProducerMapper(
cmdTpl,
clientSpace,
+pulsarActivity,
async_api_func,
-producerFunc,
-keyFunc,
-valueFunc,
useTransactionFunc,
+seqTrackingFunc,
transactionSupplierFunc,
-pulsarActivity);
+producerFunc,
+seqErrSimuTypeFunc,
+keyFunc,
+propFunc,
+valueFunc);
}
private LongFunction<PulsarOp> resolveMsgConsume(
PulsarSpace clientSpace,
LongFunction<String> topic_uri_func,
LongFunction<Boolean> async_api_func,
-LongFunction<Boolean> useTransactionFunc
+LongFunction<Boolean> useTransactionFunc,
+LongFunction<Boolean> seqTrackingFunc,
+LongFunction<Boolean> brokerMsgDupFunc,
+boolean e2eMsgProc
+) {
+LongFunction<String> subscription_name_func;
+if (cmdTpl.isStatic("subscription_name")) {
+subscription_name_func = (l) -> cmdTpl.getStatic("subscription_name");
+} else if (cmdTpl.isDynamic("subscription_name")) {
+subscription_name_func = (l) -> cmdTpl.getDynamic("subscription_name", l);
+} else {
+subscription_name_func = (l) -> null;
+}

+LongFunction<String> subscription_type_func;
+if (cmdTpl.isStatic("subscription_type")) {
+subscription_type_func = (l) -> cmdTpl.getStatic("subscription_type");
+} else if (cmdTpl.isDynamic("subscription_type")) {
+subscription_type_func = (l) -> cmdTpl.getDynamic("subscription_type", l);
+} else {
+subscription_type_func = (l) -> null;
+}

+LongFunction<String> consumer_name_func;
+if (cmdTpl.isStatic("consumer_name")) {
+consumer_name_func = (l) -> cmdTpl.getStatic("consumer_name");
+} else if (cmdTpl.isDynamic("consumer_name")) {
+consumer_name_func = (l) -> cmdTpl.getDynamic("consumer_name", l);
+} else {
+consumer_name_func = (l) -> null;
+}

+LongFunction<Supplier<Transaction>> transactionSupplierFunc =
+(l) -> clientSpace.getTransactionSupplier(); //TODO make it dependant on current cycle?

+LongFunction<Boolean> topicMsgDedupFunc = (l) -> {
+String topic = topic_uri_func.apply(l);
+String namespace = PulsarActivityUtil.getFullNamespaceName(topic);
+PulsarAdmin pulsarAdmin = pulsarActivity.getPulsarAdmin();

+// Check namespace-level deduplication setting
+// - default to broker level deduplication setting
+boolean nsMsgDedup = brokerMsgDupFunc.apply(l);
+try {
+nsMsgDedup = pulsarAdmin.namespaces().getDeduplicationStatus(namespace);
+}
+catch (PulsarAdminException pae) {
+// it is fine if we're unable to check namespace level setting; use default
+}

+// Check topic-level deduplication setting
+// - default to namespace level deduplication setting
+boolean topicMsgDedup = nsMsgDedup;
+try {
+topicMsgDedup = pulsarAdmin.topics().getDeduplicationStatus(topic);
+}
+catch (PulsarAdminException pae) {
+// it is fine if we're unable to check topic level setting; use default
+}

+return topicMsgDedup;
+};

+LongFunction<Consumer<?>> consumerFunc = (l) ->
+clientSpace.getConsumer(
+topic_uri_func.apply(l),
+subscription_name_func.apply(l),
+subscription_type_func.apply(l),
+consumer_name_func.apply(l)
+);

+return new PulsarConsumerMapper(
+cmdTpl,
+clientSpace,
+pulsarActivity,
+async_api_func,
+useTransactionFunc,
+seqTrackingFunc,
+transactionSupplierFunc,
+topicMsgDedupFunc,
+consumerFunc,
+subscription_type_func,
+e2eMsgProc);
+}

+private LongFunction<PulsarOp> resolveMultiTopicMsgConsume(
+PulsarSpace clientSpace,
+LongFunction<String> topic_uri_func,
+LongFunction<Boolean> async_api_func,
+LongFunction<Boolean> useTransactionFunc,
+LongFunction<Boolean> seqTrackingFunc,
+LongFunction<Boolean> brokerMsgDupFunc
) {
// Topic list (multi-topic)
LongFunction<String> topic_names_func;
@@ -356,8 +559,8 @@ public class ReadyPulsarOp implements OpDispenser<PulsarOp> {
LongFunction<Supplier<Transaction>> transactionSupplierFunc =
(l) -> clientSpace.getTransactionSupplier(); //TODO make it dependant on current cycle?

-LongFunction<Consumer<?>> consumerFunc = (l) ->
-clientSpace.getConsumer(
+LongFunction<Consumer<?>> mtConsumerFunc = (l) ->
+clientSpace.getMultiTopicConsumer(
topic_uri_func.apply(l),
topic_names_func.apply(l),
topics_pattern_func.apply(l),
@@ -366,9 +569,26 @@ public class ReadyPulsarOp implements OpDispenser<PulsarOp> {
consumer_name_func.apply(l)
);

-return new PulsarConsumerMapper(cmdTpl, clientSpace, async_api_func, consumerFunc,
-pulsarActivity.getBytesCounter(), pulsarActivity.getMessagesizeHistogram(), pulsarActivity.getCommitTransactionTimer(),
-useTransactionFunc, transactionSupplierFunc);
+return new PulsarConsumerMapper(
+cmdTpl,
+clientSpace,
+pulsarActivity,
+async_api_func,
+useTransactionFunc,
+seqTrackingFunc,
+transactionSupplierFunc,
+// For multi-topic subscription message consumption,
+// - Only consider broker-level message deduplication setting
+// - Ignore namespace- and topic-level message deduplication setting
+//
+// This is because Pulsar is able to specify a list of topics from
+// different namespaces. In theory, we can get topic deduplication
+// status from each message, but this will be too much overhead.
+// e.g. pulsarAdmin.getPulsarAdmin().topics().getDeduplicationStatus(message.getTopicName())
+brokerMsgDupFunc,
+mtConsumerFunc,
+subscription_type_func,
+false);
}

private LongFunction<PulsarOp> resolveMsgRead(
@@ -401,7 +621,12 @@ public class ReadyPulsarOp implements OpDispenser<PulsarOp> {
start_msg_pos_str_func.apply(l)
);

-return new PulsarReaderMapper(cmdTpl, clientSpace, async_api_func, readerFunc);
+return new PulsarReaderMapper(
+cmdTpl,
+clientSpace,
+pulsarActivity,
+async_api_func,
+readerFunc);
}

private LongFunction<PulsarOp> resolveMsgBatchSendStart(
@@ -421,7 +646,12 @@ public class ReadyPulsarOp implements OpDispenser<PulsarOp> {
LongFunction<Producer<?>> batchProducerFunc =
(l) -> clientSpace.getProducer(topic_uri_func.apply(l), cycle_batch_producer_name_func.apply(l));

-return new PulsarBatchProducerStartMapper(cmdTpl, clientSpace, asyncApiFunc, batchProducerFunc);
+return new PulsarBatchProducerStartMapper(
+cmdTpl,
+clientSpace,
+pulsarActivity,
+asyncApiFunc,
+batchProducerFunc);
}

private LongFunction<PulsarOp> resolveMsgBatchSend(PulsarSpace clientSpace,
@@ -436,6 +666,16 @@ public class ReadyPulsarOp implements OpDispenser<PulsarOp> {
keyFunc = (l) -> null;
}

+// message property
+LongFunction<String> propFunc;
+if (cmdTpl.isStatic("msg_property")) {
+propFunc = (l) -> cmdTpl.getStatic("msg_property");
+} else if (cmdTpl.isDynamic("msg_property")) {
+propFunc = (l) -> cmdTpl.getDynamic("msg_property", l);
+} else {
+propFunc = (l) -> null;
+}

LongFunction<String> valueFunc;
if (cmdTpl.containsKey("msg_value")) {
if (cmdTpl.isStatic("msg_value")) {
@@ -446,20 +686,26 @@ public class ReadyPulsarOp implements OpDispenser<PulsarOp> {
valueFunc = (l) -> null;
}
} else {
-throw new RuntimeException("Batch Producer:: \"msg_value\" field must be specified!");
+throw new PulsarDriverParamException("[resolveMsgBatchSend()] \"msg_value\" field must be specified!");
}

return new PulsarBatchProducerMapper(
cmdTpl,
clientSpace,
+pulsarActivity,
asyncApiFunc,
keyFunc,
+propFunc,
valueFunc);
}

private LongFunction<PulsarOp> resolveMsgBatchSendEnd(PulsarSpace clientSpace,
LongFunction<Boolean> asyncApiFunc)
{
-return new PulsarBatchProducerEndMapper(cmdTpl, clientSpace, asyncApiFunc);
+return new PulsarBatchProducerEndMapper(
+cmdTpl,
+clientSpace,
+pulsarActivity,
+asyncApiFunc);
}
}
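The `topicMsgDedupFunc` added in `resolveMsgConsume` above resolves the effective deduplication flag by cascading from the doc-level broker default to the namespace setting and then the topic setting, swallowing failed admin lookups along the way. A standalone sketch of that cascade, assuming a `PulsarAdmin` handle is available (the null checks here are an extra precaution not present in the diff):

```java
import org.apache.pulsar.client.admin.PulsarAdmin;
import org.apache.pulsar.client.admin.PulsarAdminException;

public class DedupCascadeSketch {
    // Broker default -> namespace override -> topic override; a failed or empty
    // lookup keeps the previous value.
    static boolean effectiveDedup(PulsarAdmin admin, String topic, String namespace, boolean brokerDefault) {
        boolean dedup = brokerDefault;
        try {
            Boolean nsSetting = admin.namespaces().getDeduplicationStatus(namespace);
            if (nsSetting != null) {
                dedup = nsSetting;
            }
        } catch (PulsarAdminException pae) {
            // fall back to the broker-level default
        }
        try {
            Boolean topicSetting = admin.topics().getDeduplicationStatus(topic);
            if (topicSetting != null) {
                dedup = topicSetting;
            }
        } catch (PulsarAdminException pae) {
            // fall back to the namespace-level value
        }
        return dedup;
    }
}
```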
@@ -1,5 +1,6 @@
package io.nosqlbench.driver.pulsar.util;

+import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.commons.lang3.StringUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
@@ -12,8 +13,10 @@ import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Arrays;
-import java.util.HashMap;
+import java.util.Base64;
+import java.util.Map;
import java.util.stream.Collectors;
+import java.util.stream.Stream;

public class PulsarActivityUtil {

@@ -25,12 +28,15 @@ public class PulsarActivityUtil {
ADMIN_TENANT("admin-tenant"),
ADMIN_NAMESPACE("admin-namespace"),
ADMIN_TOPIC("admin-topic"),
+E2E_MSG_PROC_SEND("ec2-msg-proc-send"),
+E2E_MSG_PROC_CONSUME("ec2-msg-proc-consume"),
BATCH_MSG_SEND_START("batch-msg-send-start"),
BATCH_MSG_SEND("batch-msg-send"),
BATCH_MSG_SEND_END("batch-msg-send-end"),
MSG_SEND("msg-send"),
MSG_CONSUME("msg-consume"),
-MSG_READ("msg-read");
+MSG_READ("msg-read"),
+MSG_MULTI_CONSUME("msg-mt-consume");

public final String label;

@@ -42,11 +48,18 @@ public class PulsarActivityUtil {
return Arrays.stream(OP_TYPES.values()).anyMatch(t -> t.label.equals(type));
}

+public static final String MSG_SEQUENCE_ID = "sequence_id";
+public static final String MSG_SEQUENCE_TGTMAX = "sequence_tgtmax";

+///////
+// Valid document level parameters for Pulsar NB yaml file
public enum DOC_LEVEL_PARAMS {
TOPIC_URI("topic_uri"),
ASYNC_API("async_api"),
USE_TRANSACTION("use_transaction"),
-ADMIN_DELOP("admin_delop");
+ADMIN_DELOP("admin_delop"),
+SEQ_TRACKING("seq_tracking"),
+MSG_DEDUP_BROKER("msg_dedup_broker");

public final String label;

@@ -55,9 +68,28 @@ public class PulsarActivityUtil {
}
}
public static boolean isValidDocLevelParam(String param) {
-return Arrays.stream(OP_TYPES.values()).anyMatch(t -> t.label.equals(param));
+return Arrays.stream(DOC_LEVEL_PARAMS.values()).anyMatch(t -> t.label.equals(param));
}

+///////
+// Valid Pulsar API type
+public enum PULSAR_API_TYPE {
+PRODUCER("producer"),
+CONSUMER("consumer"),
+READER("reader");

+public final String label;

+PULSAR_API_TYPE(String label) {
+this.label = label;
+}
+}
+public static boolean isValidPulsarApiType(String param) {
+return Arrays.stream(PULSAR_API_TYPE.values()).anyMatch(t -> t.label.equals(param));
+}
+public static String getValidPulsarApiTypeList() {
+return Arrays.stream(PULSAR_API_TYPE.values()).map(t -> t.label).collect(Collectors.joining(", "));
+}

///////
// Valid persistence type
@@ -75,7 +107,6 @@ public class PulsarActivityUtil {
return Arrays.stream(PERSISTENT_TYPES.values()).anyMatch(t -> t.label.equals(type));
}

///////
// Valid Pulsar client configuration (activity-level settings)
// - https://pulsar.apache.org/docs/en/client-libraries-java/#client
@@ -171,11 +202,29 @@ public class PulsarActivityUtil {
this.label = label;
}
}

public static boolean isStandardConsumerConfItem(String item) {
return Arrays.stream(CONSUMER_CONF_STD_KEY.values()).anyMatch(t -> t.label.equals(item));
}

+///////
+// Custom consumer configuration (activity-level settings)
+// - NOT part of https://pulsar.apache.org/docs/en/client-libraries-java/#consumer
+// - NB Pulsar driver consumer operation specific
+public enum CONSUMER_CONF_CUSTOM_KEY {
+timeout("timeout");

+public final String label;

+CONSUMER_CONF_CUSTOM_KEY(String label) {
+this.label = label;
+}
+}
+public static boolean isCustomConsumerConfItem(String item) {
+return Arrays.stream(CONSUMER_CONF_CUSTOM_KEY.values()).anyMatch(t -> t.label.equals(item));
+}

+///////
+// Pulsar subscription type
public enum SUBSCRIPTION_TYPE {
Exclusive("Exclusive"),
Failover("Failover"),
@@ -188,7 +237,6 @@ public class PulsarActivityUtil {
this.label = label;
}
}

public static boolean isValidSubscriptionType(String item) {
return Arrays.stream(SUBSCRIPTION_TYPE.values()).anyMatch(t -> t.label.equals(item));
}
@@ -220,6 +268,10 @@ public class PulsarActivityUtil {
return Arrays.stream(READER_CONF_STD_KEY.values()).anyMatch(t -> t.label.equals(item));
}

+///////
+// Custom reader configuration (activity-level settings)
+// - NOT part of https://pulsar.apache.org/docs/en/client-libraries-java/#reader
+// - NB Pulsar driver reader operation specific
public enum READER_CONF_CUSTOM_KEY {
startMessagePos("startMessagePos");

@@ -229,11 +281,12 @@ public class PulsarActivityUtil {
this.label = label;
}
}

public static boolean isCustomReaderConfItem(String item) {
return Arrays.stream(READER_CONF_CUSTOM_KEY.values()).anyMatch(t -> t.label.equals(item));
}

+///////
+// Valid read positions for a Pulsar reader
public enum READER_MSG_POSITION_TYPE {
earliest("earliest"),
latest("latest"),
@@ -245,11 +298,30 @@ public class PulsarActivityUtil {
this.label = label;
}
}

public static boolean isValideReaderStartPosition(String item) {
return Arrays.stream(READER_MSG_POSITION_TYPE.values()).anyMatch(t -> t.label.equals(item));
}

+///////
+// Pulsar subscription type
+public enum SEQ_ERROR_SIMU_TYPE {
+OutOfOrder("out_of_order"),
+MsgLoss("msg_loss"),
+MsgDup("msg_dup");

+public final String label;

+SEQ_ERROR_SIMU_TYPE(String label) {
+this.label = label;
+}
+}
+public static boolean isValidSeqErrSimuType(String item) {
+return Arrays.stream(SEQ_ERROR_SIMU_TYPE.values()).anyMatch(t -> t.label.equals(item));
+}
+public static String getValidSeqErrSimuTypeList() {
+return Arrays.stream(SEQ_ERROR_SIMU_TYPE.values()).map(t -> t.label).collect(Collectors.joining(", "));
+}

///////
// Valid websocket-producer configuration (activity-level settings)
// TODO: to be added
@@ -387,5 +459,36 @@ public class PulsarActivityUtil {

return schema;
}

+///////
+// Generate effective key string
+public static String buildCacheKey(String... keyParts) {
+// Ignore blank keyPart
+String joinedKeyStr =
+Stream.of(keyParts)
+.filter(s -> !StringUtils.isBlank(s))
+.collect(Collectors.joining(","));

+return Base64.getEncoder().encodeToString(joinedKeyStr.getBytes());
+}

+///////
+// Convert JSON string to a key/value map
+public static Map<String, String> convertJsonToMap(String jsonStr) throws Exception {
+ObjectMapper mapper = new ObjectMapper();
+return mapper.readValue(jsonStr, Map.class);
+}

+///////
+// Get full namespace name (<tenant>/<namespace>) from a Pulsar topic URI
+public static String getFullNamespaceName(String topicUri) {
+// Get tenant/namespace string
+// - topicUri : persistent://<tenant>/<namespace>/<topic>
+// - tmpStr : <tenant>/<namespace>/<topic>
+// - fullNsName : <tenant>/<namespace>

+String tmpStr = StringUtils.substringAfter(topicUri,"://");
+return StringUtils.substringBeforeLast(tmpStr, "/");
+}
}
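The two string helpers added to `PulsarActivityUtil` above are easy to exercise on their own. A quick usage sketch mirroring the cache-key and namespace-extraction behavior described in the diff's comments (the sample topic URI is made up):

```java
import java.util.Base64;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.apache.commons.lang3.StringUtils;

public class PulsarUtilSketch {
    // Mirror of buildCacheKey: blank parts are dropped, the rest joined and Base64-encoded.
    static String buildCacheKey(String... keyParts) {
        String joined = Stream.of(keyParts)
            .filter(s -> !StringUtils.isBlank(s))
            .collect(Collectors.joining(","));
        return Base64.getEncoder().encodeToString(joined.getBytes());
    }

    // Mirror of getFullNamespaceName: persistent://tenant/ns/topic -> tenant/ns
    static String fullNamespaceName(String topicUri) {
        String tmp = StringUtils.substringAfter(topicUri, "://");
        return StringUtils.substringBeforeLast(tmp, "/");
    }

    public static void main(String[] args) {
        System.out.println(buildCacheKey("tenant", "", "topic-1"));
        System.out.println(fullNamespaceName("persistent://public/default/sensor-data"));
    }
}
```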
@@ -173,14 +173,16 @@ public class PulsarNBClientConf {
}
// other producer helper functions ...
public String getProducerName() {
-Object confValue = getProducerConfValue("producer.producerName");
+Object confValue = getProducerConfValue(
+"producer." + PulsarActivityUtil.PRODUCER_CONF_STD_KEY.producerName.label);
if (confValue == null)
return "";
else
return confValue.toString();
}
public String getProducerTopicName() {
-Object confValue = getProducerConfValue("producer.topicName");
+Object confValue = getProducerConfValue(
+"producer." + PulsarActivityUtil.PRODUCER_CONF_STD_KEY.topicName);
if (confValue == null)
return "";
else
@@ -213,48 +215,56 @@ public class PulsarNBClientConf {
}
// Other consumer helper functions ...
public String getConsumerTopicNames() {
-Object confValue = getConsumerConfValue("consumer.topicNames");
+Object confValue = getConsumerConfValue(
+"consumer." + PulsarActivityUtil.CONSUMER_CONF_STD_KEY.topicNames.label);
if (confValue == null)
return "";
else
return confValue.toString();
}
public String getConsumerTopicPattern() {
-Object confValue = getConsumerConfValue("consumer.topicsPattern");
+Object confValue = getConsumerConfValue(
+"consumer." + PulsarActivityUtil.CONSUMER_CONF_STD_KEY.topicsPattern.label);
if (confValue == null)
return "";
else
return confValue.toString();
}
-public int getConsumerTimeoutSeconds() {
-Object confValue = getConsumerConfValue("consumer.timeout");
-if (confValue == null)
-return -1; // infinite
-else
-return Integer.parseInt(confValue.toString());
-}
public String getConsumerSubscriptionName() {
-Object confValue = getConsumerConfValue("consumer.subscriptionName");
+Object confValue = getConsumerConfValue(
+"consumer." + PulsarActivityUtil.CONSUMER_CONF_STD_KEY.subscriptionName.label);
if (confValue == null)
return "";
else
return confValue.toString();
}
public String getConsumerSubscriptionType() {
-Object confValue = getConsumerConfValue("consumer.subscriptionType");
+Object confValue = getConsumerConfValue(
+"consumer." + PulsarActivityUtil.CONSUMER_CONF_STD_KEY.subscriptionType.label);
if (confValue == null)
return "";
else
return confValue.toString();
}
public String getConsumerName() {
-Object confValue = getConsumerConfValue("consumer.consumerName");
+Object confValue = getConsumerConfValue(
+"consumer." + PulsarActivityUtil.CONSUMER_CONF_STD_KEY.consumerName.label);
if (confValue == null)
return "";
else
return confValue.toString();
}
+// NOTE: Below are not a standard Pulsar consumer configuration parameter as
+// listed in "https://pulsar.apache.org/docs/en/client-libraries-java/#configure-consumer"
+// They're custom-made configuration properties for NB pulsar driver consumer.
+public int getConsumerTimeoutSeconds() {
+Object confValue = getConsumerConfValue(
+"consumer." + PulsarActivityUtil.CONSUMER_CONF_CUSTOM_KEY.timeout.label);
+if (confValue == null)
+return -1; // infinite
+else
+return Integer.parseInt(confValue.toString());
+}

//////////////////
// Get Pulsar reader related config
@@ -279,23 +289,29 @@ public class PulsarNBClientConf {
else
readerConfMap.put(key, value);
}
-// Other consumer helper functions ...
+// Other reader helper functions ...
public String getReaderTopicName() {
-Object confValue = getReaderConfValue("reader.topicName");
+Object confValue = getReaderConfValue(
+"reader." + PulsarActivityUtil.READER_CONF_STD_KEY.topicName.label);
if (confValue == null)
return "";
else
return confValue.toString();
}
public String getReaderName() {
-Object confValue = getReaderConfValue("reader.readerName");
+Object confValue = getReaderConfValue(
+"reader." + PulsarActivityUtil.READER_CONF_STD_KEY.readerName.label);
if (confValue == null)
return "";
else
return confValue.toString();
}
+// NOTE: Below are not a standard Pulsar reader configuration parameter as
+// listed in "https://pulsar.apache.org/docs/en/client-libraries-java/#reader"
+// They're custom-made configuration properties for NB pulsar driver reader.
public String getStartMsgPosStr() {
-Object confValue = getReaderConfValue("reader.startMessagePos");
+Object confValue = getReaderConfValue(
+"reader." + PulsarActivityUtil.READER_CONF_CUSTOM_KEY.startMessagePos.label);
if (confValue == null)
return "";
else
@ -8,6 +8,8 @@
# TODO: as a starting point, only supports the following types
# 1) primitive types, including bytearray (byte[]) which is default, for messages without schema
# 2) Avro for messages with schema
#schema.type=avro
#schema.definition=file:///Users/yabinmeng/DataStax/MyNoSQLBench/nosqlbench/driver-pulsar/src/main/resources/activities/iot-example.avsc
schema.type=
schema.definition=
@ -5,10 +5,10 @@ bindings:
params:
  # "true" - asynchronous Pulsar Admin API
  # "false" - synchronous Pulsar Admin API
  async_api: "true"
  async_api: "false"
  # "true" - delete tenant
  # "false" - create tenant
  admin_delop: "false"
  admin_delop: "true"

blocks:
  - name: create-tenant-block
@ -49,6 +49,7 @@ blocks:
      optype: msg-send
      # producer_name: {producer_name}
      msg_key: "{mykey}"
      msg_property: "{myprop}"
      msg_value: |
        {
          "SensorID": "{sensor_id}",
@ -1,7 +1,10 @@
bindings:
  # message key and value
  # message key, property and value
  mykey:
  int_prop_val: ToString(); Prefix("IntProp_")
  text_prop_val: AlphaNumericString(10); Prefix("TextProp_")
  myvalue: NumberNameToString() #AlphaNumericString(20)
  # tenant, namespace, and core topic name (without tenant and namespace)
  tenant: Mod(100); Div(10L); ToString(); Prefix("tnt")
  namespace: Mod(10); Div(5L); ToString(); Prefix("ns")
  core_topic_name: Mod(5); ToString(); Prefix("t")
@ -25,6 +28,11 @@ blocks:
    - name: s2
      optype: batch-msg-send
      msg_key: "{mykey}"
      msg_property: |
        {
          "prop1": "{int_prop_val}",
          "prop2": "{text_prop_val}}"
        }
      msg_value: "{myvalue}"
      ratio: 100
    - name: s3
|
|||||||
statements:
|
statements:
|
||||||
- name: s1
|
- name: s1
|
||||||
optype: msg-consume
|
optype: msg-consume
|
||||||
topic_names:
|
|
||||||
topics_pattern:
|
|
||||||
subscription_name: "mysub"
|
subscription_name: "mysub"
|
||||||
subscription_type:
|
subscription_type:
|
||||||
consumer_name:
|
consumer_name:
|
||||||
@ -64,6 +70,19 @@ blocks:
      optype: msg-read
      reader_name:

  - name: multi-topic-consumer-block
    tags:
      phase: multi-topic-consumer
      admin_task: false
    statements:
    - name: s1
      optype: msg-mt-consume
      topic_names:
      topics_pattern:
      subscription_name: "mysub"
      subscription_type:
      consumer_name:

#  - websocket-producer:
#      tags:
#        type: websocket-produer
@ -0,0 +1,30 @@
bindings:
  # message key, property and value
  myprop1: AlphaNumericString(10); Prefix("PropVal_")
  myvalue: NumberNameToString() #AlphaNumericString(20)

# document level parameters that apply to all Pulsar client types:
params:
  topic_uri: "persistent://public/default/sanity_e2e_2"
  async_api: "true"

blocks:
  - name: e2e-msg-proc-block
    tags:
      phase: e2e-msg-proc
      admin_task: false
    statements:
    - name: s1
      optype: ec2-msg-proc-send
      msg_key:
      msg_property: |
        {
          "prop1": "{myprop1}"
        }
      msg_value: "{myvalue}"
      ratio: 1
    - name: s2
      optype: ec2-msg-proc-consume
      ratio: 1
      subscription_name: "mysub"
      subscription_type:
@ -0,0 +1,39 @@
bindings:
  # message key, property and value
  myprop1: AlphaNumericString(10)
  myvalue: NumberNameToString()

# document level parameters that apply to all Pulsar client types:
params:
  topic_uri: "persistent://public/default/sanity_seqloss2"
  # Only applicable to producer and consumer
  # - used for message ordering and message loss check
  async_api: "true"
  seq_tracking: "true"
  msg_dedup_broker: "true"

blocks:
  - name: producer-block
    tags:
      phase: producer
      admin_task: false
    statements:
    - name: s1
      optype: msg-send
      #seqerr_simu: "out_of_order"
      #seqerr_simu: "msg_loss"
      #seqerr_simu: "msg_dup"
      msg_key:
      msg_property:
      msg_value: "{myvalue}"

  - name: consumer-block
    tags:
      phase: consumer
      admin_task: false
    statements:
    - name: s1
      optype: msg-consume
      subscription_name: "mysub"
      subscription_type:
      consumer_name:
@ -1,24 +1,29 @@
- [1. NoSQLBench (NB) Pulsar Driver Overview](#1-nosqlbench-nb-pulsar-driver-overview)
  - [1.1. Issues Tracker](#11-issues-tracker)
  - [1.2. Global Level Pulsar Configuration Settings](#12-global-level-pulsar-configuration-settings)
  - [1.3. NB Pulsar Driver Yaml File - High Level Structure](#13-nb-pulsar-driver-yaml-file---high-level-structure)
    - [1.3.1. NB Cycle Level Parameters vs. Global Level Parameters](#131-nb-cycle-level-parameters-vs-global-level-parameters)
    - [1.3.1. Configuration Parameter Levels](#131-configuration-parameter-levels)
  - [1.4. Pulsar Driver Yaml File - Command Block Details](#14-pulsar-driver-yaml-file---command-block-details)
  - [1.4. Pulsar Driver Yaml File - Command Blocks](#14-pulsar-driver-yaml-file---command-blocks)
    - [1.4.1. Pulsar Admin API Command Block - Create Tenants](#141-pulsar-admin-api-command-block---create-tenants)
    - [1.4.2. Pulsar Admin API Command Block - Create Namespaces](#142-pulsar-admin-api-command-block---create-namespaces)
    - [1.4.3. Pulsar Admin API Command Block - Create Topics (Partitioned or Regular)](#143-pulsar-admin-api-command-block---create-topics-partitioned-or-regular)
    - [1.4.4. Batch Producer Command Block](#144-batch-producer-command-block)
    - [1.4.5. Producer Command Block](#145-producer-command-block)
    - [1.4.6. Consumer Command Block](#146-consumer-command-block)
    - [1.4.6. (Single-Topic) Consumer Command Block](#146-single-topic-consumer-command-block)
    - [1.4.7. Reader Command Block](#147-reader-command-block)
  - [1.5. Schema Support](#15-schema-support)
    - [1.4.8. Multi-topic Consumer Command Block](#148-multi-topic-consumer-command-block)
  - [1.6. NB Activity Execution Parameters](#16-nb-activity-execution-parameters)
    - [1.4.9. End-to-end Message Processing Command Block](#149-end-to-end-message-processing-command-block)
  - [1.7. NB Pulsar Driver Execution Example](#17-nb-pulsar-driver-execution-example)
  - [1.5. Message Properties](#15-message-properties)
  - [1.8. Appendix A. Template Global Setting File (config.properties)](#18-appendix-a-template-global-setting-file-configproperties)
  - [1.6. Schema Support](#16-schema-support)
  - [1.7. Measure End-to-end Message Processing Latency](#17-measure-end-to-end-message-processing-latency)
  - [1.8. Detect Message Out-of-order, Message Loss, and Message Duplication](#18-detect-message-out-of-order-message-loss-and-message-duplication)
  - [1.9. NB Activity Execution Parameters](#19-nb-activity-execution-parameters)
  - [1.10. NB Pulsar Driver Execution Example](#110-nb-pulsar-driver-execution-example)
  - [1.11. Appendix A. Template Global Setting File (config.properties)](#111-appendix-a-template-global-setting-file-configproperties)
- [2. TODO : Design Revisit -- Advanced Driver Features](#2-todo--design-revisit----advanced-driver-features)
  - [2.1. Other Activity Parameters](#21-other-activity-parameters)
  - [2.2. API Caching](#22-api-caching)
    - [2.2.1. Instancing Controls](#221-instancing-controls)

# 1. NoSQLBench (NB) Pulsar Driver Overview
@ -38,7 +43,7 @@ If you have issues or new requirements for this driver, please add them at the [
|
|||||||
|
|
||||||
## 1.2. Global Level Pulsar Configuration Settings
|
## 1.2. Global Level Pulsar Configuration Settings
|
||||||
|
|
||||||
The NB Pulsar driver relies on Pulsar's [Java Client API](https://pulsar.apache.org/docs/en/client-libraries-java/) to publish and consume messages from the Pulsar cluster. In order to do so, a [PulsarClient](https://pulsar.incubator.apache.org/api/client/2.7.0-SNAPSHOT/org/apache/pulsar/client/api/PulsarClient) object needs to be created first in order to establish the connection to the Pulsar cluster; then a workload-specific object (e.g. [Producer](https://pulsar.incubator.apache.org/api/client/2.7.0-SNAPSHOT/org/apache/pulsar/client/api/Producer) or [Consumer](https://pulsar.incubator.apache.org/api/client/2.7.0-SNAPSHOT/org/apache/pulsar/client/api/Consumer)) is required in order to execute workload-specific actions (e.g. publishing or consuming messages).
|
The NB Pulsar driver relies on Pulsar's [Java Client API](https://pulsar.apache.org/docs/en/client-libraries-java/) to publish messages to and consume messages from a Pulsar cluster. In order to do so, a [PulsarClient](https://pulsar.incubator.apache.org/api/client/2.7.0-SNAPSHOT/org/apache/pulsar/client/api/PulsarClient) object needs to be created first in order to establish the connection to the Pulsar cluster; then a workload-specific object (e.g. [Producer](https://pulsar.incubator.apache.org/api/client/2.7.0-SNAPSHOT/org/apache/pulsar/client/api/Producer) or [Consumer](https://pulsar.incubator.apache.org/api/client/2.7.0-SNAPSHOT/org/apache/pulsar/client/api/Consumer)) is required in order to execute workload-specific actions (e.g. publishing or consuming messages).
|
||||||
|
|
||||||
When creating these objects (e.g. PulsarClient, Producer), there are different configuration options that can be used. For example, [this document](https://pulsar.apache.org/docs/en/client-libraries-java/#configure-producer) lists all possible configuration options when creating a Pulsar Producer object.
|
When creating these objects (e.g. PulsarClient, Producer), there are different configuration options that can be used. For example, [this document](https://pulsar.apache.org/docs/en/client-libraries-java/#configure-producer) lists all possible configuration options when creating a Pulsar Producer object.
|
||||||
|
|
||||||
@ -70,32 +75,28 @@ There are multiple sections in this file that correspond to different groups of
|
|||||||
format. The other valid option is **avro** which the Pulsar
|
format. The other valid option is **avro** which the Pulsar
|
||||||
message will follow a specific Avro format.
|
message will follow a specific Avro format.
|
||||||
* *schema.definition*: This only applies when an Avro schema type
|
* *schema.definition*: This only applies when an Avro schema type
|
||||||
is specified and the value is the (full) file path that contains
|
is specified. The value of this configuration is the (full) file
|
||||||
the Avro schema definition.
|
path that contains the Avro schema definition.
|
||||||
* **Pulsar Client related settings**:
|
* **Pulsar Client related settings**:
|
||||||
* All settings under this section starts with **client.** prefix.
|
* All settings under this section starts with **client.** prefix.
|
||||||
* This section defines all configuration settings that are related
|
* This section defines all configuration settings that are related
|
||||||
with defining a PulsarClient object.
|
with defining a PulsarClient object.
|
||||||
*
|
* See [Pulsar Doc Reference](https://pulsar.apache.org/docs/en/client-libraries-java/#default-broker-urls-for-standalone-clusters)
|
||||||
See [Pulsar Doc Reference](https://pulsar.apache.org/docs/en/client-libraries-java/#default-broker-urls-for-standalone-clusters)
|
|
||||||
* **Pulsar Producer related settings**:
|
* **Pulsar Producer related settings**:
|
||||||
* All settings under this section starts with **producer** prefix.
|
* All settings under this section starts with **producer** prefix.
|
||||||
* This section defines all configuration settings that are related
|
* This section defines all configuration settings that are related
|
||||||
with defining a Pulsar Producer object.
|
with defining a Pulsar Producer object.
|
||||||
*
|
* See [Pulsar Doc Reference](https://pulsar.apache.org/docs/en/client-libraries-java/#configure-producer)
|
||||||
See [Pulsar Doc Reference](https://pulsar.apache.org/docs/en/client-libraries-java/#configure-producer)
|
|
||||||
* **Pulsar Consumer related settings**:
|
* **Pulsar Consumer related settings**:
|
||||||
* All settings under this section starts with **consumer** prefix.
|
* All settings under this section starts with **consumer** prefix.
|
||||||
* This section defines all configuration settings that are related
|
* This section defines all configuration settings that are related
|
||||||
with defining a Pulsar Consumer object.
|
with defining a Pulsar Consumer object.
|
||||||
*
|
* See [Pulsar Doc Reference](http://pulsar.apache.org/docs/en/client-libraries-java/#configure-consumer)
|
||||||
See [Pulsar Doc Reference](http://pulsar.apache.org/docs/en/client-libraries-java/#configure-consumer)
|
|
||||||
* **Pulsar Reader related settings**:
|
* **Pulsar Reader related settings**:
|
||||||
* All settings under this section starts with **reader** prefix.
|
* All settings under this section starts with **reader** prefix.
|
||||||
* This section defines all configuration settings that are related
|
* This section defines all configuration settings that are related
|
||||||
with defining a Pulsar Reader object.
|
with defining a Pulsar Reader object.
|
||||||
*
|
* See [Pulsar Doc Reference](https://pulsar.apache.org/docs/en/client-libraries-java/#reader)
|
||||||
See [Pulsar Doc Reference](https://pulsar.apache.org/docs/en/client-libraries-java/#reader)
|
|
||||||
|
|
||||||
In the future, when the support for other types of Pulsar workloads is
|
In the future, when the support for other types of Pulsar workloads is
|
||||||
added in NB Pulsar driver, there will be corresponding configuration
|
added in NB Pulsar driver, there will be corresponding configuration
|
||||||
@ -104,7 +105,7 @@ sections in this file as well.
|
|||||||
## 1.3. NB Pulsar Driver Yaml File - High Level Structure
|
## 1.3. NB Pulsar Driver Yaml File - High Level Structure
|
||||||
|
|
||||||
Just like other NB driver types, the actual Pulsar workload generation is
|
Just like other NB driver types, the actual Pulsar workload generation is
|
||||||
determined by the statement blocks in the NB driver Yaml file. Depending
|
determined by the statement blocks in an NB driver Yaml file. Depending
|
||||||
on the Pulsar workload type, the corresponding statement block may have
|
on the Pulsar workload type, the corresponding statement block may have
|
||||||
different contents.
|
different contents.
|
||||||
|
|
||||||
@ -113,12 +114,21 @@ At high level, Pulsar driver yaml file has the following structure:
|
|||||||
* **description**: (optional) general description of the yaml file
|
* **description**: (optional) general description of the yaml file
|
||||||
* **bindings**: defines NB bindings
|
* **bindings**: defines NB bindings
|
||||||
* **params**: document level Pulsar driver parameters that apply to all
|
* **params**: document level Pulsar driver parameters that apply to all
|
||||||
command blocks. Currently there are two valid parameters:
|
command blocks. Currently, the following parameters are valid at this
|
||||||
|
level:
|
||||||
* **topic_url**: Pulsar topic uri ([persistent|non-persistent]:
|
* **topic_url**: Pulsar topic uri ([persistent|non-persistent]:
|
||||||
//<tenant>/<namespace>/<topic>). This can be statically assigned or
|
//<tenant>/<namespace>/<topic>). This can be statically assigned or
|
||||||
dynamically generated via NB bindings.
|
dynamically generated via NB bindings.
|
||||||
* **async_api**: Whether to use asynchronous Pulsar API (**note**:
|
* **async_api**: Whether to use asynchronous Pulsar API (**note**:
|
||||||
more on this later)
|
more on this later)
|
||||||
|
* **use_transaction**: Whether to simulate Pulsar transaction
|
||||||
|
* **admin_delop**: For Admin tasks, whether to execute delete operation
|
||||||
|
instead of the default create operation.
|
||||||
|
* **seq_tracking**: Whether to do message sequence tracking. This is
|
||||||
|
used for message out-of-order and message loss detection (more on
|
||||||
|
this later).
|
||||||
|
* **msg_dedup_broker**: Whether or not broker level message deduplication
|
||||||
|
is enabled.
|
||||||
* **blocks**: includes a series of command blocks. Each command block
|
* **blocks**: includes a series of command blocks. Each command block
|
||||||
defines one major Pulsar operation such as *producer*, *consumer*, etc.
|
defines one major Pulsar operation such as *producer*, *consumer*, etc.
|
||||||
Right now, the following command blocks are already supported or will be
|
Right now, the following command blocks are already supported or will be
|
||||||
@ -129,8 +139,12 @@ At high level, Pulsar driver yaml file has the following structure:
|
|||||||
* (Pulsar Admin API) **create-topic-block**: create/delete topics
|
* (Pulsar Admin API) **create-topic-block**: create/delete topics
|
||||||
* (Pulsar Client API) **batch-producer-block**: batch producer
|
* (Pulsar Client API) **batch-producer-block**: batch producer
|
||||||
* (Pulsar Client API) **producer-block**: producer
|
* (Pulsar Client API) **producer-block**: producer
|
||||||
* (Pulsar Client API) **consumer-block**: consumer
|
* (Pulsar Client API) **consumer-block**: consumer (single topic)
|
||||||
* (Pulsar Client API) **reader-block**: reader
|
* (Pulsar Client API) **reader-block**: reader
|
||||||
|
* (Pulsar Client API) **e2e-msg-proc-block**: keep track of end-to-end
|
||||||
|
message latency (histogram)
|
||||||
|
* (Pulsar Client API) **multi-topic-consumer-block**: consumer (multi-
|
||||||
|
topic)
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
description: |
|
description: |
|
||||||
@ -142,7 +156,10 @@ bindings:
|
|||||||
params:
|
params:
|
||||||
topic_uri: "<pulsar_topic_name>"
|
topic_uri: "<pulsar_topic_name>"
|
||||||
async_api: "false"
|
async_api: "false"
|
||||||
|
use_transaction: "false"
|
||||||
admin_delop: "false"
|
admin_delop: "false"
|
||||||
|
seq_transaction: "false"
|
||||||
|
msg_dedup_broker: "false"
|
||||||
|
|
||||||
blocks:
|
blocks:
|
||||||
- name: <command_block_1>
|
- name: <command_block_1>
|
||||||
@ -186,66 +203,43 @@ multiple Pulsar operations in one run! But if we want to focus the testing
|
|||||||
on one particular operation, we can use the tag to filter the command
|
on one particular operation, we can use the tag to filter the command
|
||||||
block as listed above!
|
block as listed above!
|
||||||
|
|
||||||
### 1.3.1. NB Cycle Level Parameters vs. Global Level Parameters
|
### 1.3.1. Configuration Parameter Levels
|
||||||
|
|
||||||
Some parameters, especially topic name and producer/consumer/reader/etc.
|
The NB Pulsar driver configuration parameters can be set at 3 different
|
||||||
name, can be set at the global level in **config.properties** file, or at
|
levels:
|
||||||
NB cycle level via **pulsar.yaml** file. An example of setting a topic
|
|
||||||
name in both levels is as below:
|
|
||||||
|
|
||||||
```bash
|
* **global level**: parameters that are set in ***config.properties*** file
|
||||||
# Global level setting (config.properties):
|
```
|
||||||
producer.topicName = ...
|
schema.type=
|
||||||
|
```
|
||||||
# Cycle level setting (pulsar.yaml)
|
* **document level**: parameters that are set within NB yaml file and under
|
||||||
|
the ***params*** section
|
||||||
|
```
|
||||||
params:
|
params:
|
||||||
topic_uri: ...
|
topic_uri: ...
|
||||||
```
|
```
|
||||||
|
* **statement level**: parameters that are set within NB yaml file, but
|
||||||
|
under different block statements
|
||||||
|
```
|
||||||
|
- name: producer-block
|
||||||
|
statements:
|
||||||
|
- name: s1
|
||||||
|
msg_key:
|
||||||
|
```
|
||||||
|
|
||||||
In theory, all Pulsar client settings can be made as cycle level settings
|
**NOTE**: If one parameter is set at multiple levels (e.g. producer name),
|
||||||
for maximum flexibility. But practically speaking (and also for simplicity
|
the parameter at lower level will take precedence.
|
||||||
purposes), only the following parameters are made to be configurable at
|
|
||||||
both levels, listed by cycle level setting names with their corresponding
|
|
||||||
global level setting names:
|
|
||||||
* topic_uri (Mandatory)
|
|
||||||
* producer.topicName
|
|
||||||
* consumer.topicNames
|
|
||||||
* reader.topicName
|
|
||||||
* topic_names (Optional for Consumer)
|
|
||||||
* consumer.topicNames
|
|
||||||
* subscription_name (Mandatory for Consumer)
|
|
||||||
* consumer.subscriptionName
|
|
||||||
* subscription_type (Mandatory for Consumer, default to **exclusive**
|
|
||||||
type)
|
|
||||||
* consumer.subscriptionType
|
|
||||||
* topics_pattern (Optional for Consumer)
|
|
||||||
* consumer.topicsPattern
|
|
||||||
* producer_name (Optional)
|
|
||||||
* producer.producerName
|
|
||||||
* consumer_name (Optional)
|
|
||||||
* consumer.consumerName
|
|
||||||
* reader_name (Optional)
|
|
||||||
* reader.readerName
|
|
||||||
|
|
||||||
One key difference between setting a parameter at the global level vs. at
|
## 1.4. Pulsar Driver Yaml File - Command Blocks
|
||||||
the cycle level is that the global level setting is always static and
|
|
||||||
stays the same for all NB cycle execution. The cycle level setting, on the
|
|
||||||
other side, can be dynamically bound and can be different from cycle to
|
|
||||||
cycle.
|
|
||||||
|
|
||||||
Because of this, setting these parameters at the NB cycle level allows us
|
|
||||||
to run Pulsar testing against multiple topics and/or multiple
|
|
||||||
producers/consumers/readers/etc all at once within one NB activity. This
|
|
||||||
makes the testing more flexible and effective.
|
|
||||||
|
|
||||||
**NOTE**: when a configuration is set at both the global level and the
|
|
||||||
cycle level, **the cycle level setting will take priority!**
|
|
||||||
|
|
||||||
## 1.4. Pulsar Driver Yaml File - Command Block Details
|
|
||||||
|
|
||||||
### 1.4.1. Pulsar Admin API Command Block - Create Tenants
|
### 1.4.1. Pulsar Admin API Command Block - Create Tenants
|
||||||
|
|
||||||
This Pulsar Admin API Block is used to create Pulsar tenants. It has the following format:
|
This Pulsar Admin API Block is used to create or delete Pulsar tenants. It
|
||||||
|
has the following format.
|
||||||
|
|
||||||
|
Please note that when document level parameter **admin_delop** is set to be
|
||||||
|
true, then this command block will delete Pulsar tenants instead. Similarly
|
||||||
|
this applies to other Admin API blocks for namespace and topic management.
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
- name: create-tenant-block
|
- name: create-tenant-block
|
||||||
@ -265,10 +259,10 @@ In this command block, there is only 1 statement (s1):
|
|||||||
* Statement **s1** is used for creating a Pulsar tenant
|
* Statement **s1** is used for creating a Pulsar tenant
|
||||||
* (Mandatory) **optype (admin-tenant)** is the statement identifier
|
* (Mandatory) **optype (admin-tenant)** is the statement identifier
|
||||||
for this statement
|
for this statement
|
||||||
* (Optional) **allowed_clusters** must be statically bound and it
|
* (Optional) **allowed_clusters** must be statically bound, and it
|
||||||
specifies the cluster list that is allowed for a tenant.
|
specifies the cluster list that is allowed for a tenant.
|
||||||
* (Optional) **admin_roles** must be statically bound and it specifies
|
* (Optional) **admin_roles** must be statically bound, and it specifies
|
||||||
the super user role that is associated with a tenant.
|
the superuser role that is associated with a tenant.
|
||||||
* (Mandatory) **tenant** is the Pulsar tenant name to be created. It
|
* (Mandatory) **tenant** is the Pulsar tenant name to be created. It
|
||||||
can either be dynamically or statically bound.
|
can either be dynamically or statically bound.
|
||||||
|
|
||||||
@ -293,7 +287,7 @@ In this command block, there is only 1 statement (s1):
|
|||||||
* (Mandatory) **optype (admin-namespace)** is the statement identifier
|
* (Mandatory) **optype (admin-namespace)** is the statement identifier
|
||||||
for this statement
|
for this statement
|
||||||
* (Mandatory) **namespace** is the Pulsar namespace name to be created
|
* (Mandatory) **namespace** is the Pulsar namespace name to be created
|
||||||
under the above tenant. It also can be dynamically or statically bound.
|
under a tenant. It can be either statically or dynamically bound.
|
||||||
|
|
||||||
### 1.4.3. Pulsar Admin API Command Block - Create Topics (Partitioned or Regular)
|
### 1.4.3. Pulsar Admin API Command Block - Create Topics (Partitioned or Regular)
|
||||||
|
|
||||||
@ -322,7 +316,7 @@ In this command block, there is only 1 statement (s1):
|
|||||||
a partitioned topic is to be created. It also can be dynamically or
|
a partitioned topic is to be created. It also can be dynamically or
|
||||||
statically bound.
|
statically bound.
|
||||||
|
|
||||||
**NOTE**: The topic name is bounded by the document level parameter "topic_uri".
|
**NOTE**: The topic name is bound by the document level parameter "topic_uri".
|
||||||
|
|
||||||
### 1.4.4. Batch Producer Command Block
|
### 1.4.4. Batch Producer Command Block
|
||||||
|
|
||||||
@ -331,7 +325,7 @@ once by one NB cycle execution. A typical format of this command block is
|
|||||||
as below:
|
as below:
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
- name: batch-producer-block
|
- name: batch-producer-block
|
||||||
tags:
|
tags:
|
||||||
phase: batch-producer
|
phase: batch-producer
|
||||||
statements:
|
statements:
|
||||||
@ -343,6 +337,11 @@ as below:
|
|||||||
- name: s2
|
- name: s2
|
||||||
optype: batch-msg-send
|
optype: batch-msg-send
|
||||||
msg_key: "{mykey}"
|
msg_key: "{mykey}"
|
||||||
|
msg_property: |
|
||||||
|
{
|
||||||
|
"prop1": "{myprop1}",
|
||||||
|
"prop2": "{myprop2}"
|
||||||
|
}
|
||||||
msg_value: |
|
msg_value: |
|
||||||
{
|
{
|
||||||
"SensorID": "{sensor_id}",
|
"SensorID": "{sensor_id}",
|
||||||
@ -374,6 +373,9 @@ ratios: 1, <batch_num>, 1.
|
|||||||
for this statement
|
for this statement
|
||||||
* (Optional) **msg_key**, when provided, specifies the key of the
|
* (Optional) **msg_key**, when provided, specifies the key of the
|
||||||
generated message
|
generated message
|
||||||
|
* (Optional) **msg_property**, when provided, specifies the properties
|
||||||
|
of the generated message. It must be a JSON string that contains a
|
||||||
|
series of key-value pairs.
|
||||||
* (Mandatory) **msg_payload** specifies the payload of the generated
|
* (Mandatory) **msg_payload** specifies the payload of the generated
|
||||||
message
|
message
|
||||||
* (Optional) **ratio**, when provided, specifies the batch size (how
|
* (Optional) **ratio**, when provided, specifies the batch size (how
|
||||||
@ -385,6 +387,9 @@ ratios: 1, <batch_num>, 1.
|
|||||||
* (Optional) **ratio**, when provided, MUST be 1. If not provided, it
|
* (Optional) **ratio**, when provided, MUST be 1. If not provided, it
|
||||||
is default to 1.
|
is default to 1.
|
||||||
|
|
||||||
|
**NOTE**: the topic that the producer needs to publish messages to is
|
||||||
|
specified by the document level parameter ***topic_uri***.
|
||||||
|
|
||||||
### 1.4.5. Producer Command Block
|
### 1.4.5. Producer Command Block
|
||||||
|
|
||||||
This is the regular Pulsar producer command block that produces one Pulsar
|
This is the regular Pulsar producer command block that produces one Pulsar
|
||||||
@ -400,6 +405,11 @@ as below:
|
|||||||
optype: msg-send
|
optype: msg-send
|
||||||
# producer_name: {producer_name}
|
# producer_name: {producer_name}
|
||||||
msg_key: "{mykey}"
|
msg_key: "{mykey}"
|
||||||
|
msg_property: |
|
||||||
|
{
|
||||||
|
"prop1": "{myprop1}",
|
||||||
|
"prop2": "{myprop2}"
|
||||||
|
}
|
||||||
msg_value: |
|
msg_value: |
|
||||||
{
|
{
|
||||||
"SensorID": "{sensor_id}",
|
"SensorID": "{sensor_id}",
|
||||||
@ -418,14 +428,20 @@ This command block only has 1 statements (s1):
|
|||||||
producer name that is associated with the message production.
|
producer name that is associated with the message production.
|
||||||
* (Optional) **msg_key**, when provided, specifies the key of the
|
* (Optional) **msg_key**, when provided, specifies the key of the
|
||||||
generated message
|
generated message
|
||||||
|
* (Optional) **msg_property**, when provided, specifies the properties
|
||||||
|
of the generated message. It must be a JSON string that contains a
|
||||||
|
series of key-value pairs.
|
||||||
* (Mandatory) **msg_payload** specifies the payload of the generated
|
* (Mandatory) **msg_payload** specifies the payload of the generated
|
||||||
message
|
message
|
||||||
|
|
||||||
### 1.4.6. Consumer Command Block
|
**NOTE**: the topic that the producer needs to publish messages to is
|
||||||
|
specified by the document level parameter ***topic_uri***.
|
||||||
|
|
||||||
|
### 1.4.6. (Single-Topic) Consumer Command Block
|
||||||
|
|
||||||
This is the regular Pulsar consumer command block that consumes one Pulsar
|
This is the regular Pulsar consumer command block that consumes one Pulsar
|
||||||
message per NB cycle execution. A typical format of this command block is
|
message from one single Pulsar topic per NB cycle execution. A typical
|
||||||
as below:
|
format of this command block is as below:
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
- name: consumer-block
|
- name: consumer-block
|
||||||
@ -434,8 +450,6 @@ as below:
|
|||||||
statements:
|
statements:
|
||||||
- name: s1
|
- name: s1
|
||||||
optype: msg-consume
|
optype: msg-consume
|
||||||
topic_names: "<pulsar_topic_1>, <pulsar_topic_2>"
|
|
||||||
# topics_pattern: "<pulsar_topic_regex_pattern>"
|
|
||||||
subscription_name:
|
subscription_name:
|
||||||
subscription_type:
|
subscription_type:
|
||||||
consumer_name:
|
consumer_name:
|
||||||
@ -447,19 +461,14 @@ This command block only has 1 statements (s1):
|
|||||||
and acknowledge it.
|
and acknowledge it.
|
||||||
* (Mandatory) **optype (msg-consume)** is the statement identifier for
|
* (Mandatory) **optype (msg-consume)** is the statement identifier for
|
||||||
this statement
|
this statement
|
||||||
* (Optional) **topic_names**, when provided, specifies multiple topic
|
|
||||||
names from which to consume messages for multi-topic message consumption.
|
|
||||||
* (Optional) **topics_pattern**, when provided, specifies pulsar
|
|
||||||
topic regex pattern for multi-topic message consumption
|
|
||||||
* (Mandatory) **subscription_name** specifies subscription name.
|
* (Mandatory) **subscription_name** specifies subscription name.
|
||||||
* (Optional) **subscription_type**, when provided, specifies
|
* (Optional) **subscription_type**, when provided, specifies
|
||||||
subscription type. Default to **exclusive** subscription type.
|
subscription type. Default to **exclusive** subscription type.
|
||||||
* (Optional) **consumer_name**, when provided, specifies the
|
* (Optional) **consumer_name**, when provided, specifies the
|
||||||
associated consumer name.
|
associated consumer name.
|
||||||
|
|
||||||
**NOTE 1**: when both **topic_names** and **topics_pattern** are provided, **topic_names** takes precedence over **topics_pattern**.
|
**NOTE**: the single topic that the consumer needs to consume messages from
|
||||||
|
is specified by the document level parameter ***topic_uri***.
|
||||||
**NOTE 2**: if both **topic_names** and **topics_pattern** are not provided, consumer topic name is default to the document level parameter **topic_uri**.
|
|
||||||
|
|
||||||
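The consume-and-acknowledge flow described by this command block maps onto the standard Pulsar client calls roughly as follows (a hedged sketch, not the driver's code; the literal topic and consumer name values are placeholders standing in for the `topic_uri` and `consumer_name` parameters):

```java
import org.apache.pulsar.client.api.Consumer;
import org.apache.pulsar.client.api.Message;
import org.apache.pulsar.client.api.PulsarClient;
import org.apache.pulsar.client.api.SubscriptionType;

public class SingleTopicConsumeSketch {
    public static void consumeOne(PulsarClient client, String topicUri) throws Exception {
        Consumer<byte[]> consumer = client.newConsumer()
            .topic(topicUri)                              // document level topic_uri
            .subscriptionName("mysub")                    // subscription_name
            .subscriptionType(SubscriptionType.Exclusive) // subscription_type default
            .consumerName("nb-consumer")                  // consumer_name (optional, placeholder value)
            .subscribe();
        Message<byte[]> msg = consumer.receive();         // one message per NB cycle
        consumer.acknowledge(msg);
        consumer.close();
    }
}
```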
### 1.4.7. Reader Command Block
|
### 1.4.7. Reader Command Block
|
||||||
|
|
||||||
@ -486,6 +495,9 @@ This command block only has 1 statements (s1):
|
|||||||
* (Optional) **reader_name**, when provided, specifies the associated
|
* (Optional) **reader_name**, when provided, specifies the associated
|
||||||
consumer name.
|
consumer name.
|
||||||
|
|
||||||
|
**NOTE**: the single topic that the reader needs to read messages from
|
||||||
|
is specified by the document level parameter ***topic_uri***.
|
||||||
|
|
||||||
**TBD**: at the moment, the NB Pulsar driver Reader API only supports
|
**TBD**: at the moment, the NB Pulsar driver Reader API only supports
|
||||||
reading from the following positions:
|
reading from the following positions:
|
||||||
* MessageId.earliest
|
* MessageId.earliest
|
||||||
@ -501,7 +513,124 @@ Reader reader = pulsarClient.newReader()
    .create();
```

## 1.5. Schema Support
### 1.4.8. Multi-topic Consumer Command Block

This is the regular Pulsar consumer command block that consumes one Pulsar
message from multiple Pulsar topics per NB cycle execution. A typical format
of this command block is as below:

```yaml
  - name: multi-topic-consumer-block
    tags:
      phase: multi-topic-consumer
      admin_task: false
    statements:
    - name: s1
      optype: msg-mt-consume
      topic_names:
      topics_pattern:
      subscription_name: "mysub"
      subscription_type:
      consumer_name:
```

This command block only has 1 statement (s1):

* Statement **s1** is used to consume one message from the Pulsar cluster
  and acknowledge it.
  * (Mandatory) **optype (msg-consume)** is the statement identifier for
    this statement
  * (Optional) **topic_names**, when provided, specifies multiple topic
    names from which to consume messages for multi-topic message consumption.
  * (Optional) **topics_pattern**, when provided, specifies a Pulsar
    topic regex pattern for multi-topic message consumption
  * (Mandatory) **subscription_name** specifies the subscription name.
  * (Optional) **subscription_type**, when provided, specifies the
    subscription type. Defaults to the **exclusive** subscription type.
  * (Optional) **consumer_name**, when provided, specifies the
    associated consumer name.

**NOTE 1**: when both **topic_names** and **topics_pattern** are provided,
**topic_names** takes precedence over **topics_pattern**.

**NOTE 2**: if neither **topic_names** nor **topics_pattern** is provided,
the consumer topic name defaults to the document level parameter **topic_uri**.
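For reference, the `topic_names` / `topics_pattern` options correspond to the standard `ConsumerBuilder` calls shown in this hedged sketch (illustrative only; the topic values are placeholders, and the driver's actual wiring is not part of this diff):

```java
import java.util.Arrays;
import org.apache.pulsar.client.api.Consumer;
import org.apache.pulsar.client.api.PulsarClient;
import org.apache.pulsar.client.api.SubscriptionType;

public class MultiTopicConsumerSketch {
    public static Consumer<byte[]> subscribe(PulsarClient client) throws Exception {
        // Explicit topic list -- corresponds to the "topic_names" statement parameter.
        // A regex alternative would be .topicsPattern(java.util.regex.Pattern.compile("...")),
        // which corresponds to "topics_pattern".
        return client.newConsumer()
            .topics(Arrays.asList(
                "persistent://public/default/t0",
                "persistent://public/default/t1"))
            .subscriptionName("mysub")
            .subscriptionType(SubscriptionType.Exclusive)
            .subscribe();
    }
}
```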
### 1.4.9. End-to-end Message Processing Command Block

The end-to-end message processing command block is used to simplify measuring
the end-to-end message processing (from being published to being consumed)
latency. A typical format of this command block is as below:

```yaml
  - name: e2e-msg-proc-block
    tags:
      phase: e2e-msg-proc
      admin_task: false
    statements:
    - name: s1
      optype: ec2-msg-proc-send
      msg_key:
      msg_property: |
        {
          "prop1": "{myprop1}"
        }
      msg_value: "{myvalue}"
      ratio: 1
    - name: s2
      optype: ec2-msg-proc-consume
      subscription_name: "mysub"
      subscription_type:
      ratio: 1
```

This command block has 2 statements (s1 and s2) with the following
ratios: 1, 1.

* Statement **s1** is used to publish a message to a topic
  * (Mandatory) **optype (ec2-msg-proc-send)** is the statement
    identifier for this statement
  * (Optional) **msg_key**, when provided, specifies the key of the
    generated message
  * (Optional) **msg_property**, when provided, specifies the properties
    of the generated message. It must be a JSON string that contains a
    series of key-value pairs.
  * (Mandatory) **msg_payload** specifies the payload of the generated
    message
  * (Optional) **ratio**, must be 1 when provided.
    Otherwise, it defaults to 1.
* Statement **s2** is used to consume the message that just got published
  from the same topic
  * (Mandatory) **optype (ec2-msg-proc-consume)** is the statement
    identifier for this statement
  * (Mandatory) **subscription_name** specifies the subscription name.
  * (Optional) **subscription_type**, when provided, specifies the
    subscription type. Defaults to the **exclusive** subscription type.
  * (Optional) **ratio**, must be 1 when provided.
    Otherwise, it defaults to 1.

**NOTE**: the topic that the producer needs to publish messages to is
specified by the document level parameter ***topic_uri***.

## 1.5. Message Properties

In the producer command block, it is optional to specify message properties:
```
statements:
  - name: s1
    msg_property: |
      {
        "prop1": "{myprop1}",
        "prop2": "{myprop2}"
      }
```

The provided message property string must be a valid JSON string that
contains a list of key-value pairs. Otherwise, if it is not a valid
JSON string as expected, the driver will ignore it and treat the
message as having no properties.
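For orientation, a key/properties/value triple like the one above maps onto the standard Pulsar producer API roughly as follows (a hedged sketch; the JSON-to-map parsing step and everything other than the Pulsar client calls are assumptions, not the driver's exact code):

```java
import java.util.Map;
import org.apache.pulsar.client.api.Producer;
import org.apache.pulsar.client.api.PulsarClientException;

public class MessagePropertySketch {
    // Assume the msg_property JSON has already been parsed into a Map elsewhere.
    public static void sendWithProperties(Producer<String> producer,
                                          String key,
                                          Map<String, String> properties,
                                          String value) throws PulsarClientException {
        producer.newMessage()
            .key(key)                 // msg_key
            .properties(properties)   // msg_property (one property per key-value pair)
            .value(value)             // msg_value
            .send();
    }
}
```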
## 1.6. Schema Support

Pulsar has built-in schema support. Other than primitive types, Pulsar
also supports complex types like **Avro**, etc. At the moment, the NB
@ -535,7 +664,81 @@ schema definition:
}
```
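As one possible reference point, the Avro definition file named by `schema.definition` can be turned into a Pulsar schema with the standard client API as sketched below (an assumption about the general approach, not necessarily the driver's exact implementation):

```java
import java.nio.file.Files;
import java.nio.file.Paths;
import org.apache.pulsar.client.api.Schema;
import org.apache.pulsar.client.api.schema.SchemaDefinition;

public class AvroSchemaSketch {
    // Build an Avro schema from the .avsc file referenced by "schema.definition".
    // The Object type parameter is a placeholder; a concrete POJO type would normally be used.
    public static Schema<Object> fromAvsc(String avscPath) throws Exception {
        String jsonDef = new String(Files.readAllBytes(Paths.get(avscPath)));
        SchemaDefinition<Object> def = SchemaDefinition.builder()
            .withJsonDef(jsonDef)
            .build();
        return Schema.AVRO(def);
    }
}
```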
## 1.6. NB Activity Execution Parameters
## 1.7. Measure End-to-end Message Processing Latency

**e2e-msg-proc-block** measures the end-to-end message latency metrics. It
contains one message producing statement and one message consuming statement.
When the message that is published by the producer is received by the consumer,
the consumer calculates the time difference between when the message is received
and when it was published.

The measured end-to-end message processing latency is captured in a histogram
metric named "e2e_msg_latency".

This command block uses one single machine to act as both a producer and a
consumer. This is done purely for convenience. In reality, we can use the
**producer-block** and **consumer-block** command blocks on separate machines
to achieve the same goal, which is closer to the actual use case and gives a
more accurate measurement (it avoids always reading messages from the managed
ledger cache).

If multiple machines are used to measure the end-to-end message processing
latency, we need to make sure:
1) The clocks of the two machines are synchronized with each other, e.g. through
   the NTP protocol.
2) Any time lag in starting the consumer is taken into account when interpreting
   the end-to-end message processing latency.
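Conceptually, the latency calculation amounts to subtracting the publish timestamp from the local receive time on the consuming side, as in this hedged sketch (whether the driver uses the broker-assigned publish time or a timestamp carried in a message property is not shown in this diff; the histogram wiring is omitted):

```java
import org.apache.pulsar.client.api.Consumer;
import org.apache.pulsar.client.api.Message;

public class E2eLatencySketch {
    // Returns the end-to-end latency in milliseconds for one received message.
    public static long receiveAndMeasure(Consumer<byte[]> consumer) throws Exception {
        Message<byte[]> msg = consumer.receive();
        long receivedAtMs = System.currentTimeMillis();
        long publishedAtMs = msg.getPublishTime();   // epoch millis assigned on the publish side
        consumer.acknowledge(msg);
        return receivedAtMs - publishedAtMs;         // value to feed into the "e2e_msg_latency" histogram
    }
}
```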
## 1.8. Detect Message Out-of-order, Message Loss, and Message Duplication

In order to detect errors like message out-of-order and message loss through
the NB Pulsar driver, we need to set the following document level parameter
to be true.
```
params:
  # Only applicable to producer and consumer
  # - used for message ordering and message loss check
  seq_tracking: "true"
```

For message duplication detection, if broker level message dedup configuration
is enabled ("brokerDeduplicationEnabled=true" in broker.conf), we also need to
enable this document level parameter:
```
params:
  msg_dedup_broker: "true"
```

However, since message dedup can also be enabled or disabled at the namespace
level or topic level, the NB Pulsar driver will also check the settings at these
layers through the API. Basically, the final message dedup setting for a topic is
determined by the following rules:
* if topic level message dedup is not set, check the namespace level setting
* if namespace level message dedup is not set, check the broker level setting,
  which in turn is determined by the document level NB parameter **msg_dedup_broker**
* if message dedup is enabled at multiple levels, the priority sequence follows:
  * topic level > namespace level > broker level

The logic of how this works is based on the fact that the NB execution cycle number
is monotonically increasing by 1 for every cycle moving forward. When publishing
a series of messages, we use the current NB cycle number as one message property,
which is also monotonically increasing by 1.

When receiving the messages, if the message sequence number stored in the message
property is not monotonically increasing or if there is a gap larger than 1, then
it must be one of the following errors:
* If the current message sequence ID is less than the previous message sequence ID,
  then it is a message out-of-order error. Exception **PulsarMsgOutOfOrderException**
  will be thrown.
* If the current message sequence ID is more than 1 greater than the previous message
  sequence ID, then it is a message loss error. Exception **PulsarMsgLossException**
  will be thrown.
* If message dedup is enabled and the current message sequence ID is equal to the
  previous message sequence ID, then it is a message duplication error. Exception
  **PulsarMsgDuplicateException** will be thrown.

In any of these cases, a runtime error is thrown with a corresponding error message.
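The three detection rules reduce to a comparison against the last observed sequence number. A minimal sketch of that logic (illustrative; the exception class names come from the text above, but their actual constructors and the driver's bookkeeping are assumptions, so plain RuntimeExceptions are used here):

```java
public class SeqTrackingSketch {
    private long lastSeqId = -1;   // sequence number of the previously received message

    // seqId is the NB cycle number carried as a message property by the producer.
    public void checkSequence(long seqId, boolean msgDedupEnabled) {
        if (lastSeqId >= 0) {
            if (seqId < lastSeqId) {
                throw new RuntimeException("PulsarMsgOutOfOrderException: " + seqId + " after " + lastSeqId);
            } else if (seqId > lastSeqId + 1) {
                throw new RuntimeException("PulsarMsgLossException: gap between " + lastSeqId + " and " + seqId);
            } else if (msgDedupEnabled && seqId == lastSeqId) {
                throw new RuntimeException("PulsarMsgDuplicateException: " + seqId + " seen twice");
            }
        }
        lastSeqId = seqId;
    }
}
```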
## 1.9. NB Activity Execution Parameters

At the moment, the following NB Pulsar driver **specific** activity
parameters are supported:
@ -553,7 +756,7 @@ reference to NB documentation for more parameters
* cycles=<total_NB_cycle_execution_number>
* --report-csv-to <metrics_output_dir_name>

## 1.7. NB Pulsar Driver Execution Example
## 1.10. NB Pulsar Driver Execution Example

**NOTE**: in the following examples, the Pulsar service URL is **pulsar:
//localhost:6650**, please change it accordingly for your own Pulsar
@ -578,7 +781,7 @@ environment.
```

## 1.8. Appendix A. Template Global Setting File (config.properties)
## 1.11. Appendix A. Template Global Setting File (config.properties)
```properties
schema.type =
schema.definition =
@ -7,7 +7,7 @@
|
|||||||
<parent>
|
<parent>
|
||||||
<artifactId>mvn-defaults</artifactId>
|
<artifactId>mvn-defaults</artifactId>
|
||||||
<groupId>io.nosqlbench</groupId>
|
<groupId>io.nosqlbench</groupId>
|
||||||
<version>4.15.52-SNAPSHOT</version>
|
<version>4.15.58-SNAPSHOT</version>
|
||||||
<relativePath>../mvn-defaults</relativePath>
|
<relativePath>../mvn-defaults</relativePath>
|
||||||
</parent>
|
</parent>
|
||||||
|
|
||||||
@ -21,15 +21,17 @@
|
|||||||
|
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>io.nosqlbench</groupId>
|
<groupId>io.nosqlbench</groupId>
|
||||||
<artifactId>drivers-api</artifactId>
|
<artifactId>engine-api</artifactId>
|
||||||
<version>4.15.52-SNAPSHOT</version>
|
<version>4.15.58-SNAPSHOT</version>
|
||||||
|
<scope>compile</scope>
|
||||||
|
</dependency>
|
||||||
|
|
||||||
|
<dependency>
|
||||||
|
<groupId>io.nosqlbench</groupId>
|
||||||
|
<artifactId>drivers-api</artifactId>
|
||||||
|
<version>4.15.58-SNAPSHOT</version>
|
||||||
|
<scope>compile</scope>
|
||||||
</dependency>
|
</dependency>
|
||||||
<dependency>
|
|
||||||
<groupId>io.nosqlbench</groupId>
|
|
||||||
<artifactId>engine-api</artifactId>
|
|
||||||
<version>4.15.52-SNAPSHOT</version>
|
|
||||||
<scope>compile</scope>
|
|
||||||
</dependency>
|
|
||||||
|
|
||||||
</dependencies>
|
</dependencies>
|
||||||
|
|
||||||
|
@ -7,7 +7,7 @@
|
|||||||
<parent>
|
<parent>
|
||||||
<artifactId>mvn-defaults</artifactId>
|
<artifactId>mvn-defaults</artifactId>
|
||||||
<groupId>io.nosqlbench</groupId>
|
<groupId>io.nosqlbench</groupId>
|
||||||
<version>4.15.52-SNAPSHOT</version>
|
<version>4.15.58-SNAPSHOT</version>
|
||||||
<relativePath>../mvn-defaults</relativePath>
|
<relativePath>../mvn-defaults</relativePath>
|
||||||
</parent>
|
</parent>
|
||||||
|
|
||||||
@ -21,16 +21,22 @@
|
|||||||
|
|
||||||
<dependencies>
|
<dependencies>
|
||||||
|
|
||||||
|
<dependency>
|
||||||
|
<groupId>io.nosqlbench</groupId>
|
||||||
|
<artifactId>engine-api</artifactId>
|
||||||
|
<version>4.15.58-SNAPSHOT</version>
|
||||||
|
</dependency>
|
||||||
|
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>io.nosqlbench</groupId>
|
<groupId>io.nosqlbench</groupId>
|
||||||
<artifactId>drivers-api</artifactId>
|
<artifactId>drivers-api</artifactId>
|
||||||
<version>4.15.52-SNAPSHOT</version>
|
<version>4.15.58-SNAPSHOT</version>
|
||||||
</dependency>
|
</dependency>
|
||||||
|
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>io.nosqlbench</groupId>
|
<groupId>io.nosqlbench</groupId>
|
||||||
<artifactId>driver-stdout</artifactId>
|
<artifactId>driver-stdout</artifactId>
|
||||||
<version>4.15.52-SNAPSHOT</version>
|
<version>4.15.58-SNAPSHOT</version>
|
||||||
</dependency>
|
</dependency>
|
||||||
|
|
||||||
</dependencies>
|
</dependencies>
|
||||||
|
@ -7,7 +7,7 @@
|
|||||||
<parent>
|
<parent>
|
||||||
<artifactId>mvn-defaults</artifactId>
|
<artifactId>mvn-defaults</artifactId>
|
||||||
<groupId>io.nosqlbench</groupId>
|
<groupId>io.nosqlbench</groupId>
|
||||||
<version>4.15.52-SNAPSHOT</version>
|
<version>4.15.58-SNAPSHOT</version>
|
||||||
<relativePath>../mvn-defaults</relativePath>
|
<relativePath>../mvn-defaults</relativePath>
|
||||||
</parent>
|
</parent>
|
||||||
|
|
||||||
@ -19,10 +19,16 @@
|
|||||||
|
|
||||||
<dependencies>
|
<dependencies>
|
||||||
|
|
||||||
|
<dependency>
|
||||||
|
<groupId>io.nosqlbench</groupId>
|
||||||
|
<artifactId>engine-api</artifactId>
|
||||||
|
<version>4.15.58-SNAPSHOT</version>
|
||||||
|
</dependency>
|
||||||
|
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>io.nosqlbench</groupId>
|
<groupId>io.nosqlbench</groupId>
|
||||||
<artifactId>drivers-api</artifactId>
|
<artifactId>drivers-api</artifactId>
|
||||||
<version>4.15.52-SNAPSHOT</version>
|
<version>4.15.58-SNAPSHOT</version>
|
||||||
</dependency>
|
</dependency>
|
||||||
|
|
||||||
<dependency>
|
<dependency>
|
||||||
@ -66,12 +72,6 @@
|
|||||||
<artifactId>httpclient</artifactId>
|
<artifactId>httpclient</artifactId>
|
||||||
<version>4.5.13</version>
|
<version>4.5.13</version>
|
||||||
</dependency>
|
</dependency>
|
||||||
<dependency>
|
|
||||||
<groupId>io.nosqlbench</groupId>
|
|
||||||
<artifactId>engine-api</artifactId>
|
|
||||||
<version>4.15.52-SNAPSHOT</version>
|
|
||||||
<scope>compile</scope>
|
|
||||||
</dependency>
|
|
||||||
|
|
||||||
</dependencies>
|
</dependencies>
|
||||||
|
|
||||||
|
@ -5,7 +5,7 @@
|
|||||||
<parent>
|
<parent>
|
||||||
<artifactId>mvn-defaults</artifactId>
|
<artifactId>mvn-defaults</artifactId>
|
||||||
<groupId>io.nosqlbench</groupId>
|
<groupId>io.nosqlbench</groupId>
|
||||||
<version>4.15.52-SNAPSHOT</version>
|
<version>4.15.58-SNAPSHOT</version>
|
||||||
<relativePath>../mvn-defaults</relativePath>
|
<relativePath>../mvn-defaults</relativePath>
|
||||||
</parent>
|
</parent>
|
||||||
|
|
||||||
@ -23,13 +23,13 @@
|
|||||||
<dependency>
|
<dependency>
|
||||||
<groupId>io.nosqlbench</groupId>
|
<groupId>io.nosqlbench</groupId>
|
||||||
<artifactId>nb-api</artifactId>
|
<artifactId>nb-api</artifactId>
|
||||||
<version>4.15.52-SNAPSHOT</version>
|
<version>4.15.58-SNAPSHOT</version>
|
||||||
</dependency>
|
</dependency>
|
||||||
|
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>io.nosqlbench</groupId>
|
<groupId>io.nosqlbench</groupId>
|
||||||
<artifactId>virtdata-userlibs</artifactId>
|
<artifactId>virtdata-userlibs</artifactId>
|
||||||
<version>4.15.52-SNAPSHOT</version>
|
<version>4.15.58-SNAPSHOT</version>
|
||||||
</dependency>
|
</dependency>
|
||||||
|
|
||||||
<dependency>
|
<dependency>
|
||||||
|
@ -5,7 +5,7 @@
|
|||||||
<parent>
|
<parent>
|
||||||
<artifactId>mvn-defaults</artifactId>
|
<artifactId>mvn-defaults</artifactId>
|
||||||
<groupId>io.nosqlbench</groupId>
|
<groupId>io.nosqlbench</groupId>
|
||||||
<version>4.15.52-SNAPSHOT</version>
|
<version>4.15.58-SNAPSHOT</version>
|
||||||
<relativePath>../mvn-defaults</relativePath>
|
<relativePath>../mvn-defaults</relativePath>
|
||||||
</parent>
|
</parent>
|
||||||
|
|
||||||
@ -23,25 +23,25 @@
|
|||||||
<dependency>
|
<dependency>
|
||||||
<groupId>io.nosqlbench</groupId>
|
<groupId>io.nosqlbench</groupId>
|
||||||
<artifactId>nb-api</artifactId>
|
<artifactId>nb-api</artifactId>
|
||||||
<version>4.15.52-SNAPSHOT</version>
|
<version>4.15.58-SNAPSHOT</version>
|
||||||
</dependency>
|
</dependency>
|
||||||
|
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>io.nosqlbench</groupId>
|
<groupId>io.nosqlbench</groupId>
|
||||||
<artifactId>drivers-api</artifactId>
|
<artifactId>drivers-api</artifactId>
|
||||||
<version>4.15.52-SNAPSHOT</version>
|
<version>4.15.58-SNAPSHOT</version>
|
||||||
</dependency>
|
</dependency>
|
||||||
|
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>io.nosqlbench</groupId>
|
<groupId>io.nosqlbench</groupId>
|
||||||
<artifactId>nb-annotations</artifactId>
|
<artifactId>nb-annotations</artifactId>
|
||||||
<version>4.15.52-SNAPSHOT</version>
|
<version>4.15.58-SNAPSHOT</version>
|
||||||
</dependency>
|
</dependency>
|
||||||
|
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>io.nosqlbench</groupId>
|
<groupId>io.nosqlbench</groupId>
|
||||||
<artifactId>virtdata-userlibs</artifactId>
|
<artifactId>virtdata-userlibs</artifactId>
|
||||||
<version>4.15.52-SNAPSHOT</version>
|
<version>4.15.58-SNAPSHOT</version>
|
||||||
</dependency>
|
</dependency>
|
||||||
|
|
||||||
<dependency>
|
<dependency>
|
||||||
|
@ -218,15 +218,17 @@ public class HybridRateLimiter implements Startable, RateLimiter {
     @Override
     public String toString() {
         StringBuilder sb = new StringBuilder(HybridRateLimiter.class.getSimpleName());
+        sb.append("{\n");
         if (this.getRateSpec() != null) {
-            sb.append(" spec=").append(this.getRateSpec().toString());
+            sb.append(" spec:").append(this.getRateSpec().toString());
-        }
-        if (this.state != null) {
-            sb.append(" state=").append(this.state);
         }
         if (this.tokens != null) {
-            sb.append(" tokens=").append(this.tokens.toString());
+            sb.append(",\n tokenpool:").append(this.tokens.toString());
         }
+        if (this.state != null) {
+            sb.append(",\n state:'").append(this.state).append("'");
+        }
+        sb.append("\n}");
         return sb.toString();
     }

@ -119,7 +119,8 @@ public class RateSpec {
      * Specify that a rate limiter should only be configured without affecting its running state.
      * If the rate limiter is already running, then the configuration should take effect immediately.
      * A rate limiter will be created automatically if needed. Configurations that do not effectively
-     * change the rate limiter are ignored.
+     * change the rate limiter are ignored. This does not automatically start the rate limiter. It
+     * will need to be started explicitly before it is used.
      */
     configure,
     /**
@ -185,7 +186,7 @@ public class RateSpec {
         double burstPortion = Math.abs(br - ((long) br));
         String burstfmt = (burstPortion > 0.001D) ? String.format("%,.3f", br) : String.format("%,d", (long) br);

-        return String.format("rate=%s burstRatio=%.3f (%s SOPSS %s BOPSS) [%s]", ratefmt, burstRatio, ratefmt, burstfmt, verb);
+        return String.format("{ rate:'%s', burstRatio:'%.3f', SOPSS:'%s', BOPSS:'%s', verb:'%s' }", ratefmt, burstRatio, ratefmt, burstfmt, verb);
     }

     public RateSpec withOpsPerSecond(double rate) {
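The clarified javadoc above is the key behavioral point: with the `configure` verb a limiter is only (re)configured, and nothing runs until it is started. A minimal sketch of that contract, reusing the constructor shape that the TestRateLimiterPerf1E8 changes below rely on; the alias, rate, and burst ratio here are illustrative only, and it is assumed that the Startable interface exposes the usual start() method:

    // Sketch only: build a limiter in 'configure' mode, then start it explicitly before use.
    RateSpec spec = new RateSpec(1E8, 1.1).withVerb(RateSpec.Verb.configure);
    HybridRateLimiter rl = new HybridRateLimiter(
        ActivityDef.parseActivityDef("alias=tokenrl"),   // activity definition, as used in the perf tests
        "hybrid",                                        // limiter label, as used in the perf tests
        spec
    );
    // Verb.configure does not start the limiter; start it before acquiring any grants.
    rl.start();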
@ -55,7 +55,7 @@ public class ThreadDrivenTokenPool implements TokenPool {
     private long burstPoolSize;
     private long maxOverActivePool;
     private double burstRatio;
     // TODO Consider removing volatile after investigating
     private volatile long activePool;
     private volatile long waitingPool;
     private RateSpec rateSpec;
@ -77,7 +77,7 @@ public class ThreadDrivenTokenPool implements TokenPool {
     public ThreadDrivenTokenPool(RateSpec rateSpec, ActivityDef activityDef) {
         this.activityDef = activityDef;
         apply(rateSpec);
-        logger.debug("initialized token pool: " + this.toString() + " for rate:" + rateSpec.toString());
+        logger.debug("initialized token pool: " + this + " for rate:" + rateSpec);
         // filler.start();
     }

@ -239,13 +239,14 @@ public class ThreadDrivenTokenPool implements TokenPool {

     @Override
     public String toString() {
-        return "Tokens: active=" + activePool + "/" + maxActivePool
-            + String.format(
-            " (%3.1f%%)A (%3.1f%%)B ",
+        return String.format(
+            "{ active:%d, max:%d, fill:'(%,3.1f%%)A (%,3.1f%%)B', wait_ns:%,d, blocks:%,d }",
+            activePool, maxActivePool,
             (((double) activePool / (double) maxActivePool) * 100.0),
-            (((double) activePool / (double) maxOverActivePool) * 100.0)) + " waiting=" + waitingPool +
-            " blocks=" + blocks +
-            " rateSpec:" + ((rateSpec != null) ? rateSpec.toString() : "NULL");
+            (((double) activePool / (double) maxOverActivePool) * 100.0),
+            waitingPool,
+            blocks
+        );
     }

     @Override
@ -262,7 +263,7 @@ public class ThreadDrivenTokenPool implements TokenPool {
     }

     @Override
-    public void start() {
+    public synchronized void start() {
         filler.start();
     }
 }
@ -96,7 +96,7 @@ public class TokenFiller implements Runnable {
         }
     }

-    public TokenFiller start() {
+    public synchronized TokenFiller start() {
         this.tokenPool.refill(rateSpec.getNanosPerOp());

         thread = new Thread(this);
@ -104,7 +104,7 @@ public class TokenFiller implements Runnable {
         thread.setPriority(Thread.MAX_PRIORITY);
         thread.setDaemon(true);
         thread.start();
-        logger.debug("Starting token filler thread: " + this.toString());
+        logger.debug("Starting token filler thread: " + this);
         return this;
     }

@ -123,7 +123,7 @@ public class TokenFiller implements Runnable {

     public synchronized long restart() {
         this.lastRefillAt=System.nanoTime();
-        logger.debug("Restarting token filler at " + lastRefillAt + " thread: " + this.toString());
+        logger.debug("Restarting token filler at " + lastRefillAt + " thread: " + this);
         long wait = this.tokenPool.restart();
         return wait;
     }
@ -62,7 +62,7 @@ public class RateLimiterPerfTestMethods {
         return perf.getLastResult();
     }

-    public Result rateLimiterSingleThreadedConvergence(Function<RateSpec,RateLimiter> rlf, RateSpec rs, long startingCycles, double margin) {
+    public Result rateLimiterSingleThreadedConvergence(Function<RateSpec, RateLimiter> rlf, RateSpec rs, long startingCycles, double margin) {
         //rl.applyRateSpec(rl.getRateSpec().withOpsPerSecond(1E9));
         Bounds bounds = new Bounds(startingCycles, 2);
         Perf perf = new Perf("nanotime");
@ -139,21 +139,21 @@ public class RateLimiterPerfTestMethods {
             double duration = (endAt - startAt) / 1000000000.0d;
             double acqops = (count / duration);

-            System.out.println(rl.toString());
+            System.out.println(rl);

             System.out.println(ANSI_Blue +
                 String.format(
                     "spec: %s\n count: %9d, duration %.5fS, acquires/s %.3f, nanos/op: %f\n delay: %d (%.5fS)",
                     rl.getRateSpec(),
                     count, duration, acqops, (1_000_000_000.0d / acqops), divDelay, (divDelay / 1_000_000_000.0d)) +
                 ANSI_Reset);

         }

         long[] delays = results.stream().mapToLong(Long::longValue).toArray();

         String delaySummary = Arrays.stream(delays).mapToDouble(d -> (double) d / 1_000_000_000.0D).mapToObj(d -> String.format("%.3f", d))
             .collect(Collectors.joining(","));
         System.out.println("delays in seconds:\n" + delaySummary);
         System.out.println("delays in ns:\n" + Arrays.toString(delays));

@ -176,7 +176,7 @@ public class RateLimiterPerfTestMethods {
      * This a low-overhead test for multi-threaded access to the same getOpsPerSec limiter. It calculates the
      * effective concurrent getOpsPerSec under atomic contention.
      */
-    public Perf testRateLimiterMultiThreadedContention(Function<RateSpec,RateLimiter> rlFunc, RateSpec spec, long iterations, int threadCount) {
+    public Perf testRateLimiterMultiThreadedContention(Function<RateSpec, RateLimiter> rlFunc, RateSpec spec, long iterations, int threadCount) {
         System.out.println("Running " + Thread.currentThread().getStackTrace()[1].getMethodName());

         RateLimiter rl = rlFunc.apply(spec);
@ -187,24 +187,24 @@ public class RateLimiterPerfTestMethods {
         }
         RateLimiterPerfTestMethods.TestExceptionHandler errorhandler = new RateLimiterPerfTestMethods.TestExceptionHandler();
         RateLimiterPerfTestMethods.TestThreadFactory threadFactory = new RateLimiterPerfTestMethods.TestThreadFactory(errorhandler);
-        ExecutorService tp = Executors.newFixedThreadPool(threadCount+1, threadFactory);
+        ExecutorService tp = Executors.newFixedThreadPool(threadCount + 1, threadFactory);

-        System.out.format("Running %d iterations split over %d threads (%d) at getOpsPerSec %.3f\n", iterations, threadCount, (iterations / threadCount), rate);
+        System.out.format("Running %,d iterations split over %,d threads (%,d per) at %,.3f ops/s\n", iterations, threadCount, (iterations / threadCount), rate);
         RateLimiterPerfTestMethods.Acquirer[] threads = new RateLimiterPerfTestMethods.Acquirer[threadCount];
         DeltaHdrHistogramReservoir stats = new DeltaHdrHistogramReservoir("times", 5);

-        CyclicBarrier barrier = new CyclicBarrier(threadCount+1);
+        CyclicBarrier barrier = new CyclicBarrier(threadCount + 1);

         RateLimiterStarter starter = new RateLimiterStarter(barrier, rl);

         for (int i = 0; i < threadCount; i++) {
-            threads[i] = new RateLimiterPerfTestMethods.Acquirer(i, rl, (int) (iterationsPerThread), stats, barrier);
+            threads[i] = new RateLimiterPerfTestMethods.Acquirer(i, rl, iterationsPerThread, stats, barrier);
             // threads[i] = new RateLimiterPerfTestMethods.Acquirer(i, rl, (int) (iterations / threadCount), stats, barrier);
         }

         tp.execute(starter);

-        System.out.println("limiter stats:" + rl);
+        System.out.println(rl);
         System.out.format("submitting (%d threads)...\n", threads.length);
         List<Future<Result>> futures = new ArrayList<>();
         for (int i = 0; i < threadCount; i++) {
@ -223,7 +223,7 @@ public class RateLimiterPerfTestMethods {

         errorhandler.throwIfAny();

-        System.out.println("limiter stats:" + rl);
+        System.out.println(rl);

         Perf aggregatePerf = new Perf("contended with " + threadCount + " threads for " + iterations + " iterations for " + rl.getRateSpec().toString());
         futures.stream().map(f -> {
@ -234,7 +234,7 @@ public class RateLimiterPerfTestMethods {
             }
         }).forEachOrdered(aggregatePerf::add);

-        System.out.println(aggregatePerf);
+        // System.out.println(aggregatePerf);

         // if (rl instanceof HybridRateLimiter) {
         //     String refillLog = ((HybridRateLimiter) rl).getRefillLog();
@ -246,8 +246,8 @@ public class RateLimiterPerfTestMethods {
     }

     private static class RateLimiterStarter implements Runnable {
-        private CyclicBarrier barrier;
-        private RateLimiter rl;
+        private final CyclicBarrier barrier;
+        private final RateLimiter rl;

         public RateLimiterStarter(CyclicBarrier barrier, RateLimiter rl) {
             this.barrier = barrier;
@ -257,9 +257,9 @@ public class RateLimiterPerfTestMethods {
         @Override
         public void run() {
             try {
-                System.out.println("awaiting barrier (starter) (" + barrier.getNumberWaiting() + " awaiting)");
+                // System.out.println("awaiting barrier (starter) (" + barrier.getNumberWaiting() + " awaiting)");
                 barrier.await(60, TimeUnit.SECONDS);
-                System.out.println("started the rate limiter (starter) (" + barrier.getNumberWaiting() + " awaiting)");
+                // System.out.println("started the rate limiter (starter) (" + barrier.getNumberWaiting() + " awaiting)");

             } catch (Exception e) {
                 throw new RuntimeException(e);
@ -291,7 +291,7 @@ public class RateLimiterPerfTestMethods {
         private final int threadIdx;
         private final DeltaHdrHistogramReservoir reservoir;
         private final CyclicBarrier barrier;
-        private long iterations;
+        private final long iterations;

         public Acquirer(int i, RateLimiter limiter, int iterations, DeltaHdrHistogramReservoir reservoir, CyclicBarrier barrier) {
             this.threadIdx = i;
@ -304,14 +304,18 @@ public class RateLimiterPerfTestMethods {
         @Override
         public Result call() {
             // synchronized (barrier) {
             try {
-                System.out.println("awaiting barrier " + this.threadIdx + " (" + barrier.getNumberWaiting() + " awaiting)");
-                barrier.await(60, TimeUnit.SECONDS);
+                if (this.threadIdx == 0) {
+                    System.out.println("awaiting barrier");

-                // System.out.println("starting " + this.threadIdx);
-            } catch (Exception be) {
-                throw new RuntimeException(be); // This should not happen unless the test is broken
             }
+                barrier.await(60, TimeUnit.SECONDS);
+                if (this.threadIdx == 0) {
+                    System.out.println("starting all threads");
+                }
+
+            } catch (Exception be) {
+                throw new RuntimeException(be); // This should not happen unless the test is broken
+            }
             // }
             long startTime = System.nanoTime();
             for (int i = 0; i < iterations; i++) {
@ -33,9 +33,62 @@ import java.util.function.Function;
  */
 public class TestRateLimiterPerf1E8 {

-    private final Function<RateSpec, RateLimiter> rlFunction = rs -> new HybridRateLimiter(ActivityDef.parseActivityDef("alias=tokenrl"),"hybrid", rs.withVerb(RateSpec.Verb.configure));
+    private final Function<RateSpec, RateLimiter> rlFunction =
+        rs -> new HybridRateLimiter(
+            ActivityDef.parseActivityDef("alias=tokenrl"),
+            "hybrid",
+            rs.withVerb(RateSpec.Verb.configure)
+        );
     private final RateLimiterPerfTestMethods methods = new RateLimiterPerfTestMethods();

+    @Test
+    @Disabled
+    public void test100Mops_4000threads() {
+        Perf perf = methods.testRateLimiterMultiThreadedContention(
+            rlFunction,
+            new RateSpec(1E8, 1.1),
+            100_000_000,
+            4000
+        );
+        System.out.println(perf.getLastResult());
+    }
+
+    @Test
+    @Disabled
+    public void test100Mops_2000threads() {
+        Perf perf = methods.testRateLimiterMultiThreadedContention(
+            rlFunction,
+            new RateSpec(1E8, 1.1),
+            100_000_000,
+            2000
+        );
+        System.out.println(perf.getLastResult());
+    }
+
+    @Test
+    @Disabled
+    public void test100Mops_1000threads() {
+        Perf perf = methods.testRateLimiterMultiThreadedContention(
+            rlFunction,
+            new RateSpec(1E8, 1.1),
+            100_000_000,
+            1000
+        );
+        System.out.println(perf.getLastResult());
+    }
+
+    @Test
+    @Disabled
+    public void test100Mops_320threads() {
+        Perf perf = methods.testRateLimiterMultiThreadedContention(
+            rlFunction,
+            new RateSpec(1E8, 1.1),
+            100_000_000,
+            320
+        );
+        System.out.println(perf.getLastResult());
+    }
+
     // 160 threads at 100_000_000 ops/s
     // 1600000000_ops 149.351811_S 10712960.186_ops_s, 93_ns_op
     // JVM 11.0.1, Intel(R) Core(TM) i7-4790 CPU @ 3.60GHz
@ -46,7 +99,12 @@ public class TestRateLimiterPerf1E8 {
     @Test
     @Disabled
     public void test100Mops_160threads() {
-        Perf perf = methods.testRateLimiterMultiThreadedContention(rlFunction, new RateSpec(1E8, 1.1), 100_000_000,160);
+        Perf perf = methods.testRateLimiterMultiThreadedContention(
+            rlFunction,
+            new RateSpec(1E8, 1.1),
+            100_000_000,
+            160
+        );
         System.out.println(perf.getLastResult());
     }

@ -57,7 +115,7 @@ public class TestRateLimiterPerf1E8 {
     @Test
     @Disabled
     public void test100Mops_80threads() {
-        Perf perf = methods.testRateLimiterMultiThreadedContention(rlFunction, new RateSpec(1E8, 1.1), 100_000_000,80);
+        Perf perf = methods.testRateLimiterMultiThreadedContention(rlFunction, new RateSpec(1E8, 1.1), 100_000_000, 80);
         System.out.println(perf.getLastResult());
     }

@ -70,7 +128,7 @@ public class TestRateLimiterPerf1E8 {
     @Test
     @Disabled
     public void test100Mops_40threads() {
-        Perf perf = methods.testRateLimiterMultiThreadedContention(rlFunction, new RateSpec(1E8, 1.1), 100_000_000,40);
+        Perf perf = methods.testRateLimiterMultiThreadedContention(rlFunction, new RateSpec(1E8, 1.1), 100_000_000, 40);
         System.out.println(perf.getLastResult());
     }

@ -90,7 +148,7 @@ public class TestRateLimiterPerf1E8 {
     @Test
     @Disabled
     public void test100Mops_20threads() {
-        Perf perf = methods.testRateLimiterMultiThreadedContention(rlFunction, new RateSpec(1E8, 1.1), 100_000_000,20);
+        Perf perf = methods.testRateLimiterMultiThreadedContention(rlFunction, new RateSpec(1E8, 1.1), 100_000_000, 20);
         System.out.println(perf.getLastResult());
     }

@ -106,7 +164,7 @@ public class TestRateLimiterPerf1E8 {
     @Test
     @Disabled
     public void test100Mops_10threads() {
-        Perf perf = methods.testRateLimiterMultiThreadedContention(rlFunction, new RateSpec(1E8, 1.1), 100_000_000,10);
+        Perf perf = methods.testRateLimiterMultiThreadedContention(rlFunction, new RateSpec(1E8, 1.1), 100_000_000, 10);
         System.out.println(perf.getLastResult());
     }

@ -123,7 +181,7 @@ public class TestRateLimiterPerf1E8 {
     @Test
     @Disabled
     public void test100Mops_5threads() {
-        Perf perf = methods.testRateLimiterMultiThreadedContention(rlFunction, new RateSpec(1E8, 1.1), 100_000_000,5);
+        Perf perf = methods.testRateLimiterMultiThreadedContention(rlFunction, new RateSpec(1E8, 1.1), 100_000_000, 5);
         System.out.println(perf.getLastResult());
     }

@ -4,7 +4,7 @@
     <parent>
         <artifactId>mvn-defaults</artifactId>
         <groupId>io.nosqlbench</groupId>
-        <version>4.15.52-SNAPSHOT</version>
+        <version>4.15.58-SNAPSHOT</version>
         <relativePath>../mvn-defaults</relativePath>
     </parent>

@ -23,13 +23,13 @@
     <dependency>
         <groupId>io.nosqlbench</groupId>
         <artifactId>engine-core</artifactId>
-        <version>4.15.52-SNAPSHOT</version>
+        <version>4.15.58-SNAPSHOT</version>
     </dependency>

     <dependency>
         <groupId>io.nosqlbench</groupId>
         <artifactId>engine-docker</artifactId>
-        <version>4.15.52-SNAPSHOT</version>
+        <version>4.15.58-SNAPSHOT</version>
     </dependency>

 </dependencies>
@ -691,7 +691,7 @@ public class NBCLIOptions {
     public String getProgressSpec() {
         ProgressSpec spec = parseProgressSpec(this.progressSpec);// sanity check
         if (spec.indicatorMode == IndicatorMode.console) {
-            if (NBLogLevel.INFO.isGreaterOrEqualTo(getConsoleLogLevel())) {
+            if (getConsoleLogLevel().isGreaterOrEqualTo(NBLogLevel.INFO)) {
                 // System.err.println("Console is already logging info or more, so progress data on console is " +
                 //     "suppressed.");
                 spec.indicatorMode = IndicatorMode.logonly;
@ -70,23 +70,26 @@ public class NBCLIScenarioParserTest {
         assertThat(cmds.size()).isEqualTo(6);
     }

-    // TODO: make this work
-    // @Test
-    // public void testThatTemplatesAreExpandedDefault() {
-    //     NBCLIOptions opts = new NBCLIOptions(new String[]{ "scenario-test", "template-test"});
-    //     List<NBCLIOptions.Cmd> cmds = opts.getCommands();
-    //     assertThat(cmds.size()).isEqualTo(1);
-    //     assertThat(cmds.get(0).getCmdSpec()).isEqualTo("driver=stdout;cycles=10;workload=scenario-test.yaml;");
-    // }
+    @Test
+    public void testThatTemplatesAreExpandedDefault() {
+        NBCLIOptions opts = new NBCLIOptions(new String[]{ "scenario-test", "template-test"});
+        List<Cmd> cmds = opts.getCommands();
+        assertThat(cmds.size()).isEqualTo(1);
+        assertThat(cmds.get(0).getArg("driver")).isEqualTo("stdout");
+        assertThat(cmds.get(0).getArg("cycles")).isEqualTo("10");
+        assertThat(cmds.get(0).getArg("workload")).isEqualTo("target/test-classes/activities/scenario-test.yaml");
+    }

-    // TODO: Make this work
-    // @Test
-    // public void testThatTemplatesAreExpandedOverride() {
-    //     NBCLIOptions opts = new NBCLIOptions(new String[]{ "scenario-test", "template-test", "cycles-test=20"});
-    //     List<NBCLIOptions.Cmd> cmds = opts.getCommands();
-    //     assertThat(cmds.size()).isEqualTo(1);
-    //     assertThat(cmds.get(0).getCmdSpec()).isEqualTo("driver=stdout;cycles=20;cycles-test=20;workload=activities/scenario-test.yaml;");
-    // }
+    @Test
+    public void testThatTemplatesAreExpandedOverride() {
+        NBCLIOptions opts = new NBCLIOptions(new String[]{ "scenario-test", "template-test", "cycles-test=20"});
+        List<Cmd> cmds = opts.getCommands();
+        assertThat(cmds.size()).isEqualTo(1);
+        assertThat(cmds.get(0).getArg("driver")).isEqualTo("stdout");
+        assertThat(cmds.get(0).getArg("cycles")).isEqualTo("20");
+        assertThat(cmds.get(0).getArg("cycles-test")).isEqualTo("20");
+        assertThat(cmds.get(0).getArg("workload")).isEqualTo("target/test-classes/activities/scenario-test.yaml");
+    }

     @Test
     public void testThatUndefValuesAreUndefined() {
@ -5,7 +5,7 @@
     <parent>
         <artifactId>mvn-defaults</artifactId>
         <groupId>io.nosqlbench</groupId>
-        <version>4.15.52-SNAPSHOT</version>
+        <version>4.15.58-SNAPSHOT</version>
         <relativePath>../mvn-defaults</relativePath>
     </parent>

@ -21,7 +21,7 @@
     <dependency>
         <groupId>io.nosqlbench</groupId>
         <artifactId>engine-api</artifactId>
-        <version>4.15.52-SNAPSHOT</version>
+        <version>4.15.58-SNAPSHOT</version>
     </dependency>

 </dependencies>
@ -5,7 +5,7 @@
     <parent>
         <artifactId>mvn-defaults</artifactId>
         <groupId>io.nosqlbench</groupId>
-        <version>4.15.52-SNAPSHOT</version>
+        <version>4.15.58-SNAPSHOT</version>
         <relativePath>../mvn-defaults</relativePath>
     </parent>

@ -28,13 +28,13 @@
     <dependency>
         <groupId>io.nosqlbench</groupId>
         <artifactId>engine-api</artifactId>
-        <version>4.15.52-SNAPSHOT</version>
+        <version>4.15.58-SNAPSHOT</version>
     </dependency>

     <dependency>
         <groupId>io.nosqlbench</groupId>
         <artifactId>drivers-api</artifactId>
-        <version>4.15.52-SNAPSHOT</version>
+        <version>4.15.58-SNAPSHOT</version>
     </dependency>

     <dependency>
@ -85,7 +85,7 @@
     <dependency>
         <groupId>io.nosqlbench</groupId>
         <artifactId>engine-clients</artifactId>
-        <version>4.15.52-SNAPSHOT</version>
+        <version>4.15.58-SNAPSHOT</version>
         <scope>compile</scope>
     </dependency>

@ -4,7 +4,7 @@
     <parent>
         <artifactId>mvn-defaults</artifactId>
         <groupId>io.nosqlbench</groupId>
-        <version>4.15.52-SNAPSHOT</version>
+        <version>4.15.58-SNAPSHOT</version>
         <relativePath>../mvn-defaults</relativePath>
     </parent>

@ -56,7 +56,7 @@
     <dependency>
         <groupId>io.nosqlbench</groupId>
         <artifactId>engine-api</artifactId>
-        <version>4.15.52-SNAPSHOT</version>
+        <version>4.15.58-SNAPSHOT</version>
     </dependency>
 </dependencies>

@ -4,7 +4,7 @@
     <parent>
         <artifactId>mvn-defaults</artifactId>
         <groupId>io.nosqlbench</groupId>
-        <version>4.15.52-SNAPSHOT</version>
+        <version>4.15.58-SNAPSHOT</version>
         <relativePath>../mvn-defaults</relativePath>
     </parent>

@ -28,7 +28,7 @@
     <dependency>
         <groupId>io.nosqlbench</groupId>
         <artifactId>docsys</artifactId>
-        <version>4.15.52-SNAPSHOT</version>
+        <version>4.15.58-SNAPSHOT</version>
     </dependency>

 </dependencies>
@ -4,7 +4,7 @@
     <parent>
         <artifactId>mvn-defaults</artifactId>
         <groupId>io.nosqlbench</groupId>
-        <version>4.15.52-SNAPSHOT</version>
+        <version>4.15.58-SNAPSHOT</version>
         <relativePath>../mvn-defaults</relativePath>
     </parent>

@ -22,7 +22,7 @@
     <dependency>
         <groupId>io.nosqlbench</groupId>
         <artifactId>engine-api</artifactId>
-        <version>4.15.52-SNAPSHOT</version>
+        <version>4.15.58-SNAPSHOT</version>
     </dependency>

 </dependencies>
@ -0,0 +1,22 @@
+package io.nosqlbench.engine.extensions.csvoutput;
+
+import com.codahale.metrics.MetricRegistry;
+import io.nosqlbench.engine.api.extensions.ScriptingPluginInfo;
+import io.nosqlbench.nb.annotations.Service;
+import org.apache.logging.log4j.Logger;
+
+import javax.script.ScriptContext;
+
+@Service(value = ScriptingPluginInfo.class,selector = "csvoutput")
+public class CsvOutputPluginData implements ScriptingPluginInfo<CsvOutputPluginInstance> {
+
+    @Override
+    public String getDescription() {
+        return "Write CSV output to a named file";
+    }
+
+    @Override
+    public CsvOutputPluginInstance getExtensionObject(Logger logger, MetricRegistry metricRegistry, ScriptContext scriptContext) {
+        return new CsvOutputPluginInstance();
+    }
+}
@ -0,0 +1,8 @@
+package io.nosqlbench.engine.extensions.csvoutput;
+
+public class CsvOutputPluginInstance {
+
+    public CsvOutputPluginWriter open(String filename, String... headers) {
+        return new CsvOutputPluginWriter(filename, headers);
+    }
+}
@ -0,0 +1,80 @@
+package io.nosqlbench.engine.extensions.csvoutput;
+
+import org.apache.commons.csv.CSVFormat;
+import org.apache.commons.csv.CSVPrinter;
+import org.graalvm.polyglot.Value;
+
+import java.io.FileWriter;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.attribute.PosixFilePermissions;
+import java.util.*;
+
+public class CsvOutputPluginWriter {
+
+    private final CSVPrinter printer;
+    private final FileWriter filewriter;
+    private final LinkedHashSet<String> headerKeys;
+    private final String filename;
+
+    public CsvOutputPluginWriter(String filename, String... headers) {
+        try {
+            this.filename = filename;
+            Path filepath = Path.of(filename);
+            Files.createDirectories(filepath.getParent(), PosixFilePermissions.asFileAttribute(
+                PosixFilePermissions.fromString("rwxr-x---")
+            ));
+            CSVFormat fmt = CSVFormat.DEFAULT;
+            this.headerKeys = new LinkedHashSet<>(Arrays.asList(headers));
+            this.filewriter = new FileWriter(filepath.toString());
+            this.printer = new CSVPrinter(filewriter, fmt);
+            if (Files.size(Path.of(filename)) == 0) {
+                printer.printRecord(headerKeys);
+                printer.flush();
+            }
+        } catch (IOException e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    public CsvOutputPluginWriter write(Value value) {
+        List<String> lineout = new ArrayList<>();
+        Map<String, String> provided = new HashMap<>();
+        if (value.isHostObject()) {
+            Object o = value.asHostObject();
+            if (o instanceof Map) {
+                ((Map<?, ?>) o).forEach((k, v) -> {
+                    provided.put(k.toString(), v.toString());
+                });
+            } else {
+                throw new RuntimeException("host object provided as '" + o.getClass().getCanonicalName() + ", but only Maps are supported.");
+            }
+        } else if (value.hasMembers()) {
+            for (String vkey : value.getMemberKeys()) {
+                provided.put(vkey, value.getMember(vkey).toString());
+            }
+        } else {
+            throw new RuntimeException("Value was not a Map host object nor a type with members.");
+        }
+
+        for (String headerKey : headerKeys) {
+            if (provided.containsKey(headerKey)) {
+                lineout.add(provided.remove(headerKey));
+            } else {
+                lineout.add("");
+            }
+        }
+        if (provided.size() > 0) {
+            throw new RuntimeException("Unqualified column was emitted for file '" + filename);
+        }
+
+        try {
+            printer.printRecord(lineout);
+            printer.flush();
+        } catch (IOException e) {
+            throw new RuntimeException(e);
+        }
+        return this;
+    }
+}
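Taken together, the constructor and write() above define the column contract of the new writer: the headers are fixed when the file is opened, keys missing from a row are written as empty cells, and any key that is not in the declared header set fails the write. A small sketch of that behavior, following the same Value.asValue(Map.of(...)) pattern used in the test further below; the file name and values here are illustrative only:

    // Sketch: headers are fixed up front; rows may omit columns but may not add unknown ones.
    CsvOutputPluginWriter writer = new CsvOutputPluginWriter("rows.csv", "time", "value", "note");
    writer.write(Value.asValue(Map.of("time", "23", "value", "42")));     // "note" column is left blank
    // writer.write(Value.asValue(Map.of("time", "24", "other", "x")));   // would throw: "other" is not a declared header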
@ -0,0 +1,13 @@
+csvoutput extension
+===================
+
+This extension makes it easy to start writing CSV data to a file,
+using a defined set of headers.
+
+### Examples
+
+Open a writer and write a row:
+
+    var out=csvoutput.open('output.csv','time','value');
+    out.write({'time':23,'value':23});
+
@ -0,0 +1,22 @@
+package io.nosqlbench.engine.extensions.csvoutput;
+
+import org.assertj.core.util.Files;
+import org.graalvm.polyglot.Value;
+import org.junit.jupiter.api.Test;
+
+import java.io.File;
+import java.util.Map;
+
+public class CsvOutputPluginWriterTest {
+
+    @Test
+    public void testCsvOutputWriter() {
+        File tmpfile = Files.newTemporaryFile();
+        tmpfile.deleteOnExit();
+        System.out.println("tmpfile="+ tmpfile.getPath());
+        CsvOutputPluginWriter out = new CsvOutputPluginWriter(tmpfile.getPath(), "one", "two");
+        out.write(Value.asValue(Map.of("one","one_","two","two_")));
+    }
+
+
+}
Some files were not shown because too many files have changed in this diff.