Merge branch 'main' into my-NB5-I446

This commit is contained in:
Mike Yaacoub
2022-12-19 16:58:50 -05:00
111 changed files with 6934 additions and 205 deletions

View File

@@ -1,12 +0,0 @@
- 114aea71b (HEAD -> main) Merge branch 'main' of github.com:nosqlbench/nosqlbench
- 0fd85c09b (origin/main) Merge pull request #728 from nosqlbench/feature/mongodb_baselines2_workloads
- 99f0226fc Merge pull request #729 from nosqlbench/snyk-upgrade-2d988862477c7e76f15ff8b65bcdc3a4
- d22413259 Merge pull request #727 from nosqlbench/snyk-upgrade-b28610be666fa7e80936b0c2f87df569
- d49a5087d improved build diagnostics
- e594aab92 (origin/snyk-upgrade-2d988862477c7e76f15ff8b65bcdc3a4) fix: upgrade com.datastax.oss:pulsar-jms from 2.4.4 to 2.4.9
- 64990c412 (origin/feature/mongodb_baselines2_workloads) Initial working draft of MongoDB timeseries
- 7ebb16a06 (origin/snyk-upgrade-b28610be666fa7e80936b0c2f87df569) fix: upgrade org.postgresql:postgresql from 42.4.2 to 42.5.0
- 68cdd075b new function, security updates, actions fix
- ec8e6ee71 Merge branch 'main' of github.com:nosqlbench/nosqlbench
- a63fa951c actions fix release
- 0f6bce0b0 fix typo in docker push logic

View File

@@ -20,7 +20,7 @@
<parent>
<groupId>io.nosqlbench</groupId>
<artifactId>mvn-defaults</artifactId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
<relativePath>../mvn-defaults</relativePath>
</parent>
@@ -38,7 +38,7 @@
<dependency>
<groupId>io.nosqlbench</groupId>
<artifactId>adapters-api</artifactId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
</dependency>
<dependency>

View File

@@ -24,8 +24,8 @@ import io.nosqlbench.adapter.cqld4.optypes.Cqld4CqlOp;
public class Cqld4CqlReboundStatement extends Cqld4CqlOp {
private final BoundStatement stmt;
public Cqld4CqlReboundStatement(CqlSession session, int maxpages, boolean retryreplace, BoundStatement rebound, RSProcessors processors) {
super(session,maxpages,retryreplace,processors);
public Cqld4CqlReboundStatement(CqlSession session, int maxPages, boolean retryReplace, int maxLwtRetries, int lwtRetryCount, BoundStatement rebound, RSProcessors processors) {
super(session,maxPages,retryReplace,maxLwtRetries,lwtRetryCount, processors);
this.stmt = rebound;
}

View File

@@ -0,0 +1,45 @@
/*
* Copyright (c) 2022 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.nosqlbench.adapter.cqld4.exceptions;
import com.datastax.oss.driver.api.core.cql.ResultSet;
/**
 * Synthetic error generated by the NoSQLBench CQL driver when the
 * retryreplace option is active but the number of LWT retry round-trips
 * from the driver becomes excessive. The allowed number of LWT round
 * trips is controlled by the maxlwtretries op field.
 */
public class ExceededRetryReplaceException extends CqlGenericCycleException {

    private final ResultSet resultSet;
    private final String queryString;
    private final int retries;

    public ExceededRetryReplaceException(ResultSet resultSet, String queryString, int retries) {
        super("After " + retries + " retries using the retryreplace option, Operation was not applied:" + queryString);
        this.resultSet = resultSet;
        this.queryString = queryString;
        this.retries = retries;
    }

    /** @return the driver result set that reported the unapplied change */
    public ResultSet getResultSet() {
        return resultSet;
    }

    /** @return the CQL text of the statement that was not applied */
    public String getQueryString() {
        return queryString;
    }
}

View File

@@ -45,12 +45,14 @@ public abstract class Cqld4BaseOpDispenser extends BaseOpDispenser<Cqld4CqlOp, C
private final Cqld4OpMetrics metrics = new Cqld4OpMetrics();
private final LongFunction<CqlSession> sessionFunc;
private final boolean isRetryReplace;
private final int maxLwtRetries;
public Cqld4BaseOpDispenser(DriverAdapter adapter, LongFunction<CqlSession> sessionFunc, ParsedOp op) {
super(adapter, op);
this.sessionFunc = sessionFunc;
this.maxpages = op.getStaticConfigOr("maxpages", 1);
this.isRetryReplace = op.getStaticConfigOr("retryreplace", false);
this.maxLwtRetries = op.getStaticConfigOr("maxlwtretries", 1);
}
public int getMaxPages() {
@@ -61,6 +63,11 @@ public abstract class Cqld4BaseOpDispenser extends BaseOpDispenser<Cqld4CqlOp, C
return isRetryReplace;
}
public int getMaxLwtRetries() {
return maxLwtRetries;
}
public LongFunction<CqlSession> getSessionFunc() {
return sessionFunc;
}

View File

@@ -89,6 +89,7 @@ public class Cqld4PreparedStmtDispenser extends Cqld4BaseOpDispenser {
boundStatement,
getMaxPages(),
isRetryReplace(),
getMaxLwtRetries(),
processors
);
} catch (Exception exception) {

View File

@@ -49,7 +49,8 @@ public class Cqld4RawStmtDispenser extends Cqld4BaseOpDispenser {
getSessionFunc().apply(value),
(SimpleStatement) stmtFunc.apply(value),
getMaxPages(),
isRetryReplace()
isRetryReplace(),
getMaxLwtRetries()
);
}

View File

@@ -46,7 +46,8 @@ public class Cqld4SimpleCqlStmtDispenser extends Cqld4BaseOpDispenser {
getSessionFunc().apply(value),
(SimpleStatement) stmtFunc.apply(value),
getMaxPages(),
isRetryReplace()
isRetryReplace(),
getMaxLwtRetries()
);
}

View File

@@ -24,8 +24,8 @@ public class Cqld4CqlBatchStatement extends Cqld4CqlOp {
private final BatchStatement stmt;
public Cqld4CqlBatchStatement(CqlSession session, BatchStatement stmt, int maxpages, boolean retryreplace) {
super(session,maxpages,retryreplace,new RSProcessors());
public Cqld4CqlBatchStatement(CqlSession session, BatchStatement stmt, int maxPage, int maxLwtRetries, boolean retryReplace) {
super(session,maxPage,retryReplace,maxLwtRetries,new RSProcessors());
this.stmt = stmt;
}

View File

@@ -23,6 +23,7 @@ import com.datastax.oss.driver.api.core.cql.Row;
import com.datastax.oss.driver.api.core.cql.Statement;
import io.nosqlbench.adapter.cqld4.*;
import io.nosqlbench.adapter.cqld4.exceptions.ChangeUnappliedCycleException;
import io.nosqlbench.adapter.cqld4.exceptions.ExceededRetryReplaceException;
import io.nosqlbench.adapter.cqld4.exceptions.UndefinedResultSetException;
import io.nosqlbench.adapter.cqld4.exceptions.UnexpectedPagingException;
import io.nosqlbench.engine.api.activityimpl.uniform.flowtypes.*;
@@ -44,17 +45,29 @@ import java.util.Map;
public abstract class Cqld4CqlOp implements CycleOp<ResultSet>, VariableCapture, OpGenerator, OpResultSize {
private final CqlSession session;
private final int maxpages;
private final boolean retryreplace;
private final int maxPages;
private final boolean retryReplace;
private final int maxLwtRetries;
private int retryReplaceCount =0;
private ResultSet rs;
private Cqld4CqlOp nextOp;
private final RSProcessors processors;
public Cqld4CqlOp(CqlSession session, int maxpages, boolean retryreplace, RSProcessors processors) {
public Cqld4CqlOp(CqlSession session, int maxPages, boolean retryReplace, int maxLwtRetries, RSProcessors processors) {
this.session = session;
this.maxpages = maxpages;
this.retryreplace = retryreplace;
this.maxPages = maxPages;
this.retryReplace = retryReplace;
this.maxLwtRetries =maxLwtRetries;
this.processors = processors;
}
protected Cqld4CqlOp(CqlSession session, int maxPages, boolean retryReplace, int maxLwtRetries, int retryRplaceCount, RSProcessors processors) {
this.session = session;
this.maxPages = maxPages;
this.retryReplace = retryReplace;
this.maxLwtRetries =maxLwtRetries;
this.retryReplaceCount=retryRplaceCount;
this.processors = processors;
}
@@ -66,9 +79,13 @@ public abstract class Cqld4CqlOp implements CycleOp<ResultSet>, VariableCapture,
int totalRows = 0;
if (!rs.wasApplied()) {
if (!retryreplace) {
if (!retryReplace) {
throw new ChangeUnappliedCycleException(rs, getQueryString());
} else {
retryReplaceCount++;
if (retryReplaceCount >maxLwtRetries) {
throw new ExceededRetryReplaceException(rs,getQueryString(), retryReplaceCount);
}
Row one = rs.one();
processors.buffer(one);
totalRows++;
@@ -86,8 +103,8 @@ public abstract class Cqld4CqlOp implements CycleOp<ResultSet>, VariableCapture,
Row row = reader.next();
processors.buffer(row);
}
if (pages++ > maxpages) {
throw new UnexpectedPagingException(rs, getQueryString(), pages, maxpages, stmt.getPageSize());
if (pages++ > maxPages) {
throw new UnexpectedPagingException(rs, getQueryString(), pages, maxPages, stmt.getPageSize());
}
if (rs.isFullyFetched()) {
break;
@@ -119,7 +136,7 @@ public abstract class Cqld4CqlOp implements CycleOp<ResultSet>, VariableCapture,
private Cqld4CqlOp rebindLwt(Statement<?> stmt, Row row) {
BoundStatement rebound = LWTRebinder.rebindUnappliedStatement(stmt, row);
return new Cqld4CqlReboundStatement(session, maxpages, retryreplace, rebound, processors);
return new Cqld4CqlReboundStatement(session, maxPages, retryReplace, maxLwtRetries, retryReplaceCount, rebound, processors);
}
}

View File

@@ -24,8 +24,8 @@ public class Cqld4CqlPreparedStatement extends Cqld4CqlOp {
private final BoundStatement stmt;
public Cqld4CqlPreparedStatement(CqlSession session, BoundStatement stmt, int maxpages, boolean retryreplace, RSProcessors processors) {
super(session,maxpages,retryreplace,processors);
public Cqld4CqlPreparedStatement(CqlSession session, BoundStatement stmt, int maxPages, boolean retryReplace, int maxLwtRetries, RSProcessors processors) {
super(session,maxPages,retryReplace,maxLwtRetries,processors);
this.stmt = stmt;
}

View File

@@ -23,8 +23,8 @@ import io.nosqlbench.adapter.cqld4.RSProcessors;
public class Cqld4CqlSimpleStatement extends Cqld4CqlOp {
private final SimpleStatement stmt;
public Cqld4CqlSimpleStatement(CqlSession session, SimpleStatement stmt, int maxpages, boolean retryreplace) {
super(session, maxpages,retryreplace, new RSProcessors());
public Cqld4CqlSimpleStatement(CqlSession session, SimpleStatement stmt, int maxPages, boolean retryReplace, int maxLwtRetries) {
super(session, maxPages,retryReplace, maxLwtRetries, new RSProcessors());
this.stmt = stmt;
}

View File

@@ -21,6 +21,8 @@
* Project : cql-parser; an ANTLR4 grammar for Apache Cassandra CQL https://github.com/kdcro101cql-parser
*/
// TODO: Add support for
// CREATE CUSTOM INDEX idxname ON ksname.tbname (fieldname) USING 'StorageAttachedIndex';
parser grammar CqlParser;
options

View File

@@ -175,7 +175,11 @@ public class CqlModelBuilder extends CqlParserBaseListener {
@Override
public void exitTableOptionItem(CqlParser.TableOptionItemContext ctx) {
if (table!=null) {
table.setCompactStorage(ctx.kwCompactStorage() != null);
} else {
logger.debug("table option item found with no table, this is likely for a materialized view");
}
}
@Override
@@ -196,8 +200,12 @@ public class CqlModelBuilder extends CqlParserBaseListener {
.map(c -> c.getText())
.toList();
if (table!=null) {
IntStream.range(0, columns.size())
.forEach(i -> table.addTableClusteringOrder(columns.get(i), orders.get(i)));
} else {
logger.debug("clustering order found, but not active table. This is likely for a materialized view.");
}
}
private String textOf(ParserRuleContext ctx) {

View File

@@ -198,6 +198,11 @@ params:
# match the preconditions) in order to test LWT performance.
retryreplace: true
# Set the number of retries allowed by the retryreplace option. This is set
# to 1 conservatively, as with the maxpages setting. This means that you will
# see an error if the first LWT retry after an unapplied change was not successful.
maxlwtretries: 1
## The following options are meant for advanced testing scenarios only,
## and are not generally meant to be used in typical application-level,
## data mode, performance or scale testing. These expose properties

View File

@@ -13,11 +13,14 @@ bindings:
float: ToFloat();
map<text,timestamp>: MapSized(3,Combinations('A-Z;0-9', ToJavaInstant()));
frozen<list<int>>: ListSizedHashed(HashRange(3,7),ToInt()));
list<text>: ListStepped(ToString(),ToString())
list<text>: ListStepped(ToString(),ToString());
list<ascii>: ListStepped(ToString(),ToString())
map<text,text>: MapSized(3, Combinations('A-Z;0-9'), ToString(), ToString());
map<ascii,text>: MapSized(3, Combinations('A-Z;0-9'), ToString(), ToString());
map<int,int>: MapSized(3, ToInt(), ToInt());
counter: HashRange(1,3);
set<text>: SetSized(HashRange(3,4),ToString()));
set<ascii>: SetSized(HashRange(3,4),ToString()));
smallint: ToShort();
time: StartingEpochMillis('2022-01-01 00:00:00'); ToLocalTime();
timestamp: ToJavaInstant();

View File

@@ -21,7 +21,7 @@
<parent>
<artifactId>mvn-defaults</artifactId>
<groupId>io.nosqlbench</groupId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
<relativePath>../mvn-defaults</relativePath>
</parent>
@@ -37,13 +37,13 @@
<dependency>
<groupId>io.nosqlbench</groupId>
<artifactId>nb-annotations</artifactId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>io.nosqlbench</groupId>
<artifactId>engine-api</artifactId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
<scope>compile</scope>
</dependency>

View File

@@ -20,7 +20,7 @@
<parent>
<groupId>io.nosqlbench</groupId>
<artifactId>mvn-defaults</artifactId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
<relativePath>../mvn-defaults</relativePath>
</parent>
@@ -39,13 +39,13 @@
<dependency>
<groupId>io.nosqlbench</groupId>
<artifactId>adapters-api</artifactId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>com.amazonaws</groupId>
<artifactId>aws-java-sdk-dynamodb</artifactId>
<version>1.12.348</version>
<version>1.12.364</version>
</dependency>
</dependencies>

View File

@@ -20,7 +20,7 @@
<parent>
<artifactId>mvn-defaults</artifactId>
<groupId>io.nosqlbench</groupId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
<relativePath>../mvn-defaults</relativePath>
</parent>
@@ -38,7 +38,7 @@
<dependency>
<groupId>io.nosqlbench</groupId>
<artifactId>engine-api</artifactId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
<scope>compile</scope>
</dependency>

81
adapter-kafka/pom.xml Normal file
View File

@@ -0,0 +1,81 @@
<!--
~ Copyright (c) 2022 nosqlbench
~
~ Licensed under the Apache License, Version 2.0 (the "License");
~ you may not use this file except in compliance with the License.
~ You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<artifactId>adapter-kafka</artifactId>
<packaging>jar</packaging>
<parent>
<artifactId>mvn-defaults</artifactId>
<groupId>io.nosqlbench</groupId>
<version>4.17.32-SNAPSHOT</version>
<relativePath>../mvn-defaults</relativePath>
</parent>
<name>${project.artifactId}</name>
<description>
A Kafka driver for nosqlbench. This provides the ability to inject synthetic data
into a Kafka or a Kafka-compatible (e.g. Pulsar with S4K) system.
</description>
<properties>
<kafka.version>3.3.1</kafka.version>
</properties>
<dependencies>
<dependency>
<groupId>io.nosqlbench</groupId>
<artifactId>engine-api</artifactId>
<version>4.17.32-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>io.nosqlbench</groupId>
<artifactId>adapters-api</artifactId>
<version>4.17.32-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka-clients</artifactId>
<version>${kafka.version}</version>
</dependency>
<!-- https://mvnrepository.com/artifact/org.apache.commons/commons-lang3 -->
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-lang3</artifactId>
<version>3.12.0</version>
</dependency>
<!-- https://mvnrepository.com/artifact/commons-beanutils/commons-beanutils -->
<dependency>
<groupId>commons-beanutils</groupId>
<artifactId>commons-beanutils</artifactId>
<version>1.9.4</version>
</dependency>
<!-- https://mvnrepository.com/artifact/org.apache.commons/commons-configuration2 -->
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-configuration2</artifactId>
<version>2.8.0</version>
</dependency>
</dependencies>
</project>

View File

@@ -0,0 +1,52 @@
/*
* Copyright (c) 2022 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.nosqlbench.adapter.kafka;
import io.nosqlbench.adapter.kafka.ops.KafkaOp;
import io.nosqlbench.api.config.standard.NBConfigModel;
import io.nosqlbench.api.config.standard.NBConfiguration;
import io.nosqlbench.engine.api.activityimpl.OpMapper;
import io.nosqlbench.engine.api.activityimpl.uniform.BaseDriverAdapter;
import io.nosqlbench.engine.api.activityimpl.uniform.DriverAdapter;
import io.nosqlbench.engine.api.activityimpl.uniform.DriverSpaceCache;
import io.nosqlbench.nb.annotations.Service;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import java.util.function.Function;
/**
 * Driver adapter entry point for the NoSQLBench Kafka adapter,
 * registered under the "kafka" selector.
 */
@Service(value = DriverAdapter.class, selector = "kafka")
public class KafkaDriverAdapter extends BaseDriverAdapter<KafkaOp, KafkaSpace> {

    private final static Logger logger = LogManager.getLogger(KafkaDriverAdapter.class);

    @Override
    public OpMapper<KafkaOp> getOpMapper() {
        // Give the mapper access to per-space state and the adapter configuration.
        DriverSpaceCache<? extends KafkaSpace> cache = getSpaceCache();
        NBConfiguration config = getConfiguration();
        return new KafkaOpMapper(this, config, cache);
    }

    @Override
    public Function<String, ? extends KafkaSpace> getSpaceInitializer(NBConfiguration cfg) {
        // Each named space gets its own KafkaSpace built from the adapter config.
        return spaceName -> new KafkaSpace(spaceName, cfg);
    }

    @Override
    public NBConfigModel getConfigModel() {
        // Base adapter parameters plus the Kafka-specific ones.
        return super.getConfigModel().add(KafkaSpace.getConfigModel());
    }
}

View File

@@ -0,0 +1,71 @@
/*
* Copyright (c) 2022 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.nosqlbench.adapter.kafka;
import io.nosqlbench.adapter.kafka.dispensers.MessageConsumerOpDispenser;
import io.nosqlbench.adapter.kafka.dispensers.MessageProducerOpDispenser;
import io.nosqlbench.adapter.kafka.ops.KafkaOp;
import io.nosqlbench.api.config.standard.NBConfiguration;
import io.nosqlbench.engine.api.activityimpl.OpDispenser;
import io.nosqlbench.engine.api.activityimpl.OpMapper;
import io.nosqlbench.engine.api.activityimpl.uniform.DriverAdapter;
import io.nosqlbench.engine.api.activityimpl.uniform.DriverSpaceCache;
import io.nosqlbench.engine.api.templating.ParsedOp;
import io.nosqlbench.engine.api.templating.TypeAndTarget;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
/**
 * Maps a parsed NoSQLBench op template onto a concrete Kafka op dispenser
 * (producer or consumer), resolving the target space first.
 */
public class KafkaOpMapper implements OpMapper<KafkaOp> {

    private final static Logger logger = LogManager.getLogger(KafkaOpMapper.class);

    private final NBConfiguration cfg;
    private final DriverSpaceCache<? extends KafkaSpace> spaceCache;
    private final DriverAdapter adapter;

    public KafkaOpMapper(DriverAdapter adapter, NBConfiguration cfg, DriverSpaceCache<? extends KafkaSpace> spaceCache) {
        this.adapter = adapter;
        this.cfg = cfg;
        this.spaceCache = spaceCache;
    }

    @Override
    public OpDispenser<? extends KafkaOp> apply(ParsedOp op) {
        String targetSpace = op.getStaticConfigOr("space", "default");
        KafkaSpace kafkaSpace = spaceCache.get(targetSpace);

        /*
         * A 'body' element would mean the user wants to provide JSON (or a
         * structure convertible to JSON) directly, bypassing any further
         * specialized type-checking or op-type specific features. That mode
         * is reserved for later.
         */
        if (op.isDefined("body")) {
            throw new RuntimeException("This mode is reserved for later. Do not use the 'body' op field.");
        }

        TypeAndTarget<KafkaOpType, String> opKind = op.getTypeAndTarget(KafkaOpType.class, String.class);
        return switch (opKind.enumId) {
            case MessageProduce ->
                new MessageProducerOpDispenser(adapter, op, opKind.targetFunction, kafkaSpace);
            case MessageConsume ->
                new MessageConsumerOpDispenser(adapter, op, opKind.targetFunction, kafkaSpace);
        };
    }
}

View File

@@ -0,0 +1,26 @@
/*
* Copyright (c) 2022 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.nosqlbench.adapter.kafka;
/**
 * The kinds of Kafka operations this adapter can dispense.
 */
public enum KafkaOpType {
    /** Publish messages via a Kafka producer. */
    MessageProduce,
    /** Subscribe to and read messages via a Kafka consumer. */
    MessageConsume
}

View File

@@ -0,0 +1,157 @@
/*
* Copyright (c) 2022 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.nosqlbench.adapter.kafka;
import io.nosqlbench.adapter.kafka.exception.KafkaAdapterUnexpectedException;
import io.nosqlbench.adapter.kafka.ops.OpTimeTrackKafkaClient;
import io.nosqlbench.adapter.kafka.util.KafkaAdapterUtil;
import io.nosqlbench.adapter.kafka.util.KafkaClientConf;
import io.nosqlbench.api.config.standard.ConfigModel;
import io.nosqlbench.api.config.standard.NBConfigModel;
import io.nosqlbench.api.config.standard.NBConfiguration;
import io.nosqlbench.api.config.standard.Param;
import org.apache.commons.lang3.BooleanUtils;
import org.apache.commons.lang3.math.NumberUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicBoolean;
public class KafkaSpace implements AutoCloseable {
private final static Logger logger = LogManager.getLogger(KafkaSpace.class);
private final String spaceName;
private final NBConfiguration cfg;
// TODO: currently this NB Kafka driver only supports String type for message key and value
// add schema support in the future
private final ConcurrentHashMap<String, OpTimeTrackKafkaClient> opTimeTrackKafkaClients = new ConcurrentHashMap<>();
private final String bootstrapSvr;
private final String kafkaClientConfFileName;
private final KafkaClientConf kafkaClientConf;
// Whether to do strict error handling while sending/receiving messages
// - Yes: any error returned from the Pulsar server while doing message receiving/sending will trigger NB execution stop
// - No: pause the current thread that received the error message for 1 second and then continue processing
private final boolean strictMsgErrorHandling;
// Maximum time length to execute S4J operations (e.g. message send or consume)
// - when NB execution passes this threshold, it is simply NoOp
// - 0 means no maximum time constraint. S4JOp is always executed until NB execution cycle finishes
private final long maxOpTimeInSec;
private final long activityStartTimeMills;
// Maximum number of Kafka clients
// - For Producer workload, this represents how many total producers to publish messages
// it must be the same value as the NB "threads" parameter
// - For Consumer workload, this represents how many total consumers per consumer group to subscribe messages
private final int kafkaClntNum;
// Maximum number of Kafka consumer groups
// - Only relevant for Consumer workload
// - (topicPartNum * consumerGrpNum) is the total consumer thread number and must be the same
// as the NB "threads" parameter
// - For multi-topic testing, this means one consumer thread may read from multiple topics.
private final int consumerGrpNum;
private long totalCycleNum;
private AtomicBoolean beingShutdown = new AtomicBoolean(false);
public KafkaSpace(String spaceName, NBConfiguration cfg) {
this.spaceName = spaceName;
this.cfg = cfg;
this.bootstrapSvr = cfg.get("bootstrap_server");
this.kafkaClntNum =
NumberUtils.toInt(cfg.getOptional("num_clnt").orElse("1"));
this.consumerGrpNum =
NumberUtils.toInt(cfg.getOptional("num_cons_grp").orElse("1"));
this.maxOpTimeInSec =
NumberUtils.toLong(cfg.getOptional("max_op_time").orElse("0L"));
this.strictMsgErrorHandling =
BooleanUtils.toBoolean(cfg.getOptional("strict_msg_error_handling").orElse("false"));
this.kafkaClientConfFileName = cfg.get("config");
this.kafkaClientConf = new KafkaClientConf(kafkaClientConfFileName);
this.activityStartTimeMills = System.currentTimeMillis();
}
@Override
public void close() {
shutdownSpace();
}
public static NBConfigModel getConfigModel() {
return ConfigModel.of(KafkaSpace.class)
.add(Param.defaultTo("bootstrap_server", "pulsar://localhost:9020")
.setDescription("Kafka bootstrap server URL."))
.add(Param.defaultTo("config", "config.properties")
.setDescription("Kafka client connection configuration property file."))
.add(Param.defaultTo("num_clnt", 1)
.setDescription("Number of Kafka clients. For consumer, this is the number of consumers per consumer group"))
.add(Param.defaultTo("num_cons_grp", 1)
.setDescription("Number of consumer groups (only relevant for Kafka consumer workload). "))
.add(Param.defaultTo("max_op_time", 0)
.setDescription("Maximum time (in seconds) to run NB Kafka testing scenario."))
.add(Param.defaultTo("strict_msg_error_handling", false)
.setDescription("Whether to do strict error handling which is to stop NB Kafka execution."))
.asReadOnly();
}
public OpTimeTrackKafkaClient getOpTimeTrackKafkaClient(String cacheKey) {
return opTimeTrackKafkaClients.get(cacheKey);
}
public void addOpTimeTrackKafkaClient(String cacheKey, OpTimeTrackKafkaClient client) {
opTimeTrackKafkaClients.put(cacheKey, client);
}
public long getActivityStartTimeMills() { return this.activityStartTimeMills; }
public long getMaxOpTimeInSec() { return this.maxOpTimeInSec; }
public String getBootstrapSvr() { return this.bootstrapSvr; }
public KafkaClientConf getKafkaClientConf() { return kafkaClientConf; }
public int getKafkaClntNum() { return this.kafkaClntNum; }
public int getConsumerGrpNum() { return this.consumerGrpNum; }
public boolean isStrictMsgErrorHandling() { return this.strictMsgErrorHandling; }
public long getTotalCycleNum() { return totalCycleNum; }
public void setTotalCycleNum(long cycleNum) { totalCycleNum = cycleNum; }
public boolean isShuttigDown() {
return beingShutdown.get();
}
public void shutdownSpace() {
try {
beingShutdown.set(true);
for (OpTimeTrackKafkaClient client : opTimeTrackKafkaClients.values()) {
client.close();
}
// Pause 5 seconds before closing producers/consumers
KafkaAdapterUtil.pauseCurThreadExec(5);
}
catch (Exception e) {
e.printStackTrace();
throw new KafkaAdapterUnexpectedException("Unexpected error when shutting down NB S4J space.");
}
}
}

View File

@@ -0,0 +1,135 @@
package io.nosqlbench.adapter.kafka.dispensers;
/*
* Copyright (c) 2022 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import io.nosqlbench.adapter.kafka.KafkaSpace;
import io.nosqlbench.adapter.kafka.exception.KafkaAdapterInvalidParamException;
import io.nosqlbench.adapter.kafka.ops.KafkaOp;
import io.nosqlbench.adapter.kafka.util.KafkaAdapterMetrics;
import io.nosqlbench.adapter.kafka.util.KafkaAdapterUtil;
import io.nosqlbench.engine.api.activityimpl.BaseOpDispenser;
import io.nosqlbench.engine.api.activityimpl.uniform.DriverAdapter;
import io.nosqlbench.engine.api.templating.ParsedOp;
import org.apache.commons.lang3.BooleanUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.math.NumberUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import java.util.*;
import java.util.function.LongFunction;
import java.util.function.Predicate;
public abstract class KafkaBaseOpDispenser extends BaseOpDispenser<KafkaOp, KafkaSpace> {
private final static Logger logger = LogManager.getLogger("PulsarBaseOpDispenser");
protected final ParsedOp parsedOp;
protected final KafkaAdapterMetrics kafkaAdapterMetrics;
protected final KafkaSpace kafkaSpace;
protected final int kafkaClntCnt;
protected final int consumerGrpCnt;
// Doc-level parameter: async_api (default: true)
// - For Producer workload, this means waiting for message send ack. synchronously or asynchronously
// - For Consumer workload, this means doing manual message commit synchronously or asynchronously
// Only relevant when auto.commit is disabled
protected final boolean asyncAPI;
protected final LongFunction<String> topicNameStrFunc;
protected final Map<String, String> topicConfMap = new HashMap<>();
protected final int totalThreadNum;
protected final long totalCycleNum;
public KafkaBaseOpDispenser(DriverAdapter adapter,
ParsedOp op,
LongFunction<String> topicNameStrFunc,
KafkaSpace kafkaSpace) {
super(adapter, op);
this.parsedOp = op;
this.kafkaSpace = kafkaSpace;
String defaultMetricsPrefix = getDefaultMetricsPrefix(this.parsedOp);
this.kafkaAdapterMetrics = new KafkaAdapterMetrics(defaultMetricsPrefix);
kafkaAdapterMetrics.initS4JAdapterInstrumentation();
this.asyncAPI =
parsedOp.getStaticConfigOr(KafkaAdapterUtil.DOC_LEVEL_PARAMS.ASYNC_API.label, Boolean.TRUE);
this.topicNameStrFunc = topicNameStrFunc;
this.topicConfMap.putAll(kafkaSpace.getKafkaClientConf().getTopicConfMap());
this.totalCycleNum = NumberUtils.toLong(parsedOp.getStaticConfig("cycles", String.class));
kafkaSpace.setTotalCycleNum(totalCycleNum);
this.kafkaClntCnt = kafkaSpace.getKafkaClntNum();
this.consumerGrpCnt = kafkaSpace.getConsumerGrpNum();
this.totalThreadNum = NumberUtils.toInt(parsedOp.getStaticConfig("threads", String.class));
assert (kafkaClntCnt > 0);
assert (consumerGrpCnt > 0);
boolean validThreadNum =
( ((this instanceof MessageProducerOpDispenser) && (totalThreadNum == kafkaClntCnt)) ||
((this instanceof MessageConsumerOpDispenser) && (totalThreadNum == kafkaClntCnt*consumerGrpCnt)) );
if (!validThreadNum) {
throw new KafkaAdapterInvalidParamException(
"Incorrect settings of 'threads', 'num_clnt', or 'num_cons_grp' -- " +
totalThreadNum + ", " + kafkaClntCnt + ", " + consumerGrpCnt);
}
}
// Accessors used by the op implementations created from this dispenser.
public KafkaSpace getKafkaSpace() { return kafkaSpace; }
public KafkaAdapterMetrics getKafkaAdapterMetrics() { return kafkaAdapterMetrics; }
/**
 * Resolves a static boolean config value for the given parameter; blank or absent
 * values fall back to {@code defaultValue}. The resolved value is logged once
 * (evaluated at cycle 0) before the function is returned.
 */
protected LongFunction<Boolean> lookupStaticBoolConfigValueFunc(String paramName, boolean defaultValue) {
    final LongFunction<Boolean> resolver = cycle ->
        parsedOp.getOptionalStaticConfig(paramName, String.class)
            .filter(Predicate.not(String::isEmpty))
            .map(BooleanUtils::toBoolean)
            .orElse(defaultValue);
    logger.info("{}: {}", paramName, resolver.apply(0));
    return resolver;
}
/**
 * Resolves an optional string-valued Op parameter. If the op template does not
 * provide the parameter, every cycle maps to {@code defaultValue}. The value at
 * cycle 0 is logged once for visibility.
 */
protected LongFunction<String> lookupOptionalStrOpValueFunc(String paramName, String defaultValue) {
    final LongFunction<String> fallback = cycle -> defaultValue;
    LongFunction<String> valueFunc =
        parsedOp.getAsOptionalFunction(paramName, String.class).orElse(fallback);
    logger.info("{}: {}", paramName, valueFunc.apply(0));
    return valueFunc;
}
// Convenience overload: optional Op parameter defaulting to the empty string.
protected LongFunction<String> lookupOptionalStrOpValueFunc(String paramName) {
    return lookupOptionalStrOpValueFunc(paramName, "");
}
// Mandatory Op parameter. Throw an error if not specified or having empty value.
// NOTE(review): "Mandtory" is a typo for "Mandatory", but this protected method is
// called by subclasses (e.g. MessageProducerOpDispenser), so renaming it needs a
// coordinated change rather than a local fix.
protected LongFunction<String> lookupMandtoryStrOpValueFunc(String paramName) {
    LongFunction<String> stringLongFunction;
    stringLongFunction = parsedOp.getAsRequiredFunction(paramName, String.class);
    // Evaluated at cycle 0 purely for logging; assumes cycle 0 is representative.
    logger.info("{}: {}", paramName, stringLongFunction.apply(0));
    return stringLongFunction;
}
}

View File

@@ -0,0 +1,157 @@
/*
* Copyright (c) 2022 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.nosqlbench.adapter.kafka.dispensers;
import io.nosqlbench.adapter.kafka.KafkaSpace;
import io.nosqlbench.adapter.kafka.exception.KafkaAdapterInvalidParamException;
import io.nosqlbench.adapter.kafka.ops.KafkaOp;
import io.nosqlbench.adapter.kafka.ops.OpTimeTrackKafkaClient;
import io.nosqlbench.adapter.kafka.ops.OpTimeTrackKafkaConsumer;
import io.nosqlbench.adapter.kafka.util.KafkaAdapterUtil;
import io.nosqlbench.engine.api.activityimpl.uniform.DriverAdapter;
import io.nosqlbench.engine.api.templating.ParsedOp;
import org.apache.commons.lang3.BooleanUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.math.NumberUtils;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import java.util.*;
import java.util.function.LongFunction;
import java.util.stream.Collectors;
/**
 * Dispenses Kafka message-consuming ops. One KafkaConsumer is created and cached
 * per (cycle % num_clnt, topic list, group id) combination; each cycle resolves the
 * effective topic list and consumer group, then hands the cached consumer wrapper
 * to a {@link KafkaOp}.
 */
public class MessageConsumerOpDispenser extends KafkaBaseOpDispenser {

    private final static Logger logger = LogManager.getLogger("MessageConsumerOpDispenser");

    // Effective consumer client settings: global config file values plus the
    // bootstrap-server override applied in the constructor.
    private final Map<String, String> consumerClientConfMap = new HashMap<>();

    // The timeout value used as the message poll interval.
    // NOTE(review): the name says seconds, but the value is passed straight into
    // OpTimeTrackKafkaConsumer's "msgPoolIntervalInMs" parameter (milliseconds) --
    // confirm which unit is intended.
    protected final int msgPollIntervalInSec;

    // Manual commit frequency (# of received messages between commits).
    // Only relevant when the effective setting (global level and statement level)
    // of "enable.auto.commit" is false.
    protected final int maxMsgCntPerCommit;

    // Whether the Kafka consumer commits offsets automatically; derived below.
    protected boolean autoCommitEnabled;

    public MessageConsumerOpDispenser(DriverAdapter adapter,
                                      ParsedOp op,
                                      LongFunction<String> tgtNameFunc,
                                      KafkaSpace kafkaSpace) {
        super(adapter, op, tgtNameFunc, kafkaSpace);

        this.consumerClientConfMap.putAll(kafkaSpace.getKafkaClientConf().getConsumerConfMap());
        consumerClientConfMap.put("bootstrap.servers", kafkaSpace.getBootstrapSvr());

        this.msgPollIntervalInSec =
            NumberUtils.toInt(parsedOp.getStaticConfigOr("msg_poll_interval", "0"));
        this.maxMsgCntPerCommit =
            NumberUtils.toInt(parsedOp.getStaticConfig("manual_commit_batch_num", String.class));

        // A positive manual-commit batch size forces auto-commit off; otherwise honor
        // whatever "enable.auto.commit" says in the client config (default: true).
        this.autoCommitEnabled = true;
        if (maxMsgCntPerCommit > 0) {
            this.autoCommitEnabled = false;
            consumerClientConfMap.put("enable.auto.commit", "false");
        } else if (consumerClientConfMap.containsKey("enable.auto.commit")) {
            this.autoCommitEnabled = BooleanUtils.toBoolean(consumerClientConfMap.get("enable.auto.commit"));
        }
    }

    // Derive the consumer group id for this cycle: the configured "group.id" (or the
    // adapter's default prefix) suffixed with (cycle % consumerGrpCnt).
    private String getEffectiveGroupId(long cycle) {
        int grpIdx = (int) (cycle % consumerGrpCnt);
        String groupNamePrefix = consumerClientConfMap.getOrDefault(
            "group.id", KafkaAdapterUtil.DFT_CONSUMER_GROUP_NAME_PREFIX);
        return groupNamePrefix + "-" + grpIdx;
    }

    // Look up (or lazily create, subscribe, and cache) the consumer wrapper for this
    // cycle's client slot, topic list, and group id.
    private OpTimeTrackKafkaClient getOrCreateOpTimeTrackKafkaConsumer(
        long cycle,
        List<String> topicNameList,
        String groupId)
    {
        String topicNameListStr = String.join("::", topicNameList);
        String cacheKey = KafkaAdapterUtil.buildCacheKey(
            "consumer-" + (cycle % kafkaClntCnt), topicNameListStr, groupId);

        OpTimeTrackKafkaClient opTimeTrackKafkaClient = kafkaSpace.getOpTimeTrackKafkaClient(cacheKey);
        if (opTimeTrackKafkaClient == null) {
            Properties consumerConfProps = new Properties();
            consumerConfProps.putAll(consumerClientConfMap);
            consumerConfProps.put("group.id", groupId);

            KafkaConsumer<String, String> consumer = new KafkaConsumer<>(consumerConfProps);
            // KafkaConsumer is not thread-safe; serialize the subscribe call.
            synchronized (this) {
                consumer.subscribe(topicNameList);
            }
            if (logger.isDebugEnabled()) {
                logger.debug("Kafka consumer created: {}/{} -- {}, {}, {}",
                    cacheKey,
                    consumer,
                    topicNameList,
                    autoCommitEnabled,
                    maxMsgCntPerCommit);
            }

            opTimeTrackKafkaClient = new OpTimeTrackKafkaConsumer(
                kafkaSpace, asyncAPI, msgPollIntervalInSec, autoCommitEnabled, maxMsgCntPerCommit, consumer);
            kafkaSpace.addOpTimeTrackKafkaClient(cacheKey, opTimeTrackKafkaClient);
        }

        return opTimeTrackKafkaClient;
    }

    // Split the per-cycle topic spec (comma-separated) into a list of non-blank topic names.
    protected List<String> getEffectiveTopicNameList(long cycle) {
        String explicitTopicListStr = topicNameStrFunc.apply(cycle);
        assert (StringUtils.isNotBlank(explicitTopicListStr));

        return Arrays.stream(StringUtils.split(explicitTopicListStr, ','))
            .filter(StringUtils::isNotBlank)
            .toList();
    }

    /**
     * Builds the op for one cycle, failing fast when neither a topic list nor a
     * consumer group id can be resolved.
     */
    @Override
    public KafkaOp apply(long cycle) {
        List<String> topicNameList = getEffectiveTopicNameList(cycle);
        String groupId = getEffectiveGroupId(cycle);
        if (topicNameList.isEmpty() || StringUtils.isBlank(groupId)) {
            throw new KafkaAdapterInvalidParamException(
                "Effective consumer group name and/or topic names are needed for creating a consumer!");
        }

        OpTimeTrackKafkaClient opTimeTrackKafkaConsumer =
            getOrCreateOpTimeTrackKafkaConsumer(cycle, topicNameList, groupId);

        // cycleObj is null for consumers: the wrapped consumer polls its own records.
        return new KafkaOp(
            kafkaAdapterMetrics,
            kafkaSpace,
            opTimeTrackKafkaConsumer,
            null);
    }
}

View File

@@ -0,0 +1,211 @@
/*
* Copyright (c) 2022 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.nosqlbench.adapter.kafka.dispensers;
import io.nosqlbench.adapter.kafka.KafkaSpace;
import io.nosqlbench.adapter.kafka.exception.KafkaAdapterInvalidParamException;
import io.nosqlbench.adapter.kafka.ops.KafkaOp;
import io.nosqlbench.adapter.kafka.ops.OpTimeTrackKafkaClient;
import io.nosqlbench.adapter.kafka.ops.OpTimeTrackKafkaProducer;
import io.nosqlbench.adapter.kafka.util.KafkaAdapterUtil;
import io.nosqlbench.engine.api.activityimpl.uniform.DriverAdapter;
import io.nosqlbench.engine.api.templating.ParsedOp;
import org.apache.commons.lang3.StringUtils;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.function.LongFunction;
/**
 * Dispenses Kafka message-producing ops. One KafkaProducer is created and cached
 * per (cycle % num_clnt, topic) combination; each cycle builds a ProducerRecord
 * from the op template's msg_header / msg_key / msg_body bindings and hands it,
 * together with the cached producer wrapper, to a {@link KafkaOp}.
 */
public class MessageProducerOpDispenser extends KafkaBaseOpDispenser {

    private final static Logger logger = LogManager.getLogger("MessageProducerOpDispenser");

    // Op-template parameter names used to build each message.
    public static final String MSG_HEADER_OP_PARAM = "msg_header";
    public static final String MSG_KEY_OP_PARAM = "msg_key";
    public static final String MSG_BODY_OP_PARAM = "msg_body";

    // Effective producer client settings: global config file values plus the
    // bootstrap-server override applied in the constructor.
    private final Map<String, String> producerClientConfMap = new HashMap<>();

    // Number of messages per Kafka transaction; values < 2 effectively disable transactions.
    protected final int txnBatchNum;

    // Per-cycle value functions resolved from the op template.
    private final LongFunction<String> msgHeaderJsonStrFunc;
    private final LongFunction<String> msgKeyStrFunc;
    private final LongFunction<String> msgValueStrFunc;

    public MessageProducerOpDispenser(DriverAdapter adapter,
                                      ParsedOp op,
                                      LongFunction<String> tgtNameFunc,
                                      KafkaSpace kafkaSpace) {
        super(adapter, op, tgtNameFunc, kafkaSpace);

        this.producerClientConfMap.putAll(kafkaSpace.getKafkaClientConf().getProducerConfMap());
        producerClientConfMap.put("bootstrap.servers", kafkaSpace.getBootstrapSvr());
        this.txnBatchNum = parsedOp.getStaticConfigOr("txn_batch_num", Integer.valueOf(0));
        this.msgHeaderJsonStrFunc = lookupOptionalStrOpValueFunc(MSG_HEADER_OP_PARAM);
        this.msgKeyStrFunc = lookupOptionalStrOpValueFunc(MSG_KEY_OP_PARAM);
        // msg_body is mandatory; the lookup throws if it is missing or empty.
        this.msgValueStrFunc = lookupMandtoryStrOpValueFunc(MSG_BODY_OP_PARAM);
    }

    // Build a per-cycle client id from the configured "client.id" prefix, or return ""
    // (meaning: let the Kafka client pick its own id) when no prefix is configured.
    private String getEffectiveClientId(long cycle) {
        if (producerClientConfMap.containsKey("client.id")) {
            String defaultClientIdPrefix = producerClientConfMap.get("client.id");
            int clntIdx = (int) (cycle % kafkaClntCnt);

            return defaultClientIdPrefix + "-" + clntIdx;
        }
        else {
            return "";
        }
    }

    // Look up (or lazily create and cache) the producer wrapper for this cycle's
    // client slot and topic. Transaction support is derived from "transactional.id"
    // and txn_batch_num.
    private OpTimeTrackKafkaClient getOrCreateOpTimeTrackKafkaProducer(long cycle,
                                                                       String topicName,
                                                                       String clientId)
    {
        String cacheKey = KafkaAdapterUtil.buildCacheKey(
            "producer-" + String.valueOf(cycle % kafkaClntCnt), topicName);

        OpTimeTrackKafkaClient opTimeTrackKafkaClient = kafkaSpace.getOpTimeTrackKafkaClient(cacheKey);
        if (opTimeTrackKafkaClient == null) {
            Properties producerConfProps = new Properties();
            producerConfProps.putAll(producerClientConfMap);

            if (StringUtils.isNotBlank(clientId))
                producerConfProps.put("client.id", clientId);
            else
                producerConfProps.remove("client.id");

            // When transaction batch number is less than 2, it is treated effectively as no-transaction
            // NOTE(review): OpTimeTrackKafkaProducer only enables transaction batching when
            // txnBatchNum > 2, while this keeps "transactional.id" for txnBatchNum == 2 --
            // confirm the intended boundary; with txnBatchNum == 2 initTransactions() is
            // called below but no transaction batching happens.
            if (txnBatchNum < 2)
                producerConfProps.remove("transactional.id");

            String baseTransactId = "";
            boolean transactionEnabled = false;
            if (producerConfProps.containsKey("transactional.id")) {
                // Make the transactional.id unique per cached producer, as required by Kafka.
                baseTransactId = producerConfProps.get("transactional.id").toString();
                producerConfProps.put("transactional.id", baseTransactId + "-" + cacheKey);
                // NOTE(review): the appended suffix makes this value non-blank, so this is
                // effectively always true when the "transactional.id" key is present.
                transactionEnabled = StringUtils.isNotBlank(producerConfProps.get("transactional.id").toString());
            }

            KafkaProducer<String, String> producer = new KafkaProducer<>(producerConfProps);
            if (transactionEnabled) {
                producer.initTransactions();
            }

            if (logger.isDebugEnabled()) {
                logger.debug("Producer created: {}/{} -- ({}, {}, {})",
                    cacheKey,
                    producer,
                    topicName,
                    transactionEnabled,
                    clientId);
            }

            opTimeTrackKafkaClient = new OpTimeTrackKafkaProducer(
                kafkaSpace,
                asyncAPI,
                transactionEnabled,
                txnBatchNum,
                producer);
            kafkaSpace.addOpTimeTrackKafkaClient(cacheKey, opTimeTrackKafkaClient);
        }

        return opTimeTrackKafkaClient;
    }

    // Assemble the ProducerRecord for one cycle: key/value, optional JSON-derived
    // headers, and the NB tracking headers (sequence number and payload size).
    private ProducerRecord<String, String> createKafkaMessage(
        long curCycle,
        String topicName,
        String msgHeaderRawJsonStr,
        String msgKey,
        String msgValue
    ) {
        if (StringUtils.isAllBlank(msgKey, msgValue)) {
            throw new KafkaAdapterInvalidParamException("Message key and value can't both be empty!");
        }

        int messageSize = KafkaAdapterUtil.getStrObjSize(msgKey) + KafkaAdapterUtil.getStrObjSize(msgValue);

        ProducerRecord<String, String> record = new ProducerRecord<>(topicName, msgKey, msgValue);

        // Check if msgHeaderRawJsonStr is a valid JSON string with a collection of key/value pairs
        // - if Yes, convert it to a map
        // - otherwise, log an error message and ignore message headers without throwing a runtime exception
        Map<String, String> msgHeaderProperties = new HashMap<>();
        if (!StringUtils.isBlank(msgHeaderRawJsonStr)) {
            try {
                msgHeaderProperties = KafkaAdapterUtil.convertJsonToMap(msgHeaderRawJsonStr);
            } catch (Exception e) {
                logger.warn(
                    "Error parsing message property JSON string {}, ignore message properties!",
                    msgHeaderRawJsonStr);
            }
        }

        // Blank-keyed or blank-valued headers still count toward messageSize but are not attached.
        for (Map.Entry<String, String> entry : msgHeaderProperties.entrySet()) {
            String headerKey = entry.getKey();
            String headerValue = entry.getValue();

            messageSize += KafkaAdapterUtil.getStrObjSize(headerKey) + KafkaAdapterUtil.getStrObjSize(headerValue);

            if (! StringUtils.isAnyBlank(headerKey, headerValue)) {
                record.headers().add(headerKey, headerValue.getBytes());
            }
        }

        // NB-specific headers
        // NOTE(review): the 8 and 6 added below presumably approximate the rendered
        // sizes of the sequence-number and size header values -- confirm.
        messageSize += KafkaAdapterUtil.getStrObjSize(KafkaAdapterUtil.NB_MSG_SEQ_PROP);
        messageSize += 8;
        messageSize += KafkaAdapterUtil.getStrObjSize(KafkaAdapterUtil.NB_MSG_SIZE_PROP);
        messageSize += 6;

        record.headers().add(KafkaAdapterUtil.NB_MSG_SEQ_PROP, String.valueOf(curCycle).getBytes());
        record.headers().add(KafkaAdapterUtil.NB_MSG_SIZE_PROP, String.valueOf(messageSize).getBytes());

        return record;
    }

    /** Builds the producer op for one cycle. */
    @Override
    public KafkaOp apply(long cycle) {
        String topicName = topicNameStrFunc.apply(cycle);
        String clientId = getEffectiveClientId(cycle);

        OpTimeTrackKafkaClient opTimeTrackKafkaProducer =
            getOrCreateOpTimeTrackKafkaProducer(cycle, topicName, clientId);

        ProducerRecord<String, String> message = createKafkaMessage(
            cycle,
            topicName,
            msgHeaderJsonStrFunc.apply(cycle),
            msgKeyStrFunc.apply(cycle),
            msgValueStrFunc.apply(cycle)
        );

        return new KafkaOp(
            kafkaAdapterMetrics,
            kafkaSpace,
            opTimeTrackKafkaProducer,
            message);
    }
}

View File

@@ -0,0 +1,29 @@
/*
* Copyright (c) 2022 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package io.nosqlbench.adapter.kafka.exception;
/**
 * Thrown when an adapter or op parameter carries an invalid or inconsistent value.
 */
public class KafkaAdapterInvalidParamException extends RuntimeException {

    /** Names the offending parameter and describes why its value is invalid. */
    public KafkaAdapterInvalidParamException(String paramName, String errDesc) {
        super(String.format("Invalid setting for parameter (%s): %s", paramName, errDesc));
    }

    /** Uses a fully pre-formatted error description as the message. */
    public KafkaAdapterInvalidParamException(String fullErrDesc) {
        super(fullErrDesc);
    }
}

View File

@@ -0,0 +1,30 @@
/*
* Copyright (c) 2022 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package io.nosqlbench.adapter.kafka.exception;
/**
 * Wraps unexpected runtime failures from the Kafka client so they surface as
 * adapter-level errors.
 *
 * <p>The previous version called {@code printStackTrace()} from both constructors,
 * which writes directly to stderr and duplicates output when the exception is later
 * caught and logged. Reporting is left to whoever catches the exception.
 */
public class KafkaAdapterUnexpectedException extends RuntimeException {

    public KafkaAdapterUnexpectedException(String message) {
        super(message);
    }

    /** Preserves the original exception as the cause. */
    public KafkaAdapterUnexpectedException(Exception e) {
        super(e);
    }
}

View File

@@ -0,0 +1,25 @@
/*
* Copyright (c) 2022 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package io.nosqlbench.adapter.kafka.exception;
/**
 * Thrown when an op template requests an operation type this Kafka adapter does
 * not support.
 */
public class KafkaAdapterUnsupportedOpException extends RuntimeException {
    public KafkaAdapterUnsupportedOpException(String kafkaOpType) {
        // Fixed copy/paste from the Pulsar adapter: the message previously said "Pulsar".
        super("Unsupported Kafka adapter operation type: \"" + kafkaOpType + "\"");
    }
}

View File

@@ -0,0 +1,50 @@
/*
* Copyright (c) 2022 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package io.nosqlbench.adapter.kafka.ops;
import com.codahale.metrics.Histogram;
import io.nosqlbench.adapter.kafka.KafkaSpace;
import io.nosqlbench.adapter.kafka.util.KafkaAdapterMetrics;
import io.nosqlbench.engine.api.activityimpl.uniform.flowtypes.CycleOp;
/**
 * A single NB cycle's Kafka operation. It is a thin delegate: the actual produce
 * or consume work is performed by the cached {@link OpTimeTrackKafkaClient}.
 */
public class KafkaOp implements CycleOp<Object> {

    private final KafkaAdapterMetrics kafkaAdapterMetrics;
    protected final KafkaSpace kafkaSpace;
    private final OpTimeTrackKafkaClient opTimeTrackKafkaClient;
    // For producer ops this holds the ProducerRecord to send; for consumer ops it is null.
    private final Object cycleObj;
    protected final Histogram messageSizeHistogram;

    public KafkaOp(KafkaAdapterMetrics kafkaAdapterMetrics,
                   KafkaSpace kafkaSpace,
                   OpTimeTrackKafkaClient opTimeTrackKafkaClient,
                   Object cycleObj)
    {
        this.kafkaSpace = kafkaSpace;
        this.cycleObj = cycleObj;
        this.opTimeTrackKafkaClient = opTimeTrackKafkaClient;
        this.kafkaAdapterMetrics = kafkaAdapterMetrics;
        this.messageSizeHistogram = kafkaAdapterMetrics.getMessagesizeHistogram();
    }

    /** Hands the cycle to the underlying client; no result value is produced. */
    @Override
    public Object apply(long cycle) {
        opTimeTrackKafkaClient.process(cycle, cycleObj);
        return null;
    }
}

View File

@@ -0,0 +1,58 @@
/*
* Copyright (c) 2022 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package io.nosqlbench.adapter.kafka.ops;
import io.nosqlbench.adapter.kafka.KafkaSpace;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
/**
 * Base class for time-budgeted Kafka client wrappers. Subclasses implement the
 * per-cycle message work; this class gates that work behind an optional maximum
 * activity run time.
 */
abstract public class OpTimeTrackKafkaClient {

    private final static Logger logger = LogManager.getLogger("OpTimeTrackKafkaClient");

    protected final KafkaSpace kafkaSpace;

    // Wall-clock time (ms) when the activity started, captured from the space.
    protected final long activityStartTime;

    // Maximum time length to execute Kafka operations (e.g. message send or consume)
    // - when NB execution passes this threshold, process() becomes a no-op
    // - 0 means no maximum time constraint: ops always run until the NB cycles finish
    protected final long maxOpTimeInSec;

    public OpTimeTrackKafkaClient(KafkaSpace kafkaSpace) {
        this.kafkaSpace = kafkaSpace;
        this.activityStartTime = kafkaSpace.getActivityStartTimeMills();
        this.maxOpTimeInSec = kafkaSpace.getMaxOpTimeInSec();
    }

    /**
     * Runs one cycle's message work unless the configured time budget has elapsed.
     */
    public void process(long cycle, Object cycleObj) {
        long elapsedMs = System.currentTimeMillis() - activityStartTime;
        boolean withinTimeBudget = (maxOpTimeInSec == 0) || (elapsedMs <= maxOpTimeInSec * 1000L);
        if (withinTimeBudget) {
            cycleMsgProcess(cycle, cycleObj);
        }
    }

    /** Subclass hook: perform the actual per-cycle send/consume work. */
    abstract void cycleMsgProcess(long cycle, Object cycleObj);

    /** Releases the underlying Kafka client resources. */
    abstract public void close();
}

View File

@@ -0,0 +1,190 @@
/*
* Copyright (c) 2022 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package io.nosqlbench.adapter.kafka.ops;
import io.nosqlbench.adapter.kafka.KafkaSpace;
import io.nosqlbench.adapter.kafka.util.KafkaAdapterUtil;
import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.Headers;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import java.util.Map;
/**
 * Time-tracked wrapper around a KafkaConsumer. Each cycle polls for records and,
 * when auto-commit is disabled, commits offsets manually (sync or async) once
 * maxMsgCntPerCommit messages have accumulated, or on the activity's final cycle.
 *
 * <p>Poll and commit calls are serialized via {@code synchronized(this)}; the manual
 * commit counter is tracked per thread in a ThreadLocal.
 */
public class OpTimeTrackKafkaConsumer extends OpTimeTrackKafkaClient {
    private final static Logger logger = LogManager.getLogger("OpTimeTrackKafkaConsumer");

    // Poll timeout passed to KafkaConsumer#poll.
    // NOTE(review): the field name says milliseconds, but MessageConsumerOpDispenser
    // passes its "msgPollIntervalInSec" value here -- confirm the intended unit.
    private final int msgPoolIntervalInMs;
    // Whether manual offset commits use commitAsync (true) or commitSync (false).
    private final boolean asyncMsgCommit;
    private final boolean autoCommitEnabled;
    // Number of received messages between manual commits (only used when auto-commit is off).
    private final int maxMsgCntPerCommit;

    // Keep track of the manual commit count, per thread.
    private final ThreadLocal<Integer> manualCommitTrackingCnt = ThreadLocal.withInitial(() -> 0);

    private final KafkaConsumer<String, String> consumer;

    public OpTimeTrackKafkaConsumer(KafkaSpace kafkaSpace,
                                    boolean asyncMsgCommit,
                                    int msgPoolIntervalInMs,
                                    boolean autoCommitEnabled,
                                    int maxMsgCntPerCommit,
                                    KafkaConsumer<String, String> consumer) {
        super(kafkaSpace);
        this.msgPoolIntervalInMs = msgPoolIntervalInMs;
        this.asyncMsgCommit = asyncMsgCommit;
        this.autoCommitEnabled = autoCommitEnabled;
        this.maxMsgCntPerCommit = maxMsgCntPerCommit;
        this.consumer = consumer;
    }

    // Accessors/mutators for the per-thread manual commit counter.
    public int getManualCommitTrackingCnt() { return manualCommitTrackingCnt.get(); }
    public void incManualCommitTrackingCnt() {
        int curVal = getManualCommitTrackingCnt();
        manualCommitTrackingCnt.set(curVal + 1);
    }
    public void resetManualCommitTrackingCnt() {
        manualCommitTrackingCnt.set(0);
    }

    // Decide whether a manual offset commit is due: only when auto-commit is disabled,
    // AND either maxMsgCntPerCommit messages have accumulated since the last commit,
    // or this is the final cycle of the activity.
    private boolean msgCommitNeeded(long cycle) {
        boolean commitNeeded = !autoCommitEnabled;

        if (commitNeeded) {
            int msgCommitTackingCnt = manualCommitTrackingCnt.get();

            if ( ( (msgCommitTackingCnt > 0) && ((msgCommitTackingCnt % maxMsgCntPerCommit) == 0) ) ||
                ( cycle >= (kafkaSpace.getTotalCycleNum() - 1) ) ) {
                commitNeeded = true;

                if (logger.isDebugEnabled()) {
                    // NOTE(review): the first argument is the ThreadLocal object itself and
                    // logs as "java.lang.ThreadLocal@...": probably meant the counter value.
                    logger.debug("Manually commit message ({}, {}, {})",
                        manualCommitTrackingCnt, msgCommitTackingCnt, cycle);
                }
            }
            else {
                commitNeeded = false;
            }
        }

        return commitNeeded;
    }

    // Render a received record (NB sequence header, key, value) for debug logging.
    private String printRecvedMsg(ConsumerRecord<String, String> record) {
        Headers headers = record.headers();
        Header nbMsgSeqHeader = headers.lastHeader(KafkaAdapterUtil.NB_MSG_SEQ_PROP);

        StringBuilder sb = new StringBuilder();
        if (nbMsgSeqHeader != null) {
            sb.append("Header (MsgSeq): " + new String(nbMsgSeqHeader.value()) + "; ");
        }
        sb.append("Key: " + record.key() + "; ");
        sb.append("Value: " + record.value() + "; ");

        return sb.toString();
    }

    /**
     * One cycle of consumer work: poll once, then (if auto-commit is off) either
     * bump the per-thread counter or perform the due manual commit.
     */
    @Override
    void cycleMsgProcess(long cycle, Object cycleObj) {
        // Silently drop the cycle when the space is already shutting down.
        if (kafkaSpace.isShuttigDown()) {
            return;
        }

        synchronized (this) {
            // NOTE(review): poll(long) is deprecated in newer Kafka clients in favor of
            // poll(Duration) -- consider migrating.
            ConsumerRecords<String, String> records = consumer.poll(msgPoolIntervalInMs);

            for (ConsumerRecord<String, String> record : records) {
                if (record != null) {
                    if (logger.isDebugEnabled()) {
                        logger.debug(
                            "Receiving message is successful: [{}] - offset({}), cycle ({})",
                            printRecvedMsg(record),
                            record.offset(),
                            cycle);
                    }

                    if (!autoCommitEnabled) {
                        boolean bCommitMsg = msgCommitNeeded(cycle);
                        if (bCommitMsg) {
                            if (!asyncMsgCommit) {
                                consumer.commitSync();

                                if (logger.isDebugEnabled()) {
                                    logger.debug(
                                        "Sync message commit is successful: cycle ({}), maxMsgCntPerCommit ({})",
                                        cycle,
                                        maxMsgCntPerCommit);
                                }
                            } else {
                                consumer.commitAsync(new OffsetCommitCallback() {
                                    @Override
                                    public void onComplete(Map<TopicPartition, OffsetAndMetadata> map, Exception e) {
                                        if (logger.isDebugEnabled()) {
                                            if (e == null) {
                                                logger.debug(
                                                    "Async message commit succeeded: cycle({}), maxMsgCntPerCommit ({})",
                                                    cycle,
                                                    maxMsgCntPerCommit);
                                            } else {
                                                logger.debug(
                                                    "Async message commit failed: cycle ({}), maxMsgCntPerCommit ({}), error ({})",
                                                    cycle,
                                                    maxMsgCntPerCommit,
                                                    e.getMessage());
                                            }
                                        }
                                    }
                                });
                            }

                            resetManualCommitTrackingCnt();
                        } else {
                            incManualCommitTrackingCnt();
                        }
                    }
                }
            }
        }
    }

    /**
     * Flushes any pending manual commit (sync or async, matching the configured mode)
     * and closes the consumer.
     */
    @Override
    public void close() {
        try {
            if (consumer != null) {
                if (!asyncMsgCommit)
                    consumer.commitSync();
                else
                    consumer.commitAsync();

                consumer.close();
            }

            this.manualCommitTrackingCnt.remove();
        }
        catch (IllegalStateException ise) {
            // If a consumer is already closed, that's fine.
        }
        catch (Exception e) {
            // NOTE(review): best-effort close; printStackTrace() writes to stderr --
            // consider routing through the class logger instead.
            e.printStackTrace();
        }
    }
}

View File

@@ -0,0 +1,264 @@
/*
* Copyright (c) 2022 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package io.nosqlbench.adapter.kafka.ops;
import io.nosqlbench.adapter.kafka.KafkaSpace;
import io.nosqlbench.adapter.kafka.exception.KafkaAdapterUnexpectedException;
import io.nosqlbench.adapter.kafka.util.KafkaAdapterUtil;
import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.errors.AuthorizationException;
import org.apache.kafka.common.errors.OutOfOrderSequenceException;
import org.apache.kafka.common.errors.ProducerFencedException;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import org.apache.kafka.common.errors.TimeoutException;
import org.apache.kafka.common.errors.InterruptException;
public class OpTimeTrackKafkaProducer extends OpTimeTrackKafkaClient {
private final static Logger logger = LogManager.getLogger("OpTimeTrackKafkaProducer");
private final boolean transactionEnabled;
private final boolean asyncMsgAck;
private final boolean transactEnabledConfig;
private final int txnBatchNum;
enum TxnProcResult {
SUCCESS,
RECOVERABLE_ERROR,
FATAL_ERROR,
UNKNOWN_ERROR
}
// Keep track the transaction count per thread
private static ThreadLocal<Integer>
txnBatchTrackingCntTL = ThreadLocal.withInitial(() -> 0);
private static ThreadLocal<TxnProcResult>
txnProcResultTL = ThreadLocal.withInitial(() -> TxnProcResult.SUCCESS);
private final KafkaProducer<String, String> producer;
public OpTimeTrackKafkaProducer(KafkaSpace kafkaSpace,
boolean asyncMsgAck,
boolean transactEnabledConfig,
int txnBatchNum,
KafkaProducer<String, String> producer) {
super(kafkaSpace);
this.asyncMsgAck = asyncMsgAck;
this.transactEnabledConfig = transactEnabledConfig;
this.txnBatchNum = txnBatchNum;
this.transactionEnabled = transactEnabledConfig && (txnBatchNum > 2);
this.producer = producer;
}
public static int getTxnBatchTrackingCntTL() {
return txnBatchTrackingCntTL.get();
}
public static void incTxnBatchTrackingCnt() {
txnBatchTrackingCntTL.set(getTxnBatchTrackingCntTL() + 1);
}
public static void resetTxnBatchTrackingCnt() {
txnBatchTrackingCntTL.set(0);
}
public static TxnProcResult getTxnProcResultTL() {
return txnProcResultTL.get();
}
public static void setTxnProcResultTL(TxnProcResult result) {
txnProcResultTL.set(result);
}
public static void resetTxnProcResultTL(TxnProcResult result) {
txnProcResultTL.set(TxnProcResult.SUCCESS);
}
private void processMsgTransaction(long cycle, KafkaProducer<String, String> producer) {
TxnProcResult result = TxnProcResult.SUCCESS;
if (transactionEnabled) {
int txnBatchTackingCnt = getTxnBatchTrackingCntTL();
try {
if (txnBatchTackingCnt == 0) {
// Start a new transaction when first starting the processing
producer.beginTransaction();
if (logger.isDebugEnabled()) {
logger.debug("New transaction started ( {}, {}, {}, {}, {} )",
cycle, producer, transactEnabledConfig, txnBatchNum, getTxnBatchTrackingCntTL());
}
} else if ( (txnBatchTackingCnt % (txnBatchNum - 1) == 0) ||
(cycle == (kafkaSpace.getTotalCycleNum() - 1)) ) {
synchronized (this) {
// Commit the current transaction
if (logger.isDebugEnabled()) {
logger.debug("Start committing transaction ... ( {}, {}, {}, {}, {} )",
cycle, producer, transactEnabledConfig, txnBatchNum, getTxnBatchTrackingCntTL());
}
producer.commitTransaction();
if (logger.isDebugEnabled()) {
logger.debug("Transaction committed ( {}, {}, {}, {}, {} )",
cycle, producer, transactEnabledConfig, txnBatchNum, getTxnBatchTrackingCntTL());
}
// Start a new transaction
producer.beginTransaction();
if (logger.isDebugEnabled()) {
logger.debug("New transaction started ( {}, {}, {}, {}, {} )",
cycle, producer, transactEnabledConfig, txnBatchNum, getTxnBatchTrackingCntTL());
}
}
}
}
catch (Exception e) {
e.printStackTrace();
if ( (e instanceof IllegalStateException) ||
(e instanceof ProducerFencedException) ||
(e instanceof UnsupportedOperationException) ||
(e instanceof AuthorizationException) ) {
result = TxnProcResult.FATAL_ERROR;
}
else if ( (e instanceof TimeoutException ) ||
(e instanceof InterruptException)) {
result = TxnProcResult.RECOVERABLE_ERROR;
}
else {
result = TxnProcResult.UNKNOWN_ERROR;
}
}
}
setTxnProcResultTL(result);
}
/**
 * Sends one message (the cycleObj, a {@code ProducerRecord}) to Kafka,
 * honoring the configured transaction boundaries and the sync/async
 * acknowledgement setting.
 */
@Override
void cycleMsgProcess(long cycle, Object cycleObj) {
    // For producer, cycleObj represents a "message" (ProducerRecord)
    assert (cycleObj != null);

    // If the space is shutting down, abort any open transaction instead of
    // sending more messages, then bail out.
    if (kafkaSpace.isShuttigDown()) {
        if (transactionEnabled) {
            try {
                producer.abortTransaction();
                if (logger.isDebugEnabled()) {
                    logger.debug("Abort open transaction while shutting down ( {}, {}, {}, {}, {} )",
                        cycle, producer, transactEnabledConfig, txnBatchNum, getTxnBatchTrackingCntTL());
                }
            }
            catch (Exception e) {
                // Best-effort abort during shutdown; log instead of failing the shutdown path.
                logger.warn("Failed to abort the open transaction while shutting down", e);
            }
        }
        return;
    }

    // Open/commit transaction boundaries as needed; the outcome is stored thread-locally.
    processMsgTransaction(cycle, producer);
    TxnProcResult result = getTxnProcResultTL();

    if (result == TxnProcResult.RECOVERABLE_ERROR) {
        try {
            producer.abortTransaction();
        }
        catch (Exception e) {
            throw new KafkaAdapterUnexpectedException("Aborting transaction failed!");
        }
    } else if (result == TxnProcResult.FATAL_ERROR) {
        throw new KafkaAdapterUnexpectedException("Fatal error when initializing or committing transactions!");
    } else if (result == TxnProcResult.UNKNOWN_ERROR) {
        logger.debug("Unexpected error when initializing or committing transactions!");
    }

    ProducerRecord<String, String> message = (ProducerRecord<String, String>) cycleObj;
    try {
        if (result == TxnProcResult.SUCCESS) {
            Future<RecordMetadata> responseFuture = producer.send(message, new Callback() {
                @Override
                public void onCompletion(RecordMetadata recordMetadata, Exception e) {
                    if (asyncMsgAck) {
                        if (logger.isDebugEnabled()) {
                            logger.debug("Message sending with async ack. is successful ({}) - {}, {}",
                                cycle, producer, recordMetadata);
                        }
                    }
                }
            });

            if (!asyncMsgAck) {
                try {
                    // Synchronous ack: block until the broker confirms the send.
                    RecordMetadata recordMetadata = responseFuture.get();
                    if (logger.isDebugEnabled()) {
                        logger.debug("Message sending with sync ack. is successful ({}) - {}, {}",
                            cycle, producer, recordMetadata);
                    }
                } catch (InterruptedException | ExecutionException e) {
                    KafkaAdapterUtil.messageErrorHandling(
                        e,
                        kafkaSpace.isStrictMsgErrorHandling(),
                        "Unexpected error when waiting to receive message-send ack from the Kafka cluster." +
                            "\n-----\n" + e);
                }
            }

            incTxnBatchTrackingCnt();
        }
    }
    catch ( ProducerFencedException | OutOfOrderSequenceException |
            UnsupportedOperationException | AuthorizationException e) {
        // Fatal producer states per the KafkaProducer javadoc; the producer
        // cannot be used any further.
        if (logger.isDebugEnabled()) {
            logger.debug("Fatal error when sending a message ({}) - {}, {}",
                cycle, producer, message);
        }
        throw new KafkaAdapterUnexpectedException(e);
    }
    catch (IllegalStateException | KafkaException e) {
        // NOTE(review): this catch block was previously empty and silently
        // swallowed send errors. Log them so failures are at least visible.
        logger.warn("Error when sending a message ({}) - {}", cycle, producer, e);
        if (transactionEnabled) {
            // NOTE(review): presumably an abort/retry of the open transaction
            // was intended here -- TODO confirm the intended recovery.
        }
    }
    catch (Exception e) {
        throw new KafkaAdapterUnexpectedException(e);
    }
}
/**
 * Closes the underlying Kafka producer, committing any open transaction
 * first. Safe to call more than once: an already-closed producer raises
 * IllegalStateException, which is deliberately ignored.
 */
public void close() {
    try {
        if (producer != null) {
            // NOTE(review): commits unconditionally when transactions are
            // enabled; assumes a transaction is currently open -- TODO confirm.
            if (transactionEnabled) producer.commitTransaction();
            producer.close();
        }
        // Drop the per-thread transaction-batch counter to avoid ThreadLocal leaks.
        this.txnBatchTrackingCntTL.remove();
    }
    catch (IllegalStateException ise) {
        // If a producer is already closed, that's fine.
    }
    catch (Exception e) {
        // Log instead of printing to stderr so the failure is captured in the logs.
        logger.error("Unexpected error when closing the Kafka producer", e);
    }
}
}

View File

@@ -0,0 +1,68 @@
/*
* Copyright (c) 2022 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.nosqlbench.adapter.kafka.util;
import com.codahale.metrics.Histogram;
import com.codahale.metrics.Timer;
import io.nosqlbench.api.config.NBNamedElement;
import io.nosqlbench.api.engine.metrics.ActivityMetrics;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
/**
 * Collects Dropwizard metrics for the NB Kafka adapter: a message-size
 * histogram plus bind/execute timers, all registered under a caller-supplied
 * metric-name prefix.
 */
public class KafkaAdapterMetrics implements NBNamedElement {

    // Fix: logger was registered under the copy-pasted name "S4JAdapterMetrics";
    // use this class so log output is attributed correctly.
    private final static Logger logger = LogManager.getLogger(KafkaAdapterMetrics.class);

    // Prefix prepended to every metric name registered by this class.
    private final String defaultAdapterMetricsPrefix;

    // Metrics are created lazily by initS4JAdapterInstrumentation().
    private Histogram messageSizeHistogram;
    private Timer bindTimer;
    private Timer executeTimer;

    public KafkaAdapterMetrics(String defaultMetricsPrefix) {
        this.defaultAdapterMetricsPrefix = defaultMetricsPrefix;
    }

    @Override
    public String getName() {
        // NOTE(review): this name (and the init method below) still say "S4J";
        // the class appears copied from the S4J adapter. Renaming would change
        // externally visible metric names, so it is left as-is -- rename in a
        // coordinated change after confirming no dashboards depend on it.
        return "S4JAdapterMetrics";
    }

    /** Registers the histogram and timer metrics with ActivityMetrics. */
    public void initS4JAdapterInstrumentation() {
        // Histogram metrics
        this.messageSizeHistogram =
            ActivityMetrics.histogram(
                this,
                defaultAdapterMetricsPrefix + "message_size",
                ActivityMetrics.DEFAULT_HDRDIGITS);

        // Timer metrics
        this.bindTimer =
            ActivityMetrics.timer(
                this,
                defaultAdapterMetricsPrefix + "bind",
                ActivityMetrics.DEFAULT_HDRDIGITS);
        this.executeTimer =
            ActivityMetrics.timer(
                this,
                defaultAdapterMetricsPrefix + "execute",
                ActivityMetrics.DEFAULT_HDRDIGITS);
    }

    public Timer getBindTimer() { return bindTimer; }
    public Timer getExecuteTimer() { return executeTimer; }
    public Histogram getMessagesizeHistogram() { return messageSizeHistogram; }
}

View File

@@ -0,0 +1,117 @@
package io.nosqlbench.adapter.kafka.util;
/*
* Copyright (c) 2022 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import com.amazonaws.util.Base64;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.commons.lang3.StringUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
/**
 * Static helper utilities for the NB Kafka adapter: document-level parameter
 * validation, JSON parsing, cache-key construction, thread pausing, string
 * sizing, and message-error handling.
 */
public class KafkaAdapterUtil {

    private final static Logger logger = LogManager.getLogger(KafkaAdapterUtil.class);

    public static String DFT_CONSUMER_GROUP_NAME_PREFIX = "nbKafkaGrp";
    public static String DFT_TOPIC_NAME_PREFIX = "nbKafkaTopic";

    ///////
    // Valid document level parameters for JMS NB yaml file
    public enum DOC_LEVEL_PARAMS {
        // Blocking message producing or consuming
        ASYNC_API("async_api");

        public final String label;

        DOC_LEVEL_PARAMS(String label) {
            this.label = label;
        }
    }

    /** Whether the given string matches a recognized document-level parameter label. */
    public static boolean isValidDocLevelParam(String param) {
        return Arrays.stream(DOC_LEVEL_PARAMS.values()).anyMatch(t -> t.label.equals(param));
    }

    /** Comma-separated list of all valid document-level parameter labels. */
    public static String getValidDocLevelParamList() {
        return Arrays.stream(DOC_LEVEL_PARAMS.values()).map(t -> t.label).collect(Collectors.joining(", "));
    }

    public final static String NB_MSG_SEQ_PROP = "NBMsgSeqProp";
    public final static String NB_MSG_SIZE_PROP = "NBMsgSize";

    // Get simplified NB thread name: everything after the last '/' if present.
    public static String getSimplifiedNBThreadName(String fullThreadName) {
        assert (StringUtils.isNotBlank(fullThreadName));
        if (StringUtils.contains(fullThreadName, '/'))
            return StringUtils.substringAfterLast(fullThreadName, "/");
        else
            return fullThreadName;
    }

    /** Parses a JSON object string into a String-to-String map. */
    public static Map<String, String> convertJsonToMap(String jsonStr) throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        return mapper.readValue(jsonStr, new TypeReference<Map<String, String>>(){});
    }

    /** Parses a JSON array string into a list of objects. */
    public static List<Object> convertJsonToObjList(String jsonStr) throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        return Arrays.asList(mapper.readValue(jsonStr, Object[].class));
    }

    /**
     * Builds a cache key by joining the non-blank parts with "::" and
     * Base64-encoding the result (standard alphabet).
     */
    public static String buildCacheKey(String... keyParts) {
        String combinedStr = Arrays.stream(keyParts)
            .filter(StringUtils::isNotBlank)
            .collect(Collectors.joining("::"));
        // Use the JDK Base64 codec (same standard alphabet as the previously
        // used com.amazonaws.util.Base64) and an explicit charset instead of
        // the platform default.
        return java.util.Base64.getEncoder()
            .encodeToString(combinedStr.getBytes(StandardCharsets.UTF_8));
    }

    /** Pauses the current thread for the given number of seconds (no-op if <= 0). */
    public static void pauseCurThreadExec(int pauseInSec) {
        if (pauseInSec > 0) {
            try {
                // Multiply as long to avoid int overflow for very large values.
                Thread.sleep(pauseInSec * 1000L);
            }
            catch (InterruptedException ie) {
                // Restore the interrupt status instead of swallowing it.
                Thread.currentThread().interrupt();
            }
        }
    }

    /**
     * Size, in bytes, of the string encoded as UTF-16 (matching Java's
     * internal string format). Note: String.getBytes(UTF_16) also emits a
     * 2-byte byte-order mark, which is included in the count.
     */
    public static int getStrObjSize(String strObj) {
        // << https://docs.oracle.com/javase/6/docs/api/java/lang/String.html >>
        // A String represents a string in the UTF-16 format ...
        return strObj.getBytes(StandardCharsets.UTF_16).length;
    }

    /**
     * Handles a message-processing error: in strict mode, rethrows as a
     * RuntimeException (preserving the original cause); otherwise logs it,
     * pauses one second, and lets processing continue.
     */
    public static void messageErrorHandling(Exception exception, boolean strictErrorHandling, String errorMsg) {
        // Log (with stack trace) rather than printing to stderr.
        logger.error(errorMsg, exception);
        if (strictErrorHandling) {
            // Keep the cause chain so the underlying stack trace isn't lost.
            throw new RuntimeException(errorMsg + " [ " + exception.getMessage() + " ]", exception);
        }
        else {
            KafkaAdapterUtil.pauseCurThreadExec(1);
        }
    }
}

View File

@@ -0,0 +1,127 @@
/*
* Copyright (c) 2022 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.nosqlbench.adapter.kafka.util;
import org.apache.commons.configuration2.Configuration;
import org.apache.commons.configuration2.FileBasedConfiguration;
import org.apache.commons.configuration2.PropertiesConfiguration;
import org.apache.commons.configuration2.builder.FileBasedConfigurationBuilder;
import org.apache.commons.configuration2.builder.fluent.Parameters;
import org.apache.commons.configuration2.ex.ConfigurationException;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.builder.ToStringBuilder;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import java.io.File;
import java.io.IOException;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
/**
 * Loads Kafka client configuration from a properties file, splitting entries
 * into topic-, producer-, and consumer-scoped maps based on the "topic.",
 * "producer.", and "consumer." key prefixes (the prefix is stripped).
 */
public class KafkaClientConf {
    private final static Logger logger = LogManager.getLogger(KafkaClientConf.class);

    public static final String TOPIC_CONF_PREFIX = "topic";
    public static final String PRODUCER_CONF_PREFIX = "producer";
    public static final String CONSUMER_CONF_PREFIX = "consumer";

    // https://kafka.apache.org/documentation/#topicconfigs
    private final Map<String, String> topicConfMap = new HashMap<>();
    private final Map<String, String> producerConfMap = new HashMap<>();
    private final Map<String, String> consumerConfMap = new HashMap<>();

    public KafkaClientConf(String clientConfFileName) {
        //////////////////
        // Read related Kafka client configuration settings from a file
        readRawConfFromFile(clientConfFileName);

        //////////////////
        // Ignores the following Kafka producer/consumer configurations since
        // they're either not supported in the Kafka API or they must be specified
        // as the NB CLI parameters or the NB yaml file parameters.

        // <<< https://kafka.apache.org/documentation/#producerconfigs >>>
        // producer config
        //   * bootstrap.servers
        producerConfMap.remove("bootstrap.servers");

        // <<< https://kafka.apache.org/documentation/#consumerconfigs >>>
        // consumer config
        //   * bootstrap.servers
        consumerConfMap.remove("bootstrap.servers");
    }

    /**
     * Reads the raw key/value settings from the given properties file and
     * routes each non-blank entry into the topic/producer/consumer map that
     * matches its key prefix.
     */
    public void readRawConfFromFile(String fileName) {
        try {
            Parameters params = new Parameters();
            FileBasedConfigurationBuilder<FileBasedConfiguration> builder =
                new FileBasedConfigurationBuilder<FileBasedConfiguration>(PropertiesConfiguration.class)
                    .configure(params.properties()
                        .setFileName(fileName));
            Configuration config = builder.getConfiguration();

            for (Iterator<String> it = config.getKeys(); it.hasNext(); ) {
                String confKey = it.next();
                String confVal = config.getProperty(confKey).toString();

                if (!StringUtils.isBlank(confVal)) {
                    // Get topic specific configuration settings, removing "topic." prefix
                    if (StringUtils.startsWith(confKey, TOPIC_CONF_PREFIX)) {
                        topicConfMap.put(confKey.substring(TOPIC_CONF_PREFIX.length() + 1), confVal);
                    }
                    // Get producer specific configuration settings, removing "producer." prefix
                    else if (StringUtils.startsWith(confKey, PRODUCER_CONF_PREFIX)) {
                        producerConfMap.put(confKey.substring(PRODUCER_CONF_PREFIX.length() + 1), confVal);
                    }
                    // Get consumer specific configuration settings, removing "consumer." prefix
                    else if (StringUtils.startsWith(confKey, CONSUMER_CONF_PREFIX)) {
                        consumerConfMap.put(confKey.substring(CONSUMER_CONF_PREFIX.length() + 1), confVal);
                    }
                }
            }
        } catch (ConfigurationException cex) {
            // Log with the throwable so the stack trace goes to the log, not stderr.
            logger.error("Error loading configuration items from the specified config properties file: "
                + fileName + ":" + cex.getMessage(), cex);
        }
    }

    public Map<String, String> getTopicConfMap() { return topicConfMap; }
    public Map<String, String> getProducerConfMap() { return producerConfMap; }
    public Map<String, String> getConsumerConfMap() { return consumerConfMap; }

    @Override
    public String toString() {
        return new ToStringBuilder(this).
            append("topicConfMap", topicConfMap).
            append("producerConfMap", producerConfMap).
            append("consumerConfMap", consumerConfMap).
            toString();
    }
}

View File

@@ -0,0 +1,41 @@
# Overview
This NB Kafka driver allows publishing messages to or consuming messages from
* a Kafka cluster, or
* a Pulsar cluster with [S4K](https://github.com/datastax/starlight-for-kafka) or [KoP](https://github.com/streamnative/kop) Kafka Protocol handler for Pulsar.
At a high level, this driver supports the following Kafka functionalities:
* Publishing messages to one Kafka topic with sync. or async. message-send acknowledgements (from brokers)
* Subscribing to messages from one or multiple Kafka topics with sync. or async. message-recv acknowledgements (to brokers) (aka, message commits)
* auto message commit
* manual message commit with a configurable number of message commits in one batch
* Kafka Transaction support
## Example NB Yaml
* [kafka_producer.yaml](./kafka_producer.yaml)
*
* [kafka_consumer.yaml](./kafka_consumer.yaml)
# Usage
```bash
## Kafka Producer
$ <nb_cmd> run driver=kafka -vv cycles=100 threads=2 num_clnt=2 yaml=kafka_producer.yaml config=kafka_config.properties bootstrap_server=PLAINTEXT://localhost:9092
## Kafka Consumer
$ <nb_cmd> run driver=kafka -vv cycles=100 threads=4 num_clnt=2 num_cons_grp=2 yaml=kafka_producer.yaml config=kafka_config.properties bootstrap_server=PLAINTEXT://localhost:9092
```
## NB Kafka driver specific CLI parameters
* `num_clnt`: the number of Kafka clients to publish messages to or to receive messages from
* For producer workload, this is the number of the producer threads to publish messages to the same topic
* Can have multiple producer threads for one topic/partition (`KafkaProducer` is thread-safe)
* `threads` and `num_clnt` values MUST be the same.
* For consumer workload, this is the partition number of a topic
* The consumer workload supports subscribing to multiple topics. If so, all topics must have the same number of partitions.
* Only one consumer thread for one topic/partition (`KafkaConsumer` is NOT thread-safe)
* `threads` MUST be equal to `num_clnt`*`num_cons_grp`
* `num_cons_grp`: the number of consumer groups
* Only relevant for consumer workload

View File

@@ -0,0 +1,30 @@
#####
# Topic related configurations (global) - topic.***
# - Valid settings: https://kafka.apache.org/documentation/#topicconfigs
#
#--------------------------------------
topic.compression.type=uncompressed
topic.flush.messages=2
#####
# Producer related configurations (global) - producer.***
# - Valid settings: https://kafka.apache.org/documentation/#producerconfigs
#
#--------------------------------------
producer.key.serializer=org.apache.kafka.common.serialization.StringSerializer
producer.value.serializer=org.apache.kafka.common.serialization.StringSerializer
#producer.client.id=nbDftClient
producer.transactional.id=nbDftTxn
#####
# Consumer related configurations (global) - consumer.***
# - Valid settings: https://kafka.apache.org/documentation/#consumerconfigs
#
#--------------------------------------
consumer.key.deserializer=org.apache.kafka.common.serialization.StringDeserializer
consumer.value.deserializer=org.apache.kafka.common.serialization.StringDeserializer
consumer.group.id=nbDftGrp
#consumer.isolation.level=read_uncommitted
#consumer.enable.auto.commit=true

View File

@@ -0,0 +1,24 @@
# document level parameters that apply to all Kafka client types:
params:
# Whether to commit message asynchronously
# - default: true
# - only relevant for manual commit
async_api: "true"
blocks:
msg-consume-block:
ops:
op1:
## The value represents the topic names
# - for consumer, a list of topics (separated by comma) are supported
MessageConsume: "nbktest1,nbktest2"
# The timeout value to poll messages (unit: milli-seconds)
# - default: 0
msg_poll_interval: "10"
# The number of messages to receive before doing a manual commit
# - default: 0
# - If 0, it could mean doing auto commit or not, which is determined
# by "enable.auto.commit" consumer config value
manual_commit_batch_num: "0"

View File

@@ -0,0 +1,40 @@
bindings:
mykey: Mod(5); ToString(); Prefix("key-")
mytext_val: AlphaNumericString(30)
random_text_val1: AlphaNumericString(10)
random_text_val2: AlphaNumericString(20)
# document level parameters that apply to all Kafka client types:
params:
# whether to confirm message send ack. asynchronously
# - default: true
async_api: "true"
blocks:
msg-produce-block:
ops:
op1:
## The value represents a topic name
# - for producer, only ONE topic is supported
MessageProduce: "nbktest"
# The number of messages to put in one transaction
# - default: 0
# - value 0 or 1 means no transaction
# - it also requires "transactional.id" parameter is set
txn_batch_num: 8
## (Optional) Kafka message headers (in JSON format).
msg_header: |
{
"header-1": "{random_text_val1}",
"header-2": "{random_text_val2}"
}
## (Optional) Kafka message key.
# - message key and value can't be both empty at the same time
msg_key: "{mykey}"
## (Optional) Kafka message value.
# - message key and value can't be both empty at the same time
msg_body: "{mytext_val}"

View File

@@ -23,7 +23,7 @@
<parent>
<artifactId>mvn-defaults</artifactId>
<groupId>io.nosqlbench</groupId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
<relativePath>../mvn-defaults</relativePath>
</parent>
@@ -36,7 +36,7 @@
<dependency>
<groupId>io.nosqlbench</groupId>
<artifactId>engine-api</artifactId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
</dependency>
<dependency>

View File

@@ -23,7 +23,7 @@
<parent>
<artifactId>mvn-defaults</artifactId>
<groupId>io.nosqlbench</groupId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
<relativePath>../mvn-defaults</relativePath>
</parent>
@@ -41,13 +41,13 @@
<dependency>
<groupId>io.nosqlbench</groupId>
<artifactId>engine-api</artifactId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>io.nosqlbench</groupId>
<artifactId>adapters-api</artifactId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
</dependency>
<dependency>
@@ -62,6 +62,13 @@
<version>${pulsar.version}</version>
</dependency>
<!-- https://mvnrepository.com/artifact/org.apache.commons/commons-lang3 -->
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-lang3</artifactId>
<version>3.12.0</version>
</dependency>
<!-- https://mvnrepository.com/artifact/commons-beanutils/commons-beanutils -->
<dependency>
<groupId>commons-beanutils</groupId>
@@ -82,13 +89,6 @@
<artifactId>avro</artifactId>
<version>1.11.1</version>
</dependency>
<!-- https://mvnrepository.com/artifact/org.apache.commons/commons-lang3 -->
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-lang3</artifactId>
<version>3.12.0</version>
</dependency>
</dependencies>
</project>

View File

@@ -73,7 +73,7 @@ public class PulsarClientConf {
// Convert the raw configuration map (<String,String>) to the required map (<String,Object>)
producerConfMapTgt.putAll(PulsarConfConverter.convertStdRawProducerConf(producerConfMapRaw));
consumerConfMapTgt.putAll(PulsarConfConverter.convertStdRawConsumerConf(consumerConfMapRaw));
// TODO: Reader API is not disabled at the moment. Revisit when needed
// TODO: Reader API is not enabled at the moment. Revisit when needed
}

89
adapter-s4j/pom.xml Normal file
View File

@@ -0,0 +1,89 @@
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<artifactId>adapter-s4j</artifactId>
<packaging>jar</packaging>
<parent>
<artifactId>mvn-defaults</artifactId>
<groupId>io.nosqlbench</groupId>
<version>4.17.32-SNAPSHOT</version>
<relativePath>../mvn-defaults</relativePath>
</parent>
<name>${project.artifactId}</name>
<description>
A Starlight for JMS driver for nosqlbench. This provides the ability to inject synthetic data
into a pulsar system via JMS 2.0 compatible APIs.
NOTE: this is JMS compatible driver from DataStax that allows using a Pulsar cluster
as the potential JMS Destination
</description>
<properties>
<s4j.version>3.2.0</s4j.version>
</properties>
<build>
<extensions>
<extension>
<groupId>kr.motd.maven</groupId>
<artifactId>os-maven-plugin</artifactId>
<version>1.7.1</version>
</extension>
</extensions>
</build>
<dependencies>
<!-- core dependencies -->
<dependency>
<groupId>io.nosqlbench</groupId>
<artifactId>engine-api</artifactId>
<version>4.17.32-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>io.nosqlbench</groupId>
<artifactId>adapters-api</artifactId>
<version>4.17.32-SNAPSHOT</version>
</dependency>
<!-- https://mvnrepository.com/artifact/com.datastax.oss/pulsar-jms -->
<dependency>
<groupId>com.datastax.oss</groupId>
<artifactId>pulsar-jms-all</artifactId>
<version>${s4j.version}</version>
</dependency>
<!-- https://mvnrepository.com/artifact/org.apache.commons/commons-lang3 -->
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-lang3</artifactId>
<version>3.12.0</version>
</dependency>
<!-- https://mvnrepository.com/artifact/commons-beanutils/commons-beanutils -->
<dependency>
<groupId>commons-beanutils</groupId>
<artifactId>commons-beanutils</artifactId>
<version>1.9.4</version>
</dependency>
<!-- https://mvnrepository.com/artifact/org.apache.commons/commons-configuration2 -->
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-configuration2</artifactId>
<version>2.8.0</version>
</dependency>
<!-- https://mvnrepository.com/artifact/org.conscrypt/conscrypt-openjdk -->
<dependency>
<groupId>org.conscrypt</groupId>
<artifactId>conscrypt-openjdk</artifactId>
<version>2.5.2</version>
<classifier>${os.detected.classifier}</classifier>
</dependency>
</dependencies>
</project>

View File

@@ -0,0 +1,52 @@
/*
* Copyright (c) 2022 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.nosqlbench.adapter.s4j;
import io.nosqlbench.adapter.s4j.ops.S4JOp;
import io.nosqlbench.api.config.standard.NBConfigModel;
import io.nosqlbench.api.config.standard.NBConfiguration;
import io.nosqlbench.engine.api.activityimpl.OpMapper;
import io.nosqlbench.engine.api.activityimpl.uniform.BaseDriverAdapter;
import io.nosqlbench.engine.api.activityimpl.uniform.DriverAdapter;
import io.nosqlbench.engine.api.activityimpl.uniform.DriverSpaceCache;
import io.nosqlbench.nb.annotations.Service;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import java.util.function.Function;
/**
 * NB driver adapter for Starlight-for-JMS (S4J): wires together the op
 * mapper, the per-space initializer, and the merged configuration model
 * for the "s4j" driver selector.
 */
@Service(value = DriverAdapter.class, selector = "s4j")
public class S4JDriverAdapter extends BaseDriverAdapter<S4JOp, S4JSpace> {
    private final static Logger logger = LogManager.getLogger(S4JDriverAdapter.class);

    /** Creates the op mapper backed by this adapter's space cache and configuration. */
    @Override
    public OpMapper<S4JOp> getOpMapper() {
        return new S4JOpMapper(this, getConfiguration(), getSpaceCache());
    }

    /** Each named space gets its own S4JSpace built from the given configuration. */
    @Override
    public Function<String, ? extends S4JSpace> getSpaceInitializer(NBConfiguration cfg) {
        return spaceName -> new S4JSpace(spaceName, cfg);
    }

    /** The adapter's config model is the base model extended with the S4JSpace parameters. */
    @Override
    public NBConfigModel getConfigModel() {
        return super.getConfigModel().add(S4JSpace.getConfigModel());
    }
}

View File

@@ -0,0 +1,71 @@
/*
* Copyright (c) 2022 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.nosqlbench.adapter.s4j;
import io.nosqlbench.adapter.s4j.dispensers.MessageConsumerOpDispenser;
import io.nosqlbench.adapter.s4j.dispensers.MessageProducerOpDispenser;
import io.nosqlbench.adapter.s4j.ops.S4JOp;
import io.nosqlbench.api.config.standard.NBConfiguration;
import io.nosqlbench.engine.api.activityimpl.OpDispenser;
import io.nosqlbench.engine.api.activityimpl.OpMapper;
import io.nosqlbench.engine.api.activityimpl.uniform.DriverAdapter;
import io.nosqlbench.engine.api.activityimpl.uniform.DriverSpaceCache;
import io.nosqlbench.engine.api.templating.ParsedOp;
import io.nosqlbench.engine.api.templating.TypeAndTarget;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
/**
 * Maps a parsed S4J op onto the matching op dispenser: message production
 * vs. message consumption, selected by the op's type/target.
 */
public class S4JOpMapper implements OpMapper<S4JOp> {

    private final static Logger logger = LogManager.getLogger(S4JOpMapper.class);

    private final NBConfiguration cfg;
    private final DriverSpaceCache<? extends S4JSpace> spaceCache;
    private final DriverAdapter adapter;

    public S4JOpMapper(DriverAdapter adapter, NBConfiguration cfg, DriverSpaceCache<? extends S4JSpace> spaceCache) {
        this.cfg = cfg;
        this.spaceCache = spaceCache;
        this.adapter = adapter;
    }

    @Override
    public OpDispenser<? extends S4JOp> apply(ParsedOp op) {
        String spaceName = op.getStaticConfigOr("space", "default");
        S4JSpace s4jSpace = spaceCache.get(spaceName);

        /*
         * A raw 'body' element would provide JSON (or a structure convertible
         * to JSON) directly, bypassing op-type specific handling; that mode is
         * not supported yet, so reject it up front.
         */
        if (op.isDefined("body")) {
            throw new RuntimeException("This mode is reserved for later. Do not use the 'body' op field.");
        }

        TypeAndTarget<S4JOpType, String> opType = op.getTypeAndTarget(S4JOpType.class, String.class);
        return switch (opType.enumId) {
            case MessageProduce -> new MessageProducerOpDispenser(adapter, op, opType.targetFunction, s4jSpace);
            case MessageConsume -> new MessageConsumerOpDispenser(adapter, op, opType.targetFunction, s4jSpace);
        };
    }
}

View File

@@ -0,0 +1,31 @@
/*
* Copyright (c) 2022 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.nosqlbench.adapter.s4j;
/**
 * The kinds of JMS operations supported by the S4J adapter.
 */
public enum S4JOpType {
    /** Publish/send messages to a JMS queue or topic. */
    MessageProduce,

    /**
     * Consume/receive messages from a JMS queue or topic. For a topic, any
     * combination is possible: durable or non-durable, shared or non-shared
     * subscriptions.
     */
    MessageConsume
}

View File

@@ -0,0 +1,446 @@
/*
* Copyright (c) 2022 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.nosqlbench.adapter.s4j;
import com.datastax.oss.pulsar.jms.PulsarConnectionFactory;
import com.datastax.oss.pulsar.jms.PulsarJMSContext;
import io.nosqlbench.adapter.s4j.exception.S4JAdapterInvalidParamException;
import io.nosqlbench.adapter.s4j.exception.S4JAdapterUnexpectedException;
import io.nosqlbench.adapter.s4j.util.*;
import io.nosqlbench.api.config.standard.ConfigModel;
import io.nosqlbench.api.config.standard.NBConfigModel;
import io.nosqlbench.api.config.standard.NBConfiguration;
import io.nosqlbench.api.config.standard.Param;
import org.apache.commons.lang3.BooleanUtils;
import org.apache.commons.lang3.RandomUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.math.NumberUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import javax.jms.*;
import java.util.Base64;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;
public class S4JSpace implements AutoCloseable {
private final static Logger logger = LogManager.getLogger(S4JSpace.class);
private final String spaceName;
private final NBConfiguration cfg;
// - Each S4J space currently represents a number of JMS connections (\"num_conn\" NB CLI parameter);
// - JMS connection can have a number of JMS sessions (\"num_session\" NB CLI parameter).
// - Each JMS session has its own sets of JMS destinations, producers, consumers, etc.
private final ConcurrentHashMap<String, JMSContext> connLvlJmsContexts = new ConcurrentHashMap<>();
private final ConcurrentHashMap<String, S4JJMSContextWrapper> sessionLvlJmsContexts = new ConcurrentHashMap<>();
private final String pulsarSvcUrl;
private final String webSvcUrl;
private final String s4jClientConfFileName;
private final S4JClientConf s4JClientConf;
private final int sessionMode;
// Whether to do strict error handling while sending/receiving messages
// - Yes: any error returned from the Pulsar server while doing message receiving/sending will trigger NB execution stop
// - No: pause the current thread that received the error message for 1 second and then continue processing
private boolean strictMsgErrorHandling;
// Maximum time length to execute S4J operations (e.g. message send or consume)
// - when NB execution passes this threshold, it is simply NoOp
// - 0 means no maximum time constraint. S4JOp is always executed until NB execution cycle finishes
private long maxS4JOpTimeInSec;
private long s4JActivityStartTimeMills;
// Whether to keep track of the received message count, which includes
// - total received message count
// - received null message count (only relevant when non-blocking message receiving is used)
// By default, this setting is disabled
private boolean trackingMsgRecvCnt;
// How many JMS connections per NB S4J execution
private int maxNumConn;
// How many sessions per JMS connection
private int maxNumSessionPerConn;
// Total number of acknowledgement received
// - this can apply to both message production and consumption
// - for message consumption, this only applies to non-null messages received (which is for async API)
private final AtomicLong totalOpResponseCnt = new AtomicLong(0);
// Total number of null messages received
// - only applicable to message consumption
private final AtomicLong nullMsgRecvCnt = new AtomicLong(0);
// Keep track the transaction count per thread
private final ThreadLocal<Integer> txnBatchTrackingCnt = ThreadLocal.withInitial(() -> 0);
// Represents the JMS connection
private PulsarConnectionFactory s4jConnFactory;
private long totalCycleNum;
/**
 * Builds an S4J space from NB configuration: reads connection/session
 * counts, timing limits, error-handling flags, and the S4J client config
 * file, then initializes the underlying JMS connection machinery.
 */
public S4JSpace(String spaceName, NBConfiguration cfg) {
    this.spaceName = spaceName;
    this.cfg = cfg;

    this.pulsarSvcUrl = cfg.get("service_url");
    this.webSvcUrl = cfg.get("web_url");
    this.maxNumConn =
        NumberUtils.toInt(cfg.getOptional("num_conn").orElse("1"));
    this.maxNumSessionPerConn =
        NumberUtils.toInt(cfg.getOptional("num_session").orElse("1"));
    // Fix: the default used to be the string "0L", which NumberUtils.toLong
    // cannot parse (it only worked by falling back to 0 on parse failure).
    // Use a plain numeric string so the default is explicit.
    this.maxS4JOpTimeInSec =
        NumberUtils.toLong(cfg.getOptional("max_s4jop_time").orElse("0"));
    this.trackingMsgRecvCnt =
        BooleanUtils.toBoolean(cfg.getOptional("track_msg_cnt").orElse("false"));
    this.strictMsgErrorHandling =
        BooleanUtils.toBoolean(cfg.getOptional("strict_msg_error_handling").orElse("false"));
    this.s4jClientConfFileName = cfg.get("config");
    this.sessionMode = S4JAdapterUtil.getSessionModeFromStr(
        cfg.getOptional("session_mode").orElse(""));
    this.s4JClientConf = new S4JClientConf(pulsarSvcUrl, webSvcUrl, s4jClientConfFileName);

    this.setS4JActivityStartTimeMills(System.currentTimeMillis());

    this.initializeSpace(s4JClientConf);
}
// AutoCloseable hook: delegates to shutdownSpace(), which first waits for
// in-flight async operations to drain before tearing down JMS resources.
@Override
public void close() {
    shutdownSpace();
}
/**
 * Declares the configuration parameters accepted by this space together with
 * their defaults. The values declared here are read back in the constructor.
 */
public static NBConfigModel getConfigModel() {
    return ConfigModel.of(S4JSpace.class)
        .add(Param.defaultTo("service_url", "pulsar://localhost:6650")
            .setDescription("Pulsar broker service URL."))
        .add(Param.defaultTo("web_url", "http://localhost:8080")
            .setDescription("Pulsar web service URL."))
        .add(Param.defaultTo("config", "config.properties")
            .setDescription("Pulsar client connection configuration property file."))
        .add(Param.defaultTo("num_conn", 1)
            .setDescription("Number of JMS connections"))
        .add(Param.defaultTo("num_session", 1)
            .setDescription("Number of JMS sessions per JMS connection"))
        .add(Param.defaultTo("max_s4jop_time", 0)
            .setDescription("Maximum time (in seconds) to run NB S4J testing scenario."))
        .add(Param.defaultTo("track_msg_cnt", false)
            .setDescription("Whether to keep track of message count(s)"))
        .add(Param.defaultTo("session_mode", "")
            .setDescription("JMS session mode"))
        .add(Param.defaultTo("strict_msg_error_handling", false)
            .setDescription("Whether to do strict error handling which is to stop NB S4J execution."))
        .asReadOnly();
}
// ---- cached JMSContext maps (keyed by the identifiers built below) ----
public ConcurrentHashMap<String, JMSContext> getConnLvlJmsContexts() {
    return connLvlJmsContexts;
}
public ConcurrentHashMap<String, S4JJMSContextWrapper> getSessionLvlJmsContexts() {
    return sessionLvlJmsContexts;
}
// ---- simple configuration / state accessors ----
public long getS4JActivityStartTimeMills() { return this.s4JActivityStartTimeMills; }
public void setS4JActivityStartTimeMills(long startTime) { this.s4JActivityStartTimeMills = startTime; }
public long getMaxS4JOpTimeInSec() { return this.maxS4JOpTimeInSec; }
public int getSessionMode() { return sessionMode; }
public String getS4jClientConfFileName() { return s4jClientConfFileName; }
public S4JClientConf getS4JClientConf() { return s4JClientConf; }
public boolean isTrackingMsgRecvCnt() { return trackingMsgRecvCnt; }
public int getMaxNumSessionPerConn() { return this.maxNumSessionPerConn; }
public int getMaxNumConn() { return this.maxNumConn; }
public boolean isStrictMsgErrorHandling() { return this.strictMsgErrorHandling; }
// ---- per-thread transaction batch tracking ----
public int getTxnBatchTrackingCnt() { return txnBatchTrackingCnt.get(); }
public void incTxnBatchTrackingCnt() {
    int curVal = getTxnBatchTrackingCnt();
    txnBatchTrackingCnt.set(curVal + 1);
}
// ---- operation response / null-message counters (used by shutdown draining) ----
public long getTotalOpResponseCnt() { return totalOpResponseCnt.get();}
public long incTotalOpResponseCnt() { return totalOpResponseCnt.incrementAndGet();}
public void resetTotalOpResponseCnt() { totalOpResponseCnt.set(0); }
public long getTotalNullMsgRecvdCnt() { return nullMsgRecvCnt.get();}
public void resetTotalNullMsgRecvdCnt() { nullMsgRecvCnt.set(0); }
public long incTotalNullMsgRecvdCnt() { return nullMsgRecvCnt.incrementAndGet(); }
public PulsarConnectionFactory getS4jConnFactory() { return s4jConnFactory; }
public long getTotalCycleNum() { return totalCycleNum; }
public void setTotalCycleNum(long cycleNum) { totalCycleNum = cycleNum; }
/**
 * Lazily creates the PulsarConnectionFactory and establishes one
 * connection-level JMSContext per configured connection ("num_conn").
 * Idempotent: does nothing once the factory has been created.
 *
 * @param s4JClientConnInfo Pulsar/S4J client connection configuration
 * @throws S4JAdapterUnexpectedException when the factory or any connection
 *         cannot be initialized
 */
public void initializeSpace(S4JClientConf s4JClientConnInfo) {
    if (s4jConnFactory == null) {
        Map<String, Object> cfgMap;
        try {
            cfgMap = s4JClientConnInfo.getS4jConfObjMap();
            s4jConnFactory = new PulsarConnectionFactory(cfgMap);
            for (int i = 0; i < getMaxNumConn(); i++) {
                // Establish a JMS connection; the client ID must be unique per
                // connection, so derive it from the connection-level cache key.
                String connLvlJmsConnContextIdStr = getConnLvlJmsContextIdentifier(i);
                String clientIdStr = Base64.getEncoder().encodeToString(connLvlJmsConnContextIdStr.getBytes());
                JMSContext jmsConnContext = getOrCreateConnLvlJMSContext(s4jConnFactory, s4JClientConnInfo, sessionMode);
                jmsConnContext.setClientID(clientIdStr);
                jmsConnContext.setExceptionListener(e -> {
                    if (logger.isDebugEnabled()) {
                        logger.error("onException::Unexpected JMS error happened:" + e);
                    }
                });
                connLvlJmsContexts.put(connLvlJmsConnContextIdStr, jmsConnContext);
                if (logger.isDebugEnabled()) {
                    logger.debug("[Connection level JMSContext] {} -- {}",
                        Thread.currentThread().getName(),
                        jmsConnContext );
                }
            }
        }
        catch (JMSRuntimeException e) {
            if (logger.isDebugEnabled()) {
                logger.debug("[ERROR] Unable to initialize JMS connection factory with the following configuration parameters: {}", s4JClientConnInfo.toString());
            }
            throw new S4JAdapterUnexpectedException("Unable to initialize JMS connection factory with the following error message: " + e.getCause());
        }
        catch (Exception e) {
            // Previously this branch only printed the stack trace and returned,
            // leaving the space half-initialized (later ops would then fail on
            // missing connection contexts). Log and fail fast instead.
            logger.error("Unexpected error when initializing NB S4J space", e);
            throw new S4JAdapterUnexpectedException(
                "Unexpected error when initializing NB S4J space: " + e.getMessage());
        }
    }
}
/**
 * Tears down all JMS resources owned by this space. First waits (bounded)
 * for in-flight async operations to finish, then rolls back any open
 * transacted sessions, closes all session- and connection-level contexts,
 * and finally closes the connection factory.
 *
 * @throws S4JAdapterUnexpectedException when any step of the teardown fails
 */
public void shutdownSpace() {
    long shutdownStartTimeMills = System.currentTimeMillis();
    try {
        waitUntilAllOpFinished(shutdownStartTimeMills);
        this.txnBatchTrackingCnt.remove();
        for (S4JJMSContextWrapper s4JJMSContextWrapper : sessionLvlJmsContexts.values()) {
            if (s4JJMSContextWrapper != null) {
                // Uncommitted work in a transacted session is rolled back explicitly
                // rather than left to the broker's discretion.
                if (s4JJMSContextWrapper.isTransactedMode()) {
                    s4JJMSContextWrapper.getJmsContext().rollback();
                }
                s4JJMSContextWrapper.close();
            }
        }
        for (JMSContext jmsContext : connLvlJmsContexts.values()) {
            if (jmsContext != null) jmsContext.close();
        }
        s4jConnFactory.close();
    }
    catch (Exception e) {
        // Log through the logger (not printStackTrace) and keep the root-cause
        // message in the rethrown exception so it is not lost to callers.
        logger.error("Unexpected error when shutting down NB S4J space", e);
        throw new S4JAdapterUnexpectedException(
            "Unexpected error when shutting down NB S4J space: " + e.getMessage());
    }
}
// When completing NB execution, don't shut down right away because otherwise, async operation processing may fail.
// Instead, shut down when either one of the following condition is satisfied
// 1) the total number of the received operation response is the same as the total number of operations being executed
//    (only checked when message-count tracking is enabled);
// 2) time has passed for 10 seconds
private void waitUntilAllOpFinished(long shutdownStartTimeMills) {
    // NOTE: this local intentionally snapshots (and shadows) the field of the same name
    long totalCycleNum = getTotalCycleNum();
    long totalResponseCnt = 0;
    long totalNullMsgCnt = 0;
    long timeElapsedMills;
    boolean trackingMsgCnt = isTrackingMsgRecvCnt();
    boolean continueChk;
    do {
        // Poll once per second to avoid busy-waiting
        S4JAdapterUtil.pauseCurThreadExec(1);
        long curTimeMills = System.currentTimeMillis();
        timeElapsedMills = curTimeMills - shutdownStartTimeMills;
        // Hard cap: never wait longer than 10 seconds overall
        continueChk = (timeElapsedMills <= 10000);
        if (trackingMsgCnt) {
            totalResponseCnt = this.getTotalOpResponseCnt();
            totalNullMsgCnt = this.getTotalNullMsgRecvdCnt();
            // Also stop early once every executed operation has a response
            continueChk = continueChk && (totalResponseCnt < totalCycleNum);
        }
        if (logger.isTraceEnabled()) {
            logger.trace(
                buildExecSummaryString(trackingMsgCnt, timeElapsedMills, totalResponseCnt, totalNullMsgCnt));
        }
    } while (continueChk);
    logger.info(
        buildExecSummaryString(trackingMsgCnt, timeElapsedMills, totalResponseCnt, totalNullMsgCnt));
}
/**
 * Renders a one-line progress/summary message for the shutdown wait loop.
 * Message-count details are appended only when count tracking is enabled.
 */
private String buildExecSummaryString(
    boolean trackingMsgCnt,
    long timeElapsedMills,
    long totalResponseCnt,
    long totalNullMsgCnt)
{
    String summary =
        "shutdownSpace::waitUntilAllOpFinished -- "
            + "shutdown time elapsed: " + timeElapsedMills + "ms; ";
    if (trackingMsgCnt) {
        summary += "response received: " + totalResponseCnt + "; ";
        summary += "null msg received: " + totalNullMsgCnt + "; ";
    }
    return summary;
}
/**
 * Probabilistically acknowledges a received message. No-op for session modes
 * that acknowledge implicitly (AUTO_ACKNOWLEDGE and SESSION_TRANSACTED).
 *
 * @param msgAckRatio  probability in [0,1] that this message is acknowledged
 * @param slowAckInSec optional artificial delay (seconds) before the ack
 */
public void processMsgAck(JMSContext jmsContext, Message message, float msgAckRatio, int slowAckInSec) throws JMSException {
    int mode = jmsContext.getSessionMode();
    if ((mode == Session.AUTO_ACKNOWLEDGE) || (mode == Session.SESSION_TRANSACTED)) {
        return;
    }
    if (RandomUtils.nextFloat(0, 1) < msgAckRatio) {
        S4JAdapterUtil.pauseCurThreadExec(slowAckInSec);
        message.acknowledge();
    }
}
/**
 * Builds the cache key identifying a connection-level JMSContext
 * (space name + connection sequence number).
 */
public String getConnLvlJmsContextIdentifier(int jmsConnSeqNum) {
    String connPart = "conn-" + jmsConnSeqNum;
    return S4JAdapterUtil.buildCacheKey(this.spaceName, connPart);
}
/**
 * Builds the cache key identifying a session-level JMSContext
 * (space name + connection sequence number + session sequence number).
 */
public String getSessionLvlJmsContextIdentifier(int jmsConnSeqNum, int jmsSessionSeqNum) {
    String connPart = "conn-" + jmsConnSeqNum;
    String sessionPart = "session-" + jmsSessionSeqNum;
    return S4JAdapterUtil.buildCacheKey(this.spaceName, connPart, sessionPart);
}
// Create JMSContext that represents a new JMS connection
/**
 * Creates a connection-level JMSContext, optionally authenticating with the
 * credentials found in the S4J client configuration.
 *
 * @throws S4JAdapterInvalidParamException when
 *         'jms.useCredentialsFromCreateConnection' is enabled without authN
 *         parameters, or when the password is not in "token:..." format
 */
public JMSContext getOrCreateConnLvlJMSContext(
    PulsarConnectionFactory s4jConnFactory,
    S4JClientConf s4JClientConf,
    int sessionMode)
{
    if ( !S4JAdapterUtil.isAuthNRequired(s4JClientConf) &&
        S4JAdapterUtil.isUseCredentialsEnabled(s4JClientConf) ) {
        throw new S4JAdapterInvalidParamException(
            "'jms.useCredentialsFromCreateConnection' can't be set to true " +
                "when Pulsar client authN parameters are not set. "
        );
    }
    boolean useCredentialsEnable =
        S4JAdapterUtil.isAuthNRequired(s4JClientConf) &&
            S4JAdapterUtil.isUseCredentialsEnabled(s4JClientConf);
    JMSContext jmsConnContext;
    if (!useCredentialsEnable)
        jmsConnContext = s4jConnFactory.createContext(sessionMode);
    else {
        String userName = S4JAdapterUtil.getCredentialUserName(s4JClientConf);
        String passWord = S4JAdapterUtil.getCredentialPassword(s4JClientConf);
        // Password must be in "token:<token value>" format
        if (! StringUtils.startsWith(passWord, "token:")) {
            throw new S4JAdapterInvalidParamException(
                "When 'jms.useCredentialsFromCreateConnection' is enabled, " +
                    "the provided password must be in format 'token:<token_value_...>'");
        }
        jmsConnContext = s4jConnFactory.createContext(userName, passWord, sessionMode);
    }
    return jmsConnContext;
}
// Convenience overload: no per-statement (op-level) S4J config overrides.
public S4JJMSContextWrapper getOrCreateS4jJmsContextWrapper(long curCycle) {
    return getOrCreateS4jJmsContextWrapper(curCycle, null);
}
// Get the next JMSContext Wrapper in the following approach
// - The JMSContext wrapper pool has the following sequence (assuming 3 [c]onnections and 2 [s]essions per connection):
//   c0s0, c0s1, c1s0, c1s1, c2s0, c2s1
// - When getting the next JMSContext wrapper, always get from the next connection, starting from the first session
//   When reaching the end of connection, move back to the first connection, but get the next session.
//   e.g. first: c0s0 (0)
//        next:  c1s0 (1)
//        next:  c2s0 (2)
//        next:  c0s1 (3)
//        next:  c1s1 (4)
//        next:  c2s1 (5)
//        next:  c0s0 (6)  <-- repeat the pattern
//        next:  c1s0 (7)
//        next:  c2s0 (8)
//        next:  c0s1 (9)
//        ... ...
public S4JJMSContextWrapper getOrCreateS4jJmsContextWrapper(
    long curCycle,
    Map<String, Object> overrideS4jConfMap)
{
    int totalConnNum = getMaxNumConn();
    int totalSessionPerConnNum = getMaxNumSessionPerConn();
    // Do the modulo arithmetic in long BEFORE narrowing to int. The previous
    // form "(int) curCycle % totalConnNum" truncated the cycle first, which
    // yields negative sequence numbers once curCycle exceeds Integer.MAX_VALUE.
    int connSeqNum = (int) (curCycle % totalConnNum);
    int sessionSeqNum = (int) ((curCycle / totalConnNum) % totalSessionPerConnNum);
    String jmsConnContextIdStr = getConnLvlJmsContextIdentifier(connSeqNum);
    JMSContext connLvlJmsContext = connLvlJmsContexts.get(jmsConnContextIdStr);
    // Connection level JMSContext objects should be already created during the initialization phase
    assert (connLvlJmsContext != null);
    String jmsSessionContextIdStr = getSessionLvlJmsContextIdentifier(connSeqNum, sessionSeqNum);
    S4JJMSContextWrapper jmsContextWrapper = sessionLvlJmsContexts.get(jmsSessionContextIdStr);
    if (jmsContextWrapper == null) {
        // NOTE(review): check-then-put is not atomic; two threads racing on the
        // same key could each create a session context (last put wins). Benign
        // today but worth confirming if strict one-session-per-key matters.
        JMSContext jmsContext = null;
        if (overrideS4jConfMap == null || overrideS4jConfMap.isEmpty()) {
            jmsContext = connLvlJmsContext.createContext(connLvlJmsContext.getSessionMode());
        } else {
            jmsContext = ((PulsarJMSContext) connLvlJmsContext).createContext(
                connLvlJmsContext.getSessionMode(), overrideS4jConfMap);
        }
        jmsContextWrapper = new S4JJMSContextWrapper(jmsSessionContextIdStr, jmsContext);
        sessionLvlJmsContexts.put(jmsSessionContextIdStr, jmsContextWrapper);
        if (logger.isDebugEnabled()) {
            logger.debug("[Session level JMSContext] {} -- {}",
                Thread.currentThread().getName(),
                jmsContextWrapper);
        }
    }
    return jmsContextWrapper;
}
}

View File

@@ -0,0 +1,173 @@
/*
* Copyright (c) 2022 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.nosqlbench.adapter.s4j.dispensers;
import io.nosqlbench.adapter.s4j.S4JSpace;
import io.nosqlbench.adapter.s4j.ops.MessageConsumerOp;
import io.nosqlbench.adapter.s4j.util.S4JAdapterUtil;
import io.nosqlbench.adapter.s4j.util.S4JJMSContextWrapper;
import io.nosqlbench.engine.api.activityimpl.uniform.DriverAdapter;
import io.nosqlbench.engine.api.templating.ParsedOp;
import org.apache.commons.lang3.StringUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import javax.jms.*;
import java.util.HashMap;
import java.util.Map;
import java.util.function.LongFunction;
/**
 * Op dispenser for JMS message consumption. Resolves per-cycle settings
 * (destination, subscription, selector, ack behavior) and produces a
 * {@link MessageConsumerOp} ready for execution.
 */
public class MessageConsumerOpDispenser extends S4JBaseOpDispenser {
    private final static Logger logger = LogManager.getLogger("MessageConsumerOpDispenser");
    // Doc-level parameter: blocking_msg_recv (default: false)
    protected final boolean blockingMsgRecv;
    // Doc-level parameter: shared_topic (default: false)
    // - only applicable to Topic as the destination type
    protected final boolean sharedTopic;
    // Doc-level parameter: durable_topic (default: false)
    // - only applicable to Topic as the destination type
    protected final boolean durableTopic;
    // default value: false
    private final boolean noLocal;
    // default value: 0
    // value <= 0 : no timeout
    private final int readTimeout;
    // default value: false
    private final boolean recvNoWait;
    // default value: 1.0 (all received messages are acknowledged)
    // value must be between 0 and 1 (inclusive)
    private final float msgAckRatio;
    // default value: 0
    // value <= 0 : no slow message ack
    private final int slowAckInSec;
    private final LongFunction<String> subNameStrFunc;
    private final LongFunction<String> localMsgSelectorFunc;
    // Generally the consumer related configurations can be set in the global "config.properties" file,
    // which can be applied to many testing scenarios.
    // Setting them here will allow scenario-specific customer configurations. At the moment, only the
    // DLT related settings are supported
    private final Map<String, Object> combinedS4jConfigObjMap = new HashMap<>();
    public MessageConsumerOpDispenser(DriverAdapter adapter,
                                      ParsedOp op,
                                      LongFunction<String> tgtNameFunc,
                                      S4JSpace s4jSpace) {
        super(adapter, op, tgtNameFunc, s4jSpace);
        this.blockingMsgRecv =
            parsedOp.getStaticConfigOr(S4JAdapterUtil.DOC_LEVEL_PARAMS.BLOCKING_MSG_RECV.label, Boolean.FALSE);
        this.sharedTopic =
            parsedOp.getStaticConfigOr(S4JAdapterUtil.DOC_LEVEL_PARAMS.SHARED_TOPIC.label, Boolean.FALSE);
        this.durableTopic =
            parsedOp.getStaticConfigOr(S4JAdapterUtil.DOC_LEVEL_PARAMS.DURABLE_TOPIC.label, Boolean.FALSE);
        this.noLocal =
            parsedOp.getStaticConfigOr("no_local", Boolean.FALSE);
        this.readTimeout =
            parsedOp.getStaticConfigOr("read_timeout", Integer.valueOf(0));
        this.recvNoWait =
            parsedOp.getStaticConfigOr("no_wait", Boolean.FALSE);
        this.msgAckRatio =
            parsedOp.getStaticConfigOr("msg_ack_ratio", Float.valueOf(1.0f));
        this.slowAckInSec =
            parsedOp.getStaticConfigOr("slow_ack_in_sec", Integer.valueOf(0));
        this.localMsgSelectorFunc =
            lookupOptionalStrOpValueFunc("msg_selector");
        // Subscription name is OPTIONAL for queue and non-shared, non-durable topic;
        // but mandatory for shared or durable topic
        if ( StringUtils.equalsIgnoreCase(destType, S4JAdapterUtil.JMS_DEST_TYPES.QUEUE.label) ||
            ( StringUtils.equalsIgnoreCase(destType, S4JAdapterUtil.JMS_DEST_TYPES.TOPIC.label) &&
                !durableTopic && !sharedTopic) ) {
            this.subNameStrFunc =
                lookupOptionalStrOpValueFunc("subscription_name");
        }
        else {
            this.subNameStrFunc =
                lookupMandtoryStrOpValueFunc("subscription_name");
        }
        // Statement-level consumer overrides (currently only DLT/redelivery settings)
        // are merged on top of the global consumer configuration.
        String[] stmtLvlConsumerConfKeyNameList = {
            "consumer.ackTimeoutMillis",
            "consumer.deadLetterPolicy",
            "consumer.negativeAckRedeliveryBackoff",
            "consumer.ackTimeoutRedeliveryBackoff"};
        HashMap<String, String> stmtLvlConsumerConfRawMap = new HashMap<>();
        for (String confKey : stmtLvlConsumerConfKeyNameList ) {
            String confVal = parsedOp.getStaticConfigOr(confKey, "");
            stmtLvlConsumerConfRawMap.put(
                StringUtils.substringAfter(confKey, "consumer."),
                confVal);
        }
        this.combinedS4jConfigObjMap.putAll(
            s4jSpace.getS4JClientConf().mergeExtraConsumerConfig(stmtLvlConsumerConfRawMap));
    }
    /**
     * Resolves the destination and consumer for this cycle and assembles the
     * consume op. Rethrows creation failures with the underlying cause attached.
     */
    @Override
    public MessageConsumerOp apply(long cycle) {
        S4JJMSContextWrapper s4JJMSContextWrapper =
            s4jSpace.getOrCreateS4jJmsContextWrapper(cycle, this.combinedS4jConfigObjMap);
        JMSContext jmsContext = s4JJMSContextWrapper.getJmsContext();
        boolean commitTransact = !super.commitTransaction(txnBatchNum, jmsContext.getSessionMode(), cycle);
        Destination destination;
        try {
            destination = getOrCreateJmsDestination(
                s4JJMSContextWrapper, temporaryDest, destType, destNameStrFunc.apply(cycle));
        }
        catch (JMSRuntimeException jmsRuntimeException) {
            // Preserve the original exception as the cause instead of dropping it
            throw new RuntimeException("Unable to create the JMS destination!", jmsRuntimeException);
        }
        JMSConsumer jmsConsumer;
        try {
            jmsConsumer = getOrCreateJmsConsumer(
                s4JJMSContextWrapper,
                destination,
                destType,
                subNameStrFunc.apply(cycle),
                localMsgSelectorFunc.apply(cycle),
                msgAckRatio,
                noLocal,
                durableTopic,
                sharedTopic,
                asyncAPI,
                slowAckInSec);
        }
        catch (JMSException jmsException) {
            // Preserve the original exception as the cause instead of dropping it
            throw new RuntimeException("Unable to create the JMS consumer!", jmsException);
        }
        return new MessageConsumerOp(
            s4jAdapterMetrics,
            s4jSpace,
            jmsContext,
            destination,
            asyncAPI,
            commitTransact,
            jmsConsumer,
            blockingMsgRecv,
            msgAckRatio,
            readTimeout,
            recvNoWait,
            slowAckInSec);
    }
}

View File

@@ -0,0 +1,358 @@
/*
* Copyright (c) 2022 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.nosqlbench.adapter.s4j.dispensers;
import io.nosqlbench.adapter.s4j.S4JSpace;
import io.nosqlbench.adapter.s4j.exception.S4JAdapterInvalidParamException;
import io.nosqlbench.adapter.s4j.exception.S4JAdapterUnexpectedException;
import io.nosqlbench.adapter.s4j.ops.MessageProducerOp;
import io.nosqlbench.adapter.s4j.util.S4JAdapterUtil;
import io.nosqlbench.adapter.s4j.util.S4JJMSContextWrapper;
import io.nosqlbench.engine.api.activityimpl.uniform.DriverAdapter;
import io.nosqlbench.engine.api.templating.ParsedOp;
import org.apache.commons.lang3.BooleanUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.math.NumberUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import javax.jms.*;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.LongFunction;
/**
 * Op dispenser for JMS message production. Builds the per-cycle JMS message
 * (payload by message type, standard headers, custom properties) and
 * assembles a {@link MessageProducerOp} ready for execution.
 */
public class MessageProducerOpDispenser extends S4JBaseOpDispenser {
    private final static Logger logger = LogManager.getLogger("MessageProducerOpDispenser");
    public static final String MSG_HEADER_OP_PARAM = "msg_header";
    public static final String MSG_PROP_OP_PARAM = "msg_property";
    public static final String MSG_BODY_OP_PARAM = "msg_body";
    public static final String MSG_TYPE_OP_PARAM = "msg_type";
    private final LongFunction<String> msgHeaderRawJsonStrFunc;
    private final LongFunction<String> msgPropRawJsonStrFunc;
    private final LongFunction<String> msgBodyRawJsonStrFunc;
    private final LongFunction<String> msgTypeFunc;
    public MessageProducerOpDispenser(DriverAdapter adapter,
                                      ParsedOp op,
                                      LongFunction<String> tgtNameFunc,
                                      S4JSpace s4jSpace) {
        super(adapter, op, tgtNameFunc, s4jSpace);
        this.msgHeaderRawJsonStrFunc = lookupOptionalStrOpValueFunc(MSG_HEADER_OP_PARAM);
        this.msgPropRawJsonStrFunc = lookupOptionalStrOpValueFunc(MSG_PROP_OP_PARAM);
        this.msgBodyRawJsonStrFunc = lookupMandtoryStrOpValueFunc(MSG_BODY_OP_PARAM);
        this.msgTypeFunc = lookupMandtoryStrOpValueFunc(MSG_TYPE_OP_PARAM);
    }
    /**
     * Creates the JMS message of the requested type and sets its payload from
     * the raw JSON body string. The accumulated payload size is stored on the
     * message under the NB_MSG_SIZE_PROP property for downstream metrics.
     */
    private Message createAndSetMessagePayload(
        S4JJMSContextWrapper s4JJMSContextWrapper,
        String msgType, String msgBodyRawJsonStr) throws JMSException
    {
        Message message;
        int messageSize = 0;
        JMSContext jmsContext = s4JJMSContextWrapper.getJmsContext();
        if (StringUtils.equalsIgnoreCase(msgType, S4JAdapterUtil.JMS_MESSAGE_TYPES.TEXT.label)) {
            message = jmsContext.createTextMessage();
            ((TextMessage) message).setText(msgBodyRawJsonStr);
            messageSize = msgBodyRawJsonStr.length();
        } else if (StringUtils.equalsIgnoreCase(msgType, S4JAdapterUtil.JMS_MESSAGE_TYPES.MAP.label)) {
            message = jmsContext.createMapMessage();
            // The message body json string must be in the format of a collection of key/value pairs
            // Otherwise, it is an error
            Map<String, String> jmsMsgBodyMap;
            try {
                jmsMsgBodyMap = S4JAdapterUtil.convertJsonToMap(msgBodyRawJsonStr);
            } catch (Exception e) {
                throw new RuntimeException("The specified message payload can't be converted to a map when requiring a 'Map' message type!", e);
            }
            for (String key : jmsMsgBodyMap.keySet()) {
                String value = jmsMsgBodyMap.get(key);
                ((MapMessage)message).setString(key, value);
                messageSize += key.length();
                messageSize += value.length();
            }
        } else if (StringUtils.equalsIgnoreCase(msgType, S4JAdapterUtil.JMS_MESSAGE_TYPES.STREAM.label)) {
            message = jmsContext.createStreamMessage();
            // The message body json string must be in the format of a list of objects
            // Otherwise, it is an error
            List<Object> jmsMsgBodyObjList;
            try {
                jmsMsgBodyObjList = S4JAdapterUtil.convertJsonToObjList(msgBodyRawJsonStr);
            } catch (Exception e) {
                throw new RuntimeException("The specified message payload can't be converted to a list of Objects when requiring a 'Stream' message type!", e);
            }
            for (Object obj : jmsMsgBodyObjList) {
                ((StreamMessage)message).writeObject(obj);
                // Use String.valueOf(): JSON list elements may be numbers or
                // booleans, and a blind (String) cast would throw CCE here.
                messageSize += String.valueOf(obj).length();
            }
        } else if (StringUtils.equalsIgnoreCase(msgType, S4JAdapterUtil.JMS_MESSAGE_TYPES.OBJECT.label)) {
            message = jmsContext.createObjectMessage();
            ((ObjectMessage) message).setObject(msgBodyRawJsonStr);
            messageSize += msgBodyRawJsonStr.getBytes().length;
        }
        // default: BYTE message type
        else {
            message = jmsContext.createBytesMessage();
            byte[] msgBytePayload = msgBodyRawJsonStr.getBytes();
            ((BytesMessage)message).writeBytes(msgBytePayload);
            messageSize += msgBytePayload.length;
        }
        message.setStringProperty(S4JAdapterUtil.NB_MSG_SIZE_PROP, String.valueOf(messageSize));
        return message;
    }
    /**
     * Applies standard JMS headers parsed from the raw header JSON string.
     * Unknown or malformed headers are logged and skipped (no exception).
     */
    private Message updateMessageHeaders(S4JJMSContextWrapper s4JJMSContextWrapper, Message message, String msgType, String msgHeaderRawJsonStr) throws JMSException {
        int messageSize = Integer.parseInt(message.getStringProperty(S4JAdapterUtil.NB_MSG_SIZE_PROP));
        // Check if msgHeaderRawJsonStr is a valid JSON string with a collection of key/value pairs
        // - if Yes, convert it to a map
        // - otherwise, log an error message and ignore message headers without throwing a runtime exception
        Map<String, String> jmsMsgHeaders = new HashMap<>();
        if (!StringUtils.isBlank(msgHeaderRawJsonStr)) {
            try {
                jmsMsgHeaders = S4JAdapterUtil.convertJsonToMap(msgHeaderRawJsonStr);
            } catch (Exception e) {
                logger.warn(
                    "Error parsing message header JSON string {}, ignore message headers!",
                    msgHeaderRawJsonStr);
            }
        }
        // make sure the actual message type is used
        jmsMsgHeaders.put(S4JAdapterUtil.JMS_MSG_HEADER_STD.JMSType.label, msgType);
        Message outMessage = message;
        for (String msgHeaderKey:jmsMsgHeaders.keySet()) {
            // Ignore non-standard message headers
            if (S4JAdapterUtil.isValidStdJmsMsgHeader(msgHeaderKey)) {
                String value = jmsMsgHeaders.get(msgHeaderKey);
                messageSize += msgHeaderKey.length();
                if (value != null) {
                    messageSize += value.length();
                }
                try {
                    if (StringUtils.equalsIgnoreCase(msgHeaderKey, S4JAdapterUtil.JMS_MSG_HEADER_STD.JMSType.label)) {
                        outMessage.setJMSType(msgType);
                    } else if (StringUtils.equalsIgnoreCase(msgHeaderKey, S4JAdapterUtil.JMS_MSG_HEADER_STD.JMSPriority.label)) {
                        if (value != null) outMessage.setJMSPriority(Integer.parseInt(value));
                    } else if (StringUtils.equalsIgnoreCase(msgHeaderKey, S4JAdapterUtil.JMS_MSG_HEADER_STD.JMSDeliveryMode.label)) {
                        if (value != null) outMessage.setJMSDeliveryMode(Integer.parseInt(value));
                    } else if (StringUtils.equalsIgnoreCase(msgHeaderKey, S4JAdapterUtil.JMS_MSG_HEADER_STD.JMSExpiration.label)) {
                        // TODO: convert from a Date/Time string to the required long value
                        if (value != null) outMessage.setJMSExpiration(Long.parseLong(value));
                    } else if (StringUtils.equalsIgnoreCase(msgHeaderKey, S4JAdapterUtil.JMS_MSG_HEADER_STD.JMSCorrelationID.label)) {
                        if (value != null) outMessage.setJMSCorrelationID(value);
                    } else if (StringUtils.equalsIgnoreCase(msgHeaderKey, S4JAdapterUtil.JMS_MSG_HEADER_STD.JMSReplyTo.label)) {
                        // 'JMSReplyTo' value format: "[topic|queue]:<destination_name>"
                        if (value != null) {
                            String destType = StringUtils.substringBefore(value, ':');
                            String destName = StringUtils.substringAfter(value, ':');
                            outMessage.setJMSReplyTo(getOrCreateJmsDestination(s4JJMSContextWrapper,false, destType, destName));
                        }
                    }
                    // Ignore these headers - handled by S4J API automatically
                    /* else if (StringUtils.equalsAnyIgnoreCase(msgHeaderKey,
                        S4JAdapterUtil.JMS_MSG_HEADER_STD.JMSDestination.label,
                        S4JAdapterUtil.JMS_MSG_HEADER_STD.JMSMessageID.label,
                        S4JAdapterUtil.JMS_MSG_HEADER_STD.JMSTimestamp.label,
                        S4JAdapterUtil.JMS_MSG_HEADER_STD.JMSRedelivered.label
                    )) {
                    }*/
                } catch (NumberFormatException nfe) {
                    logger.warn("Incorrect value format ('{}') for the message header field ('{}')!",
                        value, msgHeaderKey);
                }
            }
        }
        outMessage.setStringProperty(S4JAdapterUtil.NB_MSG_SIZE_PROP, String.valueOf(messageSize));
        return outMessage;
    }
    /**
     * Applies custom message properties parsed from the raw property JSON
     * string. A property key may carry a value-type suffix in parentheses,
     * e.g. "key(int)"; with no suffix, "string" is assumed.
     */
    private Message updateMessageProperties(Message message, String msgPropertyRawJsonStr) throws JMSException {
        int messageSize = Integer.parseInt(message.getStringProperty(S4JAdapterUtil.NB_MSG_SIZE_PROP));
        // Check if jmsMsgPropertyRawJsonStr is a valid JSON string with a collection of key/value pairs
        // - if Yes, convert it to a map
        // - otherwise, log an error message and ignore message headers without throwing a runtime exception
        Map<String, String> jmsMsgProperties = new HashMap<>();
        if (!StringUtils.isBlank(msgPropertyRawJsonStr)) {
            try {
                jmsMsgProperties = S4JAdapterUtil.convertJsonToMap(msgPropertyRawJsonStr);
            } catch (Exception e) {
                logger.warn(
                    "Error parsing message property JSON string {}, ignore message properties!",
                    msgPropertyRawJsonStr);
            }
        }
        // Each key in the property json file may include value type information, such as:
        // - key(string): value
        // The above format specifies a message property that has "key" as the property key
        // and "value" as the property value; and the type of the property value is "string"
        //
        // If the value type is not specified, use "string" as the default value type.
        for (Map.Entry<String, String> entry : jmsMsgProperties.entrySet()) {
            String rawKeyStr = entry.getKey();
            String value = entry.getValue();
            if (! StringUtils.isAnyBlank(rawKeyStr, value)) {
                String key = rawKeyStr;
                String valueType = S4JAdapterUtil.JMS_MSG_PROP_TYPES.STRING.label;
                if (StringUtils.contains(rawKeyStr, '(')) {
                    key = StringUtils.substringBefore(rawKeyStr, "(").trim();
                    valueType = StringUtils.substringAfter(rawKeyStr, "(");
                    valueType = StringUtils.substringBefore(valueType, ")").trim();
                }
                if (StringUtils.isBlank(valueType)) {
                    // Use the parsed key here: the previous code set the property
                    // under the raw key (including the empty parentheses, e.g. "key()").
                    message.setStringProperty(key, value);
                }
                else {
                    if (StringUtils.equalsIgnoreCase(valueType, S4JAdapterUtil.JMS_MSG_PROP_TYPES.SHORT.label))
                        message.setShortProperty(key, NumberUtils.toShort(value));
                    else if (StringUtils.equalsIgnoreCase(valueType, S4JAdapterUtil.JMS_MSG_PROP_TYPES.INT.label))
                        message.setIntProperty(key, NumberUtils.toInt(value));
                    else if (StringUtils.equalsIgnoreCase(valueType, S4JAdapterUtil.JMS_MSG_PROP_TYPES.LONG.label))
                        message.setLongProperty(key, NumberUtils.toLong(value));
                    else if (StringUtils.equalsIgnoreCase(valueType, S4JAdapterUtil.JMS_MSG_PROP_TYPES.FLOAT.label))
                        message.setFloatProperty(key, NumberUtils.toFloat(value));
                    else if (StringUtils.equalsIgnoreCase(valueType, S4JAdapterUtil.JMS_MSG_PROP_TYPES.DOUBLE.label))
                        message.setDoubleProperty(key, NumberUtils.toDouble(value));
                    else if (StringUtils.equalsIgnoreCase(valueType, S4JAdapterUtil.JMS_MSG_PROP_TYPES.BOOLEAN.label))
                        message.setBooleanProperty(key, BooleanUtils.toBoolean(value));
                    else if (StringUtils.equalsIgnoreCase(valueType, S4JAdapterUtil.JMS_MSG_PROP_TYPES.STRING.label))
                        message.setStringProperty(key, value);
                    else if (StringUtils.equalsIgnoreCase(valueType, S4JAdapterUtil.JMS_MSG_PROP_TYPES.BYTE.label))
                        message.setByteProperty(key, NumberUtils.toByte(value));
                    else
                        throw new S4JAdapterInvalidParamException(
                            "Unsupported JMS message property value type (\"" + valueType + "\"). " +
                                "Value types are: \"" + S4JAdapterUtil.getValidJmsMsgPropTypeList() + "\"");
                }
                messageSize += key.length();
                messageSize += value.length();
            }
        }
        message.setStringProperty(S4JAdapterUtil.NB_MSG_SIZE_PROP, String.valueOf(messageSize));
        return message;
    }
    /**
     * Builds the per-cycle produce op: resolves destination and producer,
     * creates the message payload, applies headers and properties.
     */
    @Override
    public MessageProducerOp apply(long cycle) {
        String destName = destNameStrFunc.apply(cycle);
        String jmsMsgHeaderRawJsonStr = msgHeaderRawJsonStrFunc.apply(cycle);
        String jmsMsgPropertyRawJsonStr = msgPropRawJsonStrFunc.apply(cycle);
        String jmsMsgBodyRawJsonStr = msgBodyRawJsonStrFunc.apply(cycle);
        if (StringUtils.isBlank(jmsMsgBodyRawJsonStr)) {
            throw new S4JAdapterInvalidParamException("Message payload must be specified and can't be empty!");
        }
        S4JJMSContextWrapper s4JJMSContextWrapper = s4jSpace.getOrCreateS4jJmsContextWrapper(cycle);
        JMSContext jmsContext = s4JJMSContextWrapper.getJmsContext();
        boolean commitTransaction = !super.commitTransaction(txnBatchNum, jmsContext.getSessionMode(), cycle);
        Destination destination;
        try {
            destination = getOrCreateJmsDestination(s4JJMSContextWrapper, temporaryDest, destType, destName);
        }
        catch (JMSRuntimeException jmsRuntimeException) {
            throw new S4JAdapterUnexpectedException("Unable to create the JMS destination!");
        }
        JMSProducer producer;
        try {
            producer = getOrCreateJmsProducer(s4JJMSContextWrapper, asyncAPI);
        }
        catch (JMSException jmsException) {
            throw new S4JAdapterUnexpectedException("Unable to create the JMS producer!");
        }
        // Get the right JMS message type
        String jmsMsgType = msgTypeFunc.apply(cycle);
        if (! S4JAdapterUtil.isValidJmsMessageType(jmsMsgType) ) {
            logger.warn(
                "The specified JMS message type {} is not valid, use the default TextMessage type!",
                jmsMsgType);
            jmsMsgType = S4JAdapterUtil.JMS_MESSAGE_TYPES.TEXT.label;
        }
        /////////////
        // Set proper message payload based on the message type and the specified input
        // -----------------------
        //
        Message message;
        try {
            message = createAndSetMessagePayload(s4JJMSContextWrapper, jmsMsgType, jmsMsgBodyRawJsonStr);
        }
        catch (JMSException jmsException) {
            // Fixed wording ("set create") and preserve the cause
            throw new RuntimeException("Failed to create a JMS message and set its payload!", jmsException);
        }
        /////////////
        // Set standard message headers
        // -----------------------
        //
        try {
            message = updateMessageHeaders(s4JJMSContextWrapper, message, jmsMsgType, jmsMsgHeaderRawJsonStr);
        }
        catch (JMSException jmsException) {
            // Previous message was a copy-paste of the payload-failure text
            throw new S4JAdapterUnexpectedException("Failed to set the JMS message headers!");
        }
        /////////////
        // Set defined JMS message properties and other custom properties
        // -----------------------
        //
        try {
            message = updateMessageProperties(message, jmsMsgPropertyRawJsonStr);
            // for testing purpose
            message.setLongProperty(S4JAdapterUtil.NB_MSG_SEQ_PROP, cycle);
        }
        catch (JMSException jmsException) {
            throw new S4JAdapterUnexpectedException("Failed to set JMS message properties!");
        }
        return new MessageProducerOp(
            s4jAdapterMetrics,
            s4jSpace,
            jmsContext,
            destination,
            asyncAPI,
            commitTransaction,
            producer,
            message);
    }
}

View File

@@ -0,0 +1,333 @@
package io.nosqlbench.adapter.s4j.dispensers;
/*
* Copyright (c) 2022 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import io.nosqlbench.adapter.s4j.S4JSpace;
import io.nosqlbench.adapter.s4j.ops.S4JOp;
import io.nosqlbench.adapter.s4j.util.*;
import io.nosqlbench.engine.api.activityimpl.BaseOpDispenser;
import io.nosqlbench.engine.api.activityimpl.uniform.DriverAdapter;
import io.nosqlbench.engine.api.templating.ParsedOp;
import org.apache.commons.lang3.BooleanUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.math.NumberUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import javax.jms.*;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.LongFunction;
import java.util.function.Predicate;
import java.util.stream.Collectors;
public abstract class S4JBaseOpDispenser extends BaseOpDispenser<S4JOp, S4JSpace> {

    // BUG FIX: the logger was previously created with the literal name
    // "PulsarBaseOpDispenser" — a copy/paste remnant from the Pulsar adapter —
    // which mis-attributed every log event from this class. Use the class itself.
    private final static Logger logger = LogManager.getLogger(S4JBaseOpDispenser.class);

    protected final ParsedOp parsedOp;
    protected final S4JSpace s4jSpace;
    protected final S4JAdapterMetrics s4jAdapterMetrics;

    // Connection- and session-level JMS contexts, snapshotted from the space
    // at construction time (see constructor).
    private final ConcurrentHashMap<String, JMSContext> connLvlJmsContexts = new ConcurrentHashMap<>();
    private final ConcurrentHashMap<String, S4JJMSContextWrapper> sessionLvlJmsContexts = new ConcurrentHashMap<>();

    // Caches of JMS objects, keyed via S4JAdapterUtil.buildCacheKey(...).
    protected final ConcurrentHashMap<String, Destination> jmsDestinations = new ConcurrentHashMap<>();
    protected final ConcurrentHashMap<String, JMSProducer> jmsProducers = new ConcurrentHashMap<>();
    protected final ConcurrentHashMap<String, JMSConsumer> jmsConsumers = new ConcurrentHashMap<>();

    // Doc-level parameter: temporary_dest (default: false)
    protected final boolean temporaryDest;
    // Doc-level parameter: dest_type (default: Topic)
    protected final String destType;
    // Doc-level parameter: async_api (default: true)
    protected final boolean asyncAPI;
    // Doc-level parameter: txn_batch_num (default: 0)
    // - value <=0 : no transaction
    protected final int txnBatchNum;

    // Cycle-indexed function producing the JMS destination name for each op.
    protected final LongFunction<String> destNameStrFunc;
    protected final int totalThreadNum;
    protected final long totalCycleNum;

    /**
     * Base dispenser shared by the S4J producer/consumer op dispensers.
     *
     * @param adapter         the owning driver adapter
     * @param op              the parsed op template
     * @param destNameStrFunc cycle-indexed function yielding the destination name
     * @param s4jSpace        shared activity state (contexts, counters, settings)
     */
    public S4JBaseOpDispenser(DriverAdapter adapter,
                              ParsedOp op,
                              LongFunction<String> destNameStrFunc,
                              S4JSpace s4jSpace) {
        super(adapter, op);
        this.parsedOp = op;
        this.s4jSpace = s4jSpace;
        this.connLvlJmsContexts.putAll(s4jSpace.getConnLvlJmsContexts());
        this.sessionLvlJmsContexts.putAll(s4jSpace.getSessionLvlJmsContexts());
        String defaultMetricsPrefix = getDefaultMetricsPrefix(this.parsedOp);
        this.s4jAdapterMetrics = new S4JAdapterMetrics(defaultMetricsPrefix);
        s4jAdapterMetrics.initS4JAdapterInstrumentation();
        this.destNameStrFunc = destNameStrFunc;
        this.temporaryDest =
            parsedOp.getStaticConfigOr(S4JAdapterUtil.DOC_LEVEL_PARAMS.TEMP_DEST.label, Boolean.FALSE);
        this.destType =
            parsedOp.getStaticConfig(S4JAdapterUtil.DOC_LEVEL_PARAMS.DEST_TYPE.label, String.class);
        this.asyncAPI =
            parsedOp.getStaticConfigOr(S4JAdapterUtil.DOC_LEVEL_PARAMS.ASYNC_API.label, Boolean.TRUE);
        this.txnBatchNum =
            parsedOp.getStaticConfigOr(S4JAdapterUtil.DOC_LEVEL_PARAMS.TXN_BATCH_NUM.label, Integer.valueOf(0));
        this.totalThreadNum = NumberUtils.toInt(parsedOp.getStaticConfig("threads", String.class));
        this.totalCycleNum = NumberUtils.toLong(parsedOp.getStaticConfig("cycles", String.class));
        s4jSpace.setTotalCycleNum(totalCycleNum);
    }

    public S4JSpace getS4jSpace() { return s4jSpace; }
    public S4JAdapterMetrics getS4jAdapterMetrics() { return s4jAdapterMetrics; }

    /**
     * Resolve a doc/op-level boolean config as a constant-valued function,
     * falling back to {@code defaultValue} when absent or empty.
     */
    protected LongFunction<Boolean> lookupStaticBoolConfigValueFunc(String paramName, boolean defaultValue) {
        LongFunction<Boolean> booleanLongFunction;
        booleanLongFunction = (l) -> parsedOp.getOptionalStaticConfig(paramName, String.class)
            .filter(Predicate.not(String::isEmpty))
            .map(value -> BooleanUtils.toBoolean(value))
            .orElse(defaultValue);
        logger.info("{}: {}", paramName, booleanLongFunction.apply(0));
        return booleanLongFunction;
    }

    /**
     * Resolve an op-level comma-separated string value as a set of trimmed,
     * non-empty tokens. Note: a value WITHOUT a comma yields an empty set
     * (only comma-containing values are split).
     */
    protected LongFunction<Set<String>> lookupStaticStrSetOpValueFunc(String paramName) {
        LongFunction<Set<String>> setStringLongFunction;
        setStringLongFunction = (l) -> parsedOp.getOptionalStaticValue(paramName, String.class)
            .filter(Predicate.not(String::isEmpty))
            .map(value -> {
                Set<String > set = new HashSet<>();
                if (StringUtils.contains(value,',')) {
                    set = Arrays.stream(value.split(","))
                        .map(String::trim)
                        .filter(Predicate.not(String::isEmpty))
                        .collect(Collectors.toCollection(LinkedHashSet::new));
                }
                return set;
            }).orElse(Collections.emptySet());
        logger.info("{}: {}", paramName, setStringLongFunction.apply(0));
        return setStringLongFunction;
    }

    // If the corresponding Op parameter is not provided, use the specified default value.
    // Negative values are clamped to 0.
    protected LongFunction<Integer> lookupStaticIntOpValueFunc(String paramName, int defaultValue) {
        LongFunction<Integer> integerLongFunction;
        integerLongFunction = (l) -> parsedOp.getOptionalStaticValue(paramName, String.class)
            .filter(Predicate.not(String::isEmpty))
            .map(value -> NumberUtils.toInt(value))
            .map(value -> {
                if (value < 0) return 0;
                else return value;
            }).orElse(defaultValue);
        logger.info("{}: {}", paramName, integerLongFunction.apply(0));
        return integerLongFunction;
    }

    // If the corresponding Op parameter is not provided, use the specified default value
    protected LongFunction<String> lookupOptionalStrOpValueFunc(String paramName, String defaultValue) {
        LongFunction<String> stringLongFunction;
        stringLongFunction = parsedOp.getAsOptionalFunction(paramName, String.class)
            .orElse((l) -> defaultValue);
        logger.info("{}: {}", paramName, stringLongFunction.apply(0));
        return stringLongFunction;
    }

    /** Convenience overload defaulting the missing value to the empty string. */
    protected LongFunction<String> lookupOptionalStrOpValueFunc(String paramName) {
        return lookupOptionalStrOpValueFunc(paramName, "");
    }

    // Mandatory Op parameter. Throw an error if not specified or having empty value.
    // NOTE(review): "Mandtory" is a typo but the name is part of the API used by
    // subclasses; renaming would break callers, so it is kept as-is.
    protected LongFunction<String> lookupMandtoryStrOpValueFunc(String paramName) {
        LongFunction<String> stringLongFunction;
        stringLongFunction = parsedOp.getAsRequiredFunction(paramName, String.class);
        logger.info("{}: {}", paramName, stringLongFunction.apply(0));
        return stringLongFunction;
    }

    /**
     * If the JMS destination that corresponds to a topic exists, reuse it; Otherwise, create it.
     * Temporary destinations are never cached — a fresh one is created per call.
     *
     * @throws JMSRuntimeException if the underlying JMSContext fails to create the destination
     */
    public Destination getOrCreateJmsDestination(
        S4JJMSContextWrapper s4JJMSContextWrapper,
        boolean tempDest,
        String destType,
        String destName) throws JMSRuntimeException
    {
        String jmsContextIdStr = s4JJMSContextWrapper.getJmsContextIdentifer();
        JMSContext jmsContext = s4JJMSContextWrapper.getJmsContext();
        // Regular, non-temporary destination
        if (!tempDest) {
            String destinationCacheKey = S4JAdapterUtil.buildCacheKey(jmsContextIdStr, destType, destName);
            // NOTE(review): get()+put() is not atomic, so two threads can briefly
            // create duplicate Destination objects for the same key; this looks
            // benign for lookups but confirm before relying on identity.
            Destination destination = jmsDestinations.get(destinationCacheKey);
            if (destination == null) {
                if (StringUtils.equalsIgnoreCase(destType, S4JAdapterUtil.JMS_DEST_TYPES.QUEUE.label)) {
                    destination = jmsContext.createQueue(destName);
                } else {
                    destination = jmsContext.createTopic(destName);
                }
                jmsDestinations.put(destinationCacheKey, destination);
            }
            return destination;
        }
        // Temporary destination
        else {
            if (StringUtils.equalsIgnoreCase(destType, S4JAdapterUtil.JMS_DEST_TYPES.QUEUE.label)) {
                return jmsContext.createTemporaryQueue();
            } else {
                return jmsContext.createTemporaryTopic();
            }
        }
    }

    // Get simplified NB thread name: the part after the last '/' of the full
    // NB-assigned thread name (or the whole name if it has no '/').
    private String getSimplifiedNBThreadName(String fullThreadName) {
        assert (StringUtils.isNotBlank(fullThreadName));
        if (StringUtils.contains(fullThreadName, '/'))
            return StringUtils.substringAfterLast(fullThreadName, "/");
        else
            return fullThreadName;
    }

    /**
     * If the JMS producer that corresponds to a destination exists, reuse it; Otherwise, create it.
     *
     * NOTE(review): the cache key is built from the current thread name only, so
     * exactly one producer is reused per NB thread regardless of destination —
     * confirm that matches the intent implied by this javadoc.
     */
    public JMSProducer getOrCreateJmsProducer(
        S4JJMSContextWrapper s4JJMSContextWrapper,
        boolean asyncApi) throws JMSException
    {
        JMSContext jmsContext = s4JJMSContextWrapper.getJmsContext();
        String producerCacheKey = S4JAdapterUtil.buildCacheKey(
            getSimplifiedNBThreadName(Thread.currentThread().getName()), "producer");
        JMSProducer jmsProducer = jmsProducers.get(producerCacheKey);
        if (jmsProducer == null) {
            jmsProducer = jmsContext.createProducer();
            if (asyncApi) {
                // Async sends report completion through the listener instead of blocking.
                jmsProducer.setAsync(new S4JCompletionListener(s4jSpace, this));
            }
            if (logger.isDebugEnabled()) {
                logger.debug("Producer created: {} -- {} -- {}",
                    producerCacheKey, jmsProducer, s4JJMSContextWrapper);
            }
            jmsProducers.put(producerCacheKey, jmsProducer);
        }
        return jmsProducer;
    }

    /**
     * If the JMS consumer that corresponds to a destination(, subscription, message selector) exists, reuse it; Otherwise, create it.
     *
     * For topics, durable/shared combinations select among createDurableConsumer,
     * createSharedConsumer and createSharedDurableConsumer; a subscription name is
     * mandatory for any durable or shared topic consumer.
     *
     * NOTE(review): as with producers, the cache key is per-thread only, not
     * per-destination/subscription — confirm intended.
     */
    public JMSConsumer getOrCreateJmsConsumer(
        S4JJMSContextWrapper s4JJMSContextWrapper,
        Destination destination,
        String destType,
        String subName,
        String msgSelector,
        float msgAckRatio,
        boolean nonLocal,
        boolean durable,
        boolean shared,
        boolean asyncApi,
        int slowAckInSec) throws JMSException
    {
        JMSContext jmsContext = s4JJMSContextWrapper.getJmsContext();
        boolean isTopic = StringUtils.equalsIgnoreCase(destType, S4JAdapterUtil.JMS_DEST_TYPES.TOPIC.label);
        String consumerCacheKey = S4JAdapterUtil.buildCacheKey(
            getSimplifiedNBThreadName(Thread.currentThread().getName()), "consumer");
        JMSConsumer jmsConsumer = jmsConsumers.get(consumerCacheKey);
        if (jmsConsumer == null) {
            if (isTopic) {
                if (!durable && !shared)
                    jmsConsumer = jmsContext.createConsumer(destination, msgSelector, nonLocal);
                else {
                    if (StringUtils.isBlank(subName)) {
                        throw new RuntimeException("Subscription name is required for receiving messages from a durable or shared topic!");
                    }
                    if (durable && !shared)
                        jmsConsumer = jmsContext.createDurableConsumer(
                            (Topic) destination, subName, msgSelector, nonLocal);
                    else if (!durable)
                        jmsConsumer = jmsContext.createSharedConsumer((Topic) destination, subName, msgSelector);
                    else
                        jmsConsumer = jmsContext.createSharedDurableConsumer((Topic) destination, subName, msgSelector);
                }
            }
            else {
                jmsConsumer = jmsContext.createConsumer(destination, msgSelector, nonLocal);
            }
            if (asyncApi) {
                // Async receives are handled entirely by the message listener.
                jmsConsumer.setMessageListener(
                    new S4JMessageListener(jmsContext, s4jSpace, this, msgAckRatio, slowAckInSec));
            }
            if (logger.isDebugEnabled()) {
                logger.debug("Consumer created: {} -- {} -- {}",
                    consumerCacheKey, jmsConsumer, s4JJMSContextWrapper);
            }
            jmsConsumers.put(consumerCacheKey, jmsConsumer);
        }
        return jmsConsumer;
    }

    /**
     * Determine whether the current cycle is a transaction commit point.
     *
     * Commit points exist only when the session mode is SESSION_TRANSACTED and
     * txn_batch_num &gt; 0; one is reached every {@code txnBatchNum} ops (per the
     * space-level tracking counter) or on the final cycle. Also advances the
     * space's batch-tracking counter as a side effect.
     *
     * NOTE(review): this method returns the NEGATION of the computed commit
     * decision (true == "not a commit point"), which is surprising given its
     * name — verify the inverted sense against how callers use the result
     * before changing it.
     */
    protected boolean commitTransaction(int txnBatchNum, int jmsSessionMode, long curCycleNum) {
        // Whether to commit the transaction which happens when:
        // - session mode is equal to "SESSION_TRANSACTED"
        // - "txn_batch_num" has been reached since last reset
        boolean commitTransaction = ( (Session.SESSION_TRANSACTED == jmsSessionMode) && (txnBatchNum > 0) );
        if (commitTransaction) {
            int txnBatchTackingCnt = s4jSpace.getTxnBatchTrackingCnt();
            if ( ( (txnBatchTackingCnt > 0) && ((txnBatchTackingCnt % txnBatchNum) == 0) ) ||
                ( curCycleNum >= (totalCycleNum - 1) ) ) {
                if (logger.isDebugEnabled()) {
                    logger.debug("Commit transaction ({}, {}, {})",
                        txnBatchTackingCnt,
                        s4jSpace.getTotalOpResponseCnt(), curCycleNum);
                }
            }
            else {
                commitTransaction = false;
            }
            s4jSpace.incTxnBatchTrackingCnt();
        }
        return !commitTransaction;
    }
}

View File

@@ -0,0 +1,26 @@
/*
* Copyright (c) 2022 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package io.nosqlbench.adapter.s4j.exception;
public class S4JAdapterAsyncOperationFailedException extends RuntimeException {
    /**
     * Wraps a failure reported by an asynchronous JMS completion callback.
     *
     * @param t the underlying failure, preserved as this exception's cause
     */
    public S4JAdapterAsyncOperationFailedException(Throwable t) {
        super(t);
        // BUG FIX: the constructor previously called printStackTrace() on itself,
        // which invokes an overridable method during construction and duplicates
        // output the eventual catcher/logger produces anyway. Stack-trace
        // reporting belongs to the handler, not the exception.
    }
}

View File

@@ -0,0 +1,29 @@
/*
* Copyright (c) 2022 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package io.nosqlbench.adapter.s4j.exception;
public class S4JAdapterInvalidParamException extends RuntimeException {
    /**
     * Signals an invalid value for a named adapter parameter; the message is
     * formatted as: Invalid setting for parameter (&lt;name&gt;): &lt;description&gt;.
     */
    public S4JAdapterInvalidParamException(String paramName, String errDesc) {
        // Delegate to the single-argument constructor with the composed message.
        this("Invalid setting for parameter (" + paramName + "): " + errDesc);
    }

    /** Signals an invalid parameter using a fully pre-formatted message. */
    public S4JAdapterInvalidParamException(String fullErrDesc) {
        super(fullErrDesc);
    }
}

View File

@@ -0,0 +1,30 @@
/*
* Copyright (c) 2022 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package io.nosqlbench.adapter.s4j.exception;
public class S4JAdapterUnexpectedException extends RuntimeException {
    /**
     * Signals an unexpected, non-recoverable condition in the S4J adapter.
     *
     * @param message description of the unexpected condition
     */
    public S4JAdapterUnexpectedException(String message) {
        super(message);
        // BUG FIX: removed the printStackTrace() call formerly made here — it
        // invoked an overridable method from the constructor and duplicated the
        // trace the catcher/logger will print.
    }

    /**
     * Wraps an unexpected underlying exception, preserving it as the cause.
     *
     * @param e the underlying exception
     */
    public S4JAdapterUnexpectedException(Exception e) {
        super(e);
        // BUG FIX: same printStackTrace() removal as above.
    }
}

View File

@@ -0,0 +1,25 @@
/*
* Copyright (c) 2022 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package io.nosqlbench.adapter.s4j.exception;
public class S4JAdapterUnsupportedOpException extends RuntimeException {
    /**
     * Signals an op type not supported by the S4J adapter.
     *
     * BUG FIX: the message (and parameter name) previously said "Pulsar adapter"
     * — a copy/paste remnant from the Pulsar driver; this class lives in the
     * S4J adapter's exception package.
     *
     * @param s4jOpType the unrecognized op type label from the workload yaml
     */
    public S4JAdapterUnsupportedOpException(String s4jOpType) {
        super("Unsupported S4J adapter operation type: \"" + s4jOpType + "\"");
    }
}

View File

@@ -0,0 +1,135 @@
/*
* Copyright (c) 2022 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.nosqlbench.adapter.s4j.ops;
import com.codahale.metrics.Timer;
import io.nosqlbench.adapter.s4j.S4JSpace;
import io.nosqlbench.adapter.s4j.exception.S4JAdapterAsyncOperationFailedException;
import io.nosqlbench.adapter.s4j.exception.S4JAdapterUnexpectedException;
import io.nosqlbench.adapter.s4j.util.S4JAdapterMetrics;
import io.nosqlbench.adapter.s4j.util.S4JAdapterUtil;
import org.apache.commons.lang3.StringUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.pulsar.shade.org.apache.avro.AvroRuntimeException;
import javax.jms.*;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
/**
 * Per-cycle op that receives one JMS message synchronously (async receives are
 * handled by the message listener installed in
 * S4JBaseOpDispenser#getOrCreateJmsConsumer, making apply() a no-op for them).
 */
public class MessageConsumerOp extends S4JOp {

    private final static Logger logger = LogManager.getLogger(MessageConsumerOp.class);

    private final JMSConsumer jmsConsumer;
    // Sync API only: when true, block indefinitely in receive().
    private final boolean blockingMsgRecv;
    // Fraction of received messages to acknowledge; forwarded to S4JSpace.processMsgAck.
    private final float msgAckRatio;
    // Timed-receive timeout in ms; 0 means wait forever (per the inline comment below).
    private final long msgReadTimeout;
    // When true (and not blocking), use receiveNoWait() instead of a timed receive().
    private final boolean recvNoWait;
    // Slow-ack threshold in seconds; forwarded to S4JSpace.processMsgAck.
    private final int slowInSec;

    /**
     * @param s4jAdapterMetrics shared metrics holder (message-size histogram, timers)
     * @param s4jSpace          shared activity state and counters
     * @param jmsContext        context used for transaction commit
     * @param destination       destination being consumed from
     * @param asyncApi          true if receives happen via an async listener
     * @param commitTransact    true if this op must commit the JMS transaction
     * @param jmsConsumer       the (cached) consumer to receive with
     */
    public MessageConsumerOp(S4JAdapterMetrics s4jAdapterMetrics,
                             S4JSpace s4jSpace,
                             JMSContext jmsContext,
                             Destination destination,
                             boolean asyncApi,
                             boolean commitTransact,
                             JMSConsumer jmsConsumer,
                             boolean blockingMsgRecv,
                             float msgAckRatio,
                             long readTimeout,
                             boolean recvNoWait,
                             int slowInSec)
    {
        super(s4jAdapterMetrics, s4jSpace, jmsContext, destination, asyncApi, commitTransact);
        this.jmsConsumer = jmsConsumer;
        this.blockingMsgRecv = blockingMsgRecv;
        this.msgAckRatio = msgAckRatio;
        this.msgReadTimeout = readTimeout;
        this.recvNoWait = recvNoWait;
        this.slowInSec = slowInSec;
    }

    /**
     * Receive (sync only), optionally commit, ack per ratio, record message size,
     * and bump the space-level response/null counters. Always returns null.
     */
    @Override
    public Object apply(long value) {
        long timeElapsedMills = System.currentTimeMillis() - s4jOpStartTimeMills;
        // If maximum S4J operation duration is specified, only receive messages
        // before the maximum duration threshold is reached. Otherwise, this is
        // just no-op.
        if ( (maxS4jOpDurationInSec == 0) || (timeElapsedMills <= (maxS4jOpDurationInSec*1000)) ) {
            // Please see S4JBaseOpDispenser::getOrCreateJmsConsumer() for async processing
            if (!asyncApi) {
                Message recvdMsg;
                try {
                    // blocking message receiving only applies to synchronous API
                    if (blockingMsgRecv) {
                        recvdMsg = jmsConsumer.receive();
                    } else if (recvNoWait) {
                        recvdMsg = jmsConsumer.receiveNoWait();
                    } else {
                        // timeout value 0 means to wait forever
                        recvdMsg = jmsConsumer.receive(msgReadTimeout);
                    }
                    if (this.commitTransact) jmsContext.commit();
                    if (recvdMsg != null) {
                        s4jSpace.processMsgAck(jmsContext, recvdMsg, msgAckRatio, slowInSec);
                        byte[] recvdMsgBody = recvdMsg.getBody(byte[].class);
                        int messageSize = recvdMsgBody.length;
                        messageSizeHistogram.update(messageSize);
                        if (logger.isDebugEnabled()) {
                            // for testing purpose
                            String myMsgSeq = recvdMsg.getStringProperty(S4JAdapterUtil.NB_MSG_SEQ_PROP);
                            logger.debug("Sync message receive successful - message ID {} ({}) "
                                , recvdMsg.getJMSMessageID(), myMsgSeq);
                        }
                        if (s4jSpace.isTrackingMsgRecvCnt()) {
                            s4jSpace.incTotalOpResponseCnt();
                        }
                    } else {
                        // Timed/no-wait receive returned nothing; tracked separately.
                        if (s4jSpace.isTrackingMsgRecvCnt()) {
                            s4jSpace.incTotalNullMsgRecvdCnt();
                        }
                    }
                } catch (JMSException | JMSRuntimeException e) {
                    // Delegates the strict-vs-lenient error policy to the shared helper.
                    S4JAdapterUtil.processMsgErrorHandling(
                        e,
                        s4jSpace.isStrictMsgErrorHandling(),
                        "Unexpected errors when sync receiving a JMS message.");
                }
            }
        }
        else {
            if (logger.isTraceEnabled()) {
                logger.trace("NB cycle number {} is no-op (maxS4jOpDurationInSec: {}, timeElapsedMills: {})",
                    value, maxS4jOpDurationInSec, timeElapsedMills);
            }
        }
        return null;
    }
}

View File

@@ -0,0 +1,101 @@
/*
* Copyright (c) 2022 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.nosqlbench.adapter.s4j.ops;
import com.codahale.metrics.Histogram;
import io.nosqlbench.adapter.s4j.S4JSpace;
import io.nosqlbench.adapter.s4j.util.S4JAdapterMetrics;
import io.nosqlbench.adapter.s4j.util.S4JAdapterUtil;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import javax.jms.*;
import java.util.HashMap;
import java.util.Map;
/**
 * Per-cycle op that sends one pre-built JMS message to a destination.
 * Async-send completion is handled by the completion listener installed in
 * S4JBaseOpDispenser#getOrCreateJmsProducer.
 */
public class MessageProducerOp extends S4JOp {

    private final static Logger logger = LogManager.getLogger("MessageProducerOp");

    private final JMSProducer jmsProducer;
    // Message fully constructed (payload, headers, properties) by the dispenser.
    private final Message message;

    /**
     * @param s4jAdapterMetrics shared metrics holder (message-size histogram, timers)
     * @param s4jSpace          shared activity state and counters
     * @param jmsContext        context used for transaction commit
     * @param destination       destination to publish to
     * @param asyncApi          true if send completion is reported asynchronously
     * @param commitTransact    true if this op must commit the JMS transaction
     * @param jmsProducer       the (cached) producer to send with
     * @param message           the message to publish
     */
    public MessageProducerOp(S4JAdapterMetrics s4jAdapterMetrics,
                             S4JSpace s4jSpace,
                             JMSContext jmsContext,
                             Destination destination,
                             boolean asyncApi,
                             boolean commitTransact,
                             JMSProducer jmsProducer,
                             Message message) {
        super(s4jAdapterMetrics, s4jSpace, jmsContext, destination, asyncApi, commitTransact);
        this.jmsProducer = jmsProducer;
        this.message = message;
    }

    /**
     * Send the message, optionally commit, record its size (from the
     * NB_MSG_SIZE_PROP property set at build time), and bump the response
     * counter for sync sends. Always returns null.
     */
    @Override
    public Object apply(long value) {
        long timeElapsedMills = System.currentTimeMillis() - s4jOpStartTimeMills;
        // If maximum S4J operation duration is specified, only publish messages
        // before the maximum duration threshold is reached. Otherwise, this is
        // just no-op.
        if ( (maxS4jOpDurationInSec == 0) || (timeElapsedMills <= (maxS4jOpDurationInSec*1000)) ) {
            try {
                jmsProducer.send(destination, message);
                if (this.commitTransact) {
                    jmsContext.commit();
                }
                int msgSize = message.getIntProperty(S4JAdapterUtil.NB_MSG_SIZE_PROP);
                messageSizeHistogram.update(msgSize);
                // Please see S4JBaseOpDispenser::getOrCreateJmsProducer() for async processing
                if (!asyncApi) {
                    if (logger.isDebugEnabled()) {
                        // for testing purpose
                        String myMsgSeq = message.getStringProperty(S4JAdapterUtil.NB_MSG_SEQ_PROP);
                        logger.debug("Sync message sending is successful - message ID {} ({}) "
                            , message.getJMSMessageID(), myMsgSeq);
                    }
                    if (s4jSpace.isTrackingMsgRecvCnt()) {
                        s4jSpace.incTotalOpResponseCnt();
                    }
                }
            } catch (JMSException | JMSRuntimeException e) {
                // Delegates the strict-vs-lenient error policy to the shared helper.
                S4JAdapterUtil.processMsgErrorHandling(
                    e,
                    s4jSpace.isStrictMsgErrorHandling(),
                    "Unexpected errors when sync sending a JMS message.");
            }
        }
        else {
            if (logger.isTraceEnabled()) {
                logger.trace("NB cycle number {} is no-op (maxS4jOpDurationInSec: {}, timeElapsedMills: {})",
                    value, maxS4jOpDurationInSec, timeElapsedMills);
            }
        }
        return null;
    }
}

View File

@@ -0,0 +1,58 @@
/*
* Copyright (c) 2022 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package io.nosqlbench.adapter.s4j.ops;
import com.codahale.metrics.Histogram;
import io.nosqlbench.adapter.s4j.S4JSpace;
import io.nosqlbench.adapter.s4j.util.S4JAdapterMetrics;
import io.nosqlbench.engine.api.activityimpl.uniform.flowtypes.CycleOp;
import javax.jms.Destination;
import javax.jms.JMSContext;
/**
 * Common base for S4J cycle ops (producer/consumer), holding the shared JMS
 * context, destination, transaction/async flags and metric handles.
 */
public abstract class S4JOp implements CycleOp<Object> {

    // NOTE(review): unlike the other fields this one is not final — confirm no
    // subclass reassigns it before tightening.
    protected S4JAdapterMetrics s4jAdapterMetrics;
    protected final S4JSpace s4jSpace;
    protected final JMSContext jmsContext;
    protected final Destination destination;
    // True when send/receive completion is handled by async listeners.
    protected final boolean asyncApi;
    // True when this op must commit the JMS transaction after its action.
    protected final boolean commitTransact;
    // Activity start wall-clock time (ms); combined with maxS4jOpDurationInSec
    // by subclasses to turn late cycles into no-ops.
    protected final long s4jOpStartTimeMills;
    protected final long maxS4jOpDurationInSec;
    protected final Histogram messageSizeHistogram;

    /**
     * Captures the shared state; timing fields and the histogram are pulled
     * from the space and metrics holder at construction time.
     */
    public S4JOp(
        S4JAdapterMetrics s4jAdapterMetrics,
        S4JSpace s4jSpace,
        JMSContext jmsContext,
        Destination destination,
        boolean asyncApi,
        boolean commitTransact)
    {
        this.s4jAdapterMetrics = s4jAdapterMetrics;
        this.s4jSpace = s4jSpace;
        this.jmsContext = jmsContext;
        this.destination = destination;
        this.asyncApi = asyncApi;
        this.commitTransact = commitTransact;
        this.s4jOpStartTimeMills = s4jSpace.getS4JActivityStartTimeMills();
        this.maxS4jOpDurationInSec = s4jSpace.getMaxS4JOpTimeInSec();
        this.messageSizeHistogram = s4jAdapterMetrics.getMessagesizeHistogram();
    }
}

View File

@@ -0,0 +1,68 @@
/*
* Copyright (c) 2022 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.nosqlbench.adapter.s4j.util;
import com.codahale.metrics.Histogram;
import com.codahale.metrics.Timer;
import io.nosqlbench.api.config.NBNamedElement;
import io.nosqlbench.api.engine.metrics.ActivityMetrics;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
/**
 * Holder for the S4J adapter's activity metrics: a message-size histogram plus
 * bind/execute timers, all registered under a common prefix.
 */
public class S4JAdapterMetrics implements NBNamedElement {
    private final static Logger logger = LogManager.getLogger("S4JAdapterMetrics");

    private final String defaultAdapterMetricsPrefix;

    // Wired up lazily by initS4JAdapterInstrumentation().
    private Histogram messageSizeHistogram;
    private Timer bindTimer;
    private Timer executeTimer;

    /**
     * @param defaultMetricsPrefix prefix prepended to every metric name
     *                             registered by this holder
     */
    public S4JAdapterMetrics(String defaultMetricsPrefix) {
        this.defaultAdapterMetricsPrefix = defaultMetricsPrefix;
    }

    @Override
    public String getName() {
        return "S4JAdapterMetrics";
    }

    /**
     * Register the message-size histogram and the bind/execute timers with
     * ActivityMetrics. Must run before the getters below are useful.
     */
    public void initS4JAdapterInstrumentation() {
        // Histogram metrics
        this.messageSizeHistogram = ActivityMetrics.histogram(
            this,
            defaultAdapterMetricsPrefix + "message_size",
            ActivityMetrics.DEFAULT_HDRDIGITS);
        // Timer metrics
        this.bindTimer = createTimer("bind");
        this.executeTimer = createTimer("execute");
    }

    // Shared helper: one prefixed HDR timer per metric suffix.
    private Timer createTimer(String suffix) {
        return ActivityMetrics.timer(
            this,
            defaultAdapterMetricsPrefix + suffix,
            ActivityMetrics.DEFAULT_HDRDIGITS);
    }

    public Timer getBindTimer() { return bindTimer; }
    public Timer getExecuteTimer() { return executeTimer; }
    public Histogram getMessagesizeHistogram() { return messageSizeHistogram; }
}

View File

@@ -0,0 +1,342 @@
package io.nosqlbench.adapter.s4j.util;
/*
* Copyright (c) 2022 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import com.datastax.oss.pulsar.jms.PulsarJMSConstants;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import io.nosqlbench.adapter.s4j.S4JOpType;
import org.apache.commons.lang3.BooleanUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import javax.jms.*;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
public class S4JAdapterUtil {
private final static Logger logger = LogManager.getLogger(S4JAdapterUtil.class);
    ///////
    // Valid document level parameters for JMS NB yaml file
    public final static String JMS_SPEC_VER_12 = "1.2";
    public final static String JMS_SPEC_VER_20 = "2.0";

    // Document-scope (YAML doc level) parameter names recognized by the S4J adapter.
    public enum DOC_LEVEL_PARAMS {
        // Temporary destination
        TEMP_DEST("temporary_dest"),
        // JMS destination type - topic or queue
        // String value
        // - valid values: see JMS_DEST_TYPES
        DEST_TYPE("dest_type"),
        // Asynchronous message processing
        ASYNC_API("async_api"),
        // Transaction batch size
        // - Only relevant when session mode is SESSION_TRANSACTED
        TXN_BATCH_NUM("txn_batch_num"),
        // Whether to use blocking message receiving as the default behavior
        BLOCKING_MSG_RECV("blocking_msg_recv"),
        // Whether the destination is a shared topic
        SHARED_TOPIC("shared_topic"),
        // Whether the destination is a durable topic
        DURABLE_TOPIC("durable_topic");
        public final String label;
        DOC_LEVEL_PARAMS(String label) {
            this.label = label;
        }
    }
    // True if 'param' exactly matches one of the DOC_LEVEL_PARAMS labels (case-sensitive).
    public static boolean isValidDocLevelParam(String param) {
        return Arrays.stream(DOC_LEVEL_PARAMS.values()).anyMatch(t -> t.label.equals(param));
    }
    // Comma-separated list of all valid doc-level parameter labels (for error messages).
    public static String getValidDocLevelParamList() {
        return Arrays.stream(DOC_LEVEL_PARAMS.values()).map(t -> t.label).collect(Collectors.joining(", "));
    }
    // JMS Destination Types
    public enum JMS_DEST_TYPES {
        QUEUE("queue"),
        TOPIC("topic");
        public final String label;
        JMS_DEST_TYPES(String label) {
            this.label = label;
        }
    }
    // True if 'type' exactly matches a JMS_DEST_TYPES label (case-sensitive).
    public static boolean isValidJmsDestType(String type) {
        return Arrays.stream(JMS_DEST_TYPES.values()).anyMatch(t -> t.label.equals(type));
    }
    // Comma-separated list of valid destination type labels (for error messages).
    public static String getValidJmsDestTypeList() {
        return Arrays.stream(JMS_DEST_TYPES.values()).map(t -> t.label).collect(Collectors.joining(", "));
    }
    // Standard JMS message headers (by JMS specification)
    public enum JMS_MSG_HEADER_STD {
        JMSDestination("JMSDestination"),
        JMSDeliveryMode("JMSDeliveryMode"),
        JMSMessageID("JMSMessageID"),
        JMSTimestamp("JMSTimestamp"),
        JMSRedelivered("JMSRedelivered"),
        JMSExpiration("JMSExpiration"),
        JMSCorrelationID("JMSCorrelationID"),
        JMSType("JMSType"),
        JMSReplyTo("JMSReplyTo"),
        JMSPriority("JMSPriority");
        public final String label;
        JMS_MSG_HEADER_STD(String label) {
            this.label = label;
        }
    }
    // True if 'header' exactly matches a standard JMS header name.
    public static boolean isValidStdJmsMsgHeader(String header) {
        return Arrays.stream(JMS_MSG_HEADER_STD.values()).anyMatch(t -> t.label.equals(header));
    }
    // Comma-separated list of standard JMS header names (for error messages).
    public static String getValidStdJmsMsgHeaderList() {
        return Arrays.stream(JMS_MSG_HEADER_STD.values()).map(t -> t.label).collect(Collectors.joining(", "));
    }
    // JMS defined message properties (by JMS specification)
    // NOTE(review): this entry list is identical to JMS_MSG_HEADER_STD above;
    // JMS-defined properties are conventionally the JMSX* family — confirm the
    // duplication is intentional before relying on it.
    public enum JMS_DEFINED_MSG_PROPERTY {
        JMSDestination("JMSDestination"),
        JMSDeliveryMode("JMSDeliveryMode"),
        JMSMessageID("JMSMessageID"),
        JMSTimestamp("JMSTimestamp"),
        JMSRedelivered("JMSRedelivered"),
        JMSExpiration("JMSExpiration"),
        JMSCorrelationID("JMSCorrelationID"),
        JMSType("JMSType"),
        JMSReplyTo("JMSReplyTo"),
        JMSPriority("JMSPriority");
        public final String label;
        JMS_DEFINED_MSG_PROPERTY(String label) {
            this.label = label;
        }
    }
    // True if 'property' exactly matches a JMS-defined property label.
    public static boolean isValidJmsDfndMsgProp(String property) {
        return Arrays.stream(JMS_DEFINED_MSG_PROPERTY.values()).anyMatch(t -> t.label.equals(property));
    }
    // Comma-separated list of JMS-defined property labels (for error messages).
    public static String getValidJmsDfndMsgPropList() {
        return Arrays.stream(JMS_DEFINED_MSG_PROPERTY.values()).map(t -> t.label).collect(Collectors.joining(", "));
    }
    // NB-internal message property names: per-message cycle sequence and payload size.
    public final static String NB_MSG_SEQ_PROP = "NBMsgSeqProp";
    public final static String NB_MSG_SIZE_PROP = "NBMsgSize";
    // JMS session (acknowledgement) modes — see getSessionModeFromStr() for the
    // mapping to JMSContext/Pulsar constants.
    public enum JMS_SESSION_MODES {
        AUTO_ACK("auto_ack"),
        CLIENT_ACK("client_ack"),
        DUPS_OK_ACK("dups_ok_ack"),
        INDIVIDUAL_ACK("individual_ack"),
        TRANSACT("transact_ack");
        public final String label;
        JMS_SESSION_MODES(String label) {
            this.label = label;
        }
    }
    // True if 'mode' exactly matches a session mode label (case-sensitive).
    public static boolean isValidJmsSessionMode(String mode) {
        return Arrays.stream(JMS_SESSION_MODES.values()).anyMatch(t -> t.label.equals(mode));
    }
    // Comma-separated list of valid session mode labels (for error messages).
    public static String getValidJmsSessionModeList() {
        return Arrays.stream(JMS_SESSION_MODES.values()).map(t -> t.label).collect(Collectors.joining(", "));
    }
    // JMS Message Types
    public enum JMS_MESSAGE_TYPES {
        TEXT("text"),
        BYTE("byte"),
        MAP("map"),
        STREAM("stream"),
        OBJECT("object");
        public final String label;
        JMS_MESSAGE_TYPES(String label) {
            this.label = label;
        }
    }
    // True if 'type' exactly matches a message type label (case-sensitive).
    public static boolean isValidJmsMessageType(String type) {
        return Arrays.stream(JMS_MESSAGE_TYPES.values()).anyMatch(t -> t.label.equals(type));
    }
    // Comma-separated list of valid message type labels (for error messages).
    public static String getValidJmsMessageTypeList() {
        return Arrays.stream(JMS_MESSAGE_TYPES.values()).map(t -> t.label).collect(Collectors.joining(", "));
    }
    // JMS message property value types (the original comment said "Message Types",
    // but these are the scalar types a message property may carry).
    public enum JMS_MSG_PROP_TYPES {
        SHORT("short"),
        INT("int"),
        LONG("long"),
        FLOAT("float"),
        DOUBLE("double"),
        STRING("string"),
        BOOLEAN("boolean"),
        BYTE("byte");
        public final String label;
        JMS_MSG_PROP_TYPES(String label) {
            this.label = label;
        }
    }
    // True if 'type' exactly matches a property type label (case-sensitive).
    public static boolean isValidJmsMsgPropType(String type) {
        return Arrays.stream(JMS_MSG_PROP_TYPES.values()).anyMatch(t -> t.label.equals(type));
    }
    // Comma-separated list of valid property type labels (for error messages).
    public static String getValidJmsMsgPropTypeList() {
        return Arrays.stream(JMS_MSG_PROP_TYPES.values()).map(t -> t.label).collect(Collectors.joining(", "));
    }
///////
// Convert JSON string to a key/value map
public static Map<String, String> convertJsonToMap(String jsonStr) throws Exception {
ObjectMapper mapper = new ObjectMapper();
return mapper.readValue(jsonStr, new TypeReference<Map<String, String>>(){});
}
///////
/**
 * Parse a JSON array string into a list of objects.
 *
 * @param jsonStr a JSON array, e.g. ["a", 1, {"k":"v"}]
 * @throws Exception if the input is not valid JSON for an array
 */
public static List<Object> convertJsonToObjList(String jsonStr) throws Exception {
    Object[] parsedObjs = new ObjectMapper().readValue(jsonStr, Object[].class);
    return Arrays.asList(parsedObjs);
}
///////
/**
 * Resolve the destination name from a JMS Destination object.
 * The destination is cast to Topic or Queue depending on the given type label.
 *
 * @param destination the JMS destination to inspect
 * @param destType    destination type label; compared against JMS_DEST_TYPES.TOPIC.label
 * @throws JMSException if the provider fails to return the name
 */
public static String getDestinationName(Destination destination, String destType) throws JMSException {
    if (StringUtils.equalsIgnoreCase(destType, JMS_DEST_TYPES.TOPIC.label)) {
        return ((Topic) destination).getTopicName();
    }
    return ((Queue) destination).getQueueName();
}
///////
/**
 * Map a session mode label (see JMS_SESSION_MODES) to the corresponding JMS session
 * mode integer constant. Blank or unrecognized input falls back to AUTO_ACKNOWLEDGE.
 *
 * Bug fix: previously the fallback assignment for an unrecognized label was nested
 * inside {@code if (logger.isDebugEnabled())}, so -1 was returned whenever debug
 * logging was disabled.
 *
 * @param sessionModeStr session mode label from configuration (may be blank)
 * @return one of the JMSContext/PulsarJMSConstants session mode constants
 */
public static int getSessionModeFromStr(String sessionModeStr) {
    // default ack mode: auto_ack
    int sessionMode;
    if (StringUtils.isBlank(sessionModeStr))
        sessionMode = JMSContext.AUTO_ACKNOWLEDGE;
    else if (StringUtils.equalsIgnoreCase(sessionModeStr, JMS_SESSION_MODES.AUTO_ACK.label))
        sessionMode = JMSContext.AUTO_ACKNOWLEDGE;
    else if (StringUtils.equalsIgnoreCase(sessionModeStr, JMS_SESSION_MODES.CLIENT_ACK.label))
        sessionMode = JMSContext.CLIENT_ACKNOWLEDGE;
    else if (StringUtils.equalsIgnoreCase(sessionModeStr, JMS_SESSION_MODES.DUPS_OK_ACK.label))
        sessionMode = JMSContext.DUPS_OK_ACKNOWLEDGE;
    else if (StringUtils.equalsIgnoreCase(sessionModeStr, JMS_SESSION_MODES.TRANSACT.label))
        sessionMode = JMSContext.SESSION_TRANSACTED;
    else if (StringUtils.equalsIgnoreCase(sessionModeStr, JMS_SESSION_MODES.INDIVIDUAL_ACK.label))
        sessionMode = PulsarJMSConstants.INDIVIDUAL_ACKNOWLEDGE;
    else {
        if (logger.isDebugEnabled()) {
            logger.debug("Invalid session mode string \"{}\". Valid values are: {}. Use the default \"auto_ack\" mode!"
                ,sessionModeStr, getValidJmsSessionModeList());
        }
        // Fall back to the default regardless of the active logging level
        sessionMode = JMSContext.AUTO_ACKNOWLEDGE;
    }
    return sessionMode;
}
/**
 * Determine whether Pulsar authentication is configured: both "authPlugin" and
 * "authParams" must be present in the client config map with non-blank values.
 */
public static boolean isAuthNRequired(S4JClientConf s4jClientConf) {
    assert (s4jClientConf != null);
    Map<String, Object> clientConfObjMap = s4jClientConf.getS4jConfMapObj_client();
    Object authPluginSetting = clientConfObjMap.get("authPlugin");
    Object authParamsSetting = clientConfObjMap.get("authParams");
    return (authPluginSetting != null) && StringUtils.isNotBlank(authPluginSetting.toString())
        && (authParamsSetting != null) && StringUtils.isNotBlank(authParamsSetting.toString());
}
/**
 * Check whether the "jms.useCredentialsFromCreateConnection" flag is enabled
 * in the effective S4J configuration map. Missing key means disabled.
 */
public static boolean isUseCredentialsEnabled(S4JClientConf s4jClientConf) {
    assert (s4jClientConf != null);
    Map<String, Object> s4jConfMap = s4jClientConf.getS4jConfObjMap();
    if (!s4jConfMap.containsKey("jms.useCredentialsFromCreateConnection")) {
        return false;
    }
    return BooleanUtils.toBoolean(s4jConfMap.get("jms.useCredentialsFromCreateConnection").toString());
}
/**
 * Username passed on createConnection; a placeholder value, since Pulsar JMS
 * authenticates via the token carried in the password/authParams.
 */
public static String getCredentialUserName(S4JClientConf s4jClientConf) {
    return "dummy";
}

/**
 * Password passed on createConnection: the raw "authParams" setting from the
 * effective S4J configuration map, or an empty string when absent.
 */
public static String getCredentialPassword(S4JClientConf s4jClientConf) {
    Map<String, Object> s4jConfMap = s4jClientConf.getS4jConfObjMap();
    return s4jConfMap.containsKey("authParams")
        ? s4jConfMap.get("authParams").toString()
        : "";
}
///////
/**
 * Calculate a unique cache key by joining the given parts with "::".
 * An empty input yields an empty string.
 */
public static String buildCacheKey(String... keyParts) {
    StringBuilder keyBuilder = new StringBuilder();
    for (int i = 0; i < keyParts.length; i++) {
        if (i > 0) {
            keyBuilder.append("::");
        }
        keyBuilder.append(keyParts[i]);
    }
    return keyBuilder.toString();
}
///////
/**
 * Pause the execution of the current thread for the given number of seconds;
 * a zero or negative value is a no-op.
 *
 * Fixes: the millisecond conversion now uses long arithmetic (the previous
 * {@code pauseInSec * 1000} could overflow int for very large inputs), and the
 * interrupt flag is restored after an InterruptedException instead of being
 * silently swallowed, so callers can still observe the interruption.
 */
public static void pauseCurThreadExec(int pauseInSec) {
    if (pauseInSec > 0) {
        try {
            Thread.sleep(1000L * pauseInSec);
        }
        catch (InterruptedException ie) {
            ie.printStackTrace();
            // Preserve the interrupt status for upstream handling
            Thread.currentThread().interrupt();
        }
    }
}
///////
/**
 * Common error handling for message processing failures: in strict mode the error
 * is rethrown as a RuntimeException (failing the operation); otherwise the current
 * thread backs off for one second and processing continues best-effort.
 */
public static void processMsgErrorHandling(Exception exception, boolean strictErrorHandling, String errorMsg) {
    exception.printStackTrace();
    if (!strictErrorHandling) {
        // Lenient mode: brief back-off, then carry on
        S4JAdapterUtil.pauseCurThreadExec(1);
        return;
    }
    throw new RuntimeException(errorMsg + " [ " + exception.getMessage() + " ]");
}
}

View File

@@ -0,0 +1,213 @@
/*
* Copyright (c) 2022 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.nosqlbench.adapter.s4j.util;
import org.apache.commons.configuration2.Configuration;
import org.apache.commons.configuration2.FileBasedConfiguration;
import org.apache.commons.configuration2.PropertiesConfiguration;
import org.apache.commons.configuration2.builder.FileBasedConfigurationBuilder;
import org.apache.commons.configuration2.builder.fluent.Parameters;
import org.apache.commons.configuration2.ex.ConfigurationException;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.builder.ToStringBuilder;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import java.io.File;
import java.io.IOException;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
/**
 * Holder for the effective S4J (Pulsar JMS) client configuration, assembled from a
 * properties file plus the service URLs supplied on the NB command line.
 *
 * Bug fix: the constructor previously called {@code consumerConfMapRaw.remove("topicPattern")},
 * but the actual Pulsar consumer configuration key is "topicsPattern", so that setting was
 * never excluded. Both spellings are now removed for safety.
 */
public class S4JClientConf {
    private final static Logger logger = LogManager.getLogger(S4JClientConf.class);

    // Property-file key prefixes used to route settings into the per-category maps below
    public static final String CLIENT_CONF_PREFIX = "client";
    public static final String PRODUCER_CONF_PREFIX = "producer";
    public static final String CONSUMER_CONF_PREFIX = "consumer";
    public static final String JMS_CONF_PREFIX = "jms";

    // "Raw" maps hold String->String settings exactly as read from the config properties file;
    // "Tgt" maps hold the converted String->Object values that the Pulsar/S4J APIs expect.
    private Map<String, String> clientConfMapRaw = new HashMap<>();
    private Map<String, String> producerConfMapRaw = new HashMap<>();
    private Map<String, String> consumerConfMapRaw = new HashMap<>();
    private Map<String, String> jmsConfMapRaw = new HashMap<>();
    private Map<String, String> miscConfMapRaw = new HashMap<>();

    // The merged, effective configuration map handed to the S4J driver
    private final Map<String, Object> s4jConfMapTgt = new HashMap<>();
    private Map<String, Object> clientConfMapTgt = new HashMap<>();
    private Map<String, Object> producerConfMapTgt = new HashMap<>();
    private Map<String, Object> consumerConfMapTgt = new HashMap<>();
    private Map<String, Object> jmsConfMapTgt = new HashMap<>();
    private Map<String, Object> miscConfMapTgt = new HashMap<>();

    /**
     * @param webSvcUrl       Pulsar web service URL (overrides any file setting)
     * @param pulsarSvcUrl    Pulsar broker service URL (overrides any file setting)
     * @param s4jConfFileName path to the S4J config properties file
     */
    public S4JClientConf(String webSvcUrl, String pulsarSvcUrl, String s4jConfFileName) {
        //////////////////
        // Read related Pulsar client configuration settings from a file
        readRawConfFromFile(s4jConfFileName);

        //////////////////
        // Ignores the following Pulsar client/producer/consumer configurations since
        // they're either not supported in the S4J API or the property must be specified
        // as the NB CLI parameter or the NB yaml file parameter.

        // <<< https://pulsar.apache.org/docs/reference-configuration/#client >>>
        // pulsar client config
        //   * webServiceUrl
        //   * brokerServiceUrl
        clientConfMapRaw.put("brokerServiceUrl", pulsarSvcUrl);
        clientConfMapRaw.put("webServiceUrl", webSvcUrl);

        // <<< https://pulsar.apache.org/docs/client-libraries-java/#configure-producer >>>
        // producer config
        //   * topicName
        producerConfMapRaw.remove("topicName");

        // <<< https://pulsar.apache.org/docs/client-libraries-java/#configure-consumer >>>
        // consumer config
        //   * topicNames
        //   * topicsPattern
        //   * subscriptionName
        //   * subscriptionType
        consumerConfMapRaw.remove("topicNames");
        // Fix: the Pulsar key is "topicsPattern"; the old key "topicPattern" is also
        // removed in case existing config files used the misspelled form.
        consumerConfMapRaw.remove("topicsPattern");
        consumerConfMapRaw.remove("topicPattern");
        consumerConfMapRaw.remove("subscriptionName");
        consumerConfMapRaw.remove("subscriptionType");
        consumerConfMapRaw.remove("subscriptionInitialPosition");
        consumerConfMapRaw.remove("regexSubscriptionMode");

        //////////////////
        // Convert the raw configuration map (<String,String>) to the required map (<String,Object>)
        clientConfMapTgt.putAll(S4JClientConfConverter.convertRawClientConf(clientConfMapRaw));
        producerConfMapTgt.putAll(S4JClientConfConverter.convertRawProducerConf(producerConfMapRaw));
        consumerConfMapTgt.putAll(S4JClientConfConverter.convertRawConsumerConf(consumerConfMapRaw));
        jmsConfMapTgt.putAll(S4JClientConfConverter.convertRawJmsConf(jmsConfMapRaw));
        miscConfMapTgt.putAll(S4JClientConfConverter.convertRawMiscConf(miscConfMapRaw));

        // Assemble the effective map: client settings at top level, producer/consumer
        // settings nested under "producerConfig"/"consumerConfig", jms/misc at top level
        s4jConfMapTgt.putAll(clientConfMapTgt);
        s4jConfMapTgt.put("producerConfig", producerConfMapTgt);
        s4jConfMapTgt.put("consumerConfig", consumerConfMapTgt);
        s4jConfMapTgt.putAll(jmsConfMapTgt);
        s4jConfMapTgt.putAll(miscConfMapTgt);
    }

    /**
     * Read the config properties file and route each non-blank setting into the matching
     * raw category map, stripping the "client."/"producer."/"consumer." prefix but
     * keeping the "jms." prefix. I/O or parse failures are logged and leave the maps empty.
     */
    public void readRawConfFromFile(String fileName) {
        File file = new File(fileName);

        try {
            // Resolving the canonical path early surfaces invalid paths as IOException
            String canonicalFilePath = file.getCanonicalPath();

            Parameters params = new Parameters();

            FileBasedConfigurationBuilder<FileBasedConfiguration> builder =
                new FileBasedConfigurationBuilder<FileBasedConfiguration>(PropertiesConfiguration.class)
                    .configure(params.properties()
                        .setFileName(fileName));

            Configuration config = builder.getConfiguration();

            for (Iterator<String> it = config.getKeys(); it.hasNext(); ) {
                String confKey = it.next();
                String confVal = config.getProperty(confKey).toString();

                if (!StringUtils.isBlank(confVal)) {
                    // Get client connection specific configuration settings, removing "client." prefix
                    if (StringUtils.startsWith(confKey, CLIENT_CONF_PREFIX)) {
                        clientConfMapRaw.put(confKey.substring(CLIENT_CONF_PREFIX.length() + 1), confVal);
                    }
                    // Get producer specific configuration settings, removing "producer." prefix
                    else if (StringUtils.startsWith(confKey, PRODUCER_CONF_PREFIX)) {
                        producerConfMapRaw.put(confKey.substring(PRODUCER_CONF_PREFIX.length() + 1), confVal);
                    }
                    // Get consumer specific configuration settings, removing "consumer." prefix
                    else if (StringUtils.startsWith(confKey, CONSUMER_CONF_PREFIX)) {
                        consumerConfMapRaw.put(confKey.substring(CONSUMER_CONF_PREFIX.length() + 1), confVal);
                    }
                    // Get JMS specific configuration settings, keeping "jms." prefix
                    else if (StringUtils.startsWith(confKey, JMS_CONF_PREFIX)) {
                        jmsConfMapRaw.put(confKey, confVal);
                    }
                    // For all other configuration settings (not having any of the above prefixes), keep as is
                    else {
                        miscConfMapRaw.put(confKey, confVal);
                    }
                }
            }
        } catch (IOException ioe) {
            logger.error("Can't read the specified config properties file: " + fileName);
            ioe.printStackTrace();
        } catch (ConfigurationException cex) {
            logger.error("Error loading configuration items from the specified config properties file: " + fileName + ":" + cex.getMessage());
            cex.printStackTrace();
        }
    }

    public Map<String, Object> getS4jConfObjMap() { return this.s4jConfMapTgt; }
    public Map<String, Object> getS4jConfMapObj_client() { return this.clientConfMapTgt; }
    public Map<String, Object> getS4jConfMapObj_producer() { return this.producerConfMapTgt; }
    public Map<String, Object> getS4jConfMapObj_consumer() { return this.consumerConfMapTgt; }
    public Map<String, Object> getS4jConfMapObj_jms() { return this.jmsConfMapTgt; }
    public Map<String, Object> getS4jConfMapObj_misc() { return this.miscConfMapTgt; }

    // Merge two config maps; on key collision the "extra" map's value wins.
    private Map<String, Object> mergeConfigObjMaps(
        Map<String, Object> origConfigObjMap,
        Map<String, Object> extraConfigObjMap )
    {
        Map<String, Object> newConfigObjMap = new HashMap<>();

        // If there are the same settings in both "orig" and "extra" maps,
        // the one in the "extra" map will take over
        newConfigObjMap.putAll(origConfigObjMap);
        newConfigObjMap.putAll(extraConfigObjMap);

        return newConfigObjMap;
    }

    /**
     * Return the effective S4J config map with the given extra raw consumer settings
     * merged into its nested "consumerConfig" entry (extra settings win on collision).
     * NOTE: this mutates and returns the shared effective map, matching the original behavior.
     */
    public Map<String, Object> mergeExtraConsumerConfig(
        Map<String, String> extraConsumerConfigRaw)
    {
        if ( (extraConsumerConfigRaw == null) || (extraConsumerConfigRaw.isEmpty()) ) {
            return getS4jConfObjMap();
        }
        else {
            Map<String, Object> origConsumerConfigObjMap = getS4jConfMapObj_consumer();
            Map<String, Object> extraConsumerConfigObjMap =
                S4JClientConfConverter.convertRawConsumerConf(extraConsumerConfigRaw);
            Map<String, Object> mergedConsumerConfigObjMap =
                mergeConfigObjMaps(origConsumerConfigObjMap, extraConsumerConfigObjMap);

            Map<String, Object> mergedS4JConfObjMap = getS4jConfObjMap();
            mergedS4JConfObjMap.put("consumerConfig", mergedConsumerConfigObjMap);

            return mergedS4JConfObjMap;
        }
    }

    public String toString() {
        return new ToStringBuilder(this).
            append("effectiveS4jConfMap", s4jConfMapTgt).
            toString();
    }
}

View File

@@ -0,0 +1,429 @@
package io.nosqlbench.adapter.s4j.util;
/*
* Copyright (c) 2022 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import com.datastax.oss.pulsar.jms.shaded.org.apache.pulsar.client.api.CompressionType;
import com.fasterxml.jackson.databind.ObjectMapper;
import io.nosqlbench.adapter.s4j.exception.S4JAdapterInvalidParamException;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.math.NumberUtils;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
 * This class is used to convert the configuration items in its raw
 * format (as provided in the property file) to the format needed by
 * the S4J driver.
 *
 * Bug fix: the "compressionType" switch in {@link #convertRawProducerConf} was
 * missing {@code break} statements, so every recognized value fell through and
 * ended up as SNAPPY.
 */
public class S4JClientConfConverter {

    /**
     * Convert raw Pulsar client configuration (String values) to the object map
     * expected by the S4J driver. No special handling for non-primitive types is needed.
     */
    public static Map<String, Object> convertRawClientConf(Map<String, String> pulsarClientConfMapRaw) {
        Map<String, Object> s4jClientConfObjMap = new HashMap<>();
        s4jClientConfObjMap.putAll(pulsarClientConfMapRaw);
        return s4jClientConfObjMap;
    }

    // Valid producer configuration keys and their expected value types
    // <<< https://pulsar.apache.org/docs/client-libraries-java/#configure-producer >>>
    private final static Map<String, String> validStdProducerConfKeyTypeMap = Map.ofEntries(
        Map.entry("topicName", "String"),
        Map.entry("producerName","String"),
        Map.entry("sendTimeoutMs","long"),
        Map.entry("blockIfQueueFull","boolean"),
        Map.entry("maxPendingMessages","int"),
        Map.entry("maxPendingMessagesAcrossPartitions","int"),
        Map.entry("messageRoutingMode","MessageRoutingMode"),
        Map.entry("hashingScheme","HashingScheme"),
        Map.entry("cryptoFailureAction","ProducerCryptoFailureAction"),
        Map.entry("batchingMaxPublishDelayMicros","long"),
        Map.entry("batchingMaxMessages","int"),
        Map.entry("batchingEnabled","boolean"),
        Map.entry("chunkingEnabled","boolean"),
        Map.entry("compressionType","CompressionType"),
        Map.entry("initialSubscriptionName","string")
    );

    /**
     * Convert raw producer configuration to the typed object map, handling the
     * non-primitive "compressionType" setting explicitly.
     *
     * @throws S4JAdapterInvalidParamException on an unrecognized compressionType value
     */
    public static Map<String, Object> convertRawProducerConf(Map<String, String> pulsarProducerConfMapRaw) {
        Map<String, Object> s4jProducerConfObjMap = new HashMap<>();
        setConfObjMapForPrimitives(s4jProducerConfObjMap, pulsarProducerConfMapRaw, validStdProducerConfKeyTypeMap);

        // "compressionType" has value type "CompressionType"
        // - expecting the following values: 'LZ4', 'ZLIB', 'ZSTD', 'SNAPPY'
        String confKeyName = "compressionType";
        String confVal = pulsarProducerConfMapRaw.get(confKeyName);
        String expectedVal = "(LZ4|ZLIB|ZSTD|SNAPPY)";

        if (StringUtils.isNotBlank(confVal)) {
            if (StringUtils.equalsAnyIgnoreCase(confVal, "LZ4", "ZLIB", "ZSTD", "SNAPPY")) {
                CompressionType compressionType = CompressionType.NONE;

                // Fix: each case now breaks; previously the fall-through made every
                // recognized value resolve to SNAPPY
                switch (StringUtils.upperCase(confVal)) {
                    case "LZ4":
                        compressionType = CompressionType.LZ4;
                        break;
                    case "ZLIB":
                        compressionType = CompressionType.ZLIB;
                        break;
                    case "ZSTD":
                        compressionType = CompressionType.ZSTD;
                        break;
                    case "SNAPPY":
                        compressionType = CompressionType.SNAPPY;
                        break;
                }

                s4jProducerConfObjMap.put(confKeyName, compressionType);
            } else {
                throw new S4JAdapterInvalidParamException(
                    getInvalidConfValStr(confKeyName, confVal, "producer", expectedVal));
            }
        }

        // TODO: Skip the following Pulsar configuration items for now because they're not really
        //       needed in the NB S4J testing at the moment. Add support for them when needed.
        //       * messageRoutingMode
        //       * hashingScheme
        //       * cryptoFailureAction
        return s4jProducerConfObjMap;
    }

    // Valid consumer configuration keys and their expected value types
    // https://pulsar.apache.org/docs/client-libraries-java/#configure-consumer
    private final static Map<String, String> validStdConsumerConfKeyTypeMap = Map.ofEntries(
        Map.entry("topicNames", "Set<String>"),
        Map.entry("topicsPattern","Pattern"),
        Map.entry("subscriptionName","String"),
        Map.entry("subscriptionType","SubscriptionType"),
        Map.entry("receiverQueueSize","int"),
        Map.entry("acknowledgementsGroupTimeMicros","long"),
        Map.entry("negativeAckRedeliveryDelayMicros","long"),
        Map.entry("maxTotalReceiverQueueSizeAcrossPartitions","int"),
        Map.entry("consumerName","String"),
        Map.entry("ackTimeoutMillis","long"),
        Map.entry("tickDurationMillis","long"),
        Map.entry("priorityLevel","int"),
        Map.entry("cryptoFailureAction","ConsumerCryptoFailureAction"),
        Map.entry("properties","SortedMap<String, String>"),
        Map.entry("readCompacted","boolean"),
        Map.entry("subscriptionInitialPosition", "SubscriptionInitialPosition"),
        Map.entry("patternAutoDiscoveryPeriod", "int"),
        Map.entry("regexSubscriptionMode", "RegexSubscriptionMode"),
        Map.entry("deadLetterPolicy", "DeadLetterPolicy"),
        Map.entry("autoUpdatePartitions", "boolean"),
        Map.entry("replicateSubscriptionState", "boolean"),
        Map.entry("negativeAckRedeliveryBackoff", "RedeliveryBackoff"),
        Map.entry("ackTimeoutRedeliveryBackoff", "RedeliveryBackoff"),
        Map.entry("autoAckOldestChunkedMessageOnQueueFull", "boolean"),
        Map.entry("maxPendingChunkedMessage", "int"),
        Map.entry("expireTimeOfIncompleteChunkedMessageMillis", "long")
    );

    /**
     * Convert raw consumer configuration to the typed object map, handling the
     * JSON-valued settings "properties", "deadLetterPolicy",
     * "negativeAckRedeliveryBackoff" and "ackTimeoutRedeliveryBackoff" explicitly.
     *
     * @throws S4JAdapterInvalidParamException on malformed JSON or invalid keys/values
     */
    public static Map<String, Object> convertRawConsumerConf(Map<String, String> pulsarConsumerConfMapRaw) {
        Map<String, Object> s4jConsumerConfObjMap = new HashMap<>();
        setConfObjMapForPrimitives(s4jConsumerConfObjMap, pulsarConsumerConfMapRaw, validStdConsumerConfKeyTypeMap);

        // The following non-primitive type configuration items are already excluded
        // upstream (see S4JClientConf) and don't need to be processed here:
        //   * topicNames
        //   * topicsPattern
        //   * subscriptionName
        //   * subscriptionType
        //   * subscriptionInitialPosition
        //   * regexSubscriptionMode

        // "properties" has value type "SortedMap<String, String>"
        // - expecting the value string has the format: a JSON string that includes a set of key/value pairs
        String confKeyName = "properties";
        String confVal = pulsarConsumerConfMapRaw.get(confKeyName);
        String expectedVal = "{\"property1\":\"value1\", \"property2\":\"value2\"}, ...";

        ObjectMapper mapper = new ObjectMapper();

        if (StringUtils.isNotBlank(confVal)) {
            try {
                Map<String, String> consumerProperties = mapper.readValue(confVal, Map.class);

                // Empty map value is considered as no value
                if (!consumerProperties.isEmpty()) {
                    s4jConsumerConfObjMap.put(confKeyName, consumerProperties);
                }
            } catch (Exception e) {
                throw new S4JAdapterInvalidParamException(
                    getInvalidConfValStr(confKeyName, confVal, "consumer", expectedVal));
            }
        }

        // "deadLetterPolicy"
        // - expecting the value is a JSON string has the format:
        //   {"maxRedeliverCount":"<int_value>","deadLetterTopic":"<topic_name>","initialSubscriptionName":"<sub_name>"}
        confKeyName = "deadLetterPolicy";
        confVal = pulsarConsumerConfMapRaw.get(confKeyName);
        expectedVal = "{" +
            "\"maxRedeliverCount\":\"<int_value>\"," +
            "\"deadLetterTopic\":\"<topic_name>\"," +
            "\"initialSubscriptionName\":\"<sub_name>\"}";

        if (StringUtils.isNotBlank(confVal)) {
            try {
                Map<String, String> dlqPolicyMap = mapper.readValue(confVal, Map.class);

                // Empty map value is considered as no value
                if (!dlqPolicyMap.isEmpty()) {
                    boolean valid = true;

                    // The JSON key must be one of "maxRedeliverCount", "deadLetterTopic", "initialSubscriptionName"
                    for (String key : dlqPolicyMap.keySet()) {
                        if (!StringUtils.equalsAnyIgnoreCase(key,
                            "maxRedeliverCount", "deadLetterTopic", "initialSubscriptionName")) {
                            valid = false;
                            break;
                        }
                    }

                    // DLQ.maxRedeliverCount is mandatory and must be numeric
                    if (valid && !dlqPolicyMap.containsKey("maxRedeliverCount")) {
                        valid = false;
                    }

                    String maxRedeliverCountStr = dlqPolicyMap.get("maxRedeliverCount");
                    if (!NumberUtils.isCreatable(maxRedeliverCountStr)) {
                        valid = false;
                    }

                    if (valid) {
                        // In S4J driver, DLQ setting is done via a Map
                        // <<< https://docs.datastax.com/en/fast-pulsar-jms/docs/1.1/pulsar-jms-implementation.html#dead-letter-policy >>>
                        s4jConsumerConfObjMap.put(confKeyName, dlqPolicyMap);
                    } else {
                        throw new S4JAdapterInvalidParamException(
                            getInvalidConfValStr(confKeyName, confVal, "consumer", expectedVal));
                    }
                }
            } catch (Exception e) {
                throw new S4JAdapterInvalidParamException(
                    getInvalidConfValStr(confKeyName, confVal, "consumer", expectedVal));
            }
        }

        // "negativeAckRedeliveryBackoff" or "ackTimeoutRedeliveryBackoff"
        // - expecting the value is a JSON string has the format:
        //   {"minDelayMs":"<int_value>", "maxDelayMs":"<int_value>", "multiplier":"<double_value>"}
        String[] redeliveryBackoffConfigSet = {"negativeAckRedeliveryBackoff", "ackTimeoutRedeliveryBackoff"};
        expectedVal = "{" +
            "\"minDelayMs\":\"<int_value>\"," +
            "\"maxDelayMs\":\"<int_value>\"," +
            "\"multiplier\":\"<double_value>\"}";

        for (String confKey : redeliveryBackoffConfigSet) {
            confVal = pulsarConsumerConfMapRaw.get(confKey);

            if (StringUtils.isNotBlank(confVal)) {
                try {
                    Map<String, String> redliveryBackoffMap = mapper.readValue(confVal, Map.class);

                    // Empty map value is considered as no value
                    if (! redliveryBackoffMap.isEmpty()) {
                        boolean valid = true;

                        // The JSON key must be one of "minDelayMs", "maxDelayMs", "multiplier"
                        for (String key : redliveryBackoffMap.keySet()) {
                            if (!StringUtils.equalsAnyIgnoreCase(key,
                                "minDelayMs", "maxDelayMs", "multiplier")) {
                                valid = false;
                                break;
                            }
                        }

                        // Any provided values must be numeric
                        String minDelayMsStr = redliveryBackoffMap.get("minDelayMs");
                        String maxDelayMsStr = redliveryBackoffMap.get("maxDelayMs");
                        String multiplierStr = redliveryBackoffMap.get("multiplier");

                        if ((StringUtils.isNotBlank(minDelayMsStr) && !NumberUtils.isCreatable(minDelayMsStr)) ||
                            (StringUtils.isNotBlank(maxDelayMsStr) && !NumberUtils.isCreatable(maxDelayMsStr)) ||
                            (StringUtils.isNotBlank(multiplierStr) && !NumberUtils.isCreatable(multiplierStr))) {
                            valid = false;
                        }

                        if (valid) {
                            // In S4J driver, AckTimeOut and Negative TimeOut is done via a Map
                            // <<< https://docs.datastax.com/en/fast-pulsar-jms/docs/1.1/pulsar-jms-implementation.html#ack-timeout >>>
                            // <<< https://docs.datastax.com/en/fast-pulsar-jms/docs/1.1/pulsar-jms-implementation.html#negative-ack >>>
                            s4jConsumerConfObjMap.put(confKey, redliveryBackoffMap);
                        } else {
                            throw new S4JAdapterInvalidParamException(
                                getInvalidConfValStr(confKey, confVal, "consumer", expectedVal));
                        }
                    }
                } catch (Exception e) {
                    throw new S4JAdapterInvalidParamException(
                        getInvalidConfValStr(confKey, confVal, "consumer", expectedVal));
                }
            }
        }

        // TODO: Skip the following Pulsar configuration items for now because they're not really
        //       needed in the NB S4J testing right now. Add the support for them when needed.
        //       * cryptoFailureAction
        return s4jConsumerConfObjMap;
    }

    // Valid S4J JMS-level configuration keys and their expected value types
    // https://docs.datastax.com/en/fast-pulsar-jms/docs/1.1/pulsar-jms-reference.html#_configuration_options
    private final static Map<String, String> validS4jJmsConfKeyTypeMap = Map.ofEntries(
        Map.entry("jms.acknowledgeRejectedMessages", "boolean"),
        Map.entry("jms.clientId","String"),
        Map.entry("jms.emulateTransactions","boolean"),
        Map.entry("jms.enableClientSideEmulation","boolean"),
        Map.entry("jms.forceDeleteTemporaryDestinations","boolean"),
        Map.entry("jms.precreateQueueSubscription","boolean"),
        Map.entry("jms.queueSubscriptionName","String"),
        Map.entry("jms.systemNamespace","String"),
        Map.entry("jms.topicSharedSubscriptionType","String"),
        Map.entry("jms.useCredentialsFromCreateConnection","boolean"),
        Map.entry("jms.useExclusiveSubscriptionsForSimpleConsumers","long"),
        Map.entry("jms.usePulsarAdmin","boolean"),
        Map.entry("jms.useServerSideFiltering","boolean"),
        Map.entry("jms.waitForServerStartupTimeout","int"),
        Map.entry("jms.transactionsStickyPartitions", "boolean")
    );

    /**
     * Convert raw "jms."-prefixed configuration to the typed object map.
     * All supported settings are primitive-typed; no special handling needed.
     */
    public static Map<String, Object> convertRawJmsConf(Map<String, String> s4jJmsConfMapRaw) {
        Map<String, Object> s4jJmsConfObjMap = new HashMap<>();
        setConfObjMapForPrimitives(s4jJmsConfObjMap, s4jJmsConfMapRaw, validS4jJmsConfKeyTypeMap);
        return s4jJmsConfObjMap;
    }

    // Valid miscellaneous configuration keys and their expected value types
    // https://docs.datastax.com/en/fast-pulsar-jms/docs/1.1/pulsar-jms-reference.html#_configuration_options
    private final static Map<String, String> validS4jMiscConfKeyTypeMap = Map.ofEntries(
        Map.entry("brokerServiceUrl","String"),
        Map.entry("webServiceUrl","String"),
        Map.entry("ackTimeout", "long"),
        Map.entry("ackTimeoutMillis","long"),
        Map.entry("enableTransaction","boolean"),
        Map.entry("consumerConfig","Map<String,Object>"),
        Map.entry("producerConfig","Map<String,Object>")
    );

    /**
     * Convert raw miscellaneous configuration to the typed object map.
     * The non-primitive "producerConfig"/"consumerConfig" entries are set
     * explicitly elsewhere (see S4JClientConf) and are not handled here.
     */
    public static Map<String, Object> convertRawMiscConf(Map<String, String> s4jMiscConfMapRaw) {
        Map<String, Object> s4jMiscConfObjMap = new HashMap<>();
        setConfObjMapForPrimitives(s4jMiscConfObjMap, s4jMiscConfMapRaw, validS4jMiscConfKeyTypeMap);
        return s4jMiscConfObjMap;
    }

    // Utility function
    // - get configuration key names whose declared value type matches (ignoring case)
    private static List<String> getConfKeyNameByValueType(Map<String, String> confKeyTypeMap, String tgtValType) {
        ArrayList<String> confKeyNames = new ArrayList<>();

        // Typed Map.Entry<String, String> instead of the previous raw Map.Entry
        for (Map.Entry<String, String> entry : confKeyTypeMap.entrySet()) {
            if (StringUtils.equalsIgnoreCase(entry.getValue(), tgtValType)) {
                confKeyNames.add(entry.getKey());
            }
        }

        return confKeyNames;
    }

    // Conversion from Map<String, String> to Map<String, Object> for configuration items with primitive
    // value types (String, long, int, boolean); blank values are skipped.
    private static void setConfObjMapForPrimitives(
        Map<String, Object> tgtConfObjMap,
        Map<String, String> srcConfMapRaw,
        Map<String, String> validConfKeyTypeMap)
    {
        List<String> confKeyList = new ArrayList<>();

        // All configuration items with "String" as the value type
        confKeyList = getConfKeyNameByValueType(validConfKeyTypeMap, "String");
        for (String confKey : confKeyList) {
            if (srcConfMapRaw.containsKey(confKey)) {
                String confVal = srcConfMapRaw.get(confKey);
                if (StringUtils.isNotBlank(confVal)) {
                    tgtConfObjMap.put(confKey, confVal);
                }
            }
        }

        // All configuration items with "long" as the value type
        confKeyList = getConfKeyNameByValueType(validConfKeyTypeMap, "long");
        for (String confKey : confKeyList) {
            if (srcConfMapRaw.containsKey(confKey)) {
                String confVal = srcConfMapRaw.get(confKey);
                if (StringUtils.isNotBlank(confVal)) {
                    tgtConfObjMap.put(confKey, Long.valueOf(confVal));
                }
            }
        }

        // All configuration items with "int" as the value type
        confKeyList = getConfKeyNameByValueType(validConfKeyTypeMap, "int");
        for (String confKey : confKeyList) {
            if (srcConfMapRaw.containsKey(confKey)) {
                String confVal = srcConfMapRaw.get(confKey);
                if (StringUtils.isNotBlank(confVal)) {
                    tgtConfObjMap.put(confKey, Integer.valueOf(confVal));
                }
            }
        }

        // All configuration items with "boolean" as the value type
        confKeyList = getConfKeyNameByValueType(validConfKeyTypeMap, "boolean");
        for (String confKey : confKeyList) {
            if (srcConfMapRaw.containsKey(confKey)) {
                String confVal = srcConfMapRaw.get(confKey);
                if (StringUtils.isNotBlank(confVal)) {
                    tgtConfObjMap.put(confKey, Boolean.valueOf(confVal));
                }
            }
        }

        // TODO: So far the above primitive types should be good enough.
        //       Add support for other types when needed
    }

    // Build the standard error message for an invalid configuration value
    private static String getInvalidConfValStr(String confKey, String confVal, String configCategory, String expectedVal) {
        return "Incorrect value \"" + confVal + "\" for Pulsar " + configCategory +
            " configuration item of \"" + confKey + "\". Expecting the following value (format): " + expectedVal;
    }
}

View File

@@ -0,0 +1,86 @@
package io.nosqlbench.adapter.s4j.util;
/*
* Copyright (c) 2022 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import io.nosqlbench.adapter.s4j.S4JSpace;
import io.nosqlbench.adapter.s4j.dispensers.S4JBaseOpDispenser;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import javax.jms.CompletionListener;
import javax.jms.JMSException;
import javax.jms.Message;
/**
* Used for async message production
*/
/**
 * Used for async message production: receives the provider's callback when an
 * asynchronously sent JMS message either completes or fails.
 */
public class S4JCompletionListener implements CompletionListener {

    private final static Logger logger = LogManager.getLogger(S4JCompletionListener.class);

    private final S4JSpace s4JSpace;
    private final S4JBaseOpDispenser s4jBaseOpDispenser;

    public S4JCompletionListener(S4JSpace s4JSpace, S4JBaseOpDispenser s4jBaseOpDispenser) {
        assert (s4JSpace != null);
        assert (s4jBaseOpDispenser != null);
        this.s4JSpace = s4JSpace;
        this.s4jBaseOpDispenser = s4jBaseOpDispenser;
    }

    @Override
    public void onCompletion(Message message) {
        try {
            traceSendSuccess(message);

            if (!s4JSpace.isTrackingMsgRecvCnt()) {
                return;
            }

            long responseCntSoFar = s4JSpace.incTotalOpResponseCnt();
            if (logger.isTraceEnabled()) {
                logger.trace("... async op response received so far: {}", responseCntSoFar);
            }
        }
        catch (JMSException e) {
            S4JAdapterUtil.processMsgErrorHandling(
                e,
                s4JSpace.isStrictMsgErrorHandling(),
                "Unexpected errors when async sending a JMS message.");
        }
    }

    // Trace-log a successful async delivery with its JMS message ID and the
    // NB-injected sequence number (used for testing).
    private void traceSendSuccess(Message message) throws JMSException {
        if (logger.isTraceEnabled()) {
            // for testing purpose
            String myMsgSeq = message.getStringProperty(S4JAdapterUtil.NB_MSG_SEQ_PROP);
            logger.trace("onCompletion::Async message send successful - message ID {} ({}) "
                , message.getJMSMessageID(), myMsgSeq);
        }
    }

    @Override
    public void onException(Message message, Exception e) {
        try {
            if (logger.isDebugEnabled()) {
                // for testing purpose
                String myMsgSeq = message.getStringProperty(S4JAdapterUtil.NB_MSG_SEQ_PROP);
                logger.debug("onException::Async message send failed - message ID {} ({}) "
                    , message.getJMSMessageID(), myMsgSeq);
            }
        }
        catch (JMSException jmsException) {
            logger.warn("onException::Unexpected error: " + jmsException.getMessage());
        }
    }
}

View File

@@ -0,0 +1,52 @@
package io.nosqlbench.adapter.s4j.util;
import org.apache.commons.lang3.builder.ToStringBuilder;
import javax.jms.JMSContext;
import javax.jms.Session;
/*
* Copyright (c) 2022 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
 * Thin wrapper pairing a JMSContext with the identifier it is tracked under,
 * capturing the context's session mode at construction time.
 */
public class S4JJMSContextWrapper {

    // Identifier under which this context is cached/tracked
    private final String contextId;
    private final JMSContext jmsContext;
    // Session mode captured once at construction (see javax.jms.JMSContext constants)
    private final int jmsSessionMode;

    public S4JJMSContextWrapper(String identifer, JMSContext jmsContext) {
        this.contextId = identifer;
        this.jmsContext = jmsContext;
        this.jmsSessionMode = jmsContext.getSessionMode();
    }

    public int getJmsSessionMode() {
        return jmsSessionMode;
    }

    // True when the wrapped context was created in transacted mode
    public boolean isTransactedMode() {
        return Session.SESSION_TRANSACTED == getJmsSessionMode();
    }

    public String getJmsContextIdentifer() {
        return contextId;
    }

    public JMSContext getJmsContext() {
        return jmsContext;
    }

    // Close the underlying JMSContext if one is held
    public void close() {
        if (jmsContext != null) {
            jmsContext.close();
        }
    }

    public String toString() {
        return new ToStringBuilder(this).
            append("jmsContextIdentifer", contextId).
            append("jmsContext", jmsContext.toString()).
            toString();
    }
}

View File

@@ -0,0 +1,97 @@
package io.nosqlbench.adapter.s4j.util;
/*
* Copyright (c) 2022 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import com.codahale.metrics.Histogram;
import io.nosqlbench.adapter.s4j.S4JSpace;
import io.nosqlbench.adapter.s4j.dispensers.S4JBaseOpDispenser;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import javax.jms.JMSContext;
import javax.jms.JMSException;
import javax.jms.Message;
import javax.jms.MessageListener;
/**
* Used for async message consumption
*/
/**
 * JMS {@link MessageListener} used for asynchronous message consumption.
 *
 * <p>On each delivery it (1) acknowledges the message per the configured ack ratio and
 * optional slow-ack simulation, (2) records the message size into the adapter's size
 * histogram, and (3) optionally updates the space-level receive counters. JMS errors
 * are routed through the adapter's shared error-handling policy.</p>
 */
public class S4JMessageListener implements MessageListener {

    private final static Logger logger = LogManager.getLogger(S4JMessageListener.class);

    private final float msgAckRatio;
    private final int slowAckInSec;
    private final JMSContext jmsContext;
    private final S4JSpace s4jSpace;
    private final S4JBaseOpDispenser s4jBaseOpDispenser;

    /**
     * @param jmsContext         the JMS context used for acknowledgement; non-null
     * @param s4jSpace           shared adapter space (counters, ack processing); non-null
     * @param s4jBaseOpDispenser dispenser providing adapter metrics; non-null
     * @param msgAckRatio        fraction of received messages to acknowledge
     * @param slowAckInSec       seconds to pause before acknowledging (0 = no delay)
     */
    public S4JMessageListener(
        JMSContext jmsContext,
        S4JSpace s4jSpace,
        S4JBaseOpDispenser s4jBaseOpDispenser,
        float msgAckRatio,
        int slowAckInSec)
    {
        assert (jmsContext != null);
        assert (s4jSpace != null);
        assert (s4jBaseOpDispenser != null);

        this.jmsContext = jmsContext;
        this.s4jSpace = s4jSpace;
        this.s4jBaseOpDispenser = s4jBaseOpDispenser;
        this.msgAckRatio = msgAckRatio;
        this.slowAckInSec = slowAckInSec;
    }

    @Override
    public void onMessage(Message message) {
        try {
            if (message == null) {
                // Null delivery: only count it when receive-count tracking is enabled.
                if (s4jSpace.isTrackingMsgRecvCnt()) {
                    s4jSpace.incTotalNullMsgRecvdCnt();
                }
                return;
            }

            // Acknowledge per the configured ratio / slow-ack simulation.
            s4jSpace.processMsgAck(jmsContext, message, msgAckRatio, slowAckInSec);

            // Record the NB-stamped payload size into the adapter's size histogram.
            int payloadSize = message.getIntProperty(S4JAdapterUtil.NB_MSG_SIZE_PROP);
            s4jBaseOpDispenser.getS4jAdapterMetrics().getMessagesizeHistogram().update(payloadSize);

            if (logger.isTraceEnabled()) {
                // for testing purpose
                String msgSeqId = message.getStringProperty(S4JAdapterUtil.NB_MSG_SEQ_PROP);
                logger.trace("onMessage::Async message receive successful - message ID {} ({}) "
                    , message.getJMSMessageID(), msgSeqId);
            }

            if (s4jSpace.isTrackingMsgRecvCnt()) {
                s4jSpace.incTotalOpResponseCnt();
            }
        }
        catch (JMSException e) {
            S4JAdapterUtil.processMsgErrorHandling(
                e,
                s4jSpace.isStrictMsgErrorHandling(),
                "Unexpected errors when async receiving a JMS message.");
        }
    }
}

View File

@@ -0,0 +1,73 @@
# document level parameters that apply to all Pulsar client types:
params:
temporary_dest: "false"
dest_type: "topic"
# default: true
async_api: "false"
# whether to wait indefinitely (as the default behavior)
# - only applies when "async_api" is false (synchronous API)
# - only applies to message receiving
# - default: false
blocking_msg_recv: "true"
## (Optional) If shared topic or not (only relevant when the destination is a topic)
share_topic: "true"
## (Optional) If durable topic or not (only relevant when the destination is a topic)
durable_topic: "false"
blocks:
msg-consume-block:
ops:
op1:
## The value represents the destination (queue or topic) name)
MessageConsume: "mys4jtest_t"
## Subscription name
## - optional for queue and non-shared, non-durable topic
## - mandatory for shared and/or durable topic
subscription_name: "nbs4j-sub"
## (Optional) client side message selector
msg_selector: ""
## (Optional) No Local
no_local: "true"
## (Optional) Read Timeout
read_timeout: "10"
## (Optional) Receive message without wait
no_wait: "true"
## (Optional) Message acknowledgement ratio
msg_ack_ratio: "0.5"
## (Optional) Simulate slow consumer acknowledgement
# must be non-negative numbers. negative numbers will be treated as 0
# 0 - means no simulation
# positive value - the number of seconds to pause before acknowledgement
slow_ack_in_sec: "0"
#####
## (Optional) Statement level settings for Consumer
#
## AckTimeout value (at least 1 second)
consumer.ackTimeoutMillis: 1000
## DLQ policy
consumer.deadLetterPolicy: '{ "maxRedeliverCount": "2" }'
## NegativeAck Redelivery policy
consumer.negativeAckRedeliveryBackoff: |
{
}
## AckTimeout Redelivery policy
consumer.ackTimeoutRedeliveryBackoff: |
{
"minDelayMs":"10",
"maxDelayMs":"20",
"multiplier":"1.2"
}

View File

@@ -0,0 +1,54 @@
bindings:
cur_cycle: ToString()
mykey: Mod(5); ToString(); Prefix("key-")
mytext_val: AlphaNumericString(30)
mymap_val1: AlphaNumericString(10)
mymap_val2: AlphaNumericString(20)
mystream_val1: AlphaNumericString(50)
# document level parameters that apply to all Pulsar client types:
params:
temporary_dest: "false"
dest_type: "queue"
async_api: "true"
blocks:
msg-produce-block:
ops:
op1:
## The value represents the destination (queue or topic) name)
MessageProduce: "mys4jtest_t"
## (Optional) JMS headers (in JSON format).
msg_header: |
{
"JMSPriority": "9"
}
## (Optional) JMS properties, predefined or customized (in JSON format).
msg_property: |
{
"JMSXGroupID": "{mykey}"
}
## (Optional) JMS message types, default to be BYTES.
msg_type: "text"
## (Mandatory) JMS message body. Value depends on msg_type.
msg_body: "{mytext_val}"
# # example of having "map" as the message type
# msg_type: "map"
# msg_body: |
# {
# "prop-key-1": "{mymap_val1}",
# "prop-key-2": "{mymap_val2}"
# }
# # example of having "stream" as the message type
# msg_type: "stream"
# msg_body: |
# [
# "100",
# "{mystream_val1}",
# "abcdef"
# ]

View File

@@ -0,0 +1,244 @@
- [1. Overview](#1-overview)
- [2. Execute NB S4J Workload](#2-execute-nb-s4j-workload)
- [3. NB S4J Driver Configuration Parameter File](#3-nb-s4j-driver-configuration-parameter-file)
- [4. NB S4J Scenario Definition File](#4-nb-s4j-scenario-definition-file)
- [4.1. Document Level Parameters](#41-document-level-parameters)
- [4.2. NB S4J Workload Types](#42-nb-s4j-workload-types)
- [4.2.1. Publish Messages to a JMS Destination, Queue or Topic](#421-publish-messages-to-a-jms-destination-queue-or-topic)
- [4.2.2. Receiving Messages from a JMS Destination, Queue or Topic](#422-receiving-messages-from-a-jms-destination-queue-or-topic)
# 1. Overview
This driver is similar to [NB Pulsar driver](../../../../driver-pulsar/src/main/resources/pulsar.md) that allows NB based workload generation and performance testing against a Pulsar cluster. It also follows a similar pattern to configure and connect to the Pulsar cluster for workload execution.
However, the major difference is instead of simulating native Pulsar client workloads, the NB S4J driver allows simulating JMS oriented workloads (that follow JMS spec 2.0 and 1.1) to be executed on the Pulsar cluster. Under the hood, this is achieved through DataStax's [Starlight for JMS API](https://github.com/datastax/pulsar-jms).
# 2. Execute NB S4J Workload
The following is an example of executing a NB S4J workload (defined as *pulsar_s4j.yaml*)
```
$ <nb_cmd> run driver=s4j cycles=10000 threads=4 num_conn=2 num_session=2 session_mode="client_ack" strict_msg_error_handling="false" web_url=http://localhost:8080 service_url=pulsar://localhost:6650 config=/path/to/nb_s4j_config.properties yaml=/path/to/pulsar_s4j.yaml -vv --logs-dir=s4j_log
```
In the above NB CLI command, the S4J driver specific parameters are listed as below:
* num_conn: the number of JMS connections to be created
* num_session: the number of JMS sessions per JMS connection
* Note that multiple JMS sessions can be created from one JMS connection, and they share the same connection characteristics.
* session_mode: the session mode used when creating a JMS session
* web_url: the URL of the Pulsar web service
* service_url: the URL of the Pulsar native protocol service
* (optional) strict_msg_error_handling: whether to do strict error handling
* when true, Pulsar client error will not stop NB S4J execution
* otherwise, any Pulsar client error will stop NB S4J execution
* (optional) max_s4jop_time: maximum time (in seconds) to execute the actual S4J operations (e.g. message sending or receiving). If NB execution time is beyond this limit, each NB cycle is just a no-op. Please NOTE:
* this is useful when controlled NB execution is needed with NB CLI scripting.
* if this parameter is not specified or the value is 0, it means no time limitation. Every single NB cycle will trigger an actual S4J operation.
* (optional) track_msg_cnt: When set to true (with default as false), the S4J driver will keep track of the confirmed response count for message sending and receiving.
Other NB engine parameters are straight forward:
* driver: must be **s4j**
* threads: depending on the workload type, the NB thread number determines how many producers or consumers will be created. All producers or consumers will share the available JMS connections and sessions
* yaml: the NB S4J scenario definition yaml file
* config: specify the file that contains the connection parameters used by the S4J API
# 3. NB S4J Driver Configuration Parameter File
The S4J API has a list of configuration options that can be found here: https://docs.datastax.com/en/fast-pulsar-jms/docs/1.1/pulsar-jms-reference.html#_configuration_options.
The NB S4J driver supports these configuration options via a config property file, an example of which is listed below. The configuration parameters in this file are grouped into several groups. The comments below explain how the grouping works.
```
###########
# Overview: Starlight for JMS (S4J) API configuration items are listed at:
# https://docs.datastax.com/en/fast-pulsar-jms/docs/1.1/pulsar-jms-reference.html#_configuration_options
enableTransaction=true
####
# S4J API specific configurations (non Pulsar specific) - jms.***
jms.enableClientSideEmulation=true
jms.usePulsarAdmin=false
#...
#####
# Pulsar client related configurations - client.***
# - Valid settings: http://pulsar.apache.org/docs/en/client-libraries-java/#client
#
# - These Pulsar client settings (without the "client." prefix) will be
# directly used as S4J configuration settings, on a 1-to-1 basis.
#--------------------------------------
# only relevant when authentication is enabled
client.authPlugin=org.apache.pulsar.client.impl.auth.AuthenticationToken
client.authParams=file:///path/to/authentication/jwt/file
# only relevant when in-transit encryption is enabled
client.tlsTrustCertsFilePath=/path/to/certificate/file
#...
#####
# Producer related configurations (global) - producer.***
# - Valid settings: http://pulsar.apache.org/docs/en/client-libraries-java/#configure-producer
#
# - These Pulsar producer settings (without "producer." prefix) will be collectively (as a map)
# mapped to S4J connection setting of "producerConfig"
#--------------------------------------
producer.blockIfQueueFull=true
# disable producer batching
#producer.batchingEnabled=false
#...
#####
# Consumer related configurations (global) - consumer.***
# - Valid settings: http://pulsar.apache.org/docs/en/client-libraries-java/#configure-consumer
#
# - These Pulsar consumer settings (without "consumer." portion) will be collectively (as a map)
# mapped to S4J connection setting of "consumerConfig"
#--------------------------------------
#...
```
# 4. NB S4J Scenario Definition File
Like any NB scenario yaml file, the NB S4J yaml file is composed of 3 major components:
* bindings: define NB bindings
* params: define document level parameters
* blocks: define various statement blocks. Each statement block represents one JMS workload type
```
bindings:
... ...
params:
... ...
blocks:
... ...
```
## 4.1. Document Level Parameters
The parameters defined in this section will be applicable to all statement blocks. An example of some common parameters that can be set at the document level is listed below:
* temporary_dest: whether JMS workload is dealing with a temporary destination
* dest_type: JMS destination type - queue or topic
```
params:
temporary_dest: "false"
dest_type: "<jms_destination_type>"
async_api: "true"
txn_batch_num: <number_of_message_ops_in_one_transaction>
blocking_msg_recv: <whether_to_block_when_receiving_messages>
shared_topic: <if_shared_topic_or_not> // only relevant when the destination type is a topic
durable_topic: <if_durable_topic_or_not> // only relevant when the destination type is a topic
```
Please **NOTE** that the above parameters won't necessarily be specified at the document level. If they're specified at the statement level, they will only impact the statement within which they're specified.
## 4.2. NB S4J Workload Types
The NB S4J driver supports 2 types of JMS operations:
* One for message producing/sending/publishing
* this is identified by NB Op identifier ***MessageProduce***
* One for message consuming/receiving/subscribing
* this is identified by NB Op identifier ***MessageConsume***
### 4.2.1. Publish Messages to a JMS Destination, Queue or Topic
The NB S4J statement block for publishing messages to a JMS destination (either a Queue or a topic) has the following format.
* Optionally, you can specify the JMS headers (**msg_header**) and properties (**msg_property**) via valid JSON strings in key: value format.
* The default message type (**msg_type**) is "byte". But optionally, you can specify other message types such as "text", "map", etc.
* The message payload (**msg_body**) is the only mandatory field.
```
blocks:
msg-produce-block:
ops:
op1:
## The value represents the destination (queue or topic) name)
MessageProduce: "mys4jtest_t"
## (Optional) JMS headers (in JSON format).
msg_header: |
{
"<header_key>": "<header_value>"
}
## (Optional) JMS properties, predefined or customized (in JSON format).
msg_property: |
{
"<property1_key>": "<property_value1>",
"<property2_key>": "<property_value2>"
}
## (Optional) JMS message types, default to be BYTES.
msg_type: "text"
## (Mandatory) JMS message body. Value depends on msg_type.
msg_body: "{mytext_val}"
```
### 4.2.2. Receiving Messages from a JMS Destination, Queue or Topic
The generic NB S4J statement block for receiving messages from a JMS destination (either a Queue or a topic) has the following format. All the statement specific parameters are listed as below.
* **msg_selector**: Message selector string
* **no_local**: Only applicable to a Topic as the destination. This allows a subscriber to inhibit the delivery of messages published by its own connection.
* **read_timeout**: The timeout value for receiving a message from a destination
* This setting only works if **no_wait** is false
* If the **read_timeout** value is 0, it behaves the same as **no_wait** is true
* **no_wait**: Whether to receive the next message immediately if one is available
* **msg_ack_ratio**: the ratio of the received messages being acknowledged
* **slow_ack_in_sec**: whether to simulate a slow consumer (pause before acknowledging after receiving a message)
* value 0 means no simulation (consumer acknowledges right away)
* negative ack/ack timeout/deadletter topic related settings
* The settings here (as the scenario specific settings) will be merged with the
* global settings in *s4j_config.properties* file
```
blocks:
  msg-consume-block:
    ops:
      op1:
        ## The value represents the destination (queue or topic) name
        MessageConsume: "mys4jtest_t"
## (Optional) client side message selector
msg_selector: ""
## (Optional) No Local
no_local: "true"
## (Optional) Read Timeout
read_timeout: "10"
## (Optional) Receive message without wait
no_wait: "true"
## (Optional) Message acknowledgement ratio
msg_ack_ratio: "0.5"
## (Optional) Simulate slow consumer acknowledgement
# must be non-negative numbers. negative numbers will be treated as 0
# 0 - means no simulation
# positive value - the number of seconds to pause before acknowledgement
slow_ack_in_sec: "0"
#####
## (Optional) Statement level settings for Consumer
#
## AckTimeout value (at least 1 second)
consumer.ackTimeoutMillis: 1000
## DLQ policy
consumer.deadLetterPolicy: '{ "maxRedeliverCount": "2" }'
## NegativeAck Redelivery policy
consumer.negativeAckRedeliveryBackoff: |
{
}
## AckTimeout Redelivery policy
consumer.ackTimeoutRedeliveryBackoff: |
{
"minDelayMs":"10",
"maxDelayMs":"20",
"multiplier":"1.2"
}
```

View File

@@ -0,0 +1,61 @@
###########
# Overview: Starlight for JMS (S4J) API configuration items are listed at:
# https://docs.datastax.com/en/fast-pulsar-jms/docs/1.1/pulsar-jms-reference.html#_configuration_options
enableTransaction=true
####
# S4J API specific configurations (non Pulsar specific) - jms.***
#--------------------------------------
jms.usePulsarAdmin=false
jms.precreateQueueSubscription=false
jms.enableClientSideEmulation=false
jms.useServerSideFiltering=true
jms.useCredentialsFromCreateConnection=false
jms.transactionsStickyPartitions=true
# for JMS priority
jms.enableJMSPriority=true
jms.priorityMapping=non-linear
#...
#####
# Pulsar client related configurations - client.***
# - Valid settings: http://pulsar.apache.org/docs/en/client-libraries-java/#client
#
# - These Pulsar client settings (without the "client." prefix) will be
# directly used as S4J configuration settings, on 1-to-1 basis.
#--------------------------------------
client.connectionTimeoutMs=5000
#client.authPlugin=org.apache.pulsar.client.impl.auth.AuthenticationToken
#client.authParams=
#...
#####
# Producer related configurations (global) - producer.***
# - Valid settings: http://pulsar.apache.org/docs/en/client-libraries-java/#configure-producer
#
# - These Pulsar producer settings (without "producer." prefix) will be collectively (as a map)
# mapped to S4J connection setting of "producerConfig"
#--------------------------------------
#producer.sendTimeoutMs=
producer.blockIfQueueFull=true
#producer.maxPendingMessages=10000
#producer.batchingMaxMessages=10000
#...
#####
# Consumer related configurations (global) - consumer.***
# - Valid settings: http://pulsar.apache.org/docs/en/client-libraries-java/#configure-consumer
#
# - These Pulsar consumer settings (without "consumer." portion) will be collectively (as a map)
# mapped to S4J connection setting of "consumerConfig"
#--------------------------------------
consumer.receiverQueueSize=2000
consumer.acknowledgementsGroupTimeMicros=0
consumer.ackTimeoutMillis=2000
consumer.deadLetterPolicy={ "maxRedeliverCount":"5", "deadLetterTopic":"", "initialSubscriptionName":"" }
consumer.ackTimeoutRedeliveryBackoff={"minDelayMs":"50", "maxDelayMs":"100", "multiplier":"2.0"}
consumer.negativeAckRedeliveryBackoff={}
#...

View File

@@ -23,7 +23,7 @@
<parent>
<artifactId>mvn-defaults</artifactId>
<groupId>io.nosqlbench</groupId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
<relativePath>../mvn-defaults</relativePath>
</parent>
@@ -38,14 +38,14 @@
<dependency>
<groupId>io.nosqlbench</groupId>
<artifactId>nb-annotations</artifactId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>io.nosqlbench</groupId>
<artifactId>adapters-api</artifactId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
<scope>compile</scope>
</dependency>

View File

@@ -23,7 +23,7 @@
<parent>
<artifactId>mvn-defaults</artifactId>
<groupId>io.nosqlbench</groupId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
<relativePath>../mvn-defaults</relativePath>
</parent>
@@ -40,7 +40,7 @@
<dependency>
<groupId>io.nosqlbench</groupId>
<artifactId>engine-api</artifactId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
</dependency>
</dependencies>

View File

@@ -21,7 +21,7 @@
<parent>
<artifactId>mvn-defaults</artifactId>
<groupId>io.nosqlbench</groupId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
<relativePath>../mvn-defaults</relativePath>
</parent>
@@ -39,19 +39,19 @@
<dependency>
<groupId>io.nosqlbench</groupId>
<artifactId>nb-spectest</artifactId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>io.nosqlbench</groupId>
<artifactId>nb-api</artifactId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>io.nosqlbench</groupId>
<artifactId>virtdata-userlibs</artifactId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
</dependency>

View File

@@ -19,16 +19,25 @@ package io.nosqlbench.engine.api.activityimpl.uniform.flowtypes;
import java.util.function.Function;
/**
* Run a function on the current cached result and replace it
* with the result of the function. Functions are one way of invoking
* <H2>ChainingOp<I,O>: f(I) -> O</I,O></H2>
* <P>
* Run a function on the current cached result in the current thread and replace it
* with the result of the function. ChainingOps are one way of invoking
* logic within a cycle. However, they are not intended to stand alone.
* A CycleFunction must always have an input to work on. This input is
* provided by a Supplier as optionally implemented by an Op
* A ChainingOp must always have an input to work on,
* provided by either a {@link CycleOp} OR <em>another</em> call to a {@link ChainingOp}</P>
*
* @param <I> Some input type.
* @param <I> Some input type, as determined by a previous {@link CycleOp} or {@link ChainingOp} on the same thread.
* @param <O> Some output type.
*/
public interface ChainingOp<I,O> extends Op, Function<I,O> {
public interface ChainingOp<I, O> extends Op, Function<I, O> {
/**
* Transform a value from a previous action and provide the result for a subsequent action.
*
* @param lastResult object form a previous operation or action
* @return a new result
*/
@Override
O apply(I i);
O apply(I lastResult);
}

View File

@@ -19,32 +19,33 @@ package io.nosqlbench.engine.api.activityimpl.uniform.flowtypes;
import java.util.function.LongFunction;
/**
* A CycleRunnable is simply a variation of a Runnable type.
* The main difference is that it is supplied with the cycle
* as input.
* <H2>CycleOp: f(cycle) -> T</H2>
* <p>A CycleOp of T is an operation which takes a long input value
* and produces a value of type T. It is implemented as
* {@link LongFunction} of T.</p>
*
* <P>This variant of {@link Op} has the ability to see the cycle
* which was previously used to select the op implementation.</p>
*
* <p>It also has the ability to emit an value which can be seen a subsequent operation, if
* and only if it is a {@link ChainingOp}s.</P>
*
* <h2>Designer Notes</h2>
* <p>
* If you are using the value in this call to select a specific type of behavior, it is very
* likely a candidate for factoring into separate op implementations.
* The {@link io.nosqlbench.engine.api.activityimpl.OpMapper}
* and {@link io.nosqlbench.engine.api.activityimpl.OpDispenser} abstractions are meant to move
* op type selection and scheduling to earlier in the activity.
* </p>
*
*/
public interface CycleOp<T> extends Op, LongFunction<T> {
// /**
// * <p>Run an action for the given cycle. The cycle is provided for anecdotal
// * usage such as logging and debugging. It is valid to use the cycle value in these places,
// * but you should not use it to determine the logic of what is run. The mechanism
// * for doing this is provided in {@link io.nosqlbench.engine.api.activityimpl.OpMapper}
// * and {@link io.nosqlbench.engine.api.activityimpl.OpDispenser} types.</p>
// *
// *
// * @param cycle The cycle value for which an operation is run
// */
//// * This method should do the same thing that {@link #apply(long)} does, except that
//// * there is no need to prepare or return a result. This is the form that will be called
//// * if there is no chaining operation to consume the result of this operation.
// void accept(long cycle);
/**
* <p>Run an action for the given cycle. The cycle
* value is only to be used for anecdotal presentation. This form is called
* when there is a chaining operation which will do something with this result.</p>
* <p>Run an action for the given cycle.</p>
*
* @param value The cycle value for which an operation is run
* @return A result which is the native result type for the underlying driver.
* @return A result object which <em>may</em> be used by a subsequent {@link ChainingOp}
*/
@Override
T apply(long value);

View File

@@ -17,21 +17,20 @@
package io.nosqlbench.engine.api.activityimpl.uniform.flowtypes;
/**
* This is the root type of any operation which is used in a NoSQLBench
* <p>This is the root type of any operation which is used in a NoSQLBench
* DriverAdapter. It is a tagging interface for incremental type validation
* in the NB runtime. You probably don't want to use it directly.
* in the NB runtime. You probably don't want to use it directly.</p>
*
* Instead, use these:
* <p>Instead, use <em>one</em> of these:
* <ul>
* <li>{@link CycleOp}</li> - An interface that will called if there is nothing to consume
* the result type from your operation. In some cases preparing a result body to
* hand down the chain is more costly, so implementing this interface allows ...
* hand down the chain is more costly, so implementing this interface allows the runtime
* to be more optimized.</li>
* <li>{@link ChainingOp}</li>
* </ul>
*
* either {@link CycleOp} or {@link ChainingOp} (but not both!)
*
* In the standard flow of an activity, either of the above interfaces is called
* so long as an Op implements one of them.
* </p>
*/
// TODO: optimize the runtime around the specific op type
public interface Op extends OpResultSize {
}

View File

@@ -16,5 +16,19 @@
package io.nosqlbench.engine.api.activityimpl.uniform.flowtypes;
/**
* <H2>RunnableOp</H2>
* <P>This is the simplest form of an executable operation in NoSQLBench.
* It is simply an operation is run for side-effect only.</P>
*/
public interface RunnableOp extends Op, Runnable {
/**
* Invoke the operation. If you need to see the value of the current
* cycle, then you can use {@link CycleOp} instead. If you need to
* use a cached result of a previous operation, then you may need to
* use {@link ChainingOp}.
*/
@Override
void run();
}

View File

@@ -0,0 +1,743 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!--
- Copyright (c) 2022 nosqlbench
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-->
<svg
width="14in"
height="8.5in"
viewBox="0 0 355.6 215.9"
version="1.1"
id="svg8"
inkscape:version="1.1.2 (0a00cf5339, 2022-02-04)"
sodipodi:docname="docs-publishing-flow.svg"
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns="http://www.w3.org/2000/svg"
xmlns:svg="http://www.w3.org/2000/svg"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:cc="http://creativecommons.org/ns#"
xmlns:dc="http://purl.org/dc/elements/1.1/">
<defs
id="defs2">
<marker
style="overflow:visible;"
id="Arrow1Lend"
refX="0.0"
refY="0.0"
orient="auto"
inkscape:stockid="Arrow1Lend"
inkscape:isstock="true">
<path
transform="scale(0.8) rotate(180) translate(12.5,0)"
style="fill-rule:evenodd;fill:context-stroke;stroke:context-stroke;stroke-width:1.0pt;"
d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
id="path10457" />
</marker>
<marker
style="overflow:visible"
id="Arrow1Lstart"
refX="0.0"
refY="0.0"
orient="auto"
inkscape:stockid="Arrow1Lstart"
inkscape:isstock="true">
<path
transform="scale(0.8) translate(12.5,0)"
style="fill-rule:evenodd;fill:context-stroke;stroke:context-stroke;stroke-width:1.0pt"
d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
id="path10454" />
</marker>
<linearGradient
inkscape:collect="always"
id="linearGradient983">
<stop
style="stop-color:#000000;stop-opacity:1;"
offset="0"
id="stop979" />
<stop
style="stop-color:#000000;stop-opacity:0;"
offset="1"
id="stop981" />
</linearGradient>
<linearGradient
inkscape:collect="always"
xlink:href="#linearGradient983"
id="linearGradient985"
x1="131.67278"
y1="63.5"
x2="173.12723"
y2="63.5"
gradientUnits="userSpaceOnUse" />
<linearGradient
gradientTransform="matrix(3.7795276,0,0,3.7795276,-496.71822,-166.61139)"
inkscape:collect="always"
xlink:href="#linearGradient983"
id="linearGradient985-1"
x1="131.67278"
y1="63.5"
x2="173.12723"
y2="63.5"
gradientUnits="userSpaceOnUse" />
<linearGradient
gradientTransform="matrix(3.7795276,0,0,3.7795276,-304.71821,-473.13109)"
inkscape:collect="always"
xlink:href="#linearGradient983"
id="linearGradient985-14"
x1="131.67278"
y1="63.5"
x2="173.12723"
y2="63.5"
gradientUnits="userSpaceOnUse" />
<marker
style="overflow:visible"
id="Arrow1Lend-3"
refX="0"
refY="0"
orient="auto"
inkscape:stockid="Arrow1Lend"
inkscape:isstock="true">
<path
transform="matrix(-0.8,0,0,-0.8,-10,0)"
style="fill:context-stroke;fill-rule:evenodd;stroke:context-stroke;stroke-width:1pt"
d="M 0,0 5,-5 -12.5,0 5,5 Z"
id="path10457-6" />
</marker>
<marker
style="overflow:visible"
id="Arrow1Lend-6"
refX="0"
refY="0"
orient="auto"
inkscape:stockid="Arrow1Lend"
inkscape:isstock="true">
<path
transform="matrix(-0.8,0,0,-0.8,-10,0)"
style="fill:context-stroke;fill-rule:evenodd;stroke:context-stroke;stroke-width:1pt"
d="M 0,0 5,-5 -12.5,0 5,5 Z"
id="path10457-2" />
</marker>
<marker
style="overflow:visible"
id="Arrow1Lend-1"
refX="0"
refY="0"
orient="auto"
inkscape:stockid="Arrow1Lend"
inkscape:isstock="true">
<path
transform="matrix(-0.8,0,0,-0.8,-10,0)"
style="fill:context-stroke;fill-rule:evenodd;stroke:context-stroke;stroke-width:1pt"
d="M 0,0 5,-5 -12.5,0 5,5 Z"
id="path10457-8" />
</marker>
<marker
style="overflow:visible"
id="Arrow1Lend-9"
refX="0"
refY="0"
orient="auto"
inkscape:stockid="Arrow1Lend"
inkscape:isstock="true">
<path
transform="matrix(-0.8,0,0,-0.8,-10,0)"
style="fill:context-stroke;fill-rule:evenodd;stroke:context-stroke;stroke-width:1pt"
d="M 0,0 5,-5 -12.5,0 5,5 Z"
id="path10457-20" />
</marker>
<marker
style="overflow:visible"
id="Arrow1Lend-37"
refX="0"
refY="0"
orient="auto"
inkscape:stockid="Arrow1Lend"
inkscape:isstock="true">
<path
transform="matrix(-0.8,0,0,-0.8,-10,0)"
style="fill:context-stroke;fill-rule:evenodd;stroke:context-stroke;stroke-width:1pt"
d="M 0,0 5,-5 -12.5,0 5,5 Z"
id="path10457-5" />
</marker>
<marker
style="overflow:visible"
id="Arrow1Lend-2"
refX="0"
refY="0"
orient="auto"
inkscape:stockid="Arrow1Lend"
inkscape:isstock="true">
<path
transform="matrix(-0.8,0,0,-0.8,-10,0)"
style="fill:context-stroke;fill-rule:evenodd;stroke:context-stroke;stroke-width:1pt"
d="M 0,0 5,-5 -12.5,0 5,5 Z"
id="path10457-28" />
</marker>
<marker
style="overflow:visible"
id="Arrow1Lend-7"
refX="0"
refY="0"
orient="auto"
inkscape:stockid="Arrow1Lend"
inkscape:isstock="true">
<path
transform="matrix(-0.8,0,0,-0.8,-10,0)"
style="fill:context-stroke;fill-rule:evenodd;stroke:context-stroke;stroke-width:1pt"
d="M 0,0 5,-5 -12.5,0 5,5 Z"
id="path10457-53" />
</marker>
<marker
style="overflow:visible"
id="Arrow1Lend-62"
refX="0"
refY="0"
orient="auto"
inkscape:stockid="Arrow1Lend"
inkscape:isstock="true">
<path
transform="matrix(-0.8,0,0,-0.8,-10,0)"
style="fill:context-stroke;fill-rule:evenodd;stroke:context-stroke;stroke-width:1pt"
d="M 0,0 5,-5 -12.5,0 5,5 Z"
id="path10457-9" />
</marker>
<marker
style="overflow:visible"
id="Arrow1Lend-27"
refX="0"
refY="0"
orient="auto"
inkscape:stockid="Arrow1Lend"
inkscape:isstock="true">
<path
transform="matrix(-0.8,0,0,-0.8,-10,0)"
style="fill:context-stroke;fill-rule:evenodd;stroke:context-stroke;stroke-width:1pt"
d="M 0,0 5,-5 -12.5,0 5,5 Z"
id="path10457-0" />
</marker>
<marker
style="overflow:visible"
id="Arrow1Lend-36"
refX="0"
refY="0"
orient="auto"
inkscape:stockid="Arrow1Lend"
inkscape:isstock="true">
<path
transform="matrix(-0.8,0,0,-0.8,-10,0)"
style="fill:context-stroke;fill-rule:evenodd;stroke:context-stroke;stroke-width:1pt"
d="M 0,0 5,-5 -12.5,0 5,5 Z"
id="path10457-06" />
</marker>
<marker
style="overflow:visible"
id="Arrow1Lend-61"
refX="0"
refY="0"
orient="auto"
inkscape:stockid="Arrow1Lend"
inkscape:isstock="true">
<path
transform="matrix(-0.8,0,0,-0.8,-10,0)"
style="fill:context-stroke;fill-rule:evenodd;stroke:context-stroke;stroke-width:1pt"
d="M 0,0 5,-5 -12.5,0 5,5 Z"
id="path10457-87" />
</marker>
<marker
style="overflow:visible"
id="Arrow1Lend-20"
refX="0"
refY="0"
orient="auto"
inkscape:stockid="Arrow1Lend"
inkscape:isstock="true">
<path
transform="matrix(-0.8,0,0,-0.8,-10,0)"
style="fill:context-stroke;fill-rule:evenodd;stroke:context-stroke;stroke-width:1pt"
d="M 0,0 5,-5 -12.5,0 5,5 Z"
id="path10457-23" />
</marker>
</defs>
<sodipodi:namedview
id="base"
pagecolor="#ffffff"
bordercolor="#666666"
borderopacity="1.0"
inkscape:pageopacity="0.0"
inkscape:pageshadow="2"
inkscape:zoom="2.060049"
inkscape:cx="685.17788"
inkscape:cy="241.49911"
inkscape:document-units="mm"
inkscape:current-layer="layer2"
showgrid="true"
units="in"
inkscape:window-width="3840"
inkscape:window-height="2019"
inkscape:window-x="0"
inkscape:window-y="2160"
inkscape:window-maximized="1"
inkscape:pagecheckerboard="0">
<inkscape:grid
type="xygrid"
id="grid895"
units="in"
spacingx="3.175"
spacingy="3.175"
empspacing="8"
color="#3f3fff"
opacity="0.21568627"
empcolor="#3f3fff"
empopacity="0.42352941" />
<inkscape:grid
type="xygrid"
id="grid918"
units="in"
spacingx="1.5875"
spacingy="1.5875"
empspacing="8"
dotted="false"
color="#3f3fff"
opacity="0.0627451" />
</sodipodi:namedview>
<metadata
id="metadata5">
<rdf:RDF>
<cc:Work
rdf:about="">
<dc:format>image/svg+xml</dc:format>
<dc:type
rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
</cc:Work>
</rdf:RDF>
</metadata>
<g
inkscape:groupmode="layer"
id="layer2"
inkscape:label="data"
style="display:inline"
transform="translate(0,76.2)">
<g
id="g3395"
transform="translate(-101.6,28.574999)">
<rect
style="fill:none;stroke:#4861d1;stroke-width:1;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;stop-color:#000000"
id="rect853"
width="79.375"
height="30.1625"
x="117.475"
y="-47.625" />
<text
xml:space="preserve"
style="font-style:normal;font-weight:normal;font-size:10.5833px;line-height:1.25;font-family:sans-serif;text-align:center;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
x="163.23721"
y="-35.909264"
id="text1713"><tspan
sodipodi:role="line"
id="tspan1711"
style="text-align:center;text-anchor:middle;stroke-width:0.264583"
x="163.23721"
y="-35.909264">NB</tspan><tspan
sodipodi:role="line"
style="text-align:center;text-anchor:middle;stroke-width:0.264583"
x="163.23721"
y="-22.68014"
id="tspan1580">main branch</tspan></text>
</g>
<g
id="g3230"
transform="translate(171.45,17.4625)">
<rect
style="display:inline;fill:none;stroke:#4861d1;stroke-width:1;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;stop-color:#000000"
id="rect853-3"
width="60.324997"
height="31.749998"
x="96.837502"
y="22.225" />
<text
xml:space="preserve"
style="font-style:normal;font-weight:normal;font-size:10.5833px;line-height:1.25;font-family:sans-serif;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
x="102.53144"
y="33.94603"
id="text2399"><tspan
sodipodi:role="line"
id="tspan2397"
style="stroke-width:0.264583"
x="102.53144"
y="33.94603">RELEASED</tspan><tspan
sodipodi:role="line"
style="stroke-width:0.264583"
x="102.53144"
y="47.175156"
id="tspan24378">NB-DOCS</tspan></text>
</g>
<g
id="g3230-6"
transform="translate(173.0375,-41.275)"
style="display:inline">
<rect
style="display:inline;fill:none;stroke:#4861d1;stroke-width:1;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;stop-color:#000000"
id="rect853-3-7"
width="58.737499"
height="30.162498"
x="96.837502"
y="22.225" />
<text
xml:space="preserve"
style="font-style:normal;font-weight:normal;font-size:10.5833px;line-height:1.25;font-family:sans-serif;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
x="99.298256"
y="34.417007"
id="text2399-5"><tspan
sodipodi:role="line"
id="tspan2397-3"
style="stroke-width:0.264583"
x="99.298256"
y="34.417007">NB-BUILD-</tspan><tspan
sodipodi:role="line"
style="stroke-width:0.264583"
x="99.298256"
y="47.646133"
id="tspan3616">DOCS</tspan></text>
</g>
<g
id="g8349"
transform="translate(36.515457,20.6375)">
<ellipse
style="fill:none;stroke:#4861d1;stroke-width:1;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;stop-color:#000000"
id="path4433"
cx="127.79375"
cy="-24.606251"
rx="18.25625"
ry="15.08125" />
<text
xml:space="preserve"
style="font-style:normal;font-weight:normal;font-size:5.64444px;line-height:1.25;font-family:sans-serif;text-align:center;text-anchor:middle;display:inline;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
x="127.69779"
y="-30.149092"
id="text1713-6"><tspan
sodipodi:role="line"
id="tspan1711-2"
style="font-size:5.64444px;text-align:center;text-anchor:middle;stroke-width:0.264583"
x="127.69779"
y="-30.149092">non-</tspan><tspan
sodipodi:role="line"
style="font-size:5.64444px;text-align:center;text-anchor:middle;stroke-width:0.264583"
x="127.69779"
y="-23.093542"
id="tspan4984">release</tspan><tspan
sodipodi:role="line"
style="font-size:5.64444px;text-align:center;text-anchor:middle;stroke-width:0.264583"
x="127.69779"
y="-16.037991"
id="tspan4986">build</tspan></text>
</g>
<g
id="g8349-9"
style="display:inline"
transform="translate(36.152018,80.16875)">
<ellipse
style="fill:none;stroke:#4861d1;stroke-width:1;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;stop-color:#000000"
id="path4433-1"
cx="127.79375"
cy="-24.606251"
rx="18.25625"
ry="15.08125" />
<text
xml:space="preserve"
style="font-style:normal;font-weight:normal;font-size:5.64444px;line-height:1.25;font-family:sans-serif;text-align:center;text-anchor:middle;display:inline;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
x="127.69779"
y="-26.017361"
id="text1713-6-2"><tspan
sodipodi:role="line"
style="font-size:5.64444px;text-align:center;text-anchor:middle;stroke-width:0.264583"
x="127.69779"
y="-26.017361"
id="tspan4984-0">release</tspan><tspan
sodipodi:role="line"
style="font-size:5.64444px;text-align:center;text-anchor:middle;stroke-width:0.264583"
x="127.69779"
y="-18.961811"
id="tspan4986-9">build</tspan></text>
</g>
<path
style="font-variation-settings:normal;opacity:1;vector-effect:none;fill:none;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.5;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;-inkscape-stroke:none;marker-end:url(#Arrow1Lend);stop-color:#000000;stop-opacity:1"
d="m 95.249998,-3.9687508 50.802952,0"
id="path10370"
inkscape:connector-type="polyline"
inkscape:connector-curvature="0"
inkscape:connection-start="#g3395"
inkscape:connection-end="#g8349" />
<path
style="font-variation-settings:normal;opacity:1;vector-effect:none;fill:none;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.5;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;-inkscape-stroke:none;marker-end:url(#Arrow1Lend);stop-color:#000000;stop-opacity:1"
d="m 83.019593,11.112499 65.723847,36.09988"
id="path10372"
inkscape:connector-type="polyline"
inkscape:connector-curvature="0"
inkscape:connection-start="#g3395"
inkscape:connection-end="#g8349-9" />
<path
style="font-variation-settings:normal;opacity:1;vector-effect:none;fill:none;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.5;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;-inkscape-stroke:none;marker-end:url(#Arrow1Lend);stop-color:#000000;stop-opacity:1"
d="m 182.56545,-3.9687507 87.30955,2e-7"
id="path10817"
inkscape:connector-type="polyline"
inkscape:connector-curvature="0"
inkscape:connection-start="#g8349"
inkscape:connection-end="#g3230-6" />
<path
style="font-variation-settings:normal;opacity:1;vector-effect:none;fill:none;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.5;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;-inkscape-stroke:none;marker-end:url(#Arrow1Lend);stop-color:#000000;stop-opacity:1"
d="m 182.20202,55.562499 86.08548,0"
id="path10921"
inkscape:connector-type="polyline"
inkscape:connector-curvature="0"
inkscape:connection-start="#g8349-9"
inkscape:connection-end="#g3230" />
<g
id="g11511"
transform="translate(155.575,-112.7125)">
<circle
style="font-variation-settings:normal;opacity:1;fill:none;fill-opacity:1;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;stop-color:#000000;stop-opacity:1"
id="path11306"
cx="56.960236"
cy="65.919228"
r="2.7336953" />
<path
style="fill:none;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
d="m 57.15,68.2625 -10e-7,6.35 -3.174999,6.35"
id="path11421"
sodipodi:nodetypes="ccc" />
<path
style="fill:none;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
d="m 57.149999,74.6125 3.175001,6.35"
id="path11423"
sodipodi:nodetypes="cc" />
<path
style="font-variation-settings:normal;opacity:1;vector-effect:none;fill:none;fill-opacity:1;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;-inkscape-stroke:none;stop-color:#000000;stop-opacity:1"
d="m 53.975,71.4375 h 6.35"
id="path11425"
sodipodi:nodetypes="cc" />
</g>
<path
style="font-variation-settings:normal;display:inline;opacity:1;vector-effect:none;fill:none;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.5;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;-inkscape-stroke:none;marker-end:url(#Arrow1Lend);stop-color:#000000;stop-opacity:1"
d="m 212.725,-40.77668 57.15,24.313495"
id="path11775"
inkscape:connector-type="polyline"
inkscape:connector-curvature="0"
inkscape:connection-start="#g11511"
inkscape:connection-end="#g3230-6" />
<path
style="font-variation-settings:normal;opacity:1;fill:none;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.5;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker-end:url(#Arrow1Lend);stop-color:#000000;stop-opacity:1"
d="M 212.725,-40.77668 95.249998,-13.263682"
id="path11944"
inkscape:connector-type="polyline"
inkscape:connector-curvature="0"
inkscape:connection-start="#g11511"
inkscape:connection-end="#g3395" />
<path
style="font-variation-settings:normal;opacity:1;vector-effect:none;fill:none;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.5;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;-inkscape-stroke:none;marker-end:url(#Arrow1Lend);stop-color:#000000;stop-opacity:1"
d="m 299.04267,11.112499 -0.381,28.575001"
id="path12048"
inkscape:connector-type="polyline"
inkscape:connector-curvature="0"
inkscape:connection-start="#g3230-6"
inkscape:connection-end="#g3230" />
<text
xml:space="preserve"
style="font-style:normal;font-weight:normal;font-size:6.35px;line-height:1.25;font-family:sans-serif;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
x="127.40736"
y="-43.492035"
id="text13414"><tspan
sodipodi:role="line"
id="tspan13412"
style="font-size:6.35px;stroke-width:0.264583"
x="127.40736"
         y="-43.492035">edit embedded</tspan><tspan
sodipodi:role="line"
style="font-size:6.35px;stroke-width:0.264583"
x="127.40736"
y="-35.554535"
id="tspan13944">docs, javadoc,</tspan><tspan
sodipodi:role="line"
style="font-size:6.35px;stroke-width:0.264583"
x="127.40736"
y="-27.617033"
id="tspan26081">and code</tspan></text>
<text
xml:space="preserve"
style="font-style:normal;font-weight:normal;font-size:6.35px;line-height:1.25;font-family:sans-serif;display:inline;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
x="229.11111"
y="-41.987053"
id="text13414-7"><tspan
sodipodi:role="line"
style="font-size:6.35px;stroke-width:0.264583"
x="229.11111"
y="-41.987053"
id="tspan13944-6">edit docs</tspan><tspan
sodipodi:role="line"
style="font-size:6.35px;stroke-width:0.264583"
x="229.11111"
y="-34.049553"
id="tspan18006">content</tspan></text>
<text
xml:space="preserve"
style="font-style:normal;font-weight:normal;font-size:6.35px;line-height:1.25;font-family:sans-serif;display:inline;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
x="300.85052"
y="18.575722"
id="text13414-7-1"><tspan
sodipodi:role="line"
style="font-size:6.35px;stroke-width:0.264583"
x="300.85052"
y="18.575722"
id="tspan18006-9">publish <tspan
style="font-weight:bold"
id="tspan19860">all</tspan> docs</tspan><tspan
sodipodi:role="line"
style="font-size:6.35px;stroke-width:0.264583"
x="300.85052"
y="26.513222"
id="tspan18420">content to <tspan
style="font-weight:bold"
id="tspan21172">main</tspan></tspan><tspan
sodipodi:role="line"
style="font-size:6.35px;stroke-width:0.264583"
x="300.85052"
y="34.450722"
id="tspan18422">docs site</tspan></text>
<text
xml:space="preserve"
style="font-style:normal;font-weight:normal;font-size:6.35px;line-height:1.25;font-family:sans-serif;display:inline;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
x="185.48294"
y="52.958786"
id="text13414-7-1-3"><tspan
sodipodi:role="line"
style="font-size:6.35px;stroke-width:0.264583"
x="185.48294"
y="52.958786"
id="tspan18006-9-1">publish <tspan
style="font-weight:bold"
id="tspan19284">embedded</tspan></tspan><tspan
sodipodi:role="line"
style="font-size:6.35px;stroke-width:0.264583"
x="185.48294"
y="60.896286"
id="tspan18941">docs content to <tspan
style="font-weight:bold"
id="tspan10164">main</tspan></tspan><tspan
sodipodi:role="line"
style="font-size:6.35px;stroke-width:0.264583"
x="185.48294"
y="68.833786"
id="tspan18422-4">docs site</tspan></text>
<text
xml:space="preserve"
style="font-style:normal;font-weight:normal;font-size:6.35px;line-height:1.25;font-family:sans-serif;display:inline;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
x="185.37083"
y="-5.9306483"
id="text13414-7-1-3-7"><tspan
sodipodi:role="line"
style="font-size:6.35px;stroke-width:0.264583"
x="185.37083"
y="-5.9306483"
id="tspan18006-9-1-8">publish <tspan
style="font-weight:bold"
id="tspan18684">embedded</tspan></tspan><tspan
sodipodi:role="line"
style="font-size:6.35px;stroke-width:0.264583"
x="185.37083"
y="2.0068517"
id="tspan18941-4">docs content to <tspan
style="font-weight:bold"
id="tspan11314">preview</tspan></tspan><tspan
sodipodi:role="line"
style="font-size:6.35px;stroke-width:0.264583"
x="185.37083"
y="9.9443502"
id="tspan18422-4-5">docs site</tspan></text>
<text
xml:space="preserve"
style="font-size:3.52777px;line-height:1.25;font-family:sans-serif;fill:#5939cc;fill-opacity:1;stroke:none;stroke-width:0.264583;stroke-opacity:1"
x="133.32997"
y="27.209949"
id="text27722"><tspan
sodipodi:role="line"
id="tspan27720"
style="font-weight:bold;fill:#5939cc;fill-opacity:1;stroke:none;stroke-width:0.264583;stroke-opacity:1"
x="133.32997"
y="27.209949">on push to main</tspan><tspan
sodipodi:role="line"
style="font-weight:bold;fill:#5939cc;fill-opacity:1;stroke:none;stroke-width:0.264583;stroke-opacity:1"
x="133.32997"
y="31.619661"
id="tspan27724">and change to</tspan><tspan
sodipodi:role="line"
style="font-weight:bold;fill:#5939cc;fill-opacity:1;stroke:none;stroke-width:0.264583;stroke-opacity:1"
x="133.32997"
y="36.029373"
id="tspan27726">RELEASENOTES.md</tspan></text>
<text
xml:space="preserve"
style="font-size:3.52777px;line-height:1.25;font-family:sans-serif;display:inline;fill:#5939cc;fill-opacity:1;stroke:none;stroke-width:0.264583;stroke-opacity:1"
x="259.55676"
y="18.714157"
id="text27722-6"><tspan
sodipodi:role="line"
style="font-weight:bold;fill:#5939cc;fill-opacity:1;stroke:none;stroke-width:0.264583;stroke-opacity:1"
x="259.55676"
y="18.714157"
id="tspan27726-6">Initially, this doc site</tspan><tspan
sodipodi:role="line"
style="font-weight:bold;fill:#5939cc;fill-opacity:1;stroke:none;stroke-width:0.264583;stroke-opacity:1"
x="259.55676"
y="23.123869"
id="tspan69260">will be checkpointed </tspan><tspan
sodipodi:role="line"
style="font-weight:bold;fill:#5939cc;fill-opacity:1;stroke:none;stroke-width:0.264583;stroke-opacity:1"
x="259.55676"
y="27.533583"
id="tspan69262">manually into the </tspan><tspan
sodipodi:role="line"
style="font-weight:bold;fill:#5939cc;fill-opacity:1;stroke:none;stroke-width:0.264583;stroke-opacity:1"
x="259.55676"
y="31.943295"
id="tspan69264">main docs site</tspan></text>
<text
xml:space="preserve"
style="font-size:3.52777px;line-height:1.25;font-family:sans-serif;display:inline;stroke-width:0.264583"
x="106.08576"
y="0.33000755"
id="text27722-5"><tspan
sodipodi:role="line"
id="tspan27720-9"
style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:3.52777px;line-height:1.25;font-family:sans-serif;font-variant-ligatures:normal;font-variant-position:normal;font-variant-caps:normal;font-variant-numeric:normal;font-variant-alternates:normal;font-variant-east-asian:normal;font-feature-settings:normal;font-variation-settings:normal;text-indent:0;text-align:start;text-decoration-line:none;text-decoration-style:solid;text-decoration-color:#000000;letter-spacing:normal;word-spacing:normal;text-transform:none;writing-mode:lr-tb;direction:ltr;text-orientation:mixed;dominant-baseline:auto;baseline-shift:baseline;white-space:normal;opacity:1;vector-effect:none;fill:#5939cc;fill-opacity:1;stroke:none;stroke-width:0.264583;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;-inkscape-stroke:none;stop-color:#000000;stop-opacity:1"
x="106.08576"
y="0.33000755">on push to main</tspan><tspan
sodipodi:role="line"
style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:3.52777px;line-height:1.25;font-family:sans-serif;font-variant-ligatures:normal;font-variant-position:normal;font-variant-caps:normal;font-variant-numeric:normal;font-variant-alternates:normal;font-variant-east-asian:normal;font-feature-settings:normal;font-variation-settings:normal;text-indent:0;text-align:start;text-decoration-line:none;text-decoration-style:solid;text-decoration-color:#000000;letter-spacing:normal;word-spacing:normal;text-transform:none;writing-mode:lr-tb;direction:ltr;text-orientation:mixed;dominant-baseline:auto;baseline-shift:baseline;white-space:normal;opacity:1;vector-effect:none;fill:#5939cc;fill-opacity:1;stroke:none;stroke-width:0.264583;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;-inkscape-stroke:none;stop-color:#000000;stop-opacity:1"
x="106.08576"
y="4.7397203"
id="tspan27724-2">and NO change to</tspan><tspan
sodipodi:role="line"
style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:3.52777px;line-height:1.25;font-family:sans-serif;font-variant-ligatures:normal;font-variant-position:normal;font-variant-caps:normal;font-variant-numeric:normal;font-variant-alternates:normal;font-variant-east-asian:normal;font-feature-settings:normal;font-variation-settings:normal;text-indent:0;text-align:start;text-decoration-line:none;text-decoration-style:solid;text-decoration-color:#000000;letter-spacing:normal;word-spacing:normal;text-transform:none;writing-mode:lr-tb;direction:ltr;text-orientation:mixed;dominant-baseline:auto;baseline-shift:baseline;white-space:normal;opacity:1;vector-effect:none;fill:#5939cc;fill-opacity:1;stroke:none;stroke-width:0.264583;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;-inkscape-stroke:none;stop-color:#000000;stop-opacity:1"
x="106.08576"
y="9.1494331"
id="tspan27726-2">RELEASENOTES.md</tspan></text>
</g>
<g
inkscape:label="boxes"
inkscape:groupmode="layer"
id="layer1"
transform="translate(0,-4.9)"
style="display:inline;opacity:0.438" />
</svg>

After

Width:  |  Height:  |  Size: 31 KiB

View File

@@ -28,7 +28,7 @@
<parent>
<artifactId>mvn-defaults</artifactId>
<groupId>io.nosqlbench</groupId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
<relativePath>../mvn-defaults</relativePath>
</parent>
@@ -37,7 +37,7 @@
<dependency>
<groupId>io.nosqlbench</groupId>
<artifactId>nb-api</artifactId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
</dependency>
<dependency>
@@ -93,19 +93,19 @@
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-compress</artifactId>
<version>1.21</version>
<version>1.22</version>
</dependency>
<dependency>
<groupId>org.glassfish.jersey.media</groupId>
<artifactId>jersey-media-json-jackson</artifactId>
<version>3.0.8</version>
<version>3.1.0</version>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.jaxrs</groupId>
<artifactId>jackson-jaxrs-json-provider</artifactId>
<version>2.14.0</version>
<version>2.14.1</version>
</dependency>
<dependency>
@@ -133,7 +133,7 @@
<dependency>
<groupId>io.nosqlbench</groupId>
<artifactId>virtdata-api</artifactId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
</dependency>
</dependencies>

View File

@@ -21,7 +21,7 @@
<parent>
<artifactId>mvn-defaults</artifactId>
<groupId>io.nosqlbench</groupId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
<relativePath>../mvn-defaults</relativePath>
</parent>
@@ -39,31 +39,31 @@
<dependency>
<groupId>io.nosqlbench</groupId>
<artifactId>nb-api</artifactId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>io.nosqlbench</groupId>
<artifactId>adapters-api</artifactId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>io.nosqlbench</groupId>
<artifactId>nb-spectest</artifactId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>io.nosqlbench</groupId>
<artifactId>nb-annotations</artifactId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>io.nosqlbench</groupId>
<artifactId>virtdata-userlibs</artifactId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
</dependency>
<dependency>

View File

@@ -30,11 +30,14 @@ import org.apache.logging.log4j.Logger;
@Service(value = ErrorHandler.class, selector = "count")
public class CountErrorHandler extends CounterErrorHandler {
public CountErrorHandler() {
logger.warn("Starting with v4.17 onward, use 'counter'. See cql-errors.md for usage.");
}
private static final Logger logger = LogManager.getLogger(CountErrorHandler.class);
@Override
public ErrorDetail handleError(String name, Throwable t, long cycle, long durationInNanos, ErrorDetail detail) {
logger.warn("Starting with v4.17 onward, use 'counter'. See cql-errors.md for usage.");
return super.handleError(name, t, cycle, durationInNanos, detail);
}
}

View File

@@ -32,11 +32,11 @@ import org.apache.logging.log4j.Logger;
import java.util.concurrent.TimeUnit;
/**
* This is the generified version of an Action. All activity types should endeavor to use
* this, as the API surface is being consolidated so that the internal machinery of NB
* works in a very consistent and uniform way.
* There will be changes to multiple drivers to support this consolidation, but the bulk
* of this work will be undertaken by the project maintainers.
* This is the generified version of an Action. All driver adapters us this, as opposed
* to previous NB versions where it was implemented for each driver.
*
* This allows the API to be consolidated so that the internal machinery of NB
* works in a very consistent and uniform way for all users and drivers.
*
* @param <A> The type of activity
* @param <R> The type of operation

View File

@@ -4,7 +4,7 @@
<parent>
<artifactId>mvn-defaults</artifactId>
<groupId>io.nosqlbench</groupId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
<relativePath>../mvn-defaults</relativePath>
</parent>
@@ -23,13 +23,13 @@
<dependency>
<groupId>io.nosqlbench</groupId>
<artifactId>engine-core</artifactId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>io.nosqlbench</groupId>
<artifactId>engine-docker</artifactId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
</dependency>
</dependencies>

View File

@@ -179,7 +179,7 @@ public class NBCLIOptions {
private Path statepath;
private final List<String> statePathAccesses = new ArrayList<>();
private final String hdrForChartFileName = DEFAULT_CHART_HDR_LOG_NAME;
private String dockerPromRetentionDays = "183d";
private String dockerPromRetentionDays = "3650d";
private String reportSummaryTo = REPORT_SUMMARY_TO_DEFAULT;
private boolean enableAnsi = System.getenv("TERM")!=null && !System.getenv("TERM").isEmpty();
private Maturity minMaturity = Maturity.Unspecified;
@@ -371,7 +371,7 @@ public class NBCLIOptions {
break;
case DOCKER_PROM_RETENTION_DAYS:
arglist.removeFirst();
dockerPromRetentionDays = readWordOrThrow(arglist, "prometheus retention (183d by default)");
dockerPromRetentionDays = readWordOrThrow(arglist, "prometheus retention (3650d by default)");
break;
case DOCKER_GRAFANA_TAG:
arglist.removeFirst();

View File

@@ -5,7 +5,7 @@
<parent>
<artifactId>mvn-defaults</artifactId>
<groupId>io.nosqlbench</groupId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
<relativePath>../mvn-defaults</relativePath>
</parent>
@@ -21,7 +21,7 @@
<dependency>
<groupId>io.nosqlbench</groupId>
<artifactId>engine-api</artifactId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
</dependency>
</dependencies>

View File

@@ -21,7 +21,7 @@
<parent>
<artifactId>mvn-defaults</artifactId>
<groupId>io.nosqlbench</groupId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
<relativePath>../mvn-defaults</relativePath>
</parent>
@@ -38,7 +38,7 @@
<dependency>
<groupId>io.nosqlbench</groupId>
<artifactId>engine-api</artifactId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
</dependency>
<dependency>
@@ -89,7 +89,7 @@
<dependency>
<groupId>io.nosqlbench</groupId>
<artifactId>engine-clients</artifactId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
<scope>compile</scope>
</dependency>

View File

@@ -4,7 +4,7 @@
<parent>
<artifactId>mvn-defaults</artifactId>
<groupId>io.nosqlbench</groupId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
<relativePath>../mvn-defaults</relativePath>
</parent>
@@ -56,7 +56,7 @@
<dependency>
<groupId>io.nosqlbench</groupId>
<artifactId>engine-api</artifactId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
</dependency>
</dependencies>

View File

@@ -4,7 +4,7 @@
<parent>
<artifactId>mvn-defaults</artifactId>
<groupId>io.nosqlbench</groupId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
<relativePath>../mvn-defaults</relativePath>
</parent>
@@ -28,7 +28,7 @@
<dependency>
<groupId>io.nosqlbench</groupId>
<artifactId>docsys</artifactId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
</dependency>
</dependencies>

View File

@@ -4,7 +4,7 @@
<parent>
<artifactId>mvn-defaults</artifactId>
<groupId>io.nosqlbench</groupId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
<relativePath>../mvn-defaults</relativePath>
</parent>
@@ -22,7 +22,7 @@
<dependency>
<groupId>io.nosqlbench</groupId>
<artifactId>engine-api</artifactId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
</dependency>
</dependencies>

View File

@@ -4,7 +4,7 @@
<parent>
<artifactId>mvn-defaults</artifactId>
<groupId>io.nosqlbench</groupId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
<relativePath>../mvn-defaults</relativePath>
</parent>
@@ -23,19 +23,19 @@
<dependency>
<groupId>io.swagger.parser.v3</groupId>
<artifactId>swagger-parser</artifactId>
<version>2.1.7</version>
<version>2.1.9</version>
</dependency>
<dependency>
<groupId>io.swagger.core.v3</groupId>
<artifactId>swagger-models</artifactId>
<version>2.2.4</version>
<version>2.2.6</version>
</dependency>
<dependency>
<groupId>io.nosqlbench</groupId>
<artifactId>engine-cli</artifactId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
</dependency>
</dependencies>

View File

@@ -19,7 +19,7 @@
<groupId>io.nosqlbench</groupId>
<artifactId>mvn-defaults</artifactId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
<packaging>pom</packaging>
<properties>
@@ -141,7 +141,7 @@
<dependency>
<groupId>io.dropwizard.metrics</groupId>
<artifactId>metrics-core</artifactId>
<version>4.2.12</version>
<version>4.2.13</version>
</dependency>
<dependency>
@@ -153,12 +153,12 @@
<dependency>
<groupId>org.openjdk.jmh</groupId>
<artifactId>jmh-core</artifactId>
<version>1.35</version>
<version>1.36</version>
</dependency>
<dependency>
<groupId>org.openjdk.jmh</groupId>
<artifactId>jmh-generator-annprocess</artifactId>
<version>1.35</version>
<version>1.36</version>
</dependency>
<dependency>
@@ -189,7 +189,7 @@
<dependency>
<groupId>io.netty</groupId>
<artifactId>netty-handler</artifactId>
<version>4.1.84.Final</version>
<version>4.1.86.Final</version>
</dependency>
<dependency>
@@ -209,7 +209,7 @@
<dependency>
<groupId>io.netty</groupId>
<artifactId>netty-codec-haproxy</artifactId>
<version>4.1.54.Final</version>
<version>4.1.86.Final</version>
</dependency>
<dependency>
@@ -265,7 +265,7 @@
<dependency>
<groupId>com.github.oshi</groupId>
<artifactId>oshi-core-java11</artifactId>
<version>6.3.1</version>
<version>6.3.2</version>
</dependency>
<dependency>

View File

@@ -5,7 +5,7 @@
<parent>
<artifactId>mvn-defaults</artifactId>
<groupId>io.nosqlbench</groupId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
<relativePath>../mvn-defaults</relativePath>
</parent>

View File

@@ -21,7 +21,7 @@
<parent>
<artifactId>mvn-defaults</artifactId>
<groupId>io.nosqlbench</groupId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
<relativePath>../mvn-defaults</relativePath>
</parent>
@@ -58,7 +58,7 @@
<dependency>
<groupId>io.nosqlbench</groupId>
<artifactId>nb-annotations</artifactId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
</dependency>
<dependency>
@@ -97,7 +97,7 @@
<dependency>
<groupId>com.amazonaws</groupId>
<artifactId>aws-java-sdk-s3</artifactId>
<version>1.12.340</version>
<version>1.12.364</version>
</dependency>
<dependency>

View File

@@ -20,7 +20,7 @@
<parent>
<artifactId>mvn-defaults</artifactId>
<groupId>io.nosqlbench</groupId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
<relativePath>../mvn-defaults</relativePath>
</parent>

View File

@@ -21,7 +21,7 @@
<parent>
<artifactId>mvn-defaults</artifactId>
<groupId>io.nosqlbench</groupId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
<relativePath>../mvn-defaults</relativePath>
</parent>
@@ -40,7 +40,7 @@
<dependency>
<groupId>io.nosqlbench</groupId>
<artifactId>nbr</artifactId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
</dependency>
<!-- Everything below this line is in common between nb and nb5 -->
@@ -49,49 +49,61 @@
<dependency>
<groupId>io.nosqlbench</groupId>
<artifactId>adapter-tcp</artifactId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>io.nosqlbench</groupId>
<artifactId>adapter-mongodb</artifactId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>io.nosqlbench</groupId>
<artifactId>adapter-stdout</artifactId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>io.nosqlbench</groupId>
<artifactId>adapter-diag</artifactId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>io.nosqlbench</groupId>
<artifactId>adapter-dynamodb</artifactId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>io.nosqlbench</groupId>
<artifactId>adapter-cqld4</artifactId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>io.nosqlbench</groupId>
<artifactId>adapter-http</artifactId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>io.nosqlbench</groupId>
<artifactId>adapter-pulsar</artifactId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>io.nosqlbench</groupId>
<artifactId>adapter-s4j</artifactId>
<version>4.17.32-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>io.nosqlbench</groupId>
<artifactId>adapter-kafka</artifactId>
<version>4.17.32-SNAPSHOT</version>
</dependency>
</dependencies>
@@ -166,7 +178,7 @@
<dependency>
<groupId>io.nosqlbench</groupId>
<artifactId>adapter-mongodb</artifactId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
</dependency>
</dependencies>
</profile>

View File

@@ -20,7 +20,7 @@
<parent>
<artifactId>mvn-defaults</artifactId>
<groupId>io.nosqlbench</groupId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
<relativePath>../mvn-defaults</relativePath>
</parent>
@@ -39,13 +39,13 @@
<dependency>
<groupId>io.nosqlbench</groupId>
<artifactId>nbr</artifactId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>io.nosqlbench</groupId>
<artifactId>adapter-diag</artifactId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
</dependency>
</dependencies>

View File

@@ -21,7 +21,7 @@
<parent>
<artifactId>mvn-defaults</artifactId>
<groupId>io.nosqlbench</groupId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
<relativePath>../mvn-defaults</relativePath>
</parent>
@@ -40,37 +40,37 @@
<dependency>
<groupId>io.nosqlbench</groupId>
<artifactId>engine-rest</artifactId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>io.nosqlbench</groupId>
<artifactId>engine-cli</artifactId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>io.nosqlbench</groupId>
<artifactId>engine-docs</artifactId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>io.nosqlbench</groupId>
<artifactId>engine-core</artifactId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>io.nosqlbench</groupId>
<artifactId>engine-extensions</artifactId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>io.nosqlbench</groupId>
<artifactId>adapter-diag</artifactId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
</dependency>
</dependencies>

View File

@@ -100,4 +100,5 @@ class ExitStatusIntegrationTests {
// assertThat(result.exception).isNotNull();
// assertThat(result.exception.getMessage()).contains("diag space was configured to throw");
// }
}

View File

@@ -23,7 +23,7 @@
<parent>
<artifactId>mvn-defaults</artifactId>
<groupId>io.nosqlbench</groupId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
<relativePath>mvn-defaults</relativePath>
</parent>
@@ -62,6 +62,8 @@
<module>adapter-dynamodb</module>
<module>adapter-mongodb</module>
<module>adapter-pulsar</module>
<module>adapter-s4j</module>
<module>adapter-kafka</module>
<!-- VIRTDATA MODULES -->
@@ -76,7 +78,6 @@
<!-- Documentation -->
<module>docsys</module>
</modules>
<profiles>
@@ -88,7 +89,6 @@
<modules>
<module>nb</module>
<module>driver-tcp</module>
<module>driver-kafka</module>
<module>driver-jmx</module>
<module>driver-jdbc</module>
<module>driver-cockroachdb</module>

View File

@@ -7,7 +7,7 @@
<parent>
<groupId>io.nosqlbench</groupId>
<artifactId>mvn-defaults</artifactId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
<relativePath>../mvn-defaults</relativePath>
</parent>
@@ -23,14 +23,14 @@
<dependency>
<groupId>io.nosqlbench</groupId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
<artifactId>nb-api</artifactId>
</dependency>
<dependency>
<groupId>io.nosqlbench</groupId>
<artifactId>virtdata-lang</artifactId>
<version>4.17.31-SNAPSHOT</version>
<version>4.17.32-SNAPSHOT</version>
</dependency>

Some files were not shown because too many files have changed in this diff Show More