Mirror of https://github.com/nosqlbench/nosqlbench.git, synced 2024-12-01 04:49:18 -06:00

Merge pull request #1143 from MMirelli/mm-ls797-continue-add-e2e-error-metrics-to-kafka-adapter

[kafka-adapter] Add e2e error metrics -- to be continued

This commit is contained in: commit 9295adf03f

@@ -23,6 +23,7 @@ import io.nosqlbench.adapter.kafka.ops.OpTimeTrackKafkaClient;
import io.nosqlbench.adapter.kafka.ops.OpTimeTrackKafkaConsumer;
import io.nosqlbench.adapter.kafka.util.EndToEndStartingTimeSource;
import io.nosqlbench.adapter.kafka.util.KafkaAdapterUtil;
import io.nosqlbench.engine.api.metrics.ReceivedMessageSequenceTracker;
import io.nosqlbench.engine.api.activityimpl.uniform.DriverAdapter;
import io.nosqlbench.engine.api.templating.ParsedOp;
import org.apache.commons.lang3.BooleanUtils;
@@ -50,10 +51,15 @@ public class MessageConsumerOpDispenser extends KafkaBaseOpDispenser {
    // - This is only relevant when the effective setting (global level and statement level)
    //   of "enable.auto.commit" is false
    protected final int maxMsgCntPerCommit;
    private final LongFunction<String> e2eStartTimeSrcParamStrFunc;

    protected boolean autoCommitEnabled;

    private final LongFunction<String> e2eStartTimeSrcParamStrFunc;

    private final ThreadLocal<Map<String, ReceivedMessageSequenceTracker>>
        receivedMessageSequenceTrackersForTopicThreadLocal = ThreadLocal.withInitial(HashMap::new);
    protected final LongFunction<Boolean> seqTrackingFunc;

    public MessageConsumerOpDispenser(DriverAdapter adapter,
                                      ParsedOp op,
                                      LongFunction<String> tgtNameFunc,
@@ -80,6 +86,9 @@ public class MessageConsumerOpDispenser extends KafkaBaseOpDispenser {
        }
        this.e2eStartTimeSrcParamStrFunc = lookupOptionalStrOpValueFunc(
            KafkaAdapterUtil.DOC_LEVEL_PARAMS.E2E_STARTING_TIME_SOURCE.label, "none");
        this.seqTrackingFunc = lookupStaticBoolConfigValueFunc(
            KafkaAdapterUtil.DOC_LEVEL_PARAMS.SEQ_TRACKING.label, false);
        ;
    }

    private String getEffectiveGroupId(long cycle) {
@@ -129,15 +138,26 @@ public class MessageConsumerOpDispenser extends KafkaBaseOpDispenser {
                autoCommitEnabled,
                maxMsgCntPerCommit,
                consumer,
                kafkaAdapterMetrics,
                EndToEndStartingTimeSource.valueOf(e2eStartTimeSrcParamStrFunc.apply(cycle).toUpperCase()),
                kafkaAdapterMetrics
            );
                this::getReceivedMessageSequenceTracker,
                seqTrackingFunc.apply(cycle));
            kafkaSpace.addOpTimeTrackKafkaClient(cacheKey, opTimeTrackKafkaClient);
        }

        return opTimeTrackKafkaClient;
    }

    private ReceivedMessageSequenceTracker getReceivedMessageSequenceTracker(String topicName) {
        return receivedMessageSequenceTrackersForTopicThreadLocal.get()
            .computeIfAbsent(topicName, k -> createReceivedMessageSequenceTracker());
    }

    private ReceivedMessageSequenceTracker createReceivedMessageSequenceTracker() {
        return new ReceivedMessageSequenceTracker(kafkaAdapterMetrics.getMsgErrOutOfSeqCounter(),
            kafkaAdapterMetrics.getMsgErrDuplicateCounter(),
            kafkaAdapterMetrics.getMsgErrLossCounter());
    }

    protected List<String> getEffectiveTopicNameList(long cycle) {
        String explicitTopicListStr = topicNameStrFunc.apply(cycle);

@@ -23,6 +23,7 @@ import io.nosqlbench.adapter.kafka.ops.OpTimeTrackKafkaClient;
import io.nosqlbench.adapter.kafka.ops.OpTimeTrackKafkaProducer;
import io.nosqlbench.adapter.kafka.util.KafkaAdapterUtil;
import io.nosqlbench.engine.api.activityimpl.uniform.DriverAdapter;
import io.nosqlbench.engine.api.metrics.EndToEndMetricsAdapterUtil;
import io.nosqlbench.engine.api.templating.ParsedOp;
import org.apache.commons.lang3.StringUtils;
import org.apache.kafka.clients.producer.KafkaProducer;
@@ -34,6 +35,14 @@ import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.function.LongFunction;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import java.util.Set;
import java.util.HashSet;
import java.util.Arrays;
import java.util.Optional;
import java.util.Collections;
import java.util.LinkedHashSet;

public class MessageProducerOpDispenser extends KafkaBaseOpDispenser {

@@ -49,13 +58,14 @@ public class MessageProducerOpDispenser extends KafkaBaseOpDispenser {
    private final LongFunction<String> msgHeaderJsonStrFunc;
    private final LongFunction<String> msgKeyStrFunc;
    private final LongFunction<String> msgValueStrFunc;
    protected final LongFunction<Boolean> seqTrackingFunc;
    protected final LongFunction<Set<EndToEndMetricsAdapterUtil.MSG_SEQ_ERROR_SIMU_TYPE>> msgSeqErrSimuTypeSetFunc;

    public MessageProducerOpDispenser(DriverAdapter adapter,
                                      ParsedOp op,
                                      LongFunction<String> tgtNameFunc,
                                      KafkaSpace kafkaSpace) {
        super(adapter, op, tgtNameFunc, kafkaSpace);

        this.producerClientConfMap.putAll(kafkaSpace.getKafkaClientConf().getProducerConfMap());
        producerClientConfMap.put("bootstrap.servers", kafkaSpace.getBootstrapSvr());

@@ -64,6 +74,11 @@ public class MessageProducerOpDispenser extends KafkaBaseOpDispenser {
        this.msgHeaderJsonStrFunc = lookupOptionalStrOpValueFunc(MSG_HEADER_OP_PARAM);
        this.msgKeyStrFunc = lookupOptionalStrOpValueFunc(MSG_KEY_OP_PARAM);
        this.msgValueStrFunc = lookupMandtoryStrOpValueFunc(MSG_BODY_OP_PARAM);

        this.msgSeqErrSimuTypeSetFunc = getStaticErrSimuTypeSetOpValueFunc();
        // Doc-level parameter: seq_tracking
        this.seqTrackingFunc = lookupStaticBoolConfigValueFunc(
            KafkaAdapterUtil.DOC_LEVEL_PARAMS.SEQ_TRACKING.label, false);
    }

    private String getEffectiveClientId(long cycle) {
@@ -126,6 +141,8 @@ public class MessageProducerOpDispenser extends KafkaBaseOpDispenser {
                asyncAPI,
                transactionEnabled,
                txnBatchNum,
                seqTrackingFunc.apply(cycle),
                msgSeqErrSimuTypeSetFunc.apply(cycle),
                producer);
            kafkaSpace.addOpTimeTrackKafkaClient(cacheKey, opTimeTrackKafkaClient);
        }
@@ -208,4 +225,28 @@ public class MessageProducerOpDispenser extends KafkaBaseOpDispenser {
            opTimeTrackKafkaProducer,
            message);
    }

    protected LongFunction<Set<EndToEndMetricsAdapterUtil.MSG_SEQ_ERROR_SIMU_TYPE>> getStaticErrSimuTypeSetOpValueFunc() {
        LongFunction<Set<EndToEndMetricsAdapterUtil.MSG_SEQ_ERROR_SIMU_TYPE>> setStringLongFunction;
        setStringLongFunction = (l) ->
            parsedOp.getOptionalStaticValue(KafkaAdapterUtil.DOC_LEVEL_PARAMS.SEQERR_SIMU.label, String.class)
                .filter(Predicate.not(String::isEmpty))
                .map(value -> {
                    Set<EndToEndMetricsAdapterUtil.MSG_SEQ_ERROR_SIMU_TYPE> set = new HashSet<>();

                    if (StringUtils.contains(value,',')) {
                        set = Arrays.stream(value.split(","))
                            .map(EndToEndMetricsAdapterUtil.MSG_SEQ_ERROR_SIMU_TYPE::parseSimuType)
                            .filter(Optional::isPresent)
                            .map(Optional::get)
                            .collect(Collectors.toCollection(LinkedHashSet::new));
                    }

                    return set;
                }).orElse(Collections.emptySet());
        logger.info(
            KafkaAdapterUtil.DOC_LEVEL_PARAMS.SEQERR_SIMU.label + ": {}",
            setStringLongFunction.apply(0));
        return setStringLongFunction;
    }
}

@@ -19,7 +19,7 @@ package io.nosqlbench.adapter.kafka.exception;

public class KafkaAdapterUnsupportedOpException extends RuntimeException {

    public KafkaAdapterUnsupportedOpException(String pulsarOpType) {
        super("Unsupported Pulsar adapter operation type: \"" + pulsarOpType + "\"");
    public KafkaAdapterUnsupportedOpException(String kafkaOpType) {
        super("Unsupported Kafka adapter operation type: \"" + kafkaOpType + "\"");
    }
}

@@ -22,6 +22,8 @@ import io.nosqlbench.adapter.kafka.KafkaSpace;
import io.nosqlbench.adapter.kafka.util.EndToEndStartingTimeSource;
import io.nosqlbench.adapter.kafka.util.KafkaAdapterMetrics;
import io.nosqlbench.adapter.kafka.util.KafkaAdapterUtil;
import io.nosqlbench.engine.api.metrics.ReceivedMessageSequenceTracker;
import org.apache.commons.lang3.StringUtils;
import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.header.Header;
@@ -30,6 +32,7 @@ import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

import java.util.Map;
import java.util.function.Function;

public class OpTimeTrackKafkaConsumer extends OpTimeTrackKafkaClient {
    private final static Logger logger = LogManager.getLogger("OpTimeTrackKafkaConsumer");
@@ -44,6 +47,8 @@ public class OpTimeTrackKafkaConsumer extends OpTimeTrackKafkaClient {

    private final KafkaConsumer<String, String> consumer;
    private Histogram e2eMsgProcLatencyHistogram;
    private final Function<String, ReceivedMessageSequenceTracker> receivedMessageSequenceTrackerForTopic;
    private final boolean seqTracking;

    public OpTimeTrackKafkaConsumer(KafkaSpace kafkaSpace,
                                    boolean asyncMsgCommit,
@@ -51,8 +56,10 @@ public class OpTimeTrackKafkaConsumer extends OpTimeTrackKafkaClient {
                                    boolean autoCommitEnabled,
                                    int maxMsgCntPerCommit,
                                    KafkaConsumer<String, String> consumer,
                                    KafkaAdapterMetrics kafkaAdapterMetrics,
                                    EndToEndStartingTimeSource e2eStartingTimeSrc,
                                    KafkaAdapterMetrics kafkaAdapterMetrics) {
                                    Function<String, ReceivedMessageSequenceTracker> receivedMessageSequenceTrackerForTopic,
                                    boolean seqTracking) {
        super(kafkaSpace);
        this.msgPoolIntervalInMs = msgPoolIntervalInMs;
        this.asyncMsgCommit = asyncMsgCommit;
@@ -61,6 +68,8 @@ public class OpTimeTrackKafkaConsumer extends OpTimeTrackKafkaClient {
        this.consumer = consumer;
        this.e2eStartingTimeSrc = e2eStartingTimeSrc;
        this.e2eMsgProcLatencyHistogram = kafkaAdapterMetrics.getE2eMsgProcLatencyHistogram();
        this.receivedMessageSequenceTrackerForTopic = receivedMessageSequenceTrackerForTopic;
        this.seqTracking = seqTracking;
    }

    public int getManualCommitTrackingCnt() { return manualCommitTrackingCnt.get(); }
@@ -123,12 +132,14 @@ public class OpTimeTrackKafkaConsumer extends OpTimeTrackKafkaClient {
        for (ConsumerRecord<String, String> record : records) {
            if (record != null) {
                if (logger.isDebugEnabled()) {
                    Header msg_seq_header = record.headers().lastHeader(KafkaAdapterUtil.MSG_SEQUENCE_NUMBER);
                    logger.debug(
                        "Receiving message is successful: [{}] - offset({}), cycle ({}), e2e_latency_ms({})",
                        "Receiving message is successful: [{}] - offset({}), cycle ({}), e2e_latency_ms({}), e2e_seq_number({})",
                        printRecvedMsg(record),
                        record.offset(),
                        cycle,
                        System.currentTimeMillis() - record.timestamp());
                        System.currentTimeMillis() - record.timestamp(),
                        (msg_seq_header != null ? new String(msg_seq_header.value()) : "null"));
                }

                if (!autoCommitEnabled) {
@@ -136,7 +147,7 @@ public class OpTimeTrackKafkaConsumer extends OpTimeTrackKafkaClient {
                    if (bCommitMsg) {
                        if (!asyncMsgCommit) {
                            consumer.commitSync();
                            updateE2ELatencyMetric(record);
                            checkAndUpdateMessageE2EMetrics(record);
                            if (logger.isDebugEnabled()) {
                                logger.debug(
                                    "Sync message commit is successful: cycle ({}), maxMsgCntPerCommit ({})",
@@ -153,7 +164,7 @@ public class OpTimeTrackKafkaConsumer extends OpTimeTrackKafkaClient {
                                        "Async message commit succeeded: cycle({}), maxMsgCntPerCommit ({})",
                                        cycle,
                                        maxMsgCntPerCommit);
                                    updateE2ELatencyMetric(record);
                                    checkAndUpdateMessageE2EMetrics(record);
                                } else {
                                    logger.debug(
                                        "Async message commit failed: cycle ({}), maxMsgCntPerCommit ({}), error ({})",
@@ -168,16 +179,22 @@ public class OpTimeTrackKafkaConsumer extends OpTimeTrackKafkaClient {

                        resetManualCommitTrackingCnt();
                    } else {
                        updateE2ELatencyMetric(record);
                        checkAndUpdateMessageE2EMetrics(record);
                        incManualCommitTrackingCnt();
                    }
                }
                updateE2ELatencyMetric(record);
                checkAndUpdateMessageE2EMetrics(record);
                }
            }
        }
    }

    private void checkAndUpdateMessageE2EMetrics(ConsumerRecord<String, String> record) {
        // keep track of message errors and update error counters
        if (seqTracking) checkAndUpdateMessageErrorCounter(record);
        updateE2ELatencyMetric(record);
    }

    private void updateE2ELatencyMetric(ConsumerRecord<String, String> record) {
        long startTimeStamp = 0L;
        switch (e2eStartingTimeSrc) {
@@ -191,6 +208,19 @@ public class OpTimeTrackKafkaConsumer extends OpTimeTrackKafkaClient {
        }
    }

    private void checkAndUpdateMessageErrorCounter(ConsumerRecord<String, String> record) {
        Header msg_seq_number_header = record.headers().lastHeader(KafkaAdapterUtil.MSG_SEQUENCE_NUMBER);
        String msgSeqIdStr = msg_seq_number_header != null ? new String(msg_seq_number_header.value()) : StringUtils.EMPTY;
        if (!StringUtils.isBlank(msgSeqIdStr)) {
            long sequenceNumber = Long.parseLong(msgSeqIdStr);
            ReceivedMessageSequenceTracker receivedMessageSequenceTracker =
                receivedMessageSequenceTrackerForTopic.apply(record.topic());
            receivedMessageSequenceTracker.sequenceNumberReceived(sequenceNumber);
        } else {
            logger.warn("Message sequence number header is null, skipping e2e message error metrics generation.");
        }
    }

    @Override
    public void close() {
        try {

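The `ReceivedMessageSequenceTracker` invoked above lives in the engine-api module, and its body is not part of this diff. As a rough, hypothetical illustration only (not the engine's actual implementation, which also buffers a window of out-of-order arrivals before counting losses), a tracker exposing the same `sequenceNumberReceived(long)` entry point and the three counters wired up in `createReceivedMessageSequenceTracker()` could look like this:

```java
import com.codahale.metrics.Counter;

// Hypothetical, simplified sketch -- not the nosqlbench implementation.
// Classifies each received sequence number against the highest one seen so far.
class NaiveReceivedSequenceTracker {
    private final Counter outOfSeqCounter;
    private final Counter duplicateCounter;
    private final Counter lossCounter;
    private long highestSeen = 0L;

    NaiveReceivedSequenceTracker(Counter outOfSeq, Counter duplicate, Counter loss) {
        this.outOfSeqCounter = outOfSeq;
        this.duplicateCounter = duplicate;
        this.lossCounter = loss;
    }

    void sequenceNumberReceived(long seq) {
        if (seq == highestSeen) {
            duplicateCounter.inc();          // exact repeat of the newest number
        } else if (seq < highestSeen) {
            outOfSeqCounter.inc();           // late arrival; a real tracker would also
                                             // reclassify a previously counted loss
        } else {
            if (seq > highestSeen + 1) {
                lossCounter.inc(seq - highestSeen - 1); // gap: treat skipped numbers as lost
            }
            highestSeen = seq;
        }
    }
}
```
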
@@ -20,6 +20,8 @@ package io.nosqlbench.adapter.kafka.ops;
import io.nosqlbench.adapter.kafka.KafkaSpace;
import io.nosqlbench.adapter.kafka.exception.KafkaAdapterUnexpectedException;
import io.nosqlbench.adapter.kafka.util.KafkaAdapterUtil;
import io.nosqlbench.engine.api.metrics.MessageSequenceNumberSendingHandler;
import io.nosqlbench.engine.api.metrics.EndToEndMetricsAdapterUtil;
import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
@@ -31,6 +33,9 @@ import org.apache.kafka.common.errors.ProducerFencedException;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

import java.util.Map;
import java.util.HashMap;
import java.util.Set;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import org.apache.kafka.common.errors.TimeoutException;
@@ -45,6 +50,10 @@ public class OpTimeTrackKafkaProducer extends OpTimeTrackKafkaClient {
    private final boolean asyncMsgAck;
    private final boolean transactEnabledConfig;
    private final int txnBatchNum;
    private final ThreadLocal<Map<String, MessageSequenceNumberSendingHandler>> MessageSequenceNumberSendingHandlersThreadLocal =
        ThreadLocal.withInitial(HashMap::new);
    private final boolean seqTracking;
    private final Set<EndToEndMetricsAdapterUtil.MSG_SEQ_ERROR_SIMU_TYPE> errSimuTypeSet;

    enum TxnProcResult {
        SUCCESS,
@@ -67,11 +76,15 @@ public class OpTimeTrackKafkaProducer extends OpTimeTrackKafkaClient {
                                    boolean asyncMsgAck,
                                    boolean transactEnabledConfig,
                                    int txnBatchNum,
                                    boolean seqTracking,
                                    Set<EndToEndMetricsAdapterUtil.MSG_SEQ_ERROR_SIMU_TYPE> errSimuTypeSet,
                                    KafkaProducer<String, String> producer) {
        super(kafkaSpace);
        this.asyncMsgAck = asyncMsgAck;
        this.transactEnabledConfig = transactEnabledConfig;
        this.txnBatchNum = txnBatchNum;
        this.seqTracking = seqTracking;
        this.errSimuTypeSet = errSimuTypeSet;
        this.transactionEnabled = transactEnabledConfig && (txnBatchNum > 2);
        this.producer = producer;
    }
@@ -193,6 +206,11 @@ public class OpTimeTrackKafkaProducer extends OpTimeTrackKafkaClient {
        }

        ProducerRecord<String, String> message = (ProducerRecord<String, String>) cycleObj;
        if (seqTracking) {
            long nextSequenceNumber = getMessageSequenceNumberSendingHandler(message.topic())
                .getNextSequenceNumber(errSimuTypeSet);
            message.headers().add(KafkaAdapterUtil.MSG_SEQUENCE_NUMBER, String.valueOf(nextSequenceNumber).getBytes());
        }
        try {
            if (result == TxnProcResult.SUCCESS) {
                Future<RecordMetadata> responseFuture = producer.send(message, new Callback() {
@@ -261,4 +279,9 @@ public class OpTimeTrackKafkaProducer extends OpTimeTrackKafkaClient {
            e.printStackTrace();
        }
    }

    private MessageSequenceNumberSendingHandler getMessageSequenceNumberSendingHandler(String topicName) {
        return MessageSequenceNumberSendingHandlersThreadLocal.get()
            .computeIfAbsent(topicName, k -> new MessageSequenceNumberSendingHandler());
    }
}

@@ -103,4 +103,28 @@ public class KafkaAdapterMetrics implements NBNamedElement {
    public Timer getBindTimer() { return bindTimer; }
    public Timer getExecuteTimer() { return executeTimer; }
    public Histogram getMessagesizeHistogram() { return messageSizeHistogram; }

    public Counter getMsgErrOutOfSeqCounter() {
        return msgErrOutOfSeqCounter;
    }

    public void setMsgErrOutOfSeqCounter(Counter msgErrOutOfSeqCounter) {
        this.msgErrOutOfSeqCounter = msgErrOutOfSeqCounter;
    }

    public Counter getMsgErrLossCounter() {
        return msgErrLossCounter;
    }

    public void setMsgErrLossCounter(Counter msgErrLossCounter) {
        this.msgErrLossCounter = msgErrLossCounter;
    }

    public Counter getMsgErrDuplicateCounter() {
        return msgErrDuplicateCounter;
    }

    public void setMsgErrDuplicateCounter(Counter msgErrDuplicateCounter) {
        this.msgErrDuplicateCounter = msgErrDuplicateCounter;
    }
}

@@ -31,7 +31,7 @@ import java.util.Map;
import java.util.stream.Collectors;

public class KafkaAdapterUtil {

    public static final String MSG_SEQUENCE_NUMBER = "sequence_number";
    private final static Logger logger = LogManager.getLogger(KafkaAdapterUtil.class);

    public static String DFT_CONSUMER_GROUP_NAME_PREFIX = "nbKafkaGrp";
@@ -42,7 +42,9 @@ public class KafkaAdapterUtil {
    public enum DOC_LEVEL_PARAMS {
        // Blocking message producing or consuming
        ASYNC_API("async_api"),
        E2E_STARTING_TIME_SOURCE("e2e_starting_time_source");
        SEQERR_SIMU("seqerr_simu"),
        E2E_STARTING_TIME_SOURCE("e2e_starting_time_source"),
        SEQ_TRACKING("seq_tracking");
        public final String label;

        DOC_LEVEL_PARAMS(String label) {

@@ -49,7 +49,7 @@ public class KafkaClientConf {
    public KafkaClientConf(String clientConfFileName) {

        //////////////////
        // Read related Pulsar client configuration settings from a file
        // Read related Kafka client configuration settings from a file
        readRawConfFromFile(clientConfFileName);

@@ -1,10 +1,10 @@
# Overview

This NB Kafka driver allows publishing messages to or consuming messages from
This NB Kafka adapter allows publishing messages to or consuming messages from
* a Kafka cluster, or
* a Pulsar cluster with [S4K](https://github.com/datastax/starlight-for-kafka) or [KoP](https://github.com/streamnative/kop) Kafka Protocol handler for Pulsar.

At a high level, this driver supports the following Kafka functionalities
At a high level, this adapter supports the following Kafka functionalities
* Publishing messages to one Kafka topic with sync. or async. message-send acknowledgements (from brokers)
* Subscribing to messages from one or multiple Kafka topics with sync. or async. message-recv acknowledgements (to brokers) (aka, message commits)
  * auto message commit
@@ -26,7 +26,7 @@ $ <nb_cmd> run driver=kafka -vv cycles=100 threads=2 num_clnt=2 yaml=kafka_produ
$ <nb_cmd> run driver=kafka -vv cycles=100 threads=4 num_clnt=2 num_cons_grp=2 yaml=kafka_producer.yaml config=kafka_config.properties bootstrap_server=PLAINTEXT://localhost:9092
```

## NB Kafka driver specific CLI parameters
## NB Kafka adapter specific CLI parameters

* `num_clnt`: the number of Kafka clients to publish messages to or to receive messages from
  * For producer workload, this is the number of producer threads that publish messages to the same topic
@@ -39,3 +39,24 @@ $ <nb_cmd> run driver=kafka -vv cycles=100 threads=4 num_clnt=2 num_cons_grp=2 y

* `num_cons_grp`: the number of consumer groups
  * Only relevant for consumer workload

For the Kafka NB adapter, document-level parameters can only be statically bound; currently, the following document-level configuration parameters are supported (a combined example follows this list):

* `async_api` (boolean):
  * When true, use the async Kafka client API.
* `seq_tracking` (boolean):
  * When true, a sequence number is created as part of each message's properties
  * Used together with `seqerr_simu`, this makes it possible to inject abnormal message-processing errors and then verify that they are detected.
* `seqerr_simu`:
  * A list of error simulation types, separated by commas (,)
  * Valid error simulation types
    * `out_of_order`: simulate messages arriving out of sequence
    * `msg_loss`: simulate message loss
    * `msg_dup`: simulate message duplication
  * This parameter exists only to exercise the error metrics themselves; do not enable it in real performance-testing runs.
* `e2e_starting_time_source`:
  * Starting timestamp for end-to-end operation. When specified, updates the `e2e_msg_latency` histogram with the calculated end-to-end latency, computed by subtracting the starting time from the current time. The starting time is taken from the configured source and is expressed in milliseconds since epoch.
  * The possible values for `e2e_starting_time_source`:
    * `message_publish_time` : uses the message publishing timestamp as the starting time. The message publishing time, in this case, [is computed by the Kafka client on record generation](https://kafka.apache.org/34/javadoc/org/apache/kafka/clients/producer/ProducerRecord.html). This is the case, as [`CreateTime` is the default](https://docs.confluent.io/platform/current/installation/configuration/topic-configs.html#message-timestamp-type).
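As a quick reference, here is a hypothetical workload snippet that combines all four document-level parameters (values borrowed from the bundled kafka_producer.yaml and kafka_consumer.yaml examples):

```yaml
params:
  # use the async Kafka client API (producer side)
  async_api: "true"
  # attach a sequence number header to every message; enable on both producer and consumer
  seq_tracking: "true"
  # inject simulated sequence errors -- only for verifying the e2e error metrics
  seqerr_simu: 'out_of_order,msg_loss,msg_dup'
  # compute e2e_msg_latency from the message publish timestamp (consumer side)
  e2e_starting_time_source: "message_publish_time"
```
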
adapter-kafka/src/main/resources/build-nb-kafka-driver.sh (new executable file, 8 lines)

@@ -0,0 +1,8 @@
#!/usr/local/bin/bash
: "${SKIP_TESTS:=1}"
(
    cd "$(git rev-parse --show-toplevel)" && \
    mvn clean install "-DskipTests" -pl adapters-api,adapter-kafka,nb5 && \
    [[ ${SKIP_TESTS} -ne 1 ]] && \
    mvn test -pl adapters-api,adapter-pulsar
)

@@ -5,7 +5,8 @@
#--------------------------------------
topic.compression.type=uncompressed
topic.flush.messages=2

# this is likely unused, as this file doesn't seem to be loaded
topic.log.message.timestamp.type=CreateTime

#####
# Producer related configurations (global) - topic.***

@@ -1,10 +1,16 @@
# document level parameters that apply to all Pulsar client types:
# document level parameters that apply to all Kafka client types:
params:
  # Whether to commit message asynchronously
  # - default: true
  # - only relevant for manual commit
  # async_api: "true"
  # activates e2e latency metrics
  # - default: "none" (i.e. disabled)
  e2e_starting_time_source: "message_publish_time"
  # activates e2e error metrics (message duplication, message loss and out-of-order detection)
  # it needs to be enabled on both the producer and the consumer
  # - default: false
  seq_tracking: "true"

blocks:
  msg-consume-block:
@@ -12,7 +18,7 @@ blocks:
    op1:
      ## The value represents the topic names
      # - for consumer, a list of topics (separated by comma) is supported
      MessageConsume: "nbktest1,nbktest2"
      MessageConsume: "nbktest"

      # The timeout value to poll messages (unit: milliseconds)
      # - default: 0

@@ -9,6 +9,12 @@ params:
  # whether to confirm message send ack. asynchronously
  # - default: true
  async_api: "true"
  # activates e2e error metrics (message duplication, message loss and out-of-order detection)
  # it needs to be enabled on both the producer and the consumer
  # - default: false
  seq_tracking: "true"
  # test error injection, remove in production
  seqerr_simu: 'out_of_order,msg_loss,msg_dup'

blocks:
  msg-produce-block:
@@ -22,7 +28,7 @@ blocks:
      # - default: 0
      # - value 0 or 1 means no transaction
      # - it also requires that the "transactional.id" parameter is set
      txn_batch_num: 8
      txn_batch_num: 1

      ## (Optional) Kafka message headers (in JSON format).
      msg_header: |

adapter-kafka/src/main/resources/start_kafka_consumer.sh (new executable file, 20 lines)

@@ -0,0 +1,20 @@
#!/usr/local/bin/bash
: "${REBUILD:=1}"
: "${CYCLES:=1000000000}"
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" &>/dev/null && pwd)"
if [[ ${REBUILD} -eq 1 ]]; then
    "${SCRIPT_DIR}/build-nb-kafka-driver.sh"
fi
java -jar nb5/target/nb5.jar \
    run \
    driver=kafka \
    -vv \
    --report-interval 5 \
    --docker-metrics \
    cycles=${CYCLES} \
    threads=1 \
    num_clnt=1 \
    num_cons_grp=1 \
    yaml="${SCRIPT_DIR}/kafka_consumer.yaml" \
    config="${SCRIPT_DIR}/kafka_config.properties" \
    bootstrap_server=PLAINTEXT://localhost:9092

adapter-kafka/src/main/resources/start_kafka_producer.sh (new executable file, 22 lines)

@@ -0,0 +1,22 @@
#!/usr/local/bin/bash
: "${REBUILD:=1}"
: "${CYCLES:=1000000000}"
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" &>/dev/null && pwd)"
if [[ ${REBUILD} -eq 1 ]]; then
    "${SCRIPT_DIR}/build-nb-kafka-driver.sh"
fi
while [[ 1 -eq 1 ]]; do
    java -jar nb5/target/nb5.jar \
        run \
        driver=kafka \
        -vv \
        --report-interval 5 \
        --docker-metrics \
        cycles="${CYCLES}" \
        threads=1 \
        num_clnt=1 \
        yaml="${SCRIPT_DIR}/kafka_producer.yaml" \
        config="${SCRIPT_DIR}/kafka_config.properties" \
        bootstrap_server=PLAINTEXT://localhost:9092
    sleep 10
done

@@ -20,7 +20,7 @@ import io.nosqlbench.adapter.pulsar.PulsarSpace;
import io.nosqlbench.adapter.pulsar.ops.MessageConsumerOp;
import io.nosqlbench.adapter.pulsar.util.EndToEndStartingTimeSource;
import io.nosqlbench.adapter.pulsar.util.PulsarAdapterUtil;
import io.nosqlbench.adapter.pulsar.util.ReceivedMessageSequenceTracker;
import io.nosqlbench.engine.api.metrics.ReceivedMessageSequenceTracker;
import io.nosqlbench.engine.api.activityimpl.uniform.DriverAdapter;
import io.nosqlbench.engine.api.templating.ParsedOp;
import org.apache.logging.log4j.LogManager;

@@ -20,6 +20,7 @@ import com.codahale.metrics.Timer;
import io.nosqlbench.adapter.pulsar.PulsarSpace;
import io.nosqlbench.adapter.pulsar.util.PulsarAdapterUtil;
import io.nosqlbench.engine.api.activityimpl.uniform.DriverAdapter;
import io.nosqlbench.engine.api.metrics.EndToEndMetricsAdapterUtil;
import io.nosqlbench.engine.api.templating.ParsedOp;
import org.apache.commons.lang3.StringUtils;
import org.apache.logging.log4j.LogManager;
@@ -49,7 +50,7 @@ public abstract class PulsarClientOpDispenser extends PulsarBaseOpDispenser {
    protected final LongFunction<Boolean> seqTrackingFunc;
    protected final LongFunction<String> payloadRttFieldFunc;
    protected final LongFunction<Supplier<Transaction>> transactSupplierFunc;
    protected final LongFunction<Set<PulsarAdapterUtil.MSG_SEQ_ERROR_SIMU_TYPE>> msgSeqErrSimuTypeSetFunc;
    protected final LongFunction<Set<EndToEndMetricsAdapterUtil.MSG_SEQ_ERROR_SIMU_TYPE>> msgSeqErrSimuTypeSetFunc;

    public PulsarClientOpDispenser(DriverAdapter adapter,
                                   ParsedOp op,
@@ -101,17 +102,17 @@ public abstract class PulsarClientOpDispenser extends PulsarBaseOpDispenser {
        };
    }

    protected LongFunction<Set<PulsarAdapterUtil.MSG_SEQ_ERROR_SIMU_TYPE>> getStaticErrSimuTypeSetOpValueFunc() {
        LongFunction<Set<PulsarAdapterUtil.MSG_SEQ_ERROR_SIMU_TYPE>> setStringLongFunction;
    protected LongFunction<Set<EndToEndMetricsAdapterUtil.MSG_SEQ_ERROR_SIMU_TYPE>> getStaticErrSimuTypeSetOpValueFunc() {
        LongFunction<Set<EndToEndMetricsAdapterUtil.MSG_SEQ_ERROR_SIMU_TYPE>> setStringLongFunction;
        setStringLongFunction = (l) ->
            parsedOp.getOptionalStaticValue(PulsarAdapterUtil.DOC_LEVEL_PARAMS.SEQERR_SIMU.label, String.class)
                .filter(Predicate.not(String::isEmpty))
                .map(value -> {
                    Set<PulsarAdapterUtil.MSG_SEQ_ERROR_SIMU_TYPE> set = new HashSet<>();
                    Set<EndToEndMetricsAdapterUtil.MSG_SEQ_ERROR_SIMU_TYPE> set = new HashSet<>();

                    if (StringUtils.contains(value,',')) {
                        set = Arrays.stream(value.split(","))
                            .map(PulsarAdapterUtil.MSG_SEQ_ERROR_SIMU_TYPE::parseSimuType)
                            .map(EndToEndMetricsAdapterUtil.MSG_SEQ_ERROR_SIMU_TYPE::parseSimuType)
                            .filter(Optional::isPresent)
                            .map(Optional::get)
                            .collect(Collectors.toCollection(LinkedHashSet::new));

@@ -20,6 +20,7 @@ import com.codahale.metrics.Timer;
import io.nosqlbench.adapter.pulsar.exception.PulsarAdapterAsyncOperationFailedException;
import io.nosqlbench.adapter.pulsar.exception.PulsarAdapterUnexpectedException;
import io.nosqlbench.adapter.pulsar.util.*;
import io.nosqlbench.engine.api.metrics.ReceivedMessageSequenceTracker;
import org.apache.commons.lang3.StringUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

@@ -19,10 +19,11 @@ package io.nosqlbench.adapter.pulsar.ops;
import com.codahale.metrics.Timer;
import io.nosqlbench.adapter.pulsar.exception.PulsarAdapterAsyncOperationFailedException;
import io.nosqlbench.adapter.pulsar.exception.PulsarAdapterUnexpectedException;
import io.nosqlbench.adapter.pulsar.util.MessageSequenceNumberSendingHandler;
import io.nosqlbench.engine.api.metrics.MessageSequenceNumberSendingHandler;
import io.nosqlbench.adapter.pulsar.util.PulsarAdapterMetrics;
import io.nosqlbench.adapter.pulsar.util.PulsarAdapterUtil;
import io.nosqlbench.adapter.pulsar.util.PulsarAvroSchemaUtil;
import io.nosqlbench.engine.api.metrics.EndToEndMetricsAdapterUtil;
import org.apache.commons.lang3.StringUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
@@ -49,7 +50,7 @@ public class MessageProducerOp extends PulsarClientOp {
    private final boolean useTransact;
    private final boolean seqTracking;
    private final Supplier<Transaction> transactSupplier;
    private final Set<PulsarAdapterUtil.MSG_SEQ_ERROR_SIMU_TYPE> errSimuTypeSet;
    private final Set<EndToEndMetricsAdapterUtil.MSG_SEQ_ERROR_SIMU_TYPE> errSimuTypeSet;
    private final Producer<?> producer;
    private final String msgKey;
    private final String msgPropRawJsonStr;
@@ -66,7 +67,7 @@ public class MessageProducerOp extends PulsarClientOp {
                             boolean useTransact,
                             boolean seqTracking,
                             Supplier<Transaction> transactSupplier,
                             Set<PulsarAdapterUtil.MSG_SEQ_ERROR_SIMU_TYPE> errSimuTypeSet,
                             Set<EndToEndMetricsAdapterUtil.MSG_SEQ_ERROR_SIMU_TYPE> errSimuTypeSet,
                             Producer<?> producer,
                             String msgKey,
                             String msgProp,
@@ -134,7 +135,7 @@ public class MessageProducerOp extends PulsarClientOp {
        }

        // set message properties
        if ( !msgPropRawJsonStr.isEmpty() ) {
        if (!StringUtils.isBlank(msgPropRawJsonStr) || seqTracking) {
            typedMessageBuilder = typedMessageBuilder.properties(msgProperties);
        }

@@ -33,9 +33,7 @@ import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Base64;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.stream.Collectors;
@@ -68,35 +66,6 @@ public class PulsarAdapterUtil {
        }
    }

    ///////
    // Message processing sequence error simulation types
    public enum MSG_SEQ_ERROR_SIMU_TYPE {
        OutOfOrder("out_of_order"),
        MsgLoss("msg_loss"),
        MsgDup("msg_dup");

        public final String label;

        MSG_SEQ_ERROR_SIMU_TYPE(String label) {
            this.label = label;
        }

        private static final Map<String, MSG_SEQ_ERROR_SIMU_TYPE> MAPPING = Stream.of(values())
            .flatMap(simuType ->
                Stream.of(simuType.label,
                        simuType.label.toLowerCase(),
                        simuType.label.toUpperCase(),
                        simuType.name(),
                        simuType.name().toLowerCase(),
                        simuType.name().toUpperCase())
                    .distinct().map(key -> Map.entry(key, simuType)))
            .collect(Collectors.toUnmodifiableMap(Map.Entry::getKey, Map.Entry::getValue));

        public static Optional<MSG_SEQ_ERROR_SIMU_TYPE> parseSimuType(String simuTypeString) {
            return Optional.ofNullable(MAPPING.get(simuTypeString.trim()));
        }
    }

    ///////
    // Valid Pulsar API type
    public enum PULSAR_API_TYPE {

adapter-pulsar/src/main/resources/build-nb-pulsar-driver.sh (new executable file, 8 lines)

@@ -0,0 +1,8 @@
#!/usr/local/bin/bash
: "${SKIP_TESTS:=1}"
(
    cd "$(git rev-parse --show-toplevel)" && \
    mvn clean install "-DskipTests" -pl adapters-api,adapter-pulsar,nb5 && \
    [[ ${SKIP_TESTS} -ne 1 ]] && \
    mvn test -pl adapters-api,adapter-pulsar
)

@@ -155,6 +155,7 @@ For the Pulsar NB driver, Document level parameters can only be statically bound
    * `out_of_order`: simulate messages arriving out of sequence
    * `msg_loss`: simulate message loss
    * `msg_dup`: simulate message duplication
  * This parameter exists only to exercise the error metrics themselves; do not enable it in real performance-testing runs.
* ***e2e_starting_time_source***:
  * Starting timestamp for end-to-end operation. When specified, updates the `e2e_msg_latency` histogram with the calculated end-to-end latency, computed by subtracting the starting time from the current time. The starting time is taken from the configured source and is expressed in milliseconds since epoch.
  * The possible values for `e2e_starting_time_source`:

adapter-pulsar/src/main/resources/start_pulsar_consumer.sh (new executable file, 16 lines)

@@ -0,0 +1,16 @@
#!/usr/local/bin/bash
: "${REBUILD:=1}"
: "${CYCLES:=1000000000}"
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" &>/dev/null && pwd)"
if [[ ${REBUILD} -eq 1 ]]; then
    "${SCRIPT_DIR}/build-nb-pulsar-driver.sh"
fi
java -jar nb5/target/nb5.jar \
    run \
    driver=pulsar \
    -vv \
    --report-interval 5 \
    --docker-metrics \
    cycles=${CYCLES} \
    yaml="${SCRIPT_DIR}/yaml_examples/consumer_4KB_workload.yaml" \
    config="${SCRIPT_DIR}/config.properties"

adapter-pulsar/src/main/resources/start_pulsar_producer.sh (new executable file, 22 lines)

@@ -0,0 +1,22 @@
#!/usr/local/bin/bash
: "${REBUILD:=1}"
: "${CYCLES:=1000000000}"
: "${CYCLERATE:=100}"
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" &>/dev/null && pwd)"
if [[ ${REBUILD} -eq 1 ]]; then
    "${SCRIPT_DIR}/build-nb-pulsar-driver.sh"
fi
while [[ 1 -eq 1 ]]; do
    java -jar nb5/target/nb5.jar \
        run \
        driver=pulsar \
        -vv \
        --report-interval 5 \
        --docker-metrics \
        cycles="${CYCLES}" \
        cyclerate="${CYCLERATE}" \
        threads=1 \
        yaml="${SCRIPT_DIR}/yaml_examples/producer_4KB_workload.yaml" \
        config="${SCRIPT_DIR}/config.properties"
    sleep 10
done

@@ -0,0 +1,14 @@
description: consumer
params:
  topic_uri:
  seq_tracking: 'true'
  seqerr_simu: 'out_of_order,msg_loss,msg_dup'
  e2e_starting_time_source: 'message_publish_time'
blocks:
  ec2-msg-proc-consume:
    ops:
      op1:
        MessageConsume: "persistent://public/default/perftest"
        consumerName: ''
        subscriptionName: "perfsub"
        subscriptionType: 'Exclusive'

@@ -0,0 +1,13 @@
description: async producer with 4k message
params:
  topic_uri: persistent://public/default/perftest
  async_api: 'true'
  seqerr_simu: 'out_of_order,msg_loss,msg_dup'
  seq_tracking: 'true'
blocks:
  ec2-msg-proc-send:
    ops:
      op1:
        MessageProduce: "persistent://public/default/perftest"
        producerName: ''
msg_value: "Wieghophuuwie5id9ied4oe2bae7geiSohzoo7harohf9Ea4Chi0thooDa2Zeceith3oacoh8ing0ohs3Eephaid2ahxikunuCh1que0phooP6iuRa8Thai0aemohsh0eQuaiphee0aehu6jequai8eixae2xau0agoo4saeQua7ro6chie4xuseicai7aivonohy6rai3Ohw7jiPaa0eequ9Kao4xeabaih9ohf5De8quaveig5hieB2tiecaiqu5ios1aek4faipae7uFiathashahw6feipae7fob7pieng4oongei7taalaiGeiy6aeGeiBu4obegeereetaaMuiciseehei0iugee3uip7Ailaixeis5Ge0Fabeepie7um8quiphoh9ushohPheeK5Leigheix0ahzailoer7ugh0shiqu3theeVimoh6Uwahm7Aiyae0Shaifeek5aiSemo2peiV9bi4Umezia5teex5eech8thei2aekee8oof2Geegh7waidoJ2thaogiexaehaixogaiw5vahr3kei0choi9chaefaputhu3IeziiNaik2thah8IeX7apak3ier3ish1yei9ziebuHeonahr6nahghooxee3eujaingopooge9Aivei0Ii7ieviv5xahc9eexong9aeveem4aigav0Be5iela0pheijeiJieM5xib8ohloen5oov3Aishaesufou8Dokeig7outhai3bacaeThae6goo0cahgue6iePhon8Eingac2zooseiphi9ush5jaiGheicaic6aetootood8idizahS7eeree2boh5eavu3Gie2ke3fosh7lieghuibeihongouXeejie0su7uoQu5ahwic4tang2bohtee3gae3tohQuie8ki1ReeGhiC2peiyaiGai3ighah4Ooh0thueb1ThaeRiemoh2ier1eyei4ahl9oopho5aih6mijohLatha1saek0aiBiet2sie3Uc0re0Taengah4nohshaeG1uD4guleeshe8ouyuQuoo4oa3eiMaeVi8phahXie5deipee2Ho0eenge8ahchah1lo2ig0meedaiBiep9Ie0ma8ookahs7quigie2mohlephaihiutahHeidi8Thoo5shaeGhoh8shei0shohyahmeewe3eiroo8naiDoh0ObeeWae3Jie4wahhei3iov9Waedileebeib5aifeedoo3aik8chailaij4awohdoo9aejae8ThootaeSaetooQu6yaihoPhiehaezeeQuaixei1ahbaShaegi8EimacechiequeetahcoocheQu8Ahkohthee5jaizeiphiicahphoon9iph3iifaicaz0gieshaic8ue7Cei2eamoov1Iefoo8zeNg9Roomu4ahp4uoceuCahf6EiJ6Kai2Shooy1paghu8aica1atheiLah7Vaid7wiem4too8apahjaghei1ko3uk0Eehe4ahlaiZ1ieghieth3aap7voo5wai4oongoimahhiebier7Zei6eic3baih2phoh1rei8pe2nahl1AengiepaYaphoh1ohngoov6wi7eich6aep3Oozoo4veesho2uquiu9aoLiod7ioveungeecooCiCaik7cuv0aeji4mooG1hiozeem6ushahp3eiQuumoo9chooz1CheoWiewiengahmaeFooyao9iepheneineimah8bimathee5eeseis0ooc5ya1kiabai4oTieThohgh3viez2tieyain9eiloo9Ahgho8aungey6ishooPhohloh8eegeed0ieshae6fie0aiJaePhewuR4Zohlie1ooph3niengio0ahpaifai1EehePieshae0geePhaikoh8ietiequu8iekuiph3phogeifaesai4aa4phaeziXi0aX8ooquaephahzeid9ohkahrilaim3tou7ge0gaikeePhoh4kaecahsee0aac0Aengool5Vie1ahza3oojohchohthogo6taeThohpa3ahun2gif2Jea9JahphePhaniekathailooyeegh5xahph1diek9oor7iFieCheeghaeLohtaiko8keil1oothuphohki8TohtohPh7son4cahgu1cu8amaof0aYeu0aeXiexoucukuazaeGhe2haighosahSh5SiGhaChiezoo3eophaeKiGeena8tos1Chiuveimohqu0dai1xaidieThee0ieGheibuz6aHa4Ahphee6cuuFah3pha9fohSiequohXei6thi1iepeC4ohhi5oofaen5Eechaixee2nigha9aeceePhooVeigaje5fohT2thoh5go5zei8oofeo2AluNahf9OhphaR3pefohz6jo3Ku2ughaigeehae3ahdeubeetaengeig6quoh8aibohbohrohTh8sae2Ahlae6emae2Haech3iePahXoh1Eu4leib9Ohngie6ex2Saiboothahnees0etung2veevie2iet1ohz9Yohyeipa7ohle3Eepaechoo0thuuWie7looNei2oopeengah7aiDaeSie7GiapooThee1viChem9phohphahrua9eithiXee0ees6foNo8hieba0sa2phohLaephieGh3ahZ1ophaechahyei9ohs0phoh9aenaomue2pa5ooNg2caha6Doo5eenohzaiwei7gae0mice0etai2eeseZ8nahwieSoolaey2yeiZee4aekiePae2Seezog2zie6modem8de2thosae3shonobohthooce3oopahw3vou5ingee4peijei1Ooyiinu3HowaiNgife1geejohqu8SeelieweePooHee9eeNuung8go4oobuoquahngoequingee4opohjeiNg9ahpohqu4oofaivae0Erie9zei4mohng6zaehieL7Pheboic6Paech2hai3Ahbo3oikew3vi5Uox0nooSheo7oGaiS3baevae9phulooyu0OomeuChiB3feeWeun1Ieghai6chaiz5Caeli2eic5chuweib0kohtaezae5Chahgh6ge4oopou8rohJ1heiquei2shohlau1eegh8ooyiochee1AetuY6eizeegoh9Iechoj2kee9geb4Peitah2ahFoong9kie1egho1aeM2ug9ieyaLeigheshiusoeSoquiev1Ied5nooGhingu3Eivetah5Aipohf3aipi5eithooga9aij7eeng7kee6caishoh7iiphies7AhSo9choo0yaezeiChohzeeyohsohc4quu9iequaenoo4shai8kePeechei1ei9neuph1na6oitiChaepo2geeng5Aghietheez8iTh3zeec9ailei6Yoh2duthee4fei7aduciedeeShoaha1ohy6xaew1vo1ueR9eiGohsenu3iesh3Oomai3Ieh3PheebahJ2phaoraib
ohngeeh1Vei1aiyoceetei7eep3iechee1veF5Upaeg4ahtiuJ4aila9Ieph7ethee8Gaxee8dei8neid4Kae4Sho2Ies7eebaiMieCere1aem5de9oKuvif0aeshai3ku8ou8biequieghaivahfohr0heiyupe9Goixa8OhmiDiqua5Nootai0Ooqu4Eugeicai5airoo8YithahHoh7kuliivu4Guphoop8aoNg6zei5etohyieNgo4eiWaiwaihohchuxiu5oirei3eeb1ahdiaFubaithiethaeQu9thoh6eefu4xohngahGhai7eewoh8aeRa4zeeshie2iopheemephahfe5eib4jialaeBiecheePheigooqu7Eboriech1ahziemie3ieThaefohvah7yaiJoodae5roozaePuj3ieciGhoob2yai9hie0Isheeg7Iengoongahfaceep4shahx4KeiJohJah2tohpiaSh4ahja1woh6being7yoz1aik9Ca4jaeCie1caijaiKie0aeth3iebie"
@@ -0,0 +1,55 @@
package io.nosqlbench.engine.api.metrics;

/*
 * Copyright (c) 2022 nosqlbench
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */


import java.util.Map;
import java.util.Optional;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class EndToEndMetricsAdapterUtil {
    ///////
    // Message processing sequence error simulation types
    public enum MSG_SEQ_ERROR_SIMU_TYPE {
        OutOfOrder("out_of_order"),
        MsgLoss("msg_loss"),
        MsgDup("msg_dup");

        public final String label;

        MSG_SEQ_ERROR_SIMU_TYPE(String label) {
            this.label = label;
        }

        private static final Map<String, MSG_SEQ_ERROR_SIMU_TYPE> MAPPING = Stream.of(values())
            .flatMap(simuType ->
                Stream.of(simuType.label,
                        simuType.label.toLowerCase(),
                        simuType.label.toUpperCase(),
                        simuType.name(),
                        simuType.name().toLowerCase(),
                        simuType.name().toUpperCase())
                    .distinct().map(key -> Map.entry(key, simuType)))
            .collect(Collectors.toUnmodifiableMap(Map.Entry::getKey, Map.Entry::getValue));

        public static Optional<MSG_SEQ_ERROR_SIMU_TYPE> parseSimuType(String simuTypeString) {
            return Optional.ofNullable(MAPPING.get(simuTypeString.trim()));
        }
    }
}

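As a usage note (not part of the commit), resolving a `seqerr_simu` string to enum values follows the same shape as `getStaticErrSimuTypeSetOpValueFunc()` in the dispensers above; a minimal, self-contained sketch with a hypothetical demo class:

```java
import io.nosqlbench.engine.api.metrics.EndToEndMetricsAdapterUtil.MSG_SEQ_ERROR_SIMU_TYPE;

import java.util.Arrays;
import java.util.LinkedHashSet;
import java.util.Optional;
import java.util.Set;
import java.util.stream.Collectors;

// Hypothetical demo class, for illustration only.
public class SeqErrSimuParseDemo {
    public static void main(String[] args) {
        // Split the doc-level parameter on commas, map each token through
        // parseSimuType(), and keep only the values that resolve.
        Set<MSG_SEQ_ERROR_SIMU_TYPE> simuTypes =
            Arrays.stream("out_of_order,msg_dup".split(","))
                .map(MSG_SEQ_ERROR_SIMU_TYPE::parseSimuType)
                .filter(Optional::isPresent)
                .map(Optional::get)
                .collect(Collectors.toCollection(LinkedHashSet::new));
        System.out.println(simuTypes); // prints [OutOfOrder, MsgDup]
    }
}
```
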
@@ -1,4 +1,4 @@
package io.nosqlbench.adapter.pulsar.util;
package io.nosqlbench.engine.api.metrics;

/*
 * Copyright (c) 2022 nosqlbench
@@ -33,16 +33,16 @@ public class MessageSequenceNumberSendingHandler {
    long number = 1;
    Queue<Long> outOfOrderNumbers;

    public long getNextSequenceNumber(Set<PulsarAdapterUtil.MSG_SEQ_ERROR_SIMU_TYPE> simulatedErrorTypes) {
    public long getNextSequenceNumber(Set<EndToEndMetricsAdapterUtil.MSG_SEQ_ERROR_SIMU_TYPE> simulatedErrorTypes) {
        return getNextSequenceNumber(simulatedErrorTypes, SIMULATED_ERROR_PROBABILITY_PERCENTAGE);
    }

    long getNextSequenceNumber(Set<PulsarAdapterUtil.MSG_SEQ_ERROR_SIMU_TYPE> simulatedErrorTypes, int errorProbabilityPercentage) {
    long getNextSequenceNumber(Set<EndToEndMetricsAdapterUtil.MSG_SEQ_ERROR_SIMU_TYPE> simulatedErrorTypes, int errorProbabilityPercentage) {
        simulateError(simulatedErrorTypes, errorProbabilityPercentage);
        return nextNumber();
    }

    private void simulateError(Set<PulsarAdapterUtil.MSG_SEQ_ERROR_SIMU_TYPE> simulatedErrorTypes, int errorProbabilityPercentage) {
    private void simulateError(Set<EndToEndMetricsAdapterUtil.MSG_SEQ_ERROR_SIMU_TYPE> simulatedErrorTypes, int errorProbabilityPercentage) {
        if (!simulatedErrorTypes.isEmpty() && shouldSimulateError(errorProbabilityPercentage)) {
            int selectIndex = 0;
            int numberOfErrorTypes = simulatedErrorTypes.size();
@@ -50,7 +50,7 @@ public class MessageSequenceNumberSendingHandler {
                // pick one of the simulated error type randomly
                selectIndex = RandomUtils.nextInt(0, numberOfErrorTypes);
            }
            PulsarAdapterUtil.MSG_SEQ_ERROR_SIMU_TYPE errorType = simulatedErrorTypes.stream()
            EndToEndMetricsAdapterUtil.MSG_SEQ_ERROR_SIMU_TYPE errorType = simulatedErrorTypes.stream()
                .skip(selectIndex)
                .findFirst()
                .get();

@@ -1,4 +1,4 @@
package io.nosqlbench.adapter.pulsar.util;
package io.nosqlbench.engine.api.metrics;

/*
 * Copyright (c) 2022 nosqlbench

@@ -14,7 +14,7 @@
 * limitations under the License.
 */

package io.nosqlbench.adapter.pulsar.util;
package io.nosqlbench.engine.api.metrics;

import org.junit.jupiter.api.Test;

@@ -39,19 +39,19 @@ class MessageSequenceNumberSendingHandlerTest {
    @Test
    void shouldInjectMessageLoss() {
        assertEquals(1L, sequenceNumberSendingHandler.getNextSequenceNumber(Collections.emptySet()));
        assertEquals(3L, sequenceNumberSendingHandler.getNextSequenceNumber(Collections.singleton(PulsarAdapterUtil.MSG_SEQ_ERROR_SIMU_TYPE.MsgLoss), 100));
        assertEquals(3L, sequenceNumberSendingHandler.getNextSequenceNumber(Collections.singleton(EndToEndMetricsAdapterUtil.MSG_SEQ_ERROR_SIMU_TYPE.MsgLoss), 100));
    }

    @Test
    void shouldInjectMessageDuplication() {
        assertEquals(1L, sequenceNumberSendingHandler.getNextSequenceNumber(Collections.emptySet()));
        assertEquals(1L, sequenceNumberSendingHandler.getNextSequenceNumber(Collections.singleton(PulsarAdapterUtil.MSG_SEQ_ERROR_SIMU_TYPE.MsgDup), 100));
        assertEquals(1L, sequenceNumberSendingHandler.getNextSequenceNumber(Collections.singleton(EndToEndMetricsAdapterUtil.MSG_SEQ_ERROR_SIMU_TYPE.MsgDup), 100));
    }

    @Test
    void shouldInjectMessageOutOfOrder() {
        assertEquals(1L, sequenceNumberSendingHandler.getNextSequenceNumber(Collections.emptySet()));
        assertEquals(4L, sequenceNumberSendingHandler.getNextSequenceNumber(Collections.singleton(PulsarAdapterUtil.MSG_SEQ_ERROR_SIMU_TYPE.OutOfOrder), 100));
        assertEquals(4L, sequenceNumberSendingHandler.getNextSequenceNumber(Collections.singleton(EndToEndMetricsAdapterUtil.MSG_SEQ_ERROR_SIMU_TYPE.OutOfOrder), 100));
        assertEquals(2L, sequenceNumberSendingHandler.getNextSequenceNumber(Collections.emptySet()));
        assertEquals(3L, sequenceNumberSendingHandler.getNextSequenceNumber(Collections.emptySet()));
        assertEquals(5L, sequenceNumberSendingHandler.getNextSequenceNumber(Collections.emptySet()));
@@ -60,7 +60,7 @@ class MessageSequenceNumberSendingHandlerTest {

    @Test
    void shouldInjectOneOfTheSimulatedErrorsRandomly() {
        Set<PulsarAdapterUtil.MSG_SEQ_ERROR_SIMU_TYPE> allErrorTypes = new HashSet<>(Arrays.asList(PulsarAdapterUtil.MSG_SEQ_ERROR_SIMU_TYPE.values()));
        Set<EndToEndMetricsAdapterUtil.MSG_SEQ_ERROR_SIMU_TYPE> allErrorTypes = new HashSet<>(Arrays.asList(EndToEndMetricsAdapterUtil.MSG_SEQ_ERROR_SIMU_TYPE.values()));

        assertEquals(1L, sequenceNumberSendingHandler.getNextSequenceNumber(Collections.emptySet()));
        long previousSequenceNumber = 1L;

@@ -15,7 +15,7 @@
 * under the License.
 */

package io.nosqlbench.adapter.pulsar.util;
package io.nosqlbench.engine.api.metrics;

import com.codahale.metrics.Counter;
import org.junit.jupiter.api.Test;