Madhavan Sridharan 2022-12-22 16:58:03 -05:00
commit 938cd434c8
316 changed files with 4712 additions and 8650 deletions

View File

@ -2,43 +2,107 @@ name: build
on:
push:
branches:
- main
pull_request:
jobs:
build:
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v3
name: checkout nosqlbench
- uses: actions/setup-java@v3
name: setup java
with:
java-version: '17'
java-package: jdk
architecture: x64
distribution: 'temurin'
- name: Cache Maven packages
uses: actions/cache@v1
with:
path: ~/.m2
key: ${{ runner.os }}-m2-${{ hashFiles('**/pom.xml') }}
restore-keys: ${{ runner.os }}-m2
- name: mvn-package
run: mvn package
- name: export docs
run: nb5/target/nb5 export-docs
- name: mvn-verify
run: mvn verify
- name: upload docs artifact
uses: actions/upload-artifact@v3
with:
name: exported-docs
path: exported_docs.zip
- name: Capture
if: success() || failure()
run: tar -cvf logfiles.tar [a-zA-Z]**/logs/*
- name: mvn verify
run: mvn verify
- name: Archive Test Results
if: success() || failure()
uses: actions/upload-artifact@v3
with:
name: test-results
path: logfiles.tar
docs:
needs: build
runs-on: ubuntu-20.04
steps:
- name: set git email
run: git config --global user.email "${{ secrets.NBDROID_EMAIL }}"
- name: set git username
run: git config --global user.name "${{ secrets.NBDROID_NAME }}"
- name: download exported-docs
uses: actions/download-artifact@v3
with:
name: exported-docs
- name: unzip docs
run: unzip exported_docs.zip
- run: ls -la
- name: clone nosqlbench-build-docs
env:
NBDROID_NAME: ${{ secrets.NBDROID_NAME }}
NBDROID_TOKEN: ${{ secrets.NBDROID_TOKEN }}
run: |
git clone https://${{secrets.NBDROID_NAME}}:${{secrets.NBDROID_TOKEN}}@github.com/nosqlbench/nosqlbench-build-docs.git nosqlbench-build-docs
cd nosqlbench-build-docs
echo "files listing"
find .
git remote set-url origin https://${{secrets.NBDROID_NAME}}:${{secrets.NBDROID_TOKEN}}@github.com/nosqlbench/nosqlbench-build-docs.git
git remote -v
- name: push changes
env:
NBDROID_NAME: ${{ secrets.NBDROID_NAME }}
NBDROID_TOKEN: ${{ secrets.NBDROID_TOKEN }}
run: |
set -x
find . -ls
rsync -av --delete -I --exclude '_index.md' drivers/ nosqlbench-build-docs/site/content/docs/drivers
rsync -av --delete -I --exclude '_index.md' bindings/ nosqlbench-build-docs/site/content/docs/bindings
echo "previewdocs.nosqlbench.io" > nosqlbench-build-docs/site/staticCNAME
cd nosqlbench-build-docs
git add -A
CHANGES=$(git status --porcelain 2>/dev/null| wc -l)
echo "found $CHANGES to push for doc updates"
if (( $CHANGES > 0 ))
then
git commit -m"docs update for $GITHUB_REF"
git push
fi
echo "push completed"

View File

@ -100,7 +100,7 @@
<plugin>
<groupId>org.antlr</groupId>
<artifactId>antlr4-maven-plugin</artifactId>
<version>4.10.1</version>
<version>4.11.1</version>
<configuration>
<sourceDirectory>src/main/java/io/nosqlbench/cqlgen/grammars
</sourceDirectory>

View File

@ -110,7 +110,7 @@ public abstract class Cqld4BaseOpDispenser extends BaseOpDispenser<Cqld4CqlOp, C
private Statement showstmt(Statement stmt, boolean input) {
String query = cqlFor(stmt, new StringBuilder());
logger.info("CQL(SIMPLE): " + query);
logger.info(() -> "CQL(SIMPLE): " + query);
return stmt;
}
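
The change above is the recurring pattern in this commit: eager string concatenation inside logger calls is replaced with log4j2's Supplier-accepting overloads, so the message is only built when the level is actually enabled. A minimal standalone sketch of the difference (class name and message are illustrative):

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

public class LazyLoggingSketch {
    private static final Logger logger = LogManager.getLogger(LazyLoggingSketch.class);

    public static void main(String[] args) {
        String query = "select * from example";
        // Eager: the concatenation runs even if INFO is disabled.
        logger.info("CQL(SIMPLE): " + query);
        // Lazy: the lambda runs only if INFO is enabled.
        logger.info(() -> "CQL(SIMPLE): " + query);
    }
}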

View File

@ -66,7 +66,7 @@ public class Cqld4CoreOpMapper implements OpMapper<Op> {
TypeAndTarget<CqlD4OpType, String> target = op.getTypeAndTarget(CqlD4OpType.class, String.class, "type", "stmt");
logger.info("Using " + target.enumId + " statement form for '" + op.getName());
logger.info(() -> "Using " + target.enumId + " statement form for '" + op.getName());
return switch (target.enumId) {
case raw -> new CqlD4RawStmtMapper(adapter, sessionFunc, target.targetFunction).apply(op);

View File

@ -103,7 +103,7 @@ public class Cqld4FluentGraphOpMapper implements OpMapper<Op> {
try {
loader.loadClass(candidateName);
classNames.add(candidateName);
logger.debug("added import " + candidateName);
logger.debug(() -> "added import " + candidateName);
} catch (Exception e) {
throw new RuntimeException("Class '" + candidateName + "' was not found for fluent imports.");
}

View File

@ -32,7 +32,7 @@ public class RingAnalyzer implements BundledApp {
CommandLine cli = new CommandLine(cfg);
CommandLine.ParseResult cl = cli.parseArgs(args);
logger.info("filename: " + cfg.filename);
logger.info(() -> "filename: " + cfg.filename);
return 0;
}
}

View File

@ -117,7 +117,7 @@ public class BindingsAccumulator {
accumulated.forEach((k,v) -> {
inverted.computeIfAbsent(v,def -> new HashSet<>()).add(k);
});
logger.info("computed " + accumulated.size() + " raw bindings, consisting of " + inverted.size() + " unique definitions.");
logger.info(() -> "computed " + accumulated.size() + " raw bindings, consisting of " + inverted.size() + " unique definitions.");
return accumulated;
}
}
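
The inversion above is a small computeIfAbsent idiom: the name-to-definition map is flipped into definition-to-names so that inverted.size() counts unique definitions. A self-contained sketch of the same idiom (the sample bindings are illustrative):

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class InvertBindingsSketch {
    public static void main(String[] args) {
        Map<String, String> accumulated = Map.of(
            "key1", "ToString()",
            "key2", "ToString()",
            "key3", "ToHashedUUID()");
        // Flip name->definition into definition->{names}.
        Map<String, Set<String>> inverted = new HashMap<>();
        accumulated.forEach((k, v) -> inverted.computeIfAbsent(v, def -> new HashSet<>()).add(k));
        // Prints: computed 3 raw bindings, consisting of 2 unique definitions.
        System.out.println("computed " + accumulated.size()
            + " raw bindings, consisting of " + inverted.size() + " unique definitions.");
    }
}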

View File

@ -74,7 +74,7 @@ public class CGTextTransformers implements Consumer<List<Map<String, ?>>>, Suppl
Object cfgvalues = cfgmap.get("config");
if (cfgvalues != null) {
configurable.accept((cfgvalues));
logger.info("configured transformer with " + cfgvalues);
logger.info(() -> "configured transformer with " + cfgvalues);
}
}

View File

@ -92,7 +92,7 @@ public class CGWorkloadExporter implements BundledApp {
@Override
public int applyAsInt(String[] args) {
logger.info("running CQL workload exporter with args:" + Arrays.toString(args));
logger.info(() -> "running CQL workload exporter with args:" + Arrays.toString(args));
if (args.length == 0) {
throw new RuntimeException("Usage example: PROG filepath.cql filepath.yaml");
@ -195,7 +195,7 @@ public class CGWorkloadExporter implements BundledApp {
private String loadFile(Path path) {
try {
String ddl = Files.readString(path);
logger.info("read " + ddl.length() + " character DDL file");
logger.info(() -> "read " + ddl.length() + " character DDL file");
return ddl;
} catch (IOException e) {
throw new RuntimeException(e);
@ -348,7 +348,7 @@ public class CGWorkloadExporter implements BundledApp {
blockdata.put("ops", ops);
for (CqlTable table : model.getTableDefs()) {
if (table.getClusteringColumns().size() == 0) {
logger.debug("skipping table " + table.getFullName() + " for scan since there are no clustering columns");
logger.debug(() -> "skipping table " + table.getFullName() + " for scan since there are no clustering columns");
}
ops.put(
namer.nameFor(table, "optype", "scan", "blockname", blockname),

View File

@ -75,18 +75,18 @@ public class CqlModel {
CqlKeyspaceDef ksdef = getKeyspace(statsKeyspacename);
if (ksdef !=null) {
logger.debug("setting keyspace stats for '" + statsKeyspacename + "'");
logger.debug(() -> "setting keyspace stats for '" + statsKeyspacename + "'");
ksdef.setStats(keyspaceStats);
keyspaceStats.getKeyspaceTables().forEach((tbname, tbstats) -> {
CqlTable table = ksdef.getTable(tbname);
if (table != null) {
table.setStats(tbstats);
} else {
logger.debug(" skipping table '" + statsKeyspacename + "." + tbname + ", since it was not found in the model.");
logger.debug(() -> " skipping table '" + statsKeyspacename + "." + tbname + ", since it was not found in the model.");
}
});
} else {
logger.debug(" skipping keyspace stats for '" + statsKeyspacename + "'");
logger.debug(() -> " skipping keyspace stats for '" + statsKeyspacename + "'");
}
}

View File

@ -49,7 +49,7 @@ public class CqlModelBuilder extends CqlParserBaseListener {
@Override
public void exitEveryRule(ParserRuleContext ctx) {
if ((counted++ & 0b11111111111111) == 0b10000000000000) {
logger.trace("parsed " + counted + " elements...");
logger.trace(() -> "parsed " + counted + " elements...");
}
}
@ -59,7 +59,7 @@ public class CqlModelBuilder extends CqlParserBaseListener {
ParseTree parent = node.getParent();
String errorNodeType = parent.getClass().getSimpleName();
logger.info("PARSE ERROR: " + errorNodeType + "\n" + node.getSourceInterval());
logger.info(() -> "PARSE ERROR: " + errorNodeType + "\n" + node.getSourceInterval());
super.visitErrorNode(node);
}
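
The guard in exitEveryRule above is a bit-mask sampling trick: 0b11111111111111 is 16383, so the test passes exactly once every 16384 rule exits, whenever the low 14 bits of the counter equal 0b10000000000000 (8192). That keeps trace logging rare without a modulo. A small sketch of the cadence (the loop bound is illustrative):

public class SampledTraceSketch {
    public static void main(String[] args) {
        long counted = 0;
        for (int i = 0; i < 50_000; i++) {
            // True when (counted % 16384) == 8192: fires at 8192, 24576, 40960.
            if ((counted++ & 0b11111111111111) == 0b10000000000000) {
                System.out.println("parsed " + counted + " elements...");
            }
        }
    }
}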

View File

@ -42,9 +42,9 @@ public class CqlModelParser {
public static CqlModel parse(Path path) {
try {
String ddl = Files.readString(path);
logger.info("read " + ddl.length() + " character DDL file, parsing");
logger.info(() -> "read " + ddl.length() + " character DDL file, parsing");
CqlModel parsed = parse(ddl, null);
logger.info("parsed cql model: " + parsed.getSummaryLine());
logger.info(() -> "parsed cql model: " + parsed.getSummaryLine());
return parsed;
} catch (IOException e) {
@ -85,7 +85,7 @@ public class CqlModelParser {
}
} catch (Exception e) {
logger.warn("Error while parsing flow:" + e.getMessage());
logger.warn(() -> "Error while parsing flow:" + e.getMessage());
throw e;
// return new ParseResult(e);
}

View File

@ -59,7 +59,7 @@ public class CGGenStatsInjector implements CGModelTransformer, CGTransformerConf
String histogramPath = config.get("path").toString();
if (histogramPath != null) {
if (!Files.exists(Path.of(histogramPath))) {
logger.info("No tablestats file was found. at '" + histogramPath + "'.");
logger.info(() -> "No tablestats file was found. at '" + histogramPath + "'.");
Object onmissing = config.get("onmissing");
if (onmissing==null || !String.valueOf(onmissing).toLowerCase(Locale.ROOT).equals("skip")) {
logger.error("Unable to load tablestats file from '" + histogramPath + "' because it doesn't exists, and onmissing!=skip.");

View File

@ -61,10 +61,10 @@ public class CGKeyspaceFilter implements CGModelTransformer, CGTransformerConfig
action = pattern.apply(keyspace);
switch (action) {
case add:
logger.debug("including all definitions in " + keyspace + " with inclusion pattern " + pattern);
logger.debug(() -> "including all definitions in " + keyspace + " with inclusion pattern " + pattern);
break;
case remove:
logger.info("removing all definitions in " + keyspace + " with exclusion pattern " + pattern);
logger.info(() -> "removing all definitions in " + keyspace + " with exclusion pattern " + pattern);
model.removeKeyspaceDef(keyspace);
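// falls through to the inderminate case below, which is a no-op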
case inderminate:
}

View File

@ -93,7 +93,7 @@ public class CGModelTransformers implements
Object cfgvalues = cfgmap.get("config");
if (cfgvalues !=null ) {
configurable.accept((cfgvalues));
logger.info("configured transformer with " + cfgvalues);
logger.info(() -> "configured transformer with " + cfgvalues);
}
}

View File

@ -108,7 +108,7 @@ public class CGRegexReplacer implements CGTextTransformer, CGTransformerConfigur
while (matcher.find()) {
matcher.appendReplacement(sb, replacement);
// if (matcher.end() - matcher.start() > 10000) {
// logger.info("whoops");
// logger.info(()-> "whoops");
// }
// logger.info("matcher:[" + matcher.group(0) + "][" + matcher.group(1) + "][" + matcher.group(2) + "][" + matcher.group(3));
// logger.info(String.format("\tat %2.2f%%", (float) ((float) matcher.start() / (float) s.length())));

View File

@ -52,7 +52,7 @@ public class CGUdtReplacer implements CGModelTransformer {
String[] words = typedef.split("\\W+");
for (String word : words) {
if (word.toLowerCase(Locale.ROOT).equals(searchFor.toLowerCase(Locale.ROOT))) {
logger.info("replacing '" + typedef + "' with blob");
logger.info(() -> "replacing '" + typedef + "' with blob");
coldef.setTypeDef("blob");
break;
}

View File

@ -45,7 +45,7 @@ public class UnusedTableRemover implements CGModelTransformer, CGTransformerConf
String weightedOpsSpec = table.getTableAttributes().getAttribute("weighted_ops");
double weightedOps = Double.parseDouble(weightedOpsSpec);
if (weightedOps < minimumThreshold) {
logger.info(String.format(
logger.info(() -> String.format(
"removing table " + table.getKeyspace().getName() + "." + table.getName() + " with minimum weighted_ops of %1.5f under %1.5f",
weightedOps, minimumThreshold)
);

View File

@ -72,7 +72,7 @@ public class NameCache {
if (this.path.equals(path)) {
logger.debug("mapfile unchanged '" + path + "'");
} else {
logger.info("mapfile changed from '" + this.path + "' to '" + path + "'");
logger.info(() -> "mapfile changed from '" + this.path + "' to '" + path + "'");
this.path = path;
}
}

View File

@ -28,7 +28,7 @@ import io.nosqlbench.cqlgen.parser.CqlModelParser;
import java.util.function.LongFunction;
/**
* map<A,B>
* map&lt;A,B&gt;
* where:
* A := (f1 text, f2 text, f3 int)
* AND

View File

@ -53,7 +53,7 @@ public class SequenceBlocker {
try {
task.run();
} catch (Exception e) {
logger.error("Runnable errored in SequenceBlocker: " + e.getMessage());
logger.error(() -> "Runnable errored in SequenceBlocker: " + e.getMessage());
if (errorsAreFatal) {
this.fatalError = e;
}

View File

@ -38,7 +38,8 @@ public class DiagSpace implements ActivityDefObserver, AutoCloseable {
public DiagSpace(String name, NBConfiguration cfg) {
this.cfg = cfg;
this.name = name;
logger.trace("diag space initialized as '" + name + "'");
applyConfig(cfg);
logger.trace(() -> "diag space initialized as '" + name + "'");
}
public void applyConfig(NBConfiguration cfg) {
@ -67,7 +68,7 @@ public class DiagSpace implements ActivityDefObserver, AutoCloseable {
@Override
public void close() throws Exception {
logger.debug("closing diag space '" + this.name + "'");
logger.debug(() -> "closing diag space '" + this.name + "'");
if (errorOnClose) {
throw new RuntimeException("diag space was configured to throw this error when it was configured.");
}

View File

@ -16,21 +16,22 @@
package io.nosqlbench.activitytype.diag;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.junit.jupiter.api.Test;
import java.io.PrintStream;
public class SequenceBlockerTest {
private final static Logger logger = LogManager.getLogger(SequenceBlockerTest.class);
@Test
public void await() throws Exception {
SequenceBlocker sb = new SequenceBlocker(234L, true);
new Thread(() -> sb.awaitAndRun(249L,253L, new Printer(System.out, "249-253"))).start();
new Thread(() -> sb.awaitAndRun(249L,253L, new Printer(logger, "249-253"))).start();
Thread.sleep(100);
new Thread(() -> sb.awaitAndRun(247L,249L, new Printer(System.out, "247-249"))).start();
new Thread(() -> sb.awaitAndRun(247L,249L, new Printer(logger, "247-249"))).start();
Thread.sleep(100);
new Thread(() -> sb.awaitAndRun(234L,247L, new Printer(System.out, "234-247"))).start();
new Thread(() -> sb.awaitAndRun(234L,247L, new Printer(logger, "234-247"))).start();
sb.awaitCompletion();
System.out.flush();
@ -38,17 +39,17 @@ public class SequenceBlockerTest {
private final static class Printer implements Runnable {
private final PrintStream printStream;
private final Logger logger;
private final String out;
public Printer(PrintStream printStream, String out) {
this.printStream = printStream;
public Printer(Logger logger, String out) {
this.logger = logger;
this.out = out;
}
@Override
public void run() {
printStream.println(out);
logger.debug(out);
}
}

View File

@ -45,7 +45,7 @@
<dependency>
<groupId>com.amazonaws</groupId>
<artifactId>aws-java-sdk-dynamodb</artifactId>
<version>1.12.364</version>
<version>1.12.368</version>
</dependency>
</dependencies>

View File

@ -20,6 +20,8 @@ import io.nosqlbench.adapter.http.core.HttpFormatParser;
import io.nosqlbench.adapter.http.core.HttpOp;
import io.nosqlbench.adapter.http.core.HttpOpMapper;
import io.nosqlbench.adapter.http.core.HttpSpace;
import io.nosqlbench.api.config.standard.ConfigModel;
import io.nosqlbench.api.config.standard.Param;
import io.nosqlbench.engine.api.activityimpl.OpMapper;
import io.nosqlbench.engine.api.activityimpl.uniform.BaseDriverAdapter;
import io.nosqlbench.engine.api.activityimpl.uniform.DriverAdapter;
@ -68,6 +70,11 @@ public class HttpDriverAdapter extends BaseDriverAdapter<HttpOp, HttpSpace> {
@Override
public NBConfigModel getConfigModel() {
return super.getConfigModel().add(HttpSpace.getConfigModel());
NBConfigModel thisCfgModel = ConfigModel.of(HttpDriverAdapter.class)
.add(Param.defaultTo("enable_urlencode", false)
.setDescription("Override auto-detection of URLENCODE[[ requirements in the uri field."))
.asReadOnly();
return super.getConfigModel().add(HttpSpace.getConfigModel()).add(thisCfgModel);
}
}

View File

@ -94,12 +94,12 @@ public class HttpFormatParser {
return props;
}
private final static Pattern DOENCODE = Pattern.compile("(URLENCODE|E)\\[\\[(?<data>.+?)\\]\\]");
public final static Pattern URLENCODER_PATTERN = Pattern.compile("(URLENCODE|E)\\[\\[(?<data>.+?)\\]\\]");
public static String rewriteExplicitSections(String template) {
StringBuilder sb = new StringBuilder();
Matcher matcher = DOENCODE.matcher(template);
Matcher matcher = URLENCODER_PATTERN.matcher(template);
while (matcher.find()) {
String rewrite = matcher.group("data");
String encoded = rewriteStaticsOnly(rewrite);
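
rewriteExplicitSections, shown above, scans for E[[...]] or URLENCODE[[...]] markers and substitutes each with its encoded payload. A self-contained sketch of that rewrite loop, assuming java.net.URLEncoder semantics for the encoding step (the real code delegates to rewriteStaticsOnly, which may treat dynamic sections differently):

import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class UrlEncodeRewriteSketch {
    // Same regex as URLENCODER_PATTERN above.
    private static final Pattern MARKER =
        Pattern.compile("(URLENCODE|E)\\[\\[(?<data>.+?)\\]\\]");

    public static String rewrite(String template) {
        StringBuilder sb = new StringBuilder();
        Matcher matcher = MARKER.matcher(template);
        while (matcher.find()) {
            String encoded = URLEncoder.encode(matcher.group("data"), StandardCharsets.UTF_8);
            matcher.appendReplacement(sb, Matcher.quoteReplacement(encoded));
        }
        matcher.appendTail(sb);
        return sb.toString();
    }

    public static void main(String[] args) {
        // The JSON payload is percent-encoded; the rest of the URI is untouched.
        System.out.println(rewrite("/v2/things?where=URLENCODE[[{\"a\":1}]]&page-size=3"));
    }
}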

View File

@ -29,7 +29,7 @@ import java.util.Optional;
import java.util.function.LongFunction;
import java.util.regex.Pattern;
public class HttpOpDispenser extends BaseOpDispenser<HttpOp,HttpSpace> {
public class HttpOpDispenser extends BaseOpDispenser<HttpOp, HttpSpace> {
private final LongFunction<HttpOp> opFunc;
public static final String DEFAULT_OK_BODY = ".+?";
@ -63,9 +63,27 @@ public class HttpOpDispenser extends BaseOpDispenser<HttpOp,HttpSpace> {
)
);
initBuilderF = op.enhanceFuncOptionally(initBuilderF, "uri", String.class, (b, v) -> b.uri(URI.create(v)));
Optional<LongFunction<String>> optionalUriFunc = op.getAsOptionalFunction("uri", String.class);
LongFunction<String> urifunc;
// Add URLENCODE support on the uri field if it statically or dynamically contains the E or URLENCODE pattern,
// or if the enable_urlencode op field is set to true.
if (optionalUriFunc.isPresent()) {
String testUriValue = optionalUriFunc.get().apply(0L);
if (HttpFormatParser.URLENCODER_PATTERN.matcher(testUriValue).find()
|| op.getStaticConfigOr("enable_urlencode", false)) {
initBuilderF =
op.enhanceFuncOptionally(
initBuilderF,
"uri",
String.class,
(b, v) -> b.uri(URI.create(HttpFormatParser.rewriteExplicitSections(v)))
);
}
} else {
initBuilderF = op.enhanceFuncOptionally(initBuilderF, "uri", String.class, (b, v) -> b.uri(URI.create(v)));
}
op.getOptionalStaticValue("follow_redirects",boolean.class);
op.getOptionalStaticValue("follow_redirects", boolean.class);
/**
* Add header adders for any key provided in the op template which is capitalized
@ -74,19 +92,19 @@ public class HttpOpDispenser extends BaseOpDispenser<HttpOp,HttpSpace> {
.filter(n -> n.charAt(0) >= 'A')
.filter(n -> n.charAt(0) <= 'Z')
.toList();
if (headerNames.size()>0) {
if (headerNames.size() > 0) {
for (String headerName : headerNames) {
initBuilderF = op.enhanceFunc(initBuilderF,headerName,String.class, (b,h) -> b.header(headerName,h));
initBuilderF = op.enhanceFunc(initBuilderF, headerName, String.class, (b, h) -> b.header(headerName, h));
}
}
initBuilderF = op.enhanceFuncOptionally(initBuilderF,"timeout",long.class,(b,v) -> b.timeout(Duration.ofMillis(v)));
initBuilderF = op.enhanceFuncOptionally(initBuilderF, "timeout", long.class, (b, v) -> b.timeout(Duration.ofMillis(v)));
LongFunction<HttpRequest.Builder> finalInitBuilderF = initBuilderF;
LongFunction<HttpRequest> reqF = l -> finalInitBuilderF.apply(l).build();
Pattern ok_status = op.getOptionalStaticValue("ok-status",String.class)
Pattern ok_status = op.getOptionalStaticValue("ok-status", String.class)
.map(Pattern::compile)
.orElse(Pattern.compile(DEFAULT_OK_STATUS));
@ -99,7 +117,7 @@ public class HttpOpDispenser extends BaseOpDispenser<HttpOp,HttpSpace> {
reqF.apply(cycle),
ok_status,
ok_body,
ctxF.apply(cycle),cycle
ctxF.apply(cycle), cycle
);
return opFunc;
}

View File

@ -62,7 +62,7 @@ public class HttpSpace implements NBNamedElement {
private HttpClient newClient() {
HttpClient.Builder builder = HttpClient.newBuilder();
logger.debug("follow_redirects=>" + followRedirects);
logger.debug(() -> "follow_redirects=>" + followRedirects);
builder = builder.followRedirects(this.followRedirects);
builder = builder.connectTimeout(this.timeout);
return builder.build();
@ -120,7 +120,7 @@ public class HttpSpace implements NBNamedElement {
.setDescription("Print extended diagnostics. This option has numerous" +
" possible values. See the markdown docs for details. (nb help http)")
)
.add(Param.defaultTo("timeout", Long.MAX_VALUE)
.add(Param.defaultTo("timeout", 1000L*60L*15L) // 15 minutes
.setDescription("How long to wait for requests before timeout out. Default is forever."))
.add(Param.defaultTo("hdr_digits", 4)
.setDescription("number of digits of precision to keep in HDR histograms"))

View File

@ -15,10 +15,10 @@ scenarios:
bindings:
# To enable an optional weighted set of hosts in place of a load balancer
# Examples
# single host: stargate_host=host1
# multiple hosts: stargate_host=host1,host2,host3
# multiple weighted hosts: stargate_host=host1:3,host2:7
weighted_hosts: WeightedStrings('<<stargate_host:stargate>>')
# single host: restapi_host=host1
# multiple hosts: restapi_host=host1,host2,host3
# multiple weighted hosts: restapi_host=host1:3,host2:7
weighted_hosts: WeightedStrings('<<restapi_host:stargate>>')
# http request id
request_id: ToHashedUUID(); ToString();
@ -31,7 +31,8 @@ blocks:
schema:
ops:
create-keyspace:
op: POST <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/schemas/keyspaces
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/schemas/keyspaces
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
@ -41,15 +42,19 @@ blocks:
"name": "<<keyspace:baselines>>",
"replicas": <<rf:1>>
}
drop-table:
op: DELETE <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/schemas/keyspaces/<<keyspace:baselines>>/tables/<<table:keyvalue>>
method: DELETE
uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/schemas/keyspaces/<<keyspace:baselines>>/tables/<<table:keyvalue>>
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
Content-Type: "application/json"
ok-status: "[2-4][0-9][0-9]"
create-table:
op: POST <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/schemas/keyspaces/<<keyspace:baselines>>/tables
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/schemas/keyspaces/<<keyspace:baselines>>/tables
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
@ -74,10 +79,12 @@ blocks:
},
"ifNotExists": true
}
schema-astra:
statements:
create-table-astra:
op: POST <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/schemas/keyspaces/<<keyspace:baselines>>/tables
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/schemas/keyspaces/<<keyspace:baselines>>/tables
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
@ -102,10 +109,12 @@ blocks:
},
"ifNotExists": true
}
rampup:
ops:
rampup-insert:
op: POST <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/keyspaces/<<keyspace:baselines>>/<<table:keyvalue>>
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/keyspaces/<<keyspace:baselines>>/<<table:keyvalue>>
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
@ -115,23 +124,27 @@ blocks:
"key": "{seq_key}",
"value": "{seq_value}"
}
main-read:
params:
ratio: <<read_ratio:5>>
ops:
main-select:
op: GET <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/keyspaces/<<keyspace:baselines>>/<<table:keyvalue>>/{rw_key}
method: GET
uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/keyspaces/<<keyspace:baselines>>/<<table:keyvalue>>/{rw_key}
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
Content-Type: "application/json"
ok-status: "[2-4][0-9][0-9]"
main-write:
params:
ratio: <<write_ratio:5>>
ops:
main-write:
op: POST <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/keyspaces/<<keyspace:baselines>>/<<table:keyvalue>>
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/keyspaces/<<keyspace:baselines>>/<<table:keyvalue>>
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"

View File

@ -16,10 +16,10 @@ scenarios:
bindings:
# To enable an optional weighted set of hosts in place of a load balancer
# Examples
# single host: stargate_host=host1
# multiple hosts: stargate_host=host1,host2,host3
# multiple weighted hosts: stargate_host=host1:3,host2:7
weighted_hosts: WeightedStrings('<<stargate_host:stargate>>')
# single host: restapi_host=host1
# multiple hosts: restapi_host=host1,host2,host3
# multiple weighted hosts: restapi_host=host1:3,host2:7
weighted_hosts: WeightedStrings('<<restapi_host:stargate>>')
# http request id
request_id: ToHashedUUID(); ToString();
# for ramp-up and verify
@ -39,7 +39,8 @@ blocks:
schema:
ops:
create-keyspace:
op: POST <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/schemas/keyspaces
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/schemas/keyspaces
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
@ -49,43 +50,19 @@ blocks:
"name": "<<keyspace:baselines>>",
"replicas": <<rf:1>>
}
drop-table:
method: DELETE
uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/schemas/keyspaces/<<keyspace:baselines>>/tables/<<table:tabular>>
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
Content-Type: "application/json"
ok-status: "[2-4][0-9][0-9]"
create-table:
op: POST <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/schemas/keyspaces/<<keyspace:baselines>>/tables
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
Content-Type: "application/json"
body: |
{
"name": "<<table:tabular>>",
"columnDefinitions": [
{
"name": "part",
"typeDefinition": "text"
},
{
"name": "clust",
"typeDefinition": "text"
},
{
"name": "data",
"typeDefinition": "text"
}
],
"primaryKey": {
"partitionKey": [
"part"
],
"clusteringKey": [
"clust"
]
},
"ifNotExists": true
}
schema-astra:
ops:
create-table-astra:
op: POST <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/schemas/keyspaces/<<keyspace:baselines>>/tables
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/schemas/keyspaces/<<keyspace:baselines>>/tables
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
@ -117,10 +94,12 @@ blocks:
},
"ifNotExists": true
}
rampup:
ops:
rampup-insert:
op: POST <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/keyspaces/<<keyspace:baselines>>/<<table:tabular>>
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/keyspaces/<<keyspace:baselines>>/<<table:tabular>>
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
@ -131,22 +110,26 @@ blocks:
"clust": "{clust_layout}",
"data": "{data}"
}
main-read:
params:
ratio: 5
ops:
main-select:
op: GET <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/keyspaces/<<keyspace:baselines>>/<<table:tabular>>/{part_read}&page-size={limit}
method: GET
uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/keyspaces/<<keyspace:baselines>>/<<table:tabular>>/{part_read}&page-size={limit}
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
Content-Type: "application/json"
main-write:
params:
ratio: 5
ops:
main-write:
op: POST <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/keyspaces/<<keyspace:baselines>>/<<table:tabular>>
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/keyspaces/<<keyspace:baselines>>/<<table:tabular>>
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"

View File

@ -20,10 +20,10 @@ scenarios:
bindings:
# To enable an optional weighted set of hosts in place of a load balancer
# Examples
# single host: stargate_host=host1
# multiple hosts: stargate_host=host1,host2,host3
# multiple weighted hosts: stargate_host=host1:3,host2:7
weighted_hosts: WeightedStrings('<<stargate_host:stargate>>')
# single host: restapi_host=host1
# multiple hosts: restapi_host=host1,host2,host3
# multiple weighted hosts: restapi_host=host1:3,host2:7
weighted_hosts: WeightedStrings('<<restapi_host:stargate>>')
# http request id
request_id: ToHashedUUID(); ToString();
@ -36,47 +36,91 @@ bindings:
blocks:
schema:
params:
prepared: false
ops:
create-keyspace: |
create keyspace if not exists <<keyspace:baselines>>
WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '<<rf:1>>'}
AND durable_writes = true;
create-table: |
create table if not exists <<keyspace:baselines>>.<<table:iot>> (
machine_id UUID, // source machine
sensor_name text, // sensor name
time timestamp, // timestamp of collection
sensor_value double, //
station_id UUID, // source location
data text,
PRIMARY KEY ((machine_id, sensor_name), time)
) WITH CLUSTERING ORDER BY (time DESC)
AND compression = { 'sstable_compression' : '<<compression:LZ4Compressor>>' }
AND compaction = {
'class': 'TimeWindowCompactionStrategy',
'compaction_window_size': <<expiry_minutes:60>>,
'compaction_window_unit': 'MINUTES'
};
truncate-table: |
truncate table <<keyspace:baselines>>.<<table:iot>>;
schema-astra:
ops:
create-table-astra: |
create table if not exists <<keyspace:baselines>>.<<table:iot>> (
machine_id UUID, // source machine
sensor_name text, // sensor name
time timestamp, // timestamp of collection
sensor_value double, //
station_id UUID, // source location
data text,
PRIMARY KEY ((machine_id, sensor_name), time)
) WITH CLUSTERING ORDER BY (time DESC);
create-keyspace:
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/schemas/keyspaces
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
Content-Type: "application/json"
body: |
{
"name": "<<keyspace:baselines>>",
"replicas": <<rf:1>>
}
drop-table:
method: DELETE
uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/schemas/keyspaces/<<keyspace:baselines>>/tables/<<table:iot>>
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
Content-Type: "application/json"
ok-status: "[2-4][0-9][0-9]"
create-table:
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/schemas/keyspaces/<<keyspace:baselines>>/tables
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
Content-Type: "application/json"
body: |
{
"name": "<<table:iot>>",
"columnDefinitions": [
{
"name": "machine_id",
"typeDefinition": "uuid"
},
{
"name": "sensor_name",
"typeDefinition": "text"
},
{
"name": "time",
"typeDefinition": "timestamp"
},
{
"name": "sensor_value",
"typeDefinition": "double"
},
{
"name": "station_id",
"typeDefinition": "uuid"
},
{
"name": "data",
"typeDefinition": "text"
}
],
"primaryKey": {
"partitionKey": [
"machine_id",
"sensor_name"
],
"clusteringKey": [
"time"
]
},
"tableOptions": {
"clusteringExpression": [
{
"column": "time",
"order": "DESC"
}
]
},
"ifNotExists": true
}
rampup:
ops:
rampup-insert:
op: POST <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/keyspaces/<<keyspace:baselines>>/<<table:iot>>
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/keyspaces/<<keyspace:baselines>>/<<table:iot>>
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
@ -90,22 +134,26 @@ blocks:
"station_id": "{station_id}",
"data": "{data}"
}
main-read:
params:
ratio: <<read_ratio:1>>
ops:
main-select:
op: GET <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/keyspaces/<<keyspace:baselines>>/<<table:iot>>?where=E[[{"machine_id":{"$eq":"{machine_id}"},"sensor_name":{"$eq":"{sensor_name}"}}]]&page-size=<<limit:10>>
method: GET
uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/keyspaces/<<keyspace:baselines>>/<<table:iot>>?where=URLENCODE[[{"machine_id":{"$eq":"{machine_id}"},"sensor_name":{"$eq":"{sensor_name}"}}]]&page-size=<<limit:10>>
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
Content-Type: "application/json"
main-write:
params:
ratio: <<write_ratio:9>>
ops:
main-write:
op: POST <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/keyspaces/<<keyspace:baselines>>/<<table:iot>>
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/keyspaces/<<keyspace:baselines>>/<<table:iot>>
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"

View File

@ -1,27 +1,27 @@
min_version: "4.17.15"
# nb -v run driver=http yaml=http-docsapi-crud-basic tags=phase:schema stargate_host=my_stargate_host auth_token=$AUTH_TOKEN
# nb -v run driver=http yaml=http-docsapi-crud-basic tags=phase:schema docsapi_host=my_docsapi_host auth_token=$AUTH_TOKEN
description: |
This workload emulates CRUD operations for the Stargate Documents API.
It generates a simple JSON document to be used for writes and updates.
Note that stargate_port should reflect the port where the Docs API is exposed (defaults to 8082).
Note that docsapi_port should reflect the port where the Docs API is exposed (defaults to 8180).
scenarios:
default:
schema: run driver=http tags==block:schema threads==1 cycles==UNDEF
write: run driver=http tags==name:"write.*" cycles===TEMPLATE(write-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
read: run driver=http tags==name:"read.*" cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
update: run driver=http tags==name:"update.*" cycles===TEMPLATE(update-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
delete: run driver=http tags==name:"delete.*" cycles===TEMPLATE(delete-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
write: run driver=http tags==block:"write.*" cycles===TEMPLATE(write-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
read: run driver=http tags==block:"read.*" cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
update: run driver=http tags==block:"update.*" cycles===TEMPLATE(update-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
delete: run driver=http tags==block:"delete.*" cycles===TEMPLATE(delete-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
bindings:
# To enable an optional weighted set of hosts in place of a load balancer
# Examples
# single host: stargate_host=host1
# multiple hosts: stargate_host=host1,host2,host3
# multiple weighted hosts: stargate_host=host1:3,host2:7
weighted_hosts: WeightedStrings('<<stargate_host:stargate>>')
# single host: docsapi_host=host1
# multiple hosts: docsapi_host=host1,host2,host3
# multiple weighted hosts: docsapi_host=host1:3,host2:7
weighted_hosts: WeightedStrings('<<docsapi_host:stargate>>')
# http request id
request_id: ToHashedUUID(); ToString();
@ -42,40 +42,45 @@ bindings:
blocks:
schema:
ops:
create-keyspace:
op: POST <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/schemas/keyspaces
create-namespace:
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<docsapi_port:8180>><<path_prefix:>>/v2/schemas/namespaces
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
Content-Type: "application/json"
body: |
{
"name": "<<keyspace:docs_crud_basic>>",
"name": "<<namespace:docs_crud_basic>>",
"replicas": <<rf:1>>
}
ok-status: ".*"
delete-docs-collection:
op: DELETE <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/namespaces/<<keyspace:docs_crud_basic>>/collections/<<table:docs_collection>>
method: DELETE
uri: <<protocol:http>>://{weighted_hosts}:<<docsapi_port:8180>><<path_prefix:>>/v2/namespaces/<<namespace:docs_crud_basic>>/collections/<<collection:docs_collection>>
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
ok-status: "[2-4][0-9][0-9]"
create-docs-collection:
op: POST <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/namespaces/<<keyspace:docs_crud_basic>>/collections
uri: <<protocol:http>>://{weighted_hosts}:<<docsapi_port:8180>><<path_prefix:>>/v2/namespaces/<<namespace:docs_crud_basic>>/collections
method: POST
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
Content-Type: "application/json"
body: |
{
"name": "<<table:docs_collection>>"
"name": "<<collection:docs_collection>>"
}
main:
write:
ops:
write-document:
op: PUT <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/namespaces/<<keyspace:docs_crud_basic>>/collections/<<table:docs_collection>>/{seq_key}
method: PUT
uri: <<protocol:http>>://{weighted_hosts}:<<docsapi_port:8180>><<path_prefix:>>/v2/namespaces/<<namespace:docs_crud_basic>>/collections/<<collection:docs_collection>>/{seq_key}
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
@ -105,15 +110,21 @@ blocks:
"debt": null
}
read:
ops:
read-document:
op: GET <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/namespaces/<<keyspace:docs_crud_basic>>/collections/<<table:docs_collection>>/{random_key}
method: GET
uri: <<protocol:http>>://{weighted_hosts}:<<docsapi_port:8180>><<path_prefix:>>/v2/namespaces/<<namespace:docs_crud_basic>>/collections/<<collection:docs_collection>>/{random_key}
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
ok-status: "[2-4][0-9][0-9]"
update:
ops:
update-document:
op: PUT <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/namespaces/<<keyspace:docs_crud_basic>>/collections/<<table:docs_collection>>/{random_key}
method: PUT
uri: <<protocol:http>>://{weighted_hosts}:<<docsapi_port:8180>><<path_prefix:>>/v2/namespaces/<<namespace:docs_crud_basic>>/collections/<<collection:docs_collection>>/{random_key}
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
@ -143,8 +154,11 @@ blocks:
"debt": null
}
delete:
ops:
delete-document:
op: DELETE <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/namespaces/<<keyspace:docs_crud_basic>>/collections/<<table:docs_collection>>/{seq_key}
method: DELETE
uri: <<protocol:http>>://{weighted_hosts}:<<docsapi_port:8180>><<path_prefix:>>/v2/namespaces/<<namespace:docs_crud_basic>>/collections/<<collection:docs_collection>>/{seq_key}
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"

View File

@ -3,7 +3,7 @@ min_version: "4.17.15"
description: |
This workload emulates CRUD operations for the Stargate Documents API.
It requires a data set file, where each line is a single JSON document to be used for writes and updates.
Note that stargate_port should reflect the port where the Docs API is exposed (defaults to 8082).
Note that docsapi_port should reflect the port where the Docs API is exposed (defaults to 8180).
scenarios:
default:
@ -16,10 +16,10 @@ scenarios:
bindings:
# To enable an optional weighted set of hosts in place of a load balancer
# Examples
# single host: stargate_host=host1
# multiple hosts: stargate_host=host1,host2,host3
# multiple weighted hosts: stargate_host=host1:3,host2:7
weighted_hosts: WeightedStrings('<<stargate_host:stargate>>')
# single host: docsapi_host=host1
# multiple hosts: docsapi_host=host1,host2,host3
# multiple weighted hosts: docsapi_host=host1:3,host2:7
weighted_hosts: WeightedStrings('<<docsapi_host:stargate>>')
# http request id
request_id: ToHashedUUID(); ToString();
@ -29,34 +29,38 @@ bindings:
blocks:
schema:
ops:
create-keyspace:
op: POST <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/schemas/keyspaces
create-namespace:
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<docsapi_port:8180>><<path_prefix:>>/v2/schemas/namespaces
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
Content-Type: "application/json"
body: |
{
"name": "<<keyspace:docs_crud_dataset>>",
"name": "<<namespace:docs_crud_dataset>>",
"replicas": <<rf:1>>
}
ok-status: ".*"
delete-docs-collection:
op: DELETE <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/namespaces/<<keyspace:docs_crud_dataset>>/collections/<<table:docs_collection>>
method: DELETE
uri: <<protocol:http>>://{weighted_hosts}:<<docsapi_port:8180>><<path_prefix:>>/v2/namespaces/<<namespace:docs_crud_dataset>>/collections/<<collection:docs_collection>>
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
ok-status: "[2-4][0-9][0-9]"
create-docs-collection:
op: POST <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/namespaces/<<keyspace:docs_crud_dataset>>/collections
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<docsapi_port:8180>><<path_prefix:>>/v2/namespaces/<<namespace:docs_crud_dataset>>/collections
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
Content-Type: "application/json"
body: |
{
"name": "<<table:docs_collection>>"
"name": "<<collection:docs_collection>>"
}
main:
@ -64,7 +68,8 @@ blocks:
document_json: ModuloLineToString('<<dataset_file>>');
ops:
write-document:
op: PUT <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/namespaces/<<keyspace:docs_crud_dataset>>/collections/<<table:docs_collection>>/{seq_key}
method: PUT
uri: <<protocol:http>>://{weighted_hosts}:<<docsapi_port:8180>><<path_prefix:>>/v2/namespaces/<<namespace:docs_crud_dataset>>/collections/<<collection:docs_collection>>/{seq_key}
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
@ -72,14 +77,17 @@ blocks:
body: "{document_json}"
read-document:
op: GET <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/namespaces/<<keyspace:docs_crud_dataset>>/collections/<<table:docs_collection>>/{random_key}
method: GET
uri: <<protocol:http>>://{weighted_hosts}:<<docsapi_port:8180>><<path_prefix:>>/v2/namespaces/<<namespace:docs_crud_dataset>>/collections/<<collection:docs_collection>>/{random_key}
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
ok-status: "[2-4][0-9][0-9]"
# TODO - what is the purpose of this? does it overwrite? is there a way to make sure it is actually overwriting existing documents?
update-document:
op: PUT <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/namespaces/<<keyspace:docs_crud_dataset>>/collections/<<table:docs_collection>>/{random_key}
method: PUT
uri: <<protocol:http>>://{weighted_hosts}:<<docsapi_port:8180>><<path_prefix:>>/v2/namespaces/<<namespace:docs_crud_dataset>>/collections/<<collection:docs_collection>>/{random_key}
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
@ -87,7 +95,8 @@ blocks:
body: "{document_json}"
delete-document:
op: DELETE <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/namespaces/<<keyspace:docs_crud_dataset>>/collections/<<table:docs_collection>>/{seq_key}
method: DELETE
uri: <<protocol:http>>://{weighted_hosts}:<<docsapi_port:8180>><<path_prefix:>>/v2/namespaces/<<namespace:docs_crud_dataset>>/collections/<<collection:docs_collection>>/{seq_key}
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"

View File

@ -6,7 +6,7 @@ description: |
- Schema creation is done with the Docs API; we don't use CQL because the Docs API is opinionated about schema.
- There is no instrumentation with the http driver.
- There is no async mode with the http driver.
Note that stargate_port should reflect the port where the Docs API is exposed (defaults to 8082).
Note that docsapi_port should reflect the port where the Docs API is exposed (defaults to 8180).
scenarios:
default:
@ -17,10 +17,10 @@ scenarios:
bindings:
# To enable an optional weighted set of hosts in place of a load balancer
# Examples
# single host: stargate_host=host1
# multiple hosts: stargate_host=host1,host2,host3
# multiple weighted hosts: stargate_host=host1:3,host2:7
weighted_hosts: WeightedStrings('<<stargate_host:stargate>>')
# single host: docsapi_host=host1
# multiple hosts: docsapi_host=host1,host2,host3
# multiple weighted hosts: docsapi_host=host1:3,host2:7
weighted_hosts: WeightedStrings('<<docsapi_host:stargate>>')
# http request id
request_id: ToHashedUUID(); ToString();
@ -32,40 +32,45 @@ bindings:
blocks:
schema:
ops:
create-keyspace:
op: POST <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/schemas/keyspaces
create-namespace:
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<docsapi_port:8180>><<path_prefix:>>/v2/schemas/namespaces
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
Content-Type: "application/json"
body: |
{
"name": "<<keyspace:docs_keyvalue>>",
"name": "<<namespace:docs_keyvalue>>",
"replicas": <<rf:1>>
}
ok-status: ".*"
delete-docs-collection:
op: DELETE <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/namespaces/<<keyspace:docs_keyvalue>>/collections/<<table:docs_collection>>
method: DELETE
uri: <<protocol:http>>://{weighted_hosts}:<<docsapi_port:8180>><<path_prefix:>>/v2/namespaces/<<namespace:docs_keyvalue>>/collections/<<collection:docs_collection>>
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
ok-status: "[2-4][0-9][0-9]"
create-docs-collection:
op: POST <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/namespaces/<<keyspace:docs_keyvalue>>/collections
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<docsapi_port:8180>><<path_prefix:>>/v2/namespaces/<<namespace:docs_keyvalue>>/collections
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
Content-Type: "application/json"
body: |
{
"name": "<<table:docs_collection>>"
"name": "<<collection:docs_collection>>"
}
rampup:
ops:
rampup-insert:
op: PUT <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/namespaces/<<keyspace:docs_keyvalue>>/collections/<<table:docs_collection>>/{seq_key}
method: PUT
uri: <<protocol:http>>://{weighted_hosts}:<<docsapi_port:8180>><<path_prefix:>>/v2/namespaces/<<namespace:docs_keyvalue>>/collections/<<collection:docs_collection>>/{seq_key}
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
@ -80,14 +85,16 @@ blocks:
ratio: <<read_ratio:5>>
ops:
main-select:
op: GET <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/namespaces/<<keyspace:docs_keyvalue>>/collections/<<table:docs_collection>>/{rw_key}
method: GET
uri: <<protocol:http>>://{weighted_hosts}:<<docsapi_port:8180>><<path_prefix:>>/v2/namespaces/<<namespace:docs_keyvalue>>/collections/<<collection:docs_collection>>/{rw_key}
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
ok-status: "[2-4][0-9][0-9]"
main-write:
op: PUT <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/namespaces/<<keyspace:docs_keyvalue>>/collections/<<table:docs_collection>>/{rw_key}
method: PUT
uri: <<protocol:http>>://{weighted_hosts}:<<docsapi_port:8180>><<path_prefix:>>/v2/namespaces/<<namespace:docs_keyvalue>>/collections/<<collection:docs_collection>>/{rw_key}
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"

View File

@ -2,9 +2,9 @@ min_version: "4.17.15"
description: |
This workload emulates advanced search filter combinations for the Stargate Documents API.
During the rampup phase, it generates documents, writes them to a table, and then warms up the search paths.
During the rampup phase, it generates documents, writes them to a collection, and then warms up the search paths.
During the main phase it performs various basic search filters and times their execution.
Note that stargate_port should reflect the port where the Docs API is exposed (defaults to 8082).
Note that docsapi_port should reflect the port where the Docs API is exposed (defaults to 8180).
# These are the filter combinations tested in this workload, and their names:
# in: match1 IN [0]
@ -32,10 +32,10 @@ scenarios:
bindings:
# To enable an optional weighted set of hosts in place of a load balancer
# Examples
# single host: stargate_host=host1
# multiple hosts: stargate_host=host1,host2,host3
# multiple weighted hosts: stargate_host=host1:3,host2:7
weighted_hosts: WeightedStrings('<<stargate_host:stargate>>')
# single host: docsapi_host=host1
# multiple hosts: docsapi_host=host1,host2,host3
# multiple weighted hosts: docsapi_host=host1:3,host2:7
weighted_hosts: WeightedStrings('<<docsapi_host:stargate>>')
# http request id
request_id: ToHashedUUID(); ToString();
@ -61,40 +61,45 @@ bindings:
blocks:
schema:
statements:
create-keyspace:
op: POST <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/schemas/keyspaces
create-namespace:
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<docsapi_port:8180>><<path_prefix:>>/v2/schemas/namespaces
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
Content-Type: "application/json"
body: |
{
"name": "<<keyspace:docs_search_advanced>>",
"name": "<<namespace:docs_search_advanced>>",
"replicas": <<rf:1>>
}
ok-status: ".*"
delete-docs-collection:
op: DELETE <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/namespaces/<<keyspace:docs_search_advanced>>/collections/<<table:docs_collection>>
method: DELETE
uri: <<protocol:http>>://{weighted_hosts}:<<docsapi_port:8180>><<path_prefix:>>/v2/namespaces/<<namespace:docs_search_advanced>>/collections/<<collection:docs_collection>>
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
ok-status: "[2-4][0-9][0-9]"
create-docs-collection:
op: POST <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/namespaces/<<keyspace:docs_search_advanced>>/collections
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<docsapi_port:8180>><<path_prefix:>>/v2/namespaces/<<namespace:docs_search_advanced>>/collections
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
Content-Type: "application/json"
body: |
{
"name": "<<table:docs_collection>>"
"name": "<<collection:docs_collection>>"
}
rampup:
ops:
rampup-put:
op: PUT <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/namespaces/<<keyspace:docs_search_advanced>>/collections/<<table:docs_collection>>/{seq_key}
method: PUT
uri: <<protocol:http>>://{weighted_hosts}:<<docsapi_port:8180>><<path_prefix:>>/v2/namespaces/<<namespace:docs_search_advanced>>/collections/<<collection:docs_collection>>/{seq_key}
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
@ -129,7 +134,9 @@ blocks:
# where={"match1":{"$in":[0]}}
rampup-get-in:
op: GET <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/namespaces/<<keyspace:docs_search_advanced>>/collections/<<table:docs_collection>>?where=URLENCODE[[{"match1":{"$in":[0]}}]]&page-size=<<page-size,3>>&fields=<<fields,%5b%5d>>
driver: http
method: GET
uri: <<protocol:http>>://{weighted_hosts}:<<docsapi_port:8180>><<path_prefix:>>/v2/namespaces/<<namespace:docs_search_advanced>>/collections/<<collection:docs_collection>>?where=URLENCODE[[{"match1":{"$in":[0]}}]]&page-size=<<page-size,3>>&fields=<<fields,%5b%5d>>
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
@ -137,7 +144,8 @@ blocks:
# where={"match2":{"$nin":["false"]}}
rampup-get-not-in:
op: GET <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/namespaces/<<keyspace:docs_search_advanced>>/collections/<<table:docs_collection>>?where=URLENCODE[[{"match2":{"$nin":["false"]}}]]&page-size=<<page-size,3>>&fields=<<fields,%5b%5d>>
method: GET
uri: <<protocol:http>>://{weighted_hosts}:<<docsapi_port:8180>><<path_prefix:>>/v2/namespaces/<<namespace:docs_search_advanced>>/collections/<<collection:docs_collection>>?where=URLENCODE[[{"match2":{"$nin":["false"]}}]]&page-size=<<page-size,3>>&fields=<<fields,%5b%5d>>
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
@ -145,7 +153,8 @@ blocks:
# where={"match2":{"$eq":"true"},"match3":{"$ne": false}}
rampup-get-mem-and:
op: GET <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/namespaces/<<keyspace:docs_search_advanced>>/collections/<<table:docs_collection>>?where=URLENCODE[[{"match2":{"$eq":"true"},"match3":{"$ne":false}}]]&page-size=<<page-size,3>>&fields=<<fields,%5b%5d>>
method: GET
uri: <<protocol:http>>://{weighted_hosts}:<<docsapi_port:8180>><<path_prefix:>>/v2/namespaces/<<namespace:docs_search_advanced>>/collections/<<collection:docs_collection>>?where=URLENCODE[[{"match2":{"$eq":"true"},"match3":{"$ne":false}}]]&page-size=<<page-size,3>>&fields=<<fields,%5b%5d>>
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
@ -153,7 +162,8 @@ blocks:
rampup-get-mem-or:
# where={"$or":[{"match1":{"$lt":1}},{"match3":{"$exists":true}}]}
op: GET <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/namespaces/<<keyspace:docs_search_advanced>>/collections/<<table:docs_collection>>?where=URLENCODE[[{"$or":[{"match1":{"$lt":1}},{"match3":{"$exists":true}}]}]]&page-size=<<page-size,3>>&fields=<<fields,%5b%5d>>
method: GET
uri: <<protocol:http>>://{weighted_hosts}:<<docsapi_port:8180>><<path_prefix:>>/v2/namespaces/<<namespace:docs_search_advanced>>/collections/<<collection:docs_collection>>?where=URLENCODE[[{"$or":[{"match1":{"$lt":1}},{"match3":{"$exists":true}}]}]]&page-size=<<page-size,3>>&fields=<<fields,%5b%5d>>
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
@ -161,7 +171,8 @@ blocks:
# where={"$and":[{"match1":{"$eq":0}},{"$or":[{"match2":{"$eq":"true"}},{"match3":{"$eq":false}}]}]}
rampup-get-complex1:
op: GET <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/namespaces/<<keyspace:docs_search_advanced>>/collections/<<table:docs_collection>>?where=URLENCODE[[{"$and":[{"match1":{"$eq":0}},{"$or":[{"match2":{"$eq":"true"}},{"match3":{"$eq":false}}]}]}]]&page-size=<<page-size,3>>&fields=<<fields,%5b%5d>>
method: GET
uri: <<protocol:http>>://{weighted_hosts}:<<docsapi_port:8180>><<path_prefix:>>/v2/namespaces/<<namespace:docs_search_advanced>>/collections/<<collection:docs_collection>>?where=URLENCODE[[{"$and":[{"match1":{"$eq":0}},{"$or":[{"match2":{"$eq":"true"}},{"match3":{"$eq":false}}]}]}]]&page-size=<<page-size,3>>&fields=<<fields,%5b%5d>>
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
@ -169,7 +180,8 @@ blocks:
rampup-get-complex2:
# where={"$and":[{"$or":[{"match1":{"$lte":0}},{"match2":{"$eq":"false"}}]},{"$or":[{"match2":{"$eq":"false"}},{"match3":{"$eq":true}}]}]}
op: GET <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/namespaces/<<keyspace:docs_search_advanced>>/collections/<<table:docs_collection>>?where=URLENCODE[[{"$and":[{"$or":[{"match1":{"$lte":0}},{"match2":{"$eq":"false"}}]},{"$or":[{"match2":{"$eq":"false"}},{"match3":{"$eq":true}}]}]}]]&page-size=<<page-size,3>>&fields=<<fields,%5b%5d>>
method: GET
uri: <<protocol:http>>://{weighted_hosts}:<<docsapi_port:8180>><<path_prefix:>>/v2/namespaces/<<namespace:docs_search_advanced>>/collections/<<collection:docs_collection>>?where=URLENCODE[[{"$and":[{"$or":[{"match1":{"$lte":0}},{"match2":{"$eq":"false"}}]},{"$or":[{"match2":{"$eq":"false"}},{"match3":{"$eq":true}}]}]}]]&page-size=<<page-size,3>>&fields=<<fields,%5b%5d>>
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
@ -177,7 +189,8 @@ blocks:
# where={"$or":[{"$and":[{"match1":{"$lte":0}},{"match2":{"$eq":"true"}}]},{"$and":[{"match2":{"$eq":"false"}},{"match3":{"$eq":true}}]}]}
rampup-get-complex3:
op: GET <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/namespaces/<<keyspace:docs_search_advanced>>/collections/<<table:docs_collection>>?where=URLENCODE[[{"$or":[{"$and":[{"match1":{"$lte":0}},{"match2":{"$eq":"true"}}]},{"$and":[{"match2":{"$eq":"false"}},{"match3":{"$eq":true}}]}]}]]&page-size=<<page-size,3>>&fields=<<fields,%5b%5d>>
method: GET
uri: <<protocol:http>>://{weighted_hosts}:<<docsapi_port:8180>><<path_prefix:>>/v2/namespaces/<<namespace:docs_search_advanced>>/collections/<<collection:docs_collection>>?where=URLENCODE[[{"$or":[{"$and":[{"match1":{"$lte":0}},{"match2":{"$eq":"true"}}]},{"$and":[{"match2":{"$eq":"false"}},{"match3":{"$eq":true}}]}]}]]&page-size=<<page-size,3>>&fields=<<fields,%5b%5d>>
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
@ -187,7 +200,8 @@ blocks:
ops:
# where={"match1":{"$in":[0]}}
main-get-in:
op: GET <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/namespaces/<<keyspace:docs_search_advanced>>/collections/<<table:docs_collection>>?where=URLENCODE[[{"match1":{"$in":[0]}}]]&page-size=<<page-size,3>>&fields=<<fields,%5b%5d>>
method: GET
uri: <<protocol:http>>://{weighted_hosts}:<<docsapi_port:8180>><<path_prefix:>>/v2/namespaces/<<namespace:docs_search_advanced>>/collections/<<collection:docs_collection>>?where=URLENCODE[[{"match1":{"$in":[0]}}]]&page-size=<<page-size,3>>&fields=<<fields,%5b%5d>>
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
@ -195,7 +209,8 @@ blocks:
# where={"match2":{"$nin":["false"]}}
main-get-not-in:
op: GET <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/namespaces/<<keyspace:docs_search_advanced>>/collections/<<table:docs_collection>>?where=URLENCODE[[{"match2":{"$nin":["false"]}}]]&page-size=<<page-size,3>>&fields=<<fields,%5b%5d>>
method: GET
uri: <<protocol:http>>://{weighted_hosts}:<<docsapi_port:8180>><<path_prefix:>>/v2/namespaces/<<namespace:docs_search_advanced>>/collections/<<collection:docs_collection>>?where=URLENCODE[[{"match2":{"$nin":["false"]}}]]&page-size=<<page-size,3>>&fields=<<fields,%5b%5d>>
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
@ -203,7 +218,8 @@ blocks:
# where={"match2":{"$eq":"true"},"match3":{"$ne": false}}
main-get-mem-and:
op: GET <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/namespaces/<<keyspace:docs_search_advanced>>/collections/<<table:docs_collection>>?where=URLENCODE[[{"match2":{"$eq":"true"},"match3":{"$ne":false}}]]&page-size=<<page-size,3>>&fields=<<fields,%5b%5d>>
method: GET
uri: <<protocol:http>>://{weighted_hosts}:<<docsapi_port:8180>><<path_prefix:>>/v2/namespaces/<<namespace:docs_search_advanced>>/collections/<<collection:docs_collection>>?where=URLENCODE[[{"match2":{"$eq":"true"},"match3":{"$ne":false}}]]&page-size=<<page-size,3>>&fields=<<fields,%5b%5d>>
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
@ -211,7 +227,8 @@ blocks:
# where={"$or":[{"match1":{"$lt":1}},{"match3":{"$exists":true}}]}
main-get-mem-or:
op: GET <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/namespaces/<<keyspace:docs_search_advanced>>/collections/<<table:docs_collection>>?where=URLENCODE[[{"$or":[{"match1":{"$lt":1}},{"match3":{"$exists":true}}]}]]&page-size=<<page-size,3>>&fields=<<fields,%5b%5d>>
method: GET
uri: <<protocol:http>>://{weighted_hosts}:<<docsapi_port:8180>><<path_prefix:>>/v2/namespaces/<<namespace:docs_search_advanced>>/collections/<<collection:docs_collection>>?where=URLENCODE[[{"$or":[{"match1":{"$lt":1}},{"match3":{"$exists":true}}]}]]&page-size=<<page-size,3>>&fields=<<fields,%5b%5d>>
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
@ -219,7 +236,8 @@ blocks:
# where={"$and":[{"match1":{"$eq":0}},{"$or":[{"match2":{"$eq":"true"}},{"match3":{"$eq":false}}]}]}
main-get-complex1:
op: GET <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/namespaces/<<keyspace:docs_search_advanced>>/collections/<<table:docs_collection>>?where=URLENCODE[[{"$and":[{"match1":{"$eq":0}},{"$or":[{"match2":{"$eq":"true"}},{"match3":{"$eq":false}}]}]}]]&page-size=<<page-size,3>>&fields=<<fields,%5b%5d>>
method: GET
uri: <<protocol:http>>://{weighted_hosts}:<<docsapi_port:8180>><<path_prefix:>>/v2/namespaces/<<namespace:docs_search_advanced>>/collections/<<collection:docs_collection>>?where=URLENCODE[[{"$and":[{"match1":{"$eq":0}},{"$or":[{"match2":{"$eq":"true"}},{"match3":{"$eq":false}}]}]}]]&page-size=<<page-size,3>>&fields=<<fields,%5b%5d>>
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
@ -227,7 +245,8 @@ blocks:
# where={"$and":[{"$or":[{"match1":{"$lte":0}},{"match2":{"$eq":"false"}}]},{"$or":[{"match2":{"$eq":"false"}},{"match3":{"$eq":true}}]}]}
main-get-complex2:
op: GET <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/namespaces/<<keyspace:docs_search_advanced>>/collections/<<table:docs_collection>>?where=URLENCODE[[{"$and":[{"$or":[{"match1":{"$lte":0}},{"match2":{"$eq":"false"}}]},{"$or":[{"match2":{"$eq":"false"}},{"match3":{"$eq":true}}]}]}]]&page-size=<<page-size,3>>&fields=<<fields,%5b%5d>>
method: GET
uri: <<protocol:http>>://{weighted_hosts}:<<docsapi_port:8180>><<path_prefix:>>/v2/namespaces/<<namespace:docs_search_advanced>>/collections/<<collection:docs_collection>>?where=URLENCODE[[{"$and":[{"$or":[{"match1":{"$lte":0}},{"match2":{"$eq":"false"}}]},{"$or":[{"match2":{"$eq":"false"}},{"match3":{"$eq":true}}]}]}]]&page-size=<<page-size,3>>&fields=<<fields,%5b%5d>>
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
@ -235,7 +254,8 @@ blocks:
# where={"$or":[{"$and":[{"match1":{"$lte":0}},{"match2":{"$eq":"true"}}]},{"$and":[{"match2":{"$eq":"false"}},{"match3":{"$eq":true}}]}]}
main-get-complex3:
op: GET <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/namespaces/<<keyspace:docs_search_advanced>>/collections/<<table:docs_collection>>?where=URLENCODE[[{"$or":[{"$and":[{"match1":{"$lte":0}},{"match2":{"$eq":"true"}}]},{"$and":[{"match2":{"$eq":"false"}},{"match3":{"$eq":true}}]}]}]]&page-size=<<page-size,3>>&fields=<<fields,%5b%5d>>
method: GET
uri: <<protocol:http>>://{weighted_hosts}:<<docsapi_port:8180>><<path_prefix:>>/v2/namespaces/<<namespace:docs_search_advanced>>/collections/<<collection:docs_collection>>?where=URLENCODE[[{"$or":[{"$and":[{"match1":{"$lte":0}},{"match2":{"$eq":"true"}}]},{"$and":[{"match2":{"$eq":"false"}},{"match3":{"$eq":true}}]}]}]]&page-size=<<page-size,3>>&fields=<<fields,%5b%5d>>
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"

View File

@ -2,9 +2,9 @@ min_version: "4.17.15"
description: |
This workload emulates basic search operations for the Stargate Documents API.
During the rampup phase, it generates documents, writes them to a table, and then warms up the search paths.
During the rampup phase, it generates documents, writes them to a collection, and then warms up the search paths.
During the main phase it performs various basic search filters and times their execution.
Note that stargate_port should reflect the port where the Docs API is exposed (defaults to 8082).
Note that docsapi_port should reflect the port where the Docs API is exposed (defaults to 8180).
scenarios:
schema: run driver=http tags==block:schema threads==<<threads:1>> cycles==UNDEF
@ -22,10 +22,10 @@ scenarios:
bindings:
# To enable an optional weighted set of hosts in place of a load balancer
# Examples
# single host: stargate_host=host1
# multiple hosts: stargate_host=host1,host2,host3
# multiple weighted hosts: stargate_host=host1:3,host2:7
weighted_hosts: WeightedStrings('<<stargate_host:stargate>>')
# single host: docsapi_host=host1
# multiple hosts: docsapi_host=host1,host2,host3
# multiple weighted hosts: docsapi_host=host1:3,host2:7
weighted_hosts: WeightedStrings('<<docsapi_host:stargate>>')
# http request id
request_id: ToHashedUUID(); ToString();
@ -49,40 +49,45 @@ bindings:
blocks:
schema:
ops:
create-keyspace:
op: POST <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/schemas/keyspaces
create-namespace:
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<docsapi_port:8180>><<path_prefix:>>/v2/schemas/namespaces
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
Content-Type: "application/json"
body: |
{
"name": "<<keyspace:docs_search_basic>>",
"name": "<<namespace:docs_search_basic>>",
"replicas": <<rf:1>>
}
ok-status: ".*"
delete-docs-collection:
op: DELETE <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/namespaces/<<keyspace:docs_search_basic>>/collections/<<table:docs_collection>>
method: DELETE
uri: <<protocol:http>>://{weighted_hosts}:<<docsapi_port:8180>><<path_prefix:>>/v2/namespaces/<<namespace:docs_search_basic>>/collections/<<collection:docs_collection>>
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
ok-status: "[2-4][0-9][0-9]"
create-docs-collection:
op: POST <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/namespaces/<<keyspace:docs_search_basic>>/collections
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<docsapi_port:8180>><<path_prefix:>>/v2/namespaces/<<namespace:docs_search_basic>>/collections
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
Content-Type: "application/json"
body: |
{
"name": "<<table:docs_collection>>"
"name": "<<collection:docs_collection>>"
}
rampup:
ops:
rampup-put:
op: PUT <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/namespaces/<<keyspace:docs_search_basic>>/collections/<<table:docs_collection>>/{seq_key}
method: PUT
uri: <<protocol:http>>://{weighted_hosts}:<<docsapi_port:8180>><<path_prefix:>>/v2/namespaces/<<namespace:docs_search_basic>>/collections/<<collection:docs_collection>>/{seq_key}
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
@ -117,7 +122,8 @@ blocks:
# where={"match3":{"$eq":true}}
rampup-get-eq:
op: GET <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/namespaces/<<keyspace:docs_search_basic>>/collections/<<table:docs_collection>>?where=URLENCODE[[{"match3":{"$eq":true}}]]&page-size=<<page-size,3>>&fields=<<fields,%5b%5d>>
method: GET
uri: <<protocol:http>>://{weighted_hosts}:<<docsapi_port:8180>><<path_prefix:>>/v2/namespaces/<<namespace:docs_search_basic>>/collections/<<collection:docs_collection>>?where=URLENCODE[[{"match3":{"$eq":true}}]]&page-size=<<page-size,3>>&fields=<<fields,%5b%5d>>
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
@ -125,7 +131,8 @@ blocks:
# where={"match1":{"$lt":1}}
rampup-get-lt:
op: GET <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/namespaces/<<keyspace:docs_search_basic>>/collections/<<table:docs_collection>>?where=URLENCODE[[{"match1":{"$lt":1}}]]&page-size=<<page-size,3>>&fields=<<fields,%5b%5d>>
method: GET
uri: <<protocol:http>>://{weighted_hosts}:<<docsapi_port:8180>><<path_prefix:>>/v2/namespaces/<<namespace:docs_search_basic>>/collections/<<collection:docs_collection>>?where=URLENCODE[[{"match1":{"$lt":1}}]]&page-size=<<page-size,3>>&fields=<<fields,%5b%5d>>
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
@ -133,7 +140,8 @@ blocks:
# where={"match1":{"$lt":1},"match2":{"$eq":"true"}}
rampup-get-and:
op: GET <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/namespaces/<<keyspace:docs_search_basic>>/collections/<<table:docs_collection>>?where=URLENCODE[[{"match1":{"$lt":1},"match2":{"$eq":"true"}}]]&page-size=<<page-size,3>>&fields=<<fields,%5b%5d>>
method: GET
uri: <<protocol:http>>://{weighted_hosts}:<<docsapi_port:8180>><<path_prefix:>>/v2/namespaces/<<namespace:docs_search_basic>>/collections/<<collection:docs_collection>>?where=URLENCODE[[{"match1":{"$lt":1},"match2":{"$eq":"true"}}]]&page-size=<<page-size,3>>&fields=<<fields,%5b%5d>>
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
@ -141,7 +149,8 @@ blocks:
# where={"$or":[{"match1":{"$lt":1}},{"match3":{"$eq":true}}]}
rampup-get-or:
op: GET <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/namespaces/<<keyspace:docs_search_basic>>/collections/<<table:docs_collection>>?where=URLENCODE[[{"$or":[{"match1":{"$lt":1}},{"match3":{"$eq":true}}]}]]&page-size=<<page-size,3>>&fields=<<fields,%5b%5d>>
method: GET
uri: <<protocol:http>>://{weighted_hosts}:<<docsapi_port:8180>><<path_prefix:>>/v2/namespaces/<<namespace:docs_search_basic>>/collections/<<collection:docs_collection>>?where=URLENCODE[[{"$or":[{"match1":{"$lt":1}},{"match3":{"$eq":true}}]}]]&page-size=<<page-size,3>>&fields=<<fields,%5b%5d>>
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
@ -149,7 +158,8 @@ blocks:
# where={"$or":[{"match1":{"$lt":1}},{"match2":{"$eq":"notamatch"}}]}
main-get-or-single-match:
op: GET <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/namespaces/<<keyspace:docs_search_basic>>/collections/<<table:docs_collection>>?where=URLENCODE[[{"$or":[{"match1":{"$lt":1}},{"match2":{"$eq":"notamatch"}}]}]]&page-size=<<page-size,3>>&fields=<<fields,%5b%5d>>
method: GET
uri: <<protocol:http>>://{weighted_hosts}:<<docsapi_port:8180>><<path_prefix:>>/v2/namespaces/<<namespace:docs_search_basic>>/collections/<<collection:docs_collection>>?where=URLENCODE[[{"$or":[{"match1":{"$lt":1}},{"match2":{"$eq":"notamatch"}}]}]]&page-size=<<page-size,3>>&fields=<<fields,%5b%5d>>
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
@ -159,7 +169,8 @@ blocks:
main:
ops:
main-get-eq:
op: GET <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/namespaces/<<keyspace:docs_search_basic>>/collections/<<table:docs_collection>>?where=URLENCODE[[{"match3":{"$eq":true}}]]&page-size=<<page-size,3>>&fields=<<fields,%5b%5d>>
method: GET
uri: <<protocol:http>>://{weighted_hosts}:<<docsapi_port:8180>><<path_prefix:>>/v2/namespaces/<<namespace:docs_search_basic>>/collections/<<collection:docs_collection>>?where=URLENCODE[[{"match3":{"$eq":true}}]]&page-size=<<page-size,3>>&fields=<<fields,%5b%5d>>
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
@ -167,7 +178,8 @@ blocks:
# where={"match1":{"$lt":1}}
main-get-lt:
op: GET <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/namespaces/<<keyspace:docs_search_basic>>/collections/<<table:docs_collection>>?where=URLENCODE[[{"match1":{"$lt":1}}]]&page-size=<<page-size,3>>&fields=<<fields,%5b%5d>>
method: GET
uri: <<protocol:http>>://{weighted_hosts}:<<docsapi_port:8180>><<path_prefix:>>/v2/namespaces/<<namespace:docs_search_basic>>/collections/<<collection:docs_collection>>?where=URLENCODE[[{"match1":{"$lt":1}}]]&page-size=<<page-size,3>>&fields=<<fields,%5b%5d>>
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
@ -175,7 +187,8 @@ blocks:
# where={"match1":{"$lt":1},"match2":{"$eq":"true"}}
main-get-and:
op: GET <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/namespaces/<<keyspace:docs_search_basic>>/collections/<<table:docs_collection>>?where=URLENCODE[[{"match1":{"$lt":1},"match2":{"$eq":"true"}}]]&page-size=<<page-size,3>>&fields=<<fields,%5b%5d>>
method: GET
uri: <<protocol:http>>://{weighted_hosts}:<<docsapi_port:8180>><<path_prefix:>>/v2/namespaces/<<namespace:docs_search_basic>>/collections/<<collection:docs_collection>>?where=URLENCODE[[{"match1":{"$lt":1},"match2":{"$eq":"true"}}]]&page-size=<<page-size,3>>&fields=<<fields,%5b%5d>>
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
@ -183,7 +196,8 @@ blocks:
# where={"$or":[{"match1":{"$lt":1}},{"match3":{"$eq":true}}]}
main-get-or:
op: GET <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/namespaces/<<keyspace:docs_search_basic>>/collections/<<table:docs_collection>>?where=URLENCODE[[{"$or":[{"match1":{"$lt":1}},{"match3":{"$eq":true}}]}]]&page-size=<<page-size,3>>&fields=<<fields,%5b%5d>>
method: GET
uri: <<protocol:http>>://{weighted_hosts}:<<docsapi_port:8180>><<path_prefix:>>/v2/namespaces/<<namespace:docs_search_basic>>/collections/<<collection:docs_collection>>?where=URLENCODE[[{"$or":[{"match1":{"$lt":1}},{"match3":{"$eq":true}}]}]]&page-size=<<page-size,3>>&fields=<<fields,%5b%5d>>
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
@ -191,7 +205,8 @@ blocks:
# where={"$or":[{"match1":{"$lt":1}},{"match2":{"$eq":"notamatch"}}]}
main-get-or-single-match:
op: GET <<protocol:http>>://{weighted_hosts}:<<stargate_port:8082>><<path_prefix:>>/v2/namespaces/<<keyspace:docs_search_basic>>/collections/<<table:docs_collection>>?where=URLENCODE[[{"$or":[{"match1":{"$lt":1}},{"match2":{"$eq":"notamatch"}}]}]]&page-size=<<page-size,3>>&fields=<<fields,%5b%5d>>
method: GET
uri: <<protocol:http>>://{weighted_hosts}:<<docsapi_port:8180>><<path_prefix:>>/v2/namespaces/<<namespace:docs_search_basic>>/collections/<<collection:docs_collection>>?where=URLENCODE[[{"$or":[{"match1":{"$lt":1}},{"match2":{"$eq":"notamatch"}}]}]]&page-size=<<page-size,3>>&fields=<<fields,%5b%5d>>
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"

View File

@ -5,7 +5,7 @@ description: |
This should be identical to the cql variant except for:
- There is no instrumentation with the http driver.
- There is no async mode with the http driver.
Note that stargate_port should reflect the port where GraphQL API is exposed (defaults to 8080).
Note that graphql_port should reflect the port where GraphQL API is exposed (defaults to 8080).
scenarios:
default:
@ -16,10 +16,10 @@ scenarios:
bindings:
# To enable an optional weighted set of hosts in place of a load balancer
# Examples
# single host: stargate_host=host1
# multiple hosts: stargate_host=host1,host2,host3
# multiple weighted hosts: stargate_host=host1:3,host2:7
weighted_hosts: WeightedStrings('<<stargate_host:stargate>>')
# single host: graphql_host=host1
# multiple hosts: graphql_host=host1,host2,host3
# multiple weighted hosts: graphql_host=host1:3,host2:7
weighted_hosts: WeightedStrings('<<graphql_host:stargate>>')
# http request id
request_id: ToHashedUUID(); ToString();
@ -32,59 +32,58 @@ blocks:
schema:
ops:
create-keyspace:
op: POST <<protocol:http>>://{weighted_hosts}:<<stargate_port:8080>><<path_prefix:>>/graphql-schema
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<graphql_port:8080>><<path_prefix:>>/graphql-schema
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
Content-Type: "application/json"
body: |
{"query":"mutation {\n createKeyspace(name:\"<<keyspace:gqlcf_keyvalue>>\", replicas: <<rf:1>>, ifNotExists: true)\n}"}
create-table:
op: POST <<protocol:http>>://{weighted_hosts}:<<stargate_port:8080>><<path_prefix:>>/graphql-schema
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
Content-Type: "application/json"
body: |
{"query":"mutation {\n createTable(\n keyspaceName: \"<<keyspace:gqlcf_keyvalue>>\"\n tableName: \"<<table:keyvalue>>\"\n partitionKeys: [{ name: \"key\", type: { basic: TEXT } }]\n values: [{ name: \"value\", type: { basic: TEXT } }]\n ifNotExists: true\n )\n}"}
schema-astra:
ops:
create-table-astra:
op: POST <<protocol:http>>://{weighted_hosts}:<<stargate_port:8080>><<path_prefix:>>/graphql-schema
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<graphql_port:8080>><<path_prefix:>>/graphql-schema
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
Content-Type: "application/json"
body: |
{"query":"mutation {\n createTable(\n keyspaceName: \"<<keyspace:gqlcf_keyvalue>>\"\n tableName: \"<<table:keyvalue>>\"\n partitionKeys: [{ name: \"key\", type: { basic: TEXT } }]\n values: [{ name: \"value\", type: { basic: TEXT } }]\n ifNotExists: true\n )\n}"}
rampup:
ops:
rampup-insert:
op: POST <<protocol:http>>://{weighted_hosts}:<<stargate_port:8080>><<path_prefix:>>/graphql/<<keyspace:gqlcf_keyvalue>>
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<graphql_port:8080>><<path_prefix:>>/graphql/<<keyspace:gqlcf_keyvalue>>
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
Content-Type: "application/json"
body: |
{"query":"mutation {\n insert<<table:keyvalue>>( value: {key: \"{seq_key}\", value: \"{seq_value}\",}) {value {key, value}}}"}
main-read:
params:
ratio: <<read_ratio:5>>
ops:
main-select:
op: POST <<protocol:http>>://{weighted_hosts}:<<stargate_port:8080>><<path_prefix:>>/graphql/<<keyspace:gqlcf_keyvalue>>
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<graphql_port:8080>><<path_prefix:>>/graphql/<<keyspace:gqlcf_keyvalue>>
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
Content-Type: "application/json"
body: |
{"query":"{<<table:keyvalue>>(value: {key: \"{rw_key}\"}) {values {key, value}}}"}
main-write:
params:
ratio: <<write_ratio:5>>
ops:
main-write:
op: POST <<protocol:http>>://{weighted_hosts}:<<stargate_port:8080>><<path_prefix:>>/graphql/<<keyspace:gqlcf_keyvalue>>
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<graphql_port:8080>><<path_prefix:>>/graphql/<<keyspace:gqlcf_keyvalue>>
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"

View File

@ -6,7 +6,7 @@ description: |
- We need to URLEncode the `data` and `data_write` bindings because newlines can't be sent in REST calls.
- There is no instrumentation with the http driver.
- There is no async mode with the http driver.
Note that stargate_port should reflect the port where GraphQL API is exposed (defaults to 8080).
Note that graphql_port should reflect the port where GraphQL API is exposed (defaults to 8080).
scenarios:
default:
@ -17,10 +17,10 @@ scenarios:
bindings:
# To enable an optional weighted set of hosts in place of a load balancer
# Examples
# single host: stargate_host=host1
# multiple hosts: stargate_host=host1,host2,host3
# multiple weighted hosts: stargate_host=host1:3,host2:7
weighted_hosts: WeightedStrings('<<stargate_host:stargate>>')
# single host: graphql_host=host1
# multiple hosts: graphql_host=host1,host2,host3
# multiple weighted hosts: graphql_host=host1:3,host2:7
weighted_hosts: WeightedStrings('<<graphql_host:stargate>>')
# http request id
request_id: ToHashedUUID(); ToString();
# for ramp-up and verify
@ -42,59 +42,58 @@ blocks:
phase: schema
ops:
create-keyspace:
op: POST <<protocol:http>>://{weighted_hosts}:<<stargate_port:8080>><<path_prefix:>>/graphql-schema
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<graphql_port:8080>><<path_prefix:>>/graphql-schema
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
Content-Type: "application/json"
body: |
{"query":"mutation {\n createKeyspace(name:\"<<keyspace:gqlcf_tabular>>\", replicas: <<rf:1>>, ifNotExists: true)\n}"}
create-table:
op: POST <<protocol:http>>://{weighted_hosts}:<<stargate_port:8080>><<path_prefix:>>/graphql-schema
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
Content-Type: "application/json"
body: |
{"query":"mutation {\n createTable(\n keyspaceName: \"<<keyspace:gqlcf_tabular>>\"\n tableName: \"<<table:tabular>>\"\n partitionKeys: [{ name: \"part\", type: { basic: TEXT } }]\n clusteringKeys: [{ name: \"clust\", type: { basic: TEXT } }]\n values: [{ name: \"data\", type: { basic: TEXT } }]\n ifNotExists: true\n )\n}\n"}
schema-astra:
ops:
create-table-astra:
op: POST <<protocol:http>>://{weighted_hosts}:<<stargate_port:8080>><<path_prefix:>>/graphql-schema
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<graphql_port:8080>><<path_prefix:>>/graphql-schema
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
Content-Type: "application/json"
body: |
{"query":"mutation {\n createTable(\n keyspaceName: \"<<keyspace:gqlcf_tabular>>\"\n tableName: \"<<table:tabular>>\"\n partitionKeys: [{ name: \"part\", type: { basic: TEXT } }]\n clusteringKeys: [{ name: \"clust\", type: { basic: TEXT } }]\n values: [{ name: \"data\", type: { basic: TEXT } }]\n ifNotExists: true\n )\n}\n"}
rampup:
ops:
rampup-insert:
op: POST <<protocol:http>>://{weighted_hosts}:<<stargate_port:8080>><<path_prefix:>>/graphql/<<keyspace:gqlcf_tabular>>
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<graphql_port:8080>><<path_prefix:>>/graphql/<<keyspace:gqlcf_tabular>>
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
Content-Type: "application/json"
body: |
{"query":"mutation {\n insert<<table:tabular>>( value: {part: \"{part_layout}\", clust: \"{clust_layout}\", data: \"{data}\"}) {value {part, clust, data}}}"}
main-read:
params:
ratio: 5
ops:
main-select:
op: POST <<protocol:http>>://{weighted_hosts}:<<stargate_port:8080>><<path_prefix:>>/graphql/<<keyspace:gqlcf_tabular>>
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<graphql_port:8080>><<path_prefix:>>/graphql/<<keyspace:gqlcf_tabular>>
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
Content-Type: "application/json"
body: |
{"query":"{<<table:tabular>>(value: {part: \"{part_read}\"}, options: { pageSize: <<limit:10>> }) {values {part, clust, data}}}"}
main-write:
params:
ratio: 5
ops:
main-write:
op: POST <<protocol:http>>://{weighted_hosts}:<<stargate_port:8080>><<path_prefix:>>/graphql/<<keyspace:gqlcf_tabular>>
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<graphql_port:8080>><<path_prefix:>>/graphql/<<keyspace:gqlcf_tabular>>
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"

View File

@ -9,7 +9,7 @@ description: |
- Schema creation is cql because of the inability to define a compaction strategy in the graphql API.
- There is no instrumentation with the http driver.
- There is no async mode with the http driver.
Note that stargate_port should reflect the port where GraphQL API is exposed (defaults to 8080).
Note that graphql_port should reflect the port where GraphQL API is exposed (defaults to 8080).
scenarios:
default:
@ -20,10 +20,10 @@ scenarios:
bindings:
# To enable an optional weighted set of hosts in place of a load balancer
# Examples
# single host: stargate_host=host1
# multiple hosts: stargate_host=host1,host2,host3
# multiple weighted hosts: stargate_host=host1:3,host2:7
weighted_hosts: WeightedStrings('<<stargate_host:stargate>>')
# single host: graphql_host=host1
# multiple hosts: graphql_host=host1,host2,host3
# multiple weighted hosts: graphql_host=host1:3,host2:7
weighted_hosts: WeightedStrings('<<graphql_host:stargate>>')
# http request id
request_id: ToHashedUUID(); ToString();
@ -39,70 +39,59 @@ blocks:
params:
prepared: false
ops:
create-keyspace: |
create keyspace if not exists <<keyspace:gqlcf_iot>>
WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '<<rf:1>>'}
AND durable_writes = true;
create-table : |
create table if not exists <<keyspace:gqlcf_iot>>.<<table:iot>> (
machine_id UUID, // source machine
sensor_name text, // sensor name
time timestamp, // timestamp of collection
sensor_value double, //
station_id UUID, // source location
data text,
PRIMARY KEY ((machine_id, sensor_name), time)
) WITH CLUSTERING ORDER BY (time DESC)
AND compression = { 'sstable_compression' : '<<compression:LZ4Compressor>>' }
AND compaction = {
'class': 'TimeWindowCompactionStrategy',
'compaction_window_size': <<expiry_minutes:60>>,
'compaction_window_unit': 'MINUTES'
};
truncate-table: |
truncate table <<keyspace:gqlcf_iot>>.<<table:iot>>;
schema-astra:
params:
prepared: false
ops:
create-table-astra : |
create table if not exists <<keyspace:gqlcf_iot>>.<<table:iot>> (
machine_id UUID, // source machine
sensor_name text, // sensor name
time timestamp, // timestamp of collection
sensor_value double, //
station_id UUID, // source location
data text,
PRIMARY KEY ((machine_id, sensor_name), time)
) WITH CLUSTERING ORDER BY (time DESC);
create-keyspace:
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<graphql_port:8080>><<path_prefix:>>/graphql-schema
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
Content-Type: "application/json"
body: |
{"query":"mutation {\n createKeyspace(name:\"<<keyspace:gqlcf_iot>>\", replicas: <<rf:1>>, ifNotExists: true)\n}"}
create-table:
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<graphql_port:8080>><<path_prefix:>>/graphql-schema
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
Content-Type: "application/json"
body: |
{"query":"mutation {\n createTable(\n keyspaceName: \"<<keyspace:gqlcf_iot>>\"\n tableName: \"<<table:iot>>\"\n partitionKeys: [{ name: \"machine_id\", type: { basic: UUID } }, { name: \"sensor_name\", type: { basic: TEXT } }]\n clusteringKeys: [{ name: \"time\", type: { basic: TIMESTAMP }, order: \"DESC\" }]\n values: [{ name: \"sensor_value\", type: { basic: FLOAT } }, { name: \"station_id\", type: { basic: UUID } }, { name: \"data\", type: { basic: TEXT } }]\n ifNotExists: true\n )\n}\n"}
rampup:
ops:
rampup-insert:
op: POST <<protocol:http>>://{weighted_hosts}:<<stargate_port:8080>><<path_prefix:>>/graphql/<<keyspace:gqlcf_iot>>
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<graphql_port:8080>><<path_prefix:>>/graphql/<<keyspace:gqlcf_iot>>
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
Content-Type: "application/json"
body: |
{"query":"mutation insertReading {\n reading: insert<<table:iot>>( value: {machine_id: \"{machine_id}\", sensor_name: \"{sensor_name}\", time: \"{time}\", data: \"{data}\", sensor_value: {sensor_value}, station_id: \"{station_id}\"}) {value {machine_id, sensor_name, time, data, sensor_value, station_id}}}"}
main-read:
params:
ratio: <<read_ratio:1>>
ops:
main-select:
op: POST <<protocol:http>>://{weighted_hosts}:<<stargate_port:8080>><<path_prefix:>>/graphql/<<keyspace:gqlcf_iot>>
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<graphql_port:8080>><<path_prefix:>>/graphql/<<keyspace:gqlcf_iot>>
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
Content-Type: "application/json"
body: |
{"query":"query readings {<<table:iot>>(value: {machine_id: \"{machine_id}\",sensor_name: \"{sensor_name}\"}, options: { pageSize: <<limit:10>> }) {values {machine_id, sensor_name, time, data, sensor_value, station_id}}}"}
main-write:
params:
ratio: <<write_ratio:9>>
ops:
main-write:
op: POST <<protocol:http>>://{weighted_hosts}:<<stargate_port:8080>><<path_prefix:>>/graphql/<<keyspace:gqlcf_iot>>
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<graphql_port:8080>><<path_prefix:>>/graphql/<<keyspace:gqlcf_iot>>
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"

View File

@ -9,7 +9,7 @@ description: |
- Schema creation is GraphQL-first; we don't use cql and thus can only create schema with limited options.
- There is no instrumentation with the http driver.
- There is no async mode with the http driver.
Note that stargate_port should reflect the port where GraphQL API V2 is exposed (defaults to 8080).
Note that graphql_port should reflect the port where GraphQL API V2 is exposed (defaults to 8080).
scenarios:
default:
@ -20,10 +20,10 @@ scenarios:
bindings:
# To enable an optional weighted set of hosts in place of a load balancer
# Examples
# single host: stargate_host=host1
# multiple hosts: stargate_host=host1,host2,host3
# multiple weighted hosts: stargate_host=host1:3,host2:7
weighted_hosts: WeightedStrings('<<stargate_host:stargate>>')
# single host: graphql_host=host1
# multiple hosts: graphql_host=host1,host2,host3
# multiple weighted hosts: graphql_host=host1:3,host2:7
weighted_hosts: WeightedStrings('<<graphql_host:stargate>>')
# http request id
request_id: ToHashedUUID(); ToString();
seq_key: Mod(<<keycount:10000000>>); ToString() -> String
@ -35,7 +35,8 @@ blocks:
schema:
ops:
create-keyspace:
op: POST <<protocol:http>>://{weighted_hosts}:<<stargate_port:8080>><<path_prefix:>>/graphql-schema
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<graphql_port:8080>><<path_prefix:>>/graphql-schema
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
@ -44,20 +45,10 @@ blocks:
{
"query":"mutation {\n createKeyspace(name: \"<<keyspace:gqlsf_keyvalue>>\", replicas: <<rf:1>>, ifNotExists: true) \n}\n"
}
create-gql-schema:
op: POST <<protocol:http>>://{weighted_hosts}:<<stargate_port:8080>><<path_prefix:>>/graphql-admin
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
Content-Type: "application/json"
body: |
{
"query":"mutation {\n deploySchema(keyspace: \"<<keyspace:gqlsf_keyvalue>>\", schema: \"\"\"\n type KeyValue @cql_input {\n key: String! @cql_column(partitionKey: true)\n value: String!\n }\n type Query {\n getKeyValue(\n key: String!,\n ): KeyValue\n }\n type Mutation {\n \t\tinsertKeyValue(keyValue: KeyValueInput): KeyValue\n }\n \"\"\") {\n version\n }\n}\n"
}
schema-astra:
ops:
create-gql-schema:
op: POST <<protocol:http>>://{weighted_hosts}:<<stargate_port:8080>><<path_prefix:>>/graphql-admin
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<graphql_port:8080>><<path_prefix:>>/graphql-admin
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
@ -66,10 +57,12 @@ blocks:
{
"query":"mutation {\n deploySchema(keyspace: \"<<keyspace:gqlsf_keyvalue>>\", schema: \"\"\"\n type KeyValue @cql_input {\n key: String! @cql_column(partitionKey: true)\n value: String!\n }\n type Query {\n getKeyValue(\n key: String!,\n ): KeyValue\n }\n type Mutation {\n \t\tinsertKeyValue(keyValue: KeyValueInput): KeyValue\n }\n \"\"\") {\n version\n }\n}\n"
}
rampup:
ops:
rampup-insert:
op: POST <<protocol:http>>://{weighted_hosts}:<<stargate_port:8080>><<path_prefix:>>/graphql/<<keyspace:gqlsf_keyvalue>>
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<graphql_port:8080>><<path_prefix:>>/graphql/<<keyspace:gqlsf_keyvalue>>
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
@ -78,12 +71,14 @@ blocks:
{
"query":"mutation {\n insertKeyValue(keyValue: {key: \"{seq_key}\", value: \"{seq_value}\"}) {\n key\n value\n }\n}\n"
}
main-read:
params:
ratio: <<read_ratio:1>>
ops:
main-select:
op: POST <<protocol:http>>://{weighted_hosts}:<<stargate_port:8080>><<path_prefix:>>/graphql/<<keyspace:gqlsf_keyvalue>>
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<graphql_port:8080>><<path_prefix:>>/graphql/<<keyspace:gqlsf_keyvalue>>
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
@ -92,12 +87,14 @@ blocks:
{
"query":"{\n getKeyValue(key: \"rw_key\") {\n key\n value\n }\n}\n"
}
main-write:
params:
ratio: <<write_ratio:9>>
ops:
main-write:
op: POST <<protocol:http>>://{weighted_hosts}:<<stargate_port:8080>><<path_prefix:>>/graphql/<<keyspace:gqlsf_keyvalue>>
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<graphql_port:8080>><<path_prefix:>>/graphql/<<keyspace:gqlsf_keyvalue>>
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"

View File

@ -11,7 +11,7 @@ description: |
- Schema creation is GraphQL-first; we don't use cql and thus can only create schema with limited options.
- There is no instrumentation with the http driver.
- There is no async mode with the http driver.
Note that stargate_port should reflect the port where GraphQL API V2 is exposed (defaults to 8080).
Note that graphql_port should reflect the port where GraphQL API V2 is exposed (defaults to 8080).
scenarios:
default:
@ -22,10 +22,10 @@ scenarios:
bindings:
# To enable an optional weighted set of hosts in place of a load balancer
# Examples
# single host: stargate_host=host1
# multiple hosts: stargate_host=host1,host2,host3
# multiple weighted hosts: stargate_host=host1:3,host2:7
weighted_hosts: WeightedStrings('<<stargate_host:stargate>>')
# single host: graphql_host=host1
# multiple hosts: graphql_host=host1,host2,host3
# multiple weighted hosts: graphql_host=host1:3,host2:7
weighted_hosts: WeightedStrings('<<graphql_host:stargate>>')
# http request id
request_id: ToHashedUUID(); ToString();
# for ramp-up and verify
@ -45,7 +45,8 @@ blocks:
schema:
ops:
create-keyspace:
op: POST <<protocol:http>>://{weighted_hosts}:<<stargate_port:8080>><<path_prefix:>>/graphql-schema
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<graphql_port:8080>><<path_prefix:>>/graphql-schema
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
@ -54,20 +55,10 @@ blocks:
{
"query":"mutation {\n createKeyspace(name: \"<<keyspace:gqlsf_tabular>>\", replicas: <<rf:1>>, ifNotExists: true) \n}\n"
}
create-gql-schema:
op: POST <<protocol:http>>://{weighted_hosts}:<<stargate_port:8080>><<path_prefix:>>/graphql-admin
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
Content-Type: "application/json"
body: |
{
"query":"mutation {\n deploySchema(keyspace: \"<<keyspace:gqlsf_tabular>>\", schema: \"\"\"\n type Tabular @cql_input {\n part: String! @cql_column(partitionKey: true)\n clust: String! @cql_column(partitionKey: true)\n data: String! \n }\n type SelectTabularResult @cql_payload {\n \t\tdata: [Tabular]\n \t\tpagingState: String\n }\n type Query {\n getTabulars(\n part: String!,\n clust: String!,\n pagingState: String @cql_pagingState\n ): SelectTabularResult @cql_select(pageSize: 10)\n }\n type Mutation {\n \t\tinsertTabular(tabular: TabularInput): Tabular\n }\n \"\"\") {\n version\n }\n}\n"
}
schema-astra:
ops:
create-gql-schema:
op: POST <<protocol:http>>://{weighted_hosts}:<<stargate_port:8080>><<path_prefix:>>/graphql-admin
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<graphql_port:8080>><<path_prefix:>>/graphql-admin
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
@ -76,10 +67,12 @@ blocks:
{
"query":"mutation {\n deploySchema(keyspace: \"<<keyspace:gqlsf_tabular>>\", schema: \"\"\"\n type Tabular @cql_input {\n part: String! @cql_column(partitionKey: true)\n clust: String! @cql_column(partitionKey: true)\n data: String! \n }\n type SelectTabularResult @cql_payload {\n \t\tdata: [Tabular]\n \t\tpagingState: String\n }\n type Query {\n getTabulars(\n part: String!,\n clust: String!,\n pagingState: String @cql_pagingState\n ): SelectTabularResult @cql_select(pageSize: 10)\n }\n type Mutation {\n \t\tinsertTabular(tabular: TabularInput): Tabular\n }\n \"\"\") {\n version\n }\n}\n"
}
rampup:
ops:
rampup-insert:
op: POST <<protocol:http>>://{weighted_hosts}:<<stargate_port:8080>><<path_prefix:>>/graphql/<<keyspace:gqlsf_tabular>>
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<graphql_port:8080>><<path_prefix:>>/graphql/<<keyspace:gqlsf_tabular>>
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
@ -88,12 +81,14 @@ blocks:
{
"query":"mutation {\n insertTabular(tabular: {part: \"{part_layout}\", clust: \"{clust_layout}\", data: \"{data}\"}) {\n part\n clust\n data\n }\n}\n"
}
main-read:
params:
ratio: <<read_ratio:1>>
ops:
main-select:
op: POST <<protocol:http>>://{weighted_hosts}:<<stargate_port:8080>><<path_prefix:>>/graphql/<<keyspace:gqlsf_tabular>>
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<graphql_port:8080>><<path_prefix:>>/graphql/<<keyspace:gqlsf_tabular>>
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
@ -102,12 +97,14 @@ blocks:
{
"query":"{\n getTabulars(part: \"{part_read}\", clust: \"{clust_read}\") {\n data {\n part\n clust\n data\n }\n pagingState\n }\n}\n"
}
main-write:
params:
ratio: <<write_ratio:9>>
ops:
main-write:
op: POST <<protocol:http>>://{weighted_hosts}:<<stargate_port:8080>><<path_prefix:>>/graphql/<<keyspace:gqlsf_tabular>>
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<graphql_port:8080>><<path_prefix:>>/graphql/<<keyspace:gqlsf_tabular>>
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"

View File

@ -15,7 +15,7 @@ description: |
- Schema creation is GraphQL-first; we don't use cql and thus can only create schema with limited options.
- There is no instrumentation with the http driver.
- There is no async mode with the http driver.
Note that stargate_port should reflect the port where GraphQL API V2 is exposed (defaults to 8080).
Note that graphql_port should reflect the port where GraphQL API V2 is exposed (defaults to 8080).
scenarios:
default:
@ -26,10 +26,10 @@ scenarios:
bindings:
# To enable an optional weighted set of hosts in place of a load balancer
# Examples
# single host: stargate_host=host1
# multiple hosts: stargate_host=host1,host2,host3
# multiple weighted hosts: stargate_host=host1:3,host2:7
weighted_hosts: WeightedStrings('<<stargate_host:stargate>>')
# single host: graphql_host=host1
# multiple hosts: graphql_host=host1,host2,host3
# multiple weighted hosts: graphql_host=host1:3,host2:7
weighted_hosts: WeightedStrings('<<graphql_host:stargate>>')
# http request id
request_id: ToHashedUUID(); ToString();
machine_id: Mod(<<sources:10000>>); ToHashedUUID() -> java.util.UUID
@ -43,7 +43,8 @@ blocks:
schema:
ops:
create-keyspace:
op: POST <<protocol:http>>://{weighted_hosts}:<<stargate_port:8080>><<path_prefix:>>/graphql-schema
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<graphql_port:8080>><<path_prefix:>>/graphql-schema
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
@ -52,8 +53,10 @@ blocks:
{
"query":"mutation {\n createKeyspace(name: \"<<keyspace:gqlsf_timeseries>>\", replicas: <<rf:1>>, ifNotExists: true) \n}\n"
}
create-gql-schema:
op: POST <<protocol:http>>://{weighted_hosts}:<<stargate_port:8080>><<path_prefix:>>/graphql-admin
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<graphql_port:8080>><<path_prefix:>>/graphql-admin
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
@ -62,24 +65,12 @@ blocks:
{
"query":"mutation {\n deploySchema(keyspace: \"<<keyspace:gqlsf_timeseries>>\", schema: \"\"\"\n type Iot @cql_input {\n machine_id: Uuid! @cql_column(partitionKey: true)\n sensor_name: String! @cql_column(partitionKey: true)\n time: Timestamp! @cql_column(clusteringOrder: DESC)\n sensor_value: Float!\n \tstation_id: Uuid!\n data: String!\n }\n type SelectIotResult @cql_payload {\n \t\tdata: [Iot]\n \t\tpagingState: String\n }\n type Query {\n getIots(\n machine_id: Uuid!,\n sensor_name: String!,\n pagingState: String @cql_pagingState\n ): SelectIotResult @cql_select(pageSize: 10)\n }\n type Mutation {\n \t\tinsertIot(iot: IotInput): Iot\n }\n \"\"\") {\n version\n }\n}\n"
}
schema-astra:
ops:
create-gql-schema:
op: POST <<protocol:http>>://{weighted_hosts}:<<stargate_port:8080>><<path_prefix:>>/graphql-admin
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
Content-Type: "application/json"
body: |
{
"query":"mutation {\n deploySchema(namespace: \"<<keyspace:gqlsf_timeseries>>\", schema: \"\"\"\n type Iot @cql_input {\n machine_id: Uuid! @cql_column(partitionKey: true)\n sensor_name: String! @cql_column(partitionKey: true)\n time: Timestamp! @cql_column(clusteringOrder: DESC)\n sensor_value: Float!\n \tstation_id: Uuid!\n data: String!\n }\n type SelectIotResult @cql_payload {\n \t\tdata: [Iot]\n \t\tpagingState: String\n }\n type Query {\n getIots(\n machine_id: Uuid!,\n sensor_name: String!,\n pagingState: String @cql_pagingState\n ): SelectIotResult @cql_select(pageSize: 10)\n }\n type Mutation {\n \t\tinsertIot(iot: IotInput): Iot\n }\n \"\"\") {\n version\n }\n}\n"
}
tags:
name: create-gql-schema
rampup:
ops:
rampup-insert:
op: POST <<protocol:http>>://{weighted_hosts}:<<stargate_port:8080>><<path_prefix:>>/graphql/<<keyspace:gqlsf_timeseries>>
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<graphql_port:8080>><<path_prefix:>>/graphql/<<keyspace:gqlsf_timeseries>>
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
@ -94,7 +85,8 @@ blocks:
ratio: <<read_ratio:1>>
ops:
main-select:
op: POST <<protocol:http>>://{weighted_hosts}:<<stargate_port:8080>><<path_prefix:>>/graphql/<<keyspace:gqlsf_timeseries>>
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<graphql_port:8080>><<path_prefix:>>/graphql/<<keyspace:gqlsf_timeseries>>
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
@ -109,7 +101,8 @@ blocks:
ratio: <<write_ratio:9>>
ops:
main-write:
op: POST <<protocol:http>>://{weighted_hosts}:<<stargate_port:8080>><<path_prefix:>>/graphql/<<keyspace:gqlsf_timeseries>>
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<graphql_port:8080>><<path_prefix:>>/graphql/<<keyspace:gqlsf_timeseries>>
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"

View File

@ -141,6 +141,9 @@ defaults:
ensure that the values that are inserted at binding points are produced
in a valid form for a URI. You can use the `URLEncode()`
binding function where needed to achieve this.
*NOTE:* If you are using dynamic values for the uri field, and
the test value for cycle 0 includes neither `URLENCODE[[` nor `E[]`,
then URL encoding is skipped for that field. You can override this with
`enable_urlencode: true`, as shown in the sketch after this list.
- **method** - An optional request method. If not provided, "GET" is
assumed. Any method name will work here, even custom ones that are
specific to a given target system. No validation is done for standard
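For reference, here is a minimal sketch of these uri rules, combining the `URLEncode()` binding function named above with the `enable_urlencode` override; the `where_clause` binding and the namespace/collection values are illustrative only, not part of this changeset:

```yaml
bindings:
  # hypothetical dynamic where-clause, encoded so it is safe to place in a URI
  where_clause: ToString(); URLEncode()
ops:
  example-get:
    method: GET
    uri: <<protocol:http>>://{weighted_hosts}:<<docsapi_port:8180>><<path_prefix:>>/v2/namespaces/<<namespace:docs>>/collections/<<collection:docs>>?where={where_clause}
    # force URL encoding even though cycle 0 contains no URLENCODE[[ marker
    enable_urlencode: true
    Accept: "application/json"
```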
@ -187,11 +190,22 @@ configuration. If needed, more configurable SSL support will be added.
## Client Behavior
### TCP Sessions
### TCP Sessions & Clients
The HTTP clients are allocated one to each thread. The TCP connection
caching is entirely left to the defaults for the current HttpClient
library that is bundled within the JVM.
Client instances are created for each unique `space` value. NoSQLBench
provides a way for all driver adapters to instantiate native clients according
to data from a binding. This is standardized under the op template parameter
`space`, which is wired by default to the static value `default`. This means
that each activity that uses the http driver shares a single client instance
across all threads by default. If you want a new http client per thread,
simply add a binding such as `space: ThreadNumToInteger()` and reference it in
an op template as `space: {space}`, OR use an inline op field in your op
template like `space: {{ThreadNumToInteger()}}`.
You can use any binding function you want for the space op field. However,
if you were to assign it something like "space: {{Identity()}}", the result
would be counterproductive: a new http client would be spun up and cached
for every single cycle. A minimal sketch of the per-thread pattern follows.
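This sketch assumes a workload that already defines `weighted_hosts` and an `http_port` template variable; the op name, endpoint path, and the `client_id` binding name are illustrative only:

```yaml
bindings:
  # one integer per thread, so each thread resolves to its own space/client
  client_id: ThreadNumToInteger(); ToString()
ops:
  per-thread-get:
    method: GET
    uri: <<protocol:http>>://{weighted_hosts}:<<http_port:8080>><<path_prefix:>>/health
    # each distinct space value gets its own cached http client instance
    space: "{client_id}"
```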
### Chunked encoding and web sockets

View File

@ -19,11 +19,14 @@ package io.nosqlbench.activitytype.http.statuscodes;
import io.nosqlbench.adapter.http.statuscodes.HttpStatusCodes;
import io.nosqlbench.adapter.http.statuscodes.HttpStatusRanges;
import io.nosqlbench.adapter.http.statuscodes.IetfStatusCode;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.junit.jupiter.api.Test;
import static org.assertj.core.api.Assertions.assertThat;
public class HttpStatusCodesTest {
private final static Logger logger = LogManager.getLogger(HttpStatusCodesTest.class);
@Test
public void testLookup() {
@ -32,7 +35,7 @@ public class HttpStatusCodesTest {
assertThat(result.getReference()).isEqualTo("[RFC7231, Section 6.5.4]");
assertThat(result.getValues()).isEqualTo("404");
assertThat(result.getDescription()).isEqualTo("Not Found");
System.out.println(result.toString(404));
logger.debug(() -> result.toString(404));
assertThat(result.toString(404)).isEqualTo("404, Not Found, [https://www.iana.org/go/rfc7231#section-6.5.4], CLIENT_ERROR (The request contains bad syntax or cannot be fulfilled.)");
}
@ -43,7 +46,7 @@ public class HttpStatusCodesTest {
assertThat(result.getReference()).isEqualTo("[check https://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml]");
assertThat(result.getValues()).isEqualTo("496");
assertThat(result.getDescription()).isNullOrEmpty();
System.out.println(result.toString(496));
logger.debug(() -> result.toString(496));
assertThat(result.toString(496)).isEqualTo("496, [check https://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml], CLIENT_ERROR (The request contains bad syntax or cannot be fulfilled.)");
}
@ -54,7 +57,7 @@ public class HttpStatusCodesTest {
assertThat(result.getReference()).isEqualTo("[check https://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml]");
assertThat(result.getValues()).isEqualTo("747");
assertThat(result.getDescription()).isNullOrEmpty();
System.out.println(result.toString(747));
logger.debug(() -> result.toString(747));
assertThat(result.toString(747)).isEqualTo("747, [check https://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml], UNKNOWN_ERROR (This error type is not known based on IANA registered HTTP status codes.)");
}

View File

@ -18,12 +18,14 @@ package io.nosqlbench.adapter.http;
import io.nosqlbench.adapter.http.core.HttpOpMapper;
import io.nosqlbench.adapter.http.core.HttpSpace;
import io.nosqlbench.api.config.standard.NBConfiguration;
import io.nosqlbench.engine.api.activityconfig.StatementsLoader;
import io.nosqlbench.engine.api.activityconfig.yaml.OpTemplate;
import io.nosqlbench.engine.api.activityconfig.yaml.StmtsDocList;
import io.nosqlbench.engine.api.activityimpl.uniform.DriverSpaceCache;
import io.nosqlbench.engine.api.templating.ParsedOp;
import io.nosqlbench.api.config.standard.NBConfiguration;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
@ -34,6 +36,7 @@ import static org.assertj.core.api.Assertions.assertThat;
public class HttpOpMapperTest {
private final static Logger logger = LogManager.getLogger(HttpOpMapperTest.class);
static NBConfiguration cfg;
static HttpDriverAdapter adapter;
static HttpOpMapper mapper;
@ -132,7 +135,7 @@ public class HttpOpMapperTest {
body: StaticStringMapper('test')
""");
System.out.println(pop);
logger.debug(pop);
assertThat(pop.getDefinedNames()).containsAll(List.of(
"method","uri","version","Header1","body"
));

View File

@ -43,7 +43,7 @@ public class MongoOpMapper implements OpMapper<Op> {
LongFunction<MongoSpace> spaceF = l -> adapter.getSpaceCache().get(ctxNamer.apply(l));
Optional<LongFunction<String>> oDatabaseF = op.getAsOptionalFunction("database");
if (oDatabaseF.isEmpty()) {
logger.warn(() -> "op field 'database' was not defined");
logger.warn("op field 'database' was not defined");
}
Optional<TypeAndTarget<MongoDBOpTypes, String>> target = op.getOptionalTypeAndTargetEnum(MongoDBOpTypes.class, String.class);

View File

@ -1,3 +1,5 @@
# Pulsar
- [1. Overview](#1-overview)
- [1.1. Issues Tracker](#11-issues-tracker)
- [2. NB Pulsar Driver Workload Definition Yaml File - High Level Structure](#2-nb-pulsar-driver-workload-definition-yaml-file---high-level-structure)

View File

@ -1,3 +1,4 @@
# Table of contents
- [1. Overview](#1-overview)
- [1.1. Issues Tracker](#11-issues-tracker)
- [2. Execute the NB Pulsar Driver Workload](#2-execute-the-nb-pulsar-driver-workload)

View File

@ -291,8 +291,7 @@ public class S4JSpace implements AutoCloseable {
}
if (logger.isTraceEnabled()) {
logger.trace(
buildExecSummaryString(trackingMsgCnt, timeElapsedMills, totalResponseCnt, totalNullMsgCnt));
logger.trace(buildExecSummaryString(trackingMsgCnt, timeElapsedMills, totalResponseCnt, totalNullMsgCnt));
}
} while (continueChk);

View File

@ -1,3 +1,4 @@
# S4J Adapter
- [1. Overview](#1-overview)
- [2. Execute NB S4J Workload](#2-execute-nb-s4j-workload)
- [3. NB S4J Driver Configuration Parameter File](#3-nb-s4j-driver-configuration-parameter-file)

View File

@ -70,10 +70,10 @@ public class StdoutDriverAdapter extends BaseDriverAdapter<StdoutOp, StdoutSpace
.stream()
.filter(n -> {
if (bindingsFilter.matcher(n).matches()) {
logger.trace("bindings filter kept binding '" + n + "'");
logger.trace(() -> "bindings filter kept binding '" + n + "'");
return true;
} else {
logger.trace("bindings filter removed binding '" + n + "'");
logger.trace(() -> "bindings filter removed binding '" + n + "'");
return false;
}
})

View File

@ -54,7 +54,7 @@ public class TcpAdapterSpace {
try {
Socket socket = socketFactory.createSocket(host, port);
logger.info("connected to " + socket.toString());
logger.info(() -> "connected to " + socket.toString());
return new PrintWriter(socket.getOutputStream());
} catch (IOException e) {
throw new RuntimeException("Error opening socket:" + e, e);
@ -110,7 +110,7 @@ public class TcpAdapterSpace {
// InetAddress hostAddr = InetAddress.getByName(host);
// listenerSocket = socketFactory.createServerSocket(port, 10, hostAddr);
// if (socketFactory instanceof SSLServerSocketFactory) {
// logger.info("SSL enabled on server socket " + listenerSocket);
// logger.info(() -> "SSL enabled on server socket " + listenerSocket);
// }
// TCPServerActivity.SocketAcceptor socketAcceptor = new TCPServerActivity.SocketAcceptor(queue, listenerSocket);
// managedShutdown.add(socketAcceptor);
@ -124,7 +124,7 @@ public class TcpAdapterSpace {
// }
//
// TCPServerActivity.QueueWriterAdapter queueWriterAdapter = new TCPServerActivity.QueueWriterAdapter(this.queue);
// logger.info("initialized queue writer:" + queueWriterAdapter);
// logger.info(() -> "initialized queue writer:" + queueWriterAdapter);
// return queueWriterAdapter;
//
// }
@ -227,7 +227,7 @@ public class TcpAdapterSpace {
// writerThread.setName("SocketWriter/" + connectedSocket);
// writerThread.setDaemon(true);
// writerThread.start();
// logger.info("Started writer thread for " + connectedSocket);
// logger.info(() -> "Started writer thread for " + connectedSocket);
// } catch (SocketTimeoutException ignored) {
// }
// }

View File

@ -46,10 +46,11 @@ public class RawStmtsLoader {
Collections.addAll(this.transformers, newTransformer);
}
public RawStmtsDocList loadString(Logger logger, String data) {
public RawStmtsDocList loadString(Logger logger, final String originalData) {
String data = originalData;
try {
if (logger != null) logger.trace("Applying string transformer to yaml data:" + data);
if (logger != null) logger.trace(() -> "Applying string transformer to yaml data:" + originalData);
for (Function<String, String> transformer : transformers) {
data = transformer.apply(data);
}
@ -103,7 +104,7 @@ public class RawStmtsLoader {
protected String applyTransforms(Logger logger, String data) {
for (Function<String, String> xform : stringTransformers) {
try {
if (logger != null) logger.trace("Applying string transformer to yaml data:" + xform);
if (logger != null) logger.trace(() -> "Applying string transformer to yaml data:" + xform);
data = xform.apply(data);
} catch (Exception e) {
RuntimeException t = new OpConfigError("Error applying string transforms to input", e);
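The parameter reshuffle above (data becomes final originalData, copied into a mutable local) is not cosmetic: the new supplier-style trace call captures the variable in a lambda, and Java lambdas may only capture locals that are effectively final, while data is reassigned in the transformer loop. The same reshuffle appears in RawYamlLoader below. A JDK-only sketch of the constraint:
import java.util.List;
import java.util.function.Function;
public class CaptureSketch {
    String apply(List<Function<String, String>> transformers, final String original) {
        String data = original; // mutable working copy
        // Legal: 'original' is never reassigned, so the lambda may capture it.
        Runnable trace = () -> System.out.println("input was: " + original);
        // Writing '() -> ... + data' instead would not compile,
        // because 'data' is reassigned below.
        for (Function<String, String> t : transformers) {
            data = t.apply(data);
        }
        trace.run();
        return data;
    }
}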

View File

@ -42,10 +42,10 @@ public class RawYamlLoader {
addTransformer(new StrInterpolator());
}
public List<Map<String,Object>> loadString(Logger logger, String data) {
public List<Map<String,Object>> loadString(Logger logger, String originalData) {
String data = originalData;
try {
if (logger != null) logger.trace("Applying string transformer to yaml data:" + data);
if (logger != null) logger.trace(() -> "Applying string transformer to yaml data:" + originalData);
for (Function<String, String> transformer : transformers) {
data = transformer.apply(data);
}
@ -92,7 +92,7 @@ public class RawYamlLoader {
protected String applyTransforms(Logger logger, String data) {
for (Function<String, String> xform : stringTransformers) {
try {
if (logger != null) logger.trace("Applying string transformer to yaml data:" + xform);
if (logger != null) logger.trace(() -> "Applying string transformer to yaml data:" + xform);
data = xform.apply(data);
} catch (Exception e) {
RuntimeException t = new OpConfigError("Error applying string transforms to input", e);

View File

@ -166,6 +166,7 @@ public abstract class BaseDriverAdapter<R extends Op, S> implements DriverAdapte
.add(Param.optional("instrument", Boolean.class))
.add(Param.optional(List.of("workload", "yaml"), String.class, "location of workload yaml file"))
.add(Param.optional("driver", String.class))
.add(Param.defaultTo("dryrun",false))
.asReadOnly();
}

View File

@ -19,23 +19,27 @@ package io.nosqlbench.engine.api.activityimpl.uniform;
import io.nosqlbench.api.docsapi.BundledMarkdownManifest;
import io.nosqlbench.api.docsapi.Docs;
import io.nosqlbench.api.docsapi.DocsBinder;
import io.nosqlbench.api.docsapi.DocsNameSpace;
import io.nosqlbench.nb.annotations.Maturity;
import io.nosqlbench.nb.annotations.Service;
import io.nosqlbench.api.spi.SimpleServiceLoader;
import java.nio.file.Path;
import java.util.List;
import java.util.Map;
import java.util.Set;
@Service(value = BundledMarkdownManifest.class, selector = "adapter-docs")
@Service(value = BundledMarkdownManifest.class, selector = "drivers")
public class BundledDriverAdapterDocs implements BundledMarkdownManifest {
@Override
public DocsBinder getDocs() {
Docs docs = new Docs().namespace("adapter-docs");
DocsBinder docs = new Docs();
SimpleServiceLoader<DriverAdapter> loader = new SimpleServiceLoader<>(DriverAdapter.class, Maturity.Any);
List<SimpleServiceLoader.Component<? extends DriverAdapter>> namedProviders = loader.getNamedProviders();
for (SimpleServiceLoader.Component<? extends DriverAdapter> namedProvider : namedProviders) {
DriverAdapter driverAdapter = namedProvider.provider.get();
DocsBinder bundledDocs = driverAdapter.getBundledDocs();
docs.merge(bundledDocs);
docs = docs.merge(bundledDocs);
}
return docs;
}
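Note the subtle fix above: docs.merge(bundledDocs) becomes docs = docs.merge(bundledDocs), keeping the value that merge returns instead of discarding it. A sketch of the failure mode with a made-up immutable Binder type (the real DocsBinder's semantics are assumed here, not verified):
import java.util.ArrayList;
import java.util.List;
public class MergeSketch {
    // Hypothetical immutable binder: merge returns a NEW instance.
    record Binder(List<String> entries) {
        Binder merge(Binder other) {
            List<String> all = new ArrayList<>(entries);
            all.addAll(other.entries());
            return new Binder(List.copyOf(all));
        }
    }
    static Binder collect(List<Binder> bundles) {
        Binder docs = new Binder(List.of());
        for (Binder b : bundles) {
            docs = docs.merge(b); // dropping this return value would lose every bundle
        }
        return docs;
    }
}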

View File

@ -28,11 +28,15 @@ import io.nosqlbench.api.config.standard.NBConfiguration;
import io.nosqlbench.api.content.Content;
import io.nosqlbench.api.content.NBIO;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.function.Function;
import java.util.function.LongFunction;
import java.util.stream.Stream;
/**
* <P>The DriverAdapter interface is expected to be the replacement
@ -177,7 +181,7 @@ public interface DriverAdapter<OPTYPE extends Op, SPACETYPE> {
* @return A {@link DocsBinder} which describes docs to include for a given adapter.
*/
default DocsBinder getBundledDocs() {
Docs docs = new Docs().namespace("adapter-"+this.getAdapterName());
Docs docs = new Docs().namespace("drivers");
String dev_docspath = "adapter-" + this.getAdapterName() + "/src/main/resources/docs/" + this.getAdapterName();
String cp_docspath = "docs/" + this.getAdapterName();
@ -185,6 +189,7 @@ public interface DriverAdapter<OPTYPE extends Op, SPACETYPE> {
bundled_docs.map(Content::asPath).ifPresent(docs::addContentsOf);
Optional<Content<?>> maindoc = NBIO.local().name("/src/main/resources/" + this.getAdapterName() + ".md", this.getAdapterName() + ".md").first();
maindoc.map(Content::asPath).ifPresent(docs::addPath);
return docs.asDocsBinder();

View File

@ -0,0 +1,33 @@
/*
* Copyright (c) 2022 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package io.nosqlbench.engine.api.activityimpl.uniform;
import io.nosqlbench.engine.api.activityimpl.uniform.flowtypes.Op;
import io.nosqlbench.engine.api.activityimpl.uniform.flowtypes.RunnableOp;
public class DryRunOp implements RunnableOp {
private final Op op;
public DryRunOp(Op op) {
this.op = op;
}
@Override
public void run() {
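// Intentionally empty: a dry-run op is fully bound but never executed.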
}
}

View File

@ -0,0 +1,38 @@
/*
* Copyright (c) 2022 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package io.nosqlbench.engine.api.activityimpl.uniform;
import io.nosqlbench.engine.api.activityimpl.BaseOpDispenser;
import io.nosqlbench.engine.api.activityimpl.OpDispenser;
import io.nosqlbench.engine.api.activityimpl.uniform.flowtypes.Op;
import io.nosqlbench.engine.api.templating.ParsedOp;
public class DryRunOpDispenserWrapper extends BaseOpDispenser<Op, Object> {
private final OpDispenser<? extends Op> realDispenser;
public DryRunOpDispenserWrapper(DriverAdapter<Op,Object> adapter, ParsedOp pop, OpDispenser<? extends Op> realDispenser) {
super(adapter, pop);
this.realDispenser = realDispenser;
}
@Override
public DryRunOp apply(long cycle) {
Op op = realDispenser.apply(cycle);
return new DryRunOp(op);
}
}
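Paired with the dryrun parameter added to BaseDriverAdapter earlier in this commit, this wrapper preserves the per-cycle binding work of the real dispenser while making execution a no-op. A generic, self-contained sketch of the same pattern, using stand-in types rather than the actual nosqlbench interfaces:
import java.util.function.LongFunction;
public class DryRunSketch {
    interface Op { void run(); }
    // Wraps a real dispenser: binding still happens, execution does not.
    static LongFunction<Op> dryRun(LongFunction<Op> realDispenser) {
        return cycle -> {
            Op bound = realDispenser.apply(cycle); // binding cost is still paid and measurable
            return () -> { };                      // the op itself becomes a no-op
        };
    }
    public static void main(String[] args) {
        LongFunction<Op> real = c -> () -> System.out.println("executing cycle " + c);
        dryRun(real).apply(42L).run(); // binds cycle 42, prints nothing
    }
}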

View File

@ -19,7 +19,7 @@ package io.nosqlbench.engine.api.activityimpl.uniform.flowtypes;
import java.util.function.Function;
/**
* <H2>ChainingOp<I,O>: f(I) -> O</I,O></H2>
* <H2>ChainingOp&lt;I,O&gt;: f(I) -> O&lt;I,O&gt;</H2>
* <P>
* Run a function on the current cached result in the current thread and replace it
* with the result of the function. ChainingOps are one way of invoking

View File

@ -138,7 +138,7 @@ public class CommandTemplate {
for (Function<String, Map<String, String>> parser : parserlist) {
Map<String, String> parsed = parser.apply(oneline);
if (parsed != null) {
logger.debug("parsed request: " + parsed);
logger.debug(() -> "parsed request: " + parsed);
cmd.putAll(parsed);
didParse = true;
break;

File diff suppressed because one or more lines are too long

(binary image added: 42 KiB)

View File

@ -0,0 +1,105 @@
@startuml
'https://plantuml.com/sequence-diagram
title Lifecycle of an activity
control caller as caller
control ActivityExecutor as ae
control "Activity\nException\nHandler" as aeh
control "Activity\nThread\nFactory" as atf
control ExecutorService as aes
control Annotator as ann
control Activity as activity
== startup sequence ==
caller -> ae**: create
ae -> aeh**: create
ae -> atf**: create(w/ Exception Handler)
aeh -> atf: <injected\nvia ctor>
ae -> aes**: create(w/ Thread Factory)
atf -> aes: <injected\nvia ctor>
caller -> ae: startActivity()
activate ae
ae -> ann: Annotate Activity Start
ae -> activity: initActivity()
activate activity
ae <- activity
deactivate activity
note over ae,aes: align threadcount as explained below
caller <- ae
deactivate ae
== dynamic threadcount update ==
note over ae, aes: threads can be changed dynamically
caller -> ae: apply params
activate ae
ae->ae: align motor count
ae->aes: stop extra motors
ae->aes: <start missing motors>
group for each new thread/motor
ae -> aes: execute(<motor>)
activate aes
aes -> atf: get()
atf -> thread**: create
activate atf
aes <- atf: <thread>
deactivate atf
aes --> thread: run()
note over ann, thread: At this point, the\nmotor thread starts running\nthe defined activity's action\nover cycles
ae->ae: await thread state update
ae<-aes:
deactivate aes
end group
caller <- ae
deactivate ae
== shutdown sequence [after startup] ==
caller -> ae: stopActivity()
activate ae
ae -> ae: request stop motors
ae -> ae: await all stop
ae -> activity: shutdownActivity()
activate activity
ae <- activity
deactivate activity
ae -> ann: Annotate Activity Finish
caller <- ae
deactivate ae
== on exception in motor thread ==
thread -> aeh: catch(<thrown exception>)
aeh -> ae: notifyException\n(<thread>,<throwable>)
activate ae
ae -> ae: save exception
ae -> ae: forceStopActivity()
ae -> aes: shutdown();
activate aes
ae <- aes:
deactivate aes
group if needed [after timeout]
ae -> aes: shutdownNow();
activate aes
ae <- aes
deactivate aes
end group
ae -> activity: shutdownActivity();
ae -> activity: closeAutoCloseables();
note over thread: action\nthread\nterminates
destroy thread
deactivate ae
@enduml
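The startup sequence in this diagram hinges on the exception handler being threaded through the ThreadFactory into the ExecutorService, so a motor thread that dies abnormally reports back rather than vanishing. A minimal JDK-only sketch of that wiring (handler and task bodies are placeholders):
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
public class WiringSketch {
    public static void main(String[] args) throws InterruptedException {
        // Exception handler created first, injected into the factory...
        Thread.UncaughtExceptionHandler handler =
            (t, e) -> System.err.println("motor " + t.getName() + " failed: " + e);
        // ...the factory applies it to every thread it creates...
        ThreadFactory factory = r -> {
            Thread t = new Thread(r);
            t.setUncaughtExceptionHandler(handler);
            return t;
        };
        // ...and the executor service builds all motor threads through the factory.
        ExecutorService pool = Executors.newCachedThreadPool(factory);
        pool.execute(() -> { throw new RuntimeException("boom"); });
        pool.shutdown();
        pool.awaitTermination(5, TimeUnit.SECONDS);
    }
}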

File diff suppressed because one or more lines are too long

(binary image added: 27 KiB)

View File

@ -0,0 +1,67 @@
@startuml
'https://plantuml.com/sequence-diagram
title Lifecycle of a single scenario.call()
control "caller" as c
control "Scenario" as s
control "Scenario\nController" as sc
control "Scripting\nEngine" as engine
control "Activity\nExecutor" as ae
control "Java\nRuntime" as jrt
control "Shutdown\nHook" as sh
control "Annotations" as ann
c -> s**: create
c -> s: call()
activate s
s -> sh**: create
s -> jrt: register(ShutdownHook)
s -> ann: Annotate Scenario Start
s -> sc**: create
s -> engine**: create
s -> engine: run(script)
activate engine
group async calls [javascript+Java]
engine <--> sc: scenario.(*)
engine <--> sc: activities.(*)
engine <--> sc: metrics.(*)
engine <--> sc: params.(*)
engine -> sc: start(<activity>)
activate sc
sc -> ae**: create
sc -> ae: startActivity()
deactivate sc
end group
s <- engine: result
deactivate engine
s -> sc: awaitCompletion()
activate sc
group for each activity
sc -> ae: awaitCompletion()
activate ae
sc <- ae
deactivate ae
end group
s <- sc
deactivate sc
s -> jrt: unregister(ShutdownHook)
s -> sh: run()
sh -> ann: Annotate Scenario Finish
c <- s: Scenario\nResult
deactivate s
== on exception during call() ==
jrt -> sh: run()
sh -> ann: Annotate Scenario Finish
@enduml
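The ShutdownHook register/unregister steps above are what make "Annotate Scenario Finish" fire exactly once, whether call() completes normally or the JVM is torn down mid-scenario. A JDK-only sketch of that idiom (the annotation is stubbed with a print):
public class HookSketch {
    public static void main(String[] args) {
        Thread hook = new Thread(() -> System.out.println("annotate scenario finish"));
        Runtime.getRuntime().addShutdownHook(hook);
        try {
            // ... run the scenario body here ...
        } finally {
            // Normal completion: unregister the hook, then run it directly,
            // mirroring the unregister(ShutdownHook) + run() steps above.
            Runtime.getRuntime().removeShutdownHook(hook);
            hook.run();
        }
    }
}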

File diff suppressed because one or more lines are too long

(binary image added: 28 KiB)

View File

@ -0,0 +1,62 @@
@startuml
'https://plantuml.com/sequence-diagram
title Lifecycle of Scenarios
control "NBCLI" as nbcli
control "Scenario" as s
control "Scenario\nController" as sc
control "Scenarios\nExecutor" as se
control "Exception\nHandler" as seh
control "Thread\nFactory" as stf
control "Executor\nService" as ses
nbcli -> se** : create
se -> seh** : create
se -> stf** : create ThreadFactory\n(w/ ExceptionHandler)
se -> ses** : create ExecutorService\n(w/ ThreadFactory)
nbcli -> s** : create
s -> sc** : create
nbcli --> se : execute(Scenario)
se --> ses: submit(<Callable> Scenario)
activate ses
ses -> future**: create
se <-- ses: <Future<ScenarioResult>>
deactivate ses
== [async] on thread from thread factory ==
ses -> stf: get()
stf -> thread**: create
ses <- stf: <thread>
ses -> thread: run task
activate thread
thread -> s: call()
activate s
thread <- s: ScenarioResult
deactivate s
thread -> future: result
deactivate thread
== [async] on NBCLI thread ==
nbcli -> se: awaitAllResults();
activate se
se -> ses: shutdown
loop timeout
se -> ses: awaitTermination(timeout)
activate ses
se <- ses
deactivate ses
end loop
loop each future
se -> future: get()
activate future
se <- future: ScenarioResult
deactivate future
end loop
nbcli <- se: <ScenariosResults>
deactivate se
@enduml
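The async hand-off shown here is the standard Callable/Future pattern: each Scenario is submitted as a Callable, and awaitAllResults() shuts the pool down, polls awaitTermination in a timeout loop, then drains each Future. A compact JDK sketch of the same flow, with the result type simplified to String:
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
public class ScenariosSketch {
    public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newFixedThreadPool(2);
        Callable<String> scenario = () -> "scenario result";
        List<Future<String>> futures = List.of(pool.submit(scenario), pool.submit(scenario));
        pool.shutdown();                                    // stop accepting new scenarios
        while (!pool.awaitTermination(1, TimeUnit.SECONDS)) {
            // loop on timeout, as in the diagram
        }
        for (Future<String> f : futures) {
            System.out.println(f.get());                    // collect each ScenarioResult
        }
    }
}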

View File

@ -225,6 +225,8 @@ sstablegen
* Mac M1 support
- as of 08/15, all Mac M1 systems should be supported for the .jar and the docker image
* Dry-run mode for all adapter types
review:
- 7578e91d773a9ea8113250899ef46b7aacf95e70
- 392bbcc5954019ae58956850c980646cef14a1f7

View File

@ -105,7 +105,7 @@
<dependency>
<groupId>com.fasterxml.jackson.jaxrs</groupId>
<artifactId>jackson-jaxrs-json-provider</artifactId>
<version>2.14.0</version>
<version>2.14.1</version>
</dependency>
<dependency>

View File

@ -144,11 +144,11 @@ public class NBWebServer implements Runnable {
svcLoader.getNamedProviders().stream().map(p -> p.provider)
.forEach(p -> {
Class<? extends WebServiceObject> c = p.type();
logger.info("Adding web service object: " + c.getSimpleName());
logger.info(() -> "Adding web service object: " + c.getSimpleName());
this.addWebObject(c);
});
logger.debug("Loaded " + this.servletClasses.size() + " root resources.");
logger.debug(() -> "Loaded " + this.servletClasses.size() + " root resources.");
}
@ -177,7 +177,7 @@ public class NBWebServer implements Runnable {
path.getFileSystem().provider().checkAccess(path, AccessMode.READ);
this.basePaths.add(path);
} catch (Exception e) {
logger.error("Unable to access path " + path.toString());
logger.error(() -> "Unable to access path " + path.toString());
throw new RuntimeException(e);
}
}
@ -224,7 +224,7 @@ public class NBWebServer implements Runnable {
}
for (Path basePath : basePaths) {
logger.info("Adding path to server: " + basePath.toString());
logger.info(() -> "Adding path to server: " + basePath.toString());
ResourceHandler resourceHandler = new ResourceHandler();
resourceHandler.setDirAllowed(true);
resourceHandler.setAcceptRanges(true);
@ -253,7 +253,7 @@ public class NBWebServer implements Runnable {
// ServletHolder statusResourceServletHolder = new ServletHolder(statusResourceContainer);
// getContextHandler().addServlet(statusResourceServletHolder, "/_");
logger.info("adding " + servletClasses.size() + " context handlers...");
logger.info(() -> "adding " + servletClasses.size() + " context handlers...");
loadDynamicEndpoints();
@ -311,7 +311,7 @@ public class NBWebServer implements Runnable {
server.setHandler(handlers);
for (Connector connector : server.getConnectors()) {
if (connector instanceof AbstractConnector) {
logger.debug("Setting idle timeout for " + connector + " to 300,000ms");
logger.debug(() -> "Setting idle timeout for " + connector + " to 300,000ms");
((AbstractConnector) connector).setIdleTimeout(300000);
}
}
@ -347,7 +347,7 @@ public class NBWebServer implements Runnable {
server.join();
} catch (Exception e) {
throw new RuntimeException("error while starting doc server: "+e.toString(),e);
throw new RuntimeException("error while starting doc server: "+ e,e);
}
}
@ -381,13 +381,11 @@ public class NBWebServer implements Runnable {
StringBuilder sb = new StringBuilder();
sb.append("----> handler type ").append(handler.getClass().getSimpleName()).append("\n");
if (handler instanceof ResourceHandler) {
ResourceHandler h = (ResourceHandler) handler;
if (handler instanceof ResourceHandler h) {
sb.append(" base resource: ").append(h.getBaseResource().toString())
.append("\n");
sb.append(h.dump());
} else if (handler instanceof ServletContextHandler) {
ServletContextHandler h = (ServletContextHandler) handler;
} else if (handler instanceof ServletContextHandler h) {
sb.append(h.dump()).append("\n");
h.getServletContext().getServletRegistrations().forEach(
(k, v) -> {
@ -396,8 +394,7 @@ public class NBWebServer implements Runnable {
}
);
sb.append("context path:").append(h.getContextPath());
} else if (handler instanceof DefaultHandler) {
DefaultHandler h = (DefaultHandler) handler;
} else if (handler instanceof DefaultHandler h) {
sb.append(h.dump());
}
return sb.toString();
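The cleanup above swaps test-then-cast blocks for Java 16+ pattern matching for instanceof, which binds the narrowed variable in the condition itself. A small before/after sketch with simplified types:
public class InstanceofSketch {
    static String describe(Object handler) {
        // Pre-Java-16 form: test, then cast.
        if (handler instanceof CharSequence) {
            CharSequence h = (CharSequence) handler;
            return "chars: " + h.length();
        }
        // Pattern-matching form: 'h' is bound only when the test succeeds.
        if (handler instanceof Number h) {
            return "number: " + h.intValue();
        }
        return String.valueOf(handler);
    }
    public static void main(String[] args) {
        System.out.println(describe("abc")); // chars: 3
        System.out.println(describe(42));    // number: 42
    }
}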

View File

@ -54,14 +54,14 @@ public class NBWebServerApp implements BundledApp {
StandardOpenOption[] OVERWRITE = {StandardOpenOption.TRUNCATE_EXISTING,StandardOpenOption.CREATE,StandardOpenOption.WRITE};
logger.info("generating to directory " + dirpath);
logger.info(() -> "generating to directory " + dirpath);
DocsysMarkdownEndpoint dds = new DocsysMarkdownEndpoint();
String markdownList = dds.getMarkdownList(true);
Path markdownCsvPath = dirpath.resolve(Path.of("services/docs/markdown.csv"));
logger.info("markdown.csv located at " + markdownCsvPath);
logger.info(() -> "markdown.csv located at " + markdownCsvPath);
Files.createDirectories(markdownCsvPath.getParent());
Files.writeString(markdownCsvPath, markdownList, OVERWRITE);
@ -70,7 +70,7 @@ public class NBWebServerApp implements BundledApp {
for (String markdownFile : markdownFileArray) {
Path relativePath = dirpath.resolve(Path.of("services/docs", markdownFile));
logger.info("Creating " + relativePath);
logger.info(() -> "Creating " + relativePath);
Path path = dds.findPath(markdownFile);
// String markdown = dds.getFileByPath(markdownFile);
@ -115,7 +115,7 @@ public class NBWebServerApp implements BundledApp {
server.withContextParam("workspaces_root", workspaces_root);
} else if (arg.matches("--logdir")) {
String logdir_path = serverArgs[i + 1];
logger.info("Setting docserver logdir to " + logdir_path);
logger.info(() -> "Setting docserver logdir to " + logdir_path);
server.withContextParam("logpath", Path.of(logdir_path));
}
}
@ -150,10 +150,10 @@ public class NBWebServerApp implements BundledApp {
} else if (args.length > 0 && args[0].contains("generate")) {
try {
String[] genargs = Arrays.copyOfRange(args, 1, args.length);
logger.info("Generating with args [" + String.join("][", args) + "]");
logger.info(() -> "Generating with args [" + String.join("][", args) + "]");
generate(genargs);
} catch (IOException e) {
logger.error("could not generate files with command " + String.join(" ", args));
logger.error(() -> "could not generate files with command " + String.join(" ", args));
e.printStackTrace();
}
} else {

View File

@ -52,7 +52,7 @@ public abstract class JmxOp implements Op,Runnable {
protected Object readObject(String attributeName) {
try {
Object value = getMBeanConnection().getAttribute(objectName, attributeName);
logger.trace("read attribute '" + value + "': " + value);
logger.trace(() -> "read attribute '" + attributeName + "': " + value);
return value;
} catch (Exception e) {
throw new RuntimeException(e);

View File

@ -1,85 +0,0 @@
/*
* Copyright (c) 2022 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.nosqlbench.driver.pulsar;
import com.codahale.metrics.Timer;
import io.nosqlbench.driver.pulsar.ops.PulsarOp;
import io.nosqlbench.engine.api.activityapi.core.SyncAction;
import io.nosqlbench.engine.api.activityapi.errorhandling.modular.ErrorDetail;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import java.util.function.LongFunction;
public class PulsarAction implements SyncAction {
private final static Logger logger = LogManager.getLogger(PulsarAction.class);
private final int slot;
private final PulsarActivity activity;
int maxTries = 1;
public PulsarAction(PulsarActivity activity, int slot) {
this.activity = activity;
this.slot = slot;
this.maxTries = activity.getActivityDef().getParams().getOptionalInteger("maxtries").orElse(10);
}
@Override
public void init() {
}
@Override
public int runCycle(long cycle) {
// let's fail the action if some async operation failed
activity.failOnAsyncOperationFailure();
long start = System.nanoTime();
PulsarOp pulsarOp;
try (Timer.Context ctx = activity.getBindTimer().time()) {
LongFunction<? extends PulsarOp> readyPulsarOp = activity.getSequencer().apply(cycle);
pulsarOp = readyPulsarOp.apply(cycle);
} catch (Exception bindException) {
// if diagnostic mode ...
activity.getErrorHandler().handleError(bindException, cycle, 0);
throw new RuntimeException(
"while binding request in cycle " + cycle + ": " + bindException.getMessage(), bindException
);
}
for (int i = 0; i < maxTries; i++) {
Timer.Context ctx = activity.getExecuteTimer().time();
try {
// it is up to the pulsarOp to call Context#close when the activity is executed
// this allows us to track time for async operations
pulsarOp.run(ctx::close);
break;
} catch (RuntimeException err) {
ErrorDetail errorDetail = activity
.getErrorHandler()
.handleError(err, cycle, System.nanoTime() - start);
if (!errorDetail.isRetryable()) {
break;
}
}
}
return 0;
}
}

View File

@ -1,319 +0,0 @@
/*
* Copyright (c) 2022 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.nosqlbench.driver.pulsar;
import com.codahale.metrics.Counter;
import com.codahale.metrics.Histogram;
import com.codahale.metrics.Timer;
import io.nosqlbench.driver.pulsar.ops.PulsarOp;
import io.nosqlbench.driver.pulsar.ops.ReadyPulsarOp;
import io.nosqlbench.driver.pulsar.util.PulsarActivityUtil;
import io.nosqlbench.driver.pulsar.util.PulsarNBClientConf;
import io.nosqlbench.engine.api.activityapi.core.ActivityDefObserver;
import io.nosqlbench.engine.api.activityapi.errorhandling.modular.NBErrorHandler;
import io.nosqlbench.engine.api.activityapi.planning.OpSequence;
import io.nosqlbench.engine.api.activityapi.ratelimits.RateLimiter;
import io.nosqlbench.engine.api.activityapi.ratelimits.RateLimiters;
import io.nosqlbench.api.engine.activityimpl.ActivityDef;
import io.nosqlbench.engine.api.activityimpl.OpDispenser;
import io.nosqlbench.engine.api.activityimpl.SimpleActivity;
import io.nosqlbench.api.engine.metrics.ActivityMetrics;
import org.apache.commons.lang3.BooleanUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.pulsar.client.admin.PulsarAdmin;
import org.apache.pulsar.client.admin.PulsarAdminBuilder;
import org.apache.pulsar.client.api.*;
import org.apache.pulsar.common.schema.KeyValueEncodingType;
import java.util.Map;
import java.util.Optional;
public class PulsarActivity extends SimpleActivity implements ActivityDefObserver {
private final static Logger logger = LogManager.getLogger(PulsarActivity.class);
private Counter bytesCounter;
private Histogram messageSizeHistogram;
private Timer bindTimer;
private Timer executeTimer;
private Timer createTransactionTimer;
private Timer commitTransactionTimer;
// Metrics for NB Pulsar driver milestone: https://github.com/nosqlbench/nosqlbench/milestone/11
// - end-to-end latency
private Histogram e2eMsgProcLatencyHistogram;
/**
* A histogram that tracks payload round-trip-time, based on a user-defined field in some sender
* system which can be interpreted as millisecond epoch time in the system's local time zone.
* This is paired with a field name of the same type to be extracted and reported in a metric
* named 'payload-rtt'.
*/
private Histogram payloadRttHistogram;
// - message out of sequence error counter
private Counter msgErrOutOfSeqCounter;
// - message loss counter
private Counter msgErrLossCounter;
// - message duplicate (when dedup is enabled) error counter
private Counter msgErrDuplicateCounter;
private PulsarSpaceCache pulsarCache;
private PulsarNBClientConf pulsarNBClientConf;
private String pulsarSvcUrl;
private String webSvcUrl;
private PulsarAdmin pulsarAdmin;
private PulsarClient pulsarClient;
private Schema<?> pulsarSchema;
private NBErrorHandler errorHandler;
private OpSequence<OpDispenser<? extends PulsarOp>> sequencer;
private volatile Throwable asyncOperationFailure;
private boolean cycleratePerThread;
public PulsarActivity(ActivityDef activityDef) {
super(activityDef);
}
@Override
public void shutdownActivity() {
super.shutdownActivity();
if (pulsarCache == null) {
return;
}
for (PulsarSpace pulsarSpace : pulsarCache.getAssociatedPulsarSpace()) {
pulsarSpace.shutdownPulsarSpace();
}
}
@Override
public void initActivity() {
super.initActivity();
pulsarCache = new PulsarSpaceCache(this);
bytesCounter = ActivityMetrics.counter(activityDef, "bytes");
messageSizeHistogram = ActivityMetrics.histogram(activityDef, "message_size", this.getHdrDigits());
bindTimer = ActivityMetrics.timer(activityDef, "bind", this.getHdrDigits());
executeTimer = ActivityMetrics.timer(activityDef, "execute", this.getHdrDigits());
createTransactionTimer = ActivityMetrics.timer(activityDef, "create_transaction", this.getHdrDigits());
commitTransactionTimer = ActivityMetrics.timer(activityDef, "commit_transaction", this.getHdrDigits());
e2eMsgProcLatencyHistogram = ActivityMetrics.histogram(activityDef, "e2e_msg_latency", this.getHdrDigits());
payloadRttHistogram = ActivityMetrics.histogram(activityDef, "payload_rtt", this.getHdrDigits());
msgErrOutOfSeqCounter = ActivityMetrics.counter(activityDef, "err_msg_oos");
msgErrLossCounter = ActivityMetrics.counter(activityDef, "err_msg_loss");
msgErrDuplicateCounter = ActivityMetrics.counter(activityDef, "err_msg_dup");
String pulsarClntConfFile =
activityDef.getParams().getOptionalString("config").orElse("config.properties");
pulsarNBClientConf = new PulsarNBClientConf(pulsarClntConfFile);
pulsarSvcUrl =
activityDef.getParams().getOptionalString("service_url").orElse("pulsar://localhost:6650");
webSvcUrl =
activityDef.getParams().getOptionalString("web_url").orElse("http://localhost:8080");
initPulsarAdminAndClientObj();
createPulsarSchemaFromConf();
this.sequencer = createOpSequence((ot) -> new ReadyPulsarOp(ot, pulsarCache, this), false, Optional.empty());
setDefaultsFromOpSequence(sequencer);
onActivityDefUpdate(activityDef);
this.errorHandler = new NBErrorHandler(
() -> activityDef.getParams().getOptionalString("errors").orElse("stop"),
this::getExceptionMetrics
);
cycleratePerThread = activityDef.getParams().takeBoolOrDefault("cyclerate_per_thread", false);
}
private final ThreadLocal<RateLimiter> cycleLimiterThreadLocal = ThreadLocal.withInitial(() -> {
if (super.getCycleLimiter() != null) {
return RateLimiters.createOrUpdate(this.getActivityDef(), "cycles", null,
super.getCycleLimiter().getRateSpec());
} else {
return null;
}
});
@Override
public RateLimiter getCycleLimiter() {
if (cycleratePerThread) {
return cycleLimiterThreadLocal.get();
} else {
return super.getCycleLimiter();
}
}
public NBErrorHandler getErrorHandler() { return errorHandler; }
public OpSequence<OpDispenser<? extends PulsarOp>> getSequencer() { return sequencer; }
public void failOnAsyncOperationFailure() {
if (asyncOperationFailure != null) {
throw new RuntimeException(asyncOperationFailure);
}
}
public void asyncOperationFailed(Throwable ex) {
this.asyncOperationFailure = ex;
}
/**
* Initialize
* - PulsarAdmin object for adding/deleting tenant, namespace, and topic
* - PulsarClient object for message publishing and consuming
*/
private void initPulsarAdminAndClientObj() {
PulsarAdminBuilder adminBuilder =
PulsarAdmin.builder()
.serviceHttpUrl(webSvcUrl);
ClientBuilder clientBuilder = PulsarClient.builder();
try {
Map<String, Object> clientConfMap = pulsarNBClientConf.getClientConfMap();
// Override "client.serviceUrl" setting in config.properties
clientConfMap.remove("serviceUrl");
clientBuilder.loadConf(clientConfMap).serviceUrl(pulsarSvcUrl);
// Pulsar Authentication
String authPluginClassName =
(String) pulsarNBClientConf.getClientConfValue(PulsarActivityUtil.CLNT_CONF_KEY.authPulginClassName.label);
String authParams =
(String) pulsarNBClientConf.getClientConfValue(PulsarActivityUtil.CLNT_CONF_KEY.authParams.label);
if ( !StringUtils.isAnyBlank(authPluginClassName, authParams) ) {
adminBuilder.authentication(authPluginClassName, authParams);
clientBuilder.authentication(authPluginClassName, authParams);
}
String useTlsStr =
(String) pulsarNBClientConf.getClientConfValue(PulsarActivityUtil.CLNT_CONF_KEY.useTls.label);
boolean useTls = BooleanUtils.toBoolean(useTlsStr);
String tlsTrustCertsFilePath =
(String) pulsarNBClientConf.getClientConfValue(PulsarActivityUtil.CLNT_CONF_KEY.tlsTrustCertsFilePath.label);
String tlsAllowInsecureConnectionStr =
(String) pulsarNBClientConf.getClientConfValue(PulsarActivityUtil.CLNT_CONF_KEY.tlsAllowInsecureConnection.label);
boolean tlsAllowInsecureConnection = BooleanUtils.toBoolean(tlsAllowInsecureConnectionStr);
String tlsHostnameVerificationEnableStr =
(String) pulsarNBClientConf.getClientConfValue(PulsarActivityUtil.CLNT_CONF_KEY.tlsHostnameVerificationEnable.label);
boolean tlsHostnameVerificationEnable = BooleanUtils.toBoolean(tlsHostnameVerificationEnableStr);
if ( useTls ) {
adminBuilder
.enableTlsHostnameVerification(tlsHostnameVerificationEnable);
clientBuilder
.enableTlsHostnameVerification(tlsHostnameVerificationEnable);
if (!StringUtils.isBlank(tlsTrustCertsFilePath)) {
adminBuilder.tlsTrustCertsFilePath(tlsTrustCertsFilePath);
clientBuilder.tlsTrustCertsFilePath(tlsTrustCertsFilePath);
}
}
// Put this outside "if (useTls)" block for easier handling of "tlsAllowInsecureConnection"
adminBuilder.allowTlsInsecureConnection(tlsAllowInsecureConnection);
clientBuilder.allowTlsInsecureConnection(tlsAllowInsecureConnection);
pulsarAdmin = adminBuilder.build();
pulsarClient = clientBuilder.build();
////////////////
// Not supported in Pulsar 2.8.0
//
// ClientConfigurationData configurationData = pulsarAdmin.getClientConfigData();
// logger.debug(configurationData.toString());
} catch (PulsarClientException e) {
logger.error("Failed to create PulsarAdmin and/or PulsarClient object from the global configuration!");
throw new RuntimeException("Failed to create PulsarAdmin and/or PulsarClient object from global configuration!");
}
}
/**
* Get Pulsar schema from the definition string
*/
private void createPulsarSchemaFromConf() {
pulsarSchema = buldSchemaFromDefinition("schema.type", "schema.definition");
// this is to allow KEY_VALUE schema
if (pulsarNBClientConf.hasSchemaConfKey("schema.key.type")) {
Schema<?> pulsarKeySchema = buldSchemaFromDefinition("schema.key.type", "schema.key.definition");
Object encodingType = pulsarNBClientConf.getSchemaConfValue("schema.keyvalue.encodingtype");
KeyValueEncodingType keyValueEncodingType = KeyValueEncodingType.SEPARATED;
if (encodingType != null) {
keyValueEncodingType = KeyValueEncodingType.valueOf(encodingType.toString());
}
pulsarSchema = Schema.KeyValue(pulsarKeySchema, pulsarSchema, keyValueEncodingType);
}
}
private Schema<?> buldSchemaFromDefinition(String schemaTypeConfEntry,
String schemaDefinitionConfEntry) {
Object value = pulsarNBClientConf.getSchemaConfValue(schemaTypeConfEntry);
Object schemaDefinition = pulsarNBClientConf.getSchemaConfValue(schemaDefinitionConfEntry);
String schemaType = (value != null) ? value.toString() : "";
Schema<?> result;
if (PulsarActivityUtil.isAvroSchemaTypeStr(schemaType)) {
String schemaDefStr = (schemaDefinition != null) ? schemaDefinition.toString() : "";
result = PulsarActivityUtil.getAvroSchema(schemaType, schemaDefStr);
} else if (PulsarActivityUtil.isPrimitiveSchemaTypeStr(schemaType)) {
result = PulsarActivityUtil.getPrimitiveTypeSchema(schemaType);
} else if (PulsarActivityUtil.isAutoConsumeSchemaTypeStr(schemaType)) {
result = Schema.AUTO_CONSUME();
} else {
throw new RuntimeException("Unsupported schema type string: " + schemaType + "; " +
"Only primitive type, Avro type and AUTO_CONSUME are supported at the moment!");
}
return result;
}
public PulsarNBClientConf getPulsarConf() { return this.pulsarNBClientConf;}
public String getPulsarSvcUrl() { return this.pulsarSvcUrl;}
public String getWebSvcUrl() { return this.webSvcUrl; }
public PulsarAdmin getPulsarAdmin() { return this.pulsarAdmin; }
public PulsarClient getPulsarClient() { return this.pulsarClient; }
public Schema<?> getPulsarSchema() { return pulsarSchema; }
public Counter getBytesCounter() { return bytesCounter; }
public Histogram getMessageSizeHistogram() { return messageSizeHistogram; }
public Timer getBindTimer() { return bindTimer; }
public Timer getExecuteTimer() { return this.executeTimer; }
public Timer getCreateTransactionTimer() { return createTransactionTimer; }
public Timer getCommitTransactionTimer() { return commitTransactionTimer; }
public Histogram getPayloadRttHistogram() {return payloadRttHistogram;}
public Histogram getE2eMsgProcLatencyHistogram() { return e2eMsgProcLatencyHistogram; }
public Counter getMsgErrOutOfSeqCounter() { return msgErrOutOfSeqCounter; }
public Counter getMsgErrLossCounter() { return msgErrLossCounter; }
public Counter getMsgErrDuplicateCounter() { return msgErrDuplicateCounter; }
}

View File

@ -1,52 +0,0 @@
/*
* Copyright (c) 2022 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.nosqlbench.driver.pulsar;
import io.nosqlbench.engine.api.activityapi.core.Action;
import io.nosqlbench.engine.api.activityapi.core.ActionDispenser;
import io.nosqlbench.engine.api.activityapi.core.ActivityType;
import io.nosqlbench.api.engine.activityimpl.ActivityDef;
import io.nosqlbench.nb.annotations.Service;
@Service(value= ActivityType.class, selector="pulsar")
public class PulsarActivityType implements ActivityType<PulsarActivity> {
@Override
public ActionDispenser getActionDispenser(PulsarActivity activity) {
if (activity.getParams().getOptionalString("async").isPresent()) {
throw new RuntimeException("The async pulsar driver is not implemented yet.");
}
return new PulsarActionDispenser(activity);
}
@Override
public PulsarActivity getActivity(ActivityDef activityDef) {
return new PulsarActivity(activityDef);
}
private static class PulsarActionDispenser implements ActionDispenser {
private final PulsarActivity activity;
public PulsarActionDispenser(PulsarActivity activity) {
this.activity = activity;
}
@Override
public Action getAction(int slot) {
return new PulsarAction(activity, slot);
}
}
}

View File

@ -1,802 +0,0 @@
/*
* Copyright (c) 2022 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.nosqlbench.driver.pulsar;
import com.codahale.metrics.Gauge;
import com.codahale.metrics.Timer;
import io.nosqlbench.driver.pulsar.util.PulsarActivityUtil;
import io.nosqlbench.driver.pulsar.util.PulsarNBClientConf;
import io.nosqlbench.api.engine.activityimpl.ActivityDef;
import io.nosqlbench.api.engine.metrics.ActivityMetrics;
import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.pulsar.client.admin.Clusters;
import org.apache.pulsar.client.admin.PulsarAdmin;
import org.apache.pulsar.client.admin.PulsarAdminException;
import org.apache.pulsar.client.api.*;
import org.apache.pulsar.client.api.transaction.Transaction;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutionException;
import java.util.function.Function;
import java.util.function.Supplier;
import java.util.regex.Pattern;
import java.util.regex.PatternSyntaxException;
/**
* An instance of a pulsar client, along with all the cached objects which are normally
* associated with it during a client session in a typical application.
* A PulsarSpace is simply a named and cached set of objects which must be used together.
*/
public class PulsarSpace {
private final static Logger logger = LogManager.getLogger(PulsarSpace.class);
private final String spaceName;
private final ConcurrentHashMap<String, Producer<?>> producers = new ConcurrentHashMap<>();
private final ConcurrentHashMap<String, Consumer<?>> consumers = new ConcurrentHashMap<>();
private final ConcurrentHashMap<String, Reader<?>> readers = new ConcurrentHashMap<>();
private final PulsarActivity pulsarActivity;
private final ActivityDef activityDef;
private final PulsarNBClientConf pulsarNBClientConf;
private final String pulsarSvcUrl;
private final String webSvcUrl;
private final PulsarAdmin pulsarAdmin;
private final PulsarClient pulsarClient;
private final Schema<?> pulsarSchema;
private final Set<String> pulsarClusterMetadata = new HashSet<>();
private final Timer createTransactionTimer;
public PulsarSpace(String name, PulsarActivity pulsarActivity) {
this.spaceName = name;
this.pulsarActivity = pulsarActivity;
this.pulsarNBClientConf = pulsarActivity.getPulsarConf();
this.pulsarSvcUrl = pulsarActivity.getPulsarSvcUrl();
this.webSvcUrl = pulsarActivity.getWebSvcUrl();
this.pulsarAdmin = pulsarActivity.getPulsarAdmin();
this.pulsarClient = pulsarActivity.getPulsarClient();
this.pulsarSchema = pulsarActivity.getPulsarSchema();
this.activityDef = pulsarActivity.getActivityDef();
this.createTransactionTimer = pulsarActivity.getCreateTransactionTimer();
try {
Clusters clusters = pulsarAdmin.clusters();
List<String> stringList = clusters.getClusters();
CollectionUtils.addAll(pulsarClusterMetadata, stringList.listIterator());
} catch (PulsarAdminException e) {
// this is okay if you are connecting with a token that does not have access to the
// system configuration
logger.info("Could not get list of Pulsar Clusters from global configuration: " + e.getMessage());
}
}
public PulsarNBClientConf getPulsarClientConf() { return pulsarNBClientConf; }
public PulsarAdmin getPulsarAdmin() { return pulsarAdmin; }
public PulsarClient getPulsarClient() { return pulsarClient; }
public Schema<?> getPulsarSchema() { return pulsarSchema; }
public String getPulsarSvcUrl() { return pulsarSvcUrl;}
public String getWebSvcUrl() { return webSvcUrl; }
public Set<String> getPulsarClusterMetadata() { return pulsarClusterMetadata; }
// Properly shut down all Pulsar objects (producers, consumers, etc.) that are associated with this space
public void shutdownPulsarSpace() {
try {
for (Producer<?> producer : producers.values()) {
if (producer != null) producer.close();
}
for (Consumer<?> consumer : consumers.values()) {
if (consumer != null) consumer.close();
}
for (Reader<?> reader : readers.values()) {
if (reader != null) reader.close();
}
if (pulsarAdmin != null) pulsarAdmin.close();
if (pulsarClient != null) pulsarClient.close();
}
catch (Exception e) {
throw new RuntimeException("Unexpected error when closing Pulsar objects!");
}
}
/**
* Get a proper Pulsar API metrics prefix depending on the API type
*
* @param apiType - Pulsar API type: producer, consumer, reader, etc.
* @param apiObjName - actual name of a producer, a consumer, a reader, etc.
* @param topicName - topic name
* @return String
*/
private String getPulsarAPIMetricsPrefix(String apiType, String apiObjName, String topicName) {
String apiMetricsPrefix;
if (!PulsarActivityUtil.isValidPulsarApiType(apiType)) {
throw new RuntimeException(
"Incorrect Pulsar API type. Valid type list: " + PulsarActivityUtil.getValidPulsarApiTypeList());
}
if (!StringUtils.isBlank(apiObjName)) {
apiMetricsPrefix = apiObjName + "_";
}
else {
// we want a meaningful name for the API object (producer, consumer, reader, etc.)
// we are not appending the topic name
apiMetricsPrefix = apiType;
if (apiType.equalsIgnoreCase(PulsarActivityUtil.PULSAR_API_TYPE.PRODUCER.label))
apiMetricsPrefix += producers.size();
else if (apiType.equalsIgnoreCase(PulsarActivityUtil.PULSAR_API_TYPE.CONSUMER.label))
apiMetricsPrefix += consumers.size();
else if (apiType.equalsIgnoreCase(PulsarActivityUtil.PULSAR_API_TYPE.READER.label))
apiMetricsPrefix += readers.size();
apiMetricsPrefix += "_";
}
apiMetricsPrefix += topicName + "_";
apiMetricsPrefix = apiMetricsPrefix
// default name for tests/demos (in all Pulsar examples) is persistent://public/default/test -> use just the topic name test
.replace("persistent://public/default/", "")
// always remove topic type
.replace("non-persistent://", "")
.replace("persistent://", "")
// persistent://tenant/namespace/topicname -> tenant_namespace_topicname
.replace("/","_");
return apiMetricsPrefix;
}
//////////////////////////////////////
// Producer Processing --> start
//////////////////////////////////////
//
private static class ProducerGaugeImpl implements Gauge<Object> {
private final Producer<?> producer;
private final Function<ProducerStats, Object> valueExtractor;
ProducerGaugeImpl(Producer<?> producer, Function<ProducerStats, Object> valueExtractor) {
this.producer = producer;
this.valueExtractor = valueExtractor;
}
@Override
public Object getValue() {
// see Pulsar bug https://github.com/apache/pulsar/issues/10100
// we need to synchronize on producer otherwise we could receive corrupted data
synchronized(producer) {
return valueExtractor.apply(producer.getStats());
}
}
}
static Gauge<Object> producerSafeExtractMetric(Producer<?> producer, Function<ProducerStats, Object> valueExtractor) {
return new ProducerGaugeImpl(producer, valueExtractor);
}
// Producer name is NOT mandatory
// - It can be set at either global level or cycle level
// - If set at both levels, cycle level setting takes precedence
private String getEffectiveProducerName(String cycleProducerName) {
if (!StringUtils.isBlank(cycleProducerName)) {
return cycleProducerName;
}
String globalProducerName = pulsarNBClientConf.getProducerName();
if (!StringUtils.isBlank(globalProducerName)) {
return globalProducerName;
}
return "";
}
public Supplier<Transaction> getTransactionSupplier() {
PulsarClient pulsarClient = getPulsarClient();
return () -> {
try (Timer.Context time = createTransactionTimer.time() ){
return pulsarClient
.newTransaction()
.build()
.get();
} catch (ExecutionException | InterruptedException err) {
if (logger.isWarnEnabled()) {
logger.warn("Error while starting a new transaction", err);
}
throw new RuntimeException(err);
} catch (PulsarClientException err) {
throw new RuntimeException("Transactions are not enabled on Pulsar Client, " +
"please set client.enableTransaction=true in your Pulsar Client configuration");
}
};
}
// Topic name IS mandatory
// - It must be set at either global level or cycle level
// - If set at both levels, cycle level setting takes precedence
private String getEffectiveProducerTopicName(String cycleTopicName) {
if (!StringUtils.isBlank(cycleTopicName)) {
return cycleTopicName;
}
String globalTopicName = pulsarNBClientConf.getProducerTopicName();
if (!StringUtils.isBlank(globalTopicName)) {
return globalTopicName;
}
throw new RuntimeException("Producer topic name must be set at either global level or cycle level!");
}
public Producer<?> getProducer(String cycleTopicName, String cycleProducerName) {
String topicName = getEffectiveProducerTopicName(cycleTopicName);
String producerName = getEffectiveProducerName(cycleProducerName);
if (StringUtils.isBlank(topicName)) {
throw new RuntimeException("Producer:: must specify a topic name");
}
String producerCacheKey = PulsarActivityUtil.buildCacheKey(producerName, topicName);
Producer<?> producer = producers.get(producerCacheKey);
if (producer == null) {
PulsarClient pulsarClient = getPulsarClient();
// Get other possible producer settings that are set at global level
Map<String, Object> producerConf = pulsarNBClientConf.getProducerConfMap();
// Remove global level settings: "topicName" and "producerName"
producerConf.remove(PulsarActivityUtil.PRODUCER_CONF_STD_KEY.topicName.label);
producerConf.remove(PulsarActivityUtil.PRODUCER_CONF_STD_KEY.producerName.label);
String producerMetricsPrefix = getPulsarAPIMetricsPrefix(
PulsarActivityUtil.PULSAR_API_TYPE.PRODUCER.label,
producerName,
topicName);
try {
ProducerBuilder<?> producerBuilder = pulsarClient.
newProducer(pulsarSchema).
loadConf(producerConf).
topic(topicName);
if (!StringUtils.isAnyBlank(producerName)) {
producerBuilder = producerBuilder.producerName(producerName);
}
producer = producerBuilder.create();
producers.put(producerCacheKey, producer);
ActivityMetrics.gauge(activityDef,
producerMetricsPrefix + "total_bytes_sent",
producerSafeExtractMetric(producer, (s -> s.getTotalBytesSent() + s.getNumBytesSent())));
ActivityMetrics.gauge(activityDef,
producerMetricsPrefix + "total_msg_sent",
producerSafeExtractMetric(producer, (s -> s.getTotalMsgsSent() + s.getNumMsgsSent())));
ActivityMetrics.gauge(activityDef,
producerMetricsPrefix + "total_send_failed",
producerSafeExtractMetric(producer, (s -> s.getTotalSendFailed() + s.getNumSendFailed())));
ActivityMetrics.gauge(activityDef,
producerMetricsPrefix + "total_ack_received",
producerSafeExtractMetric(producer,(s -> s.getTotalAcksReceived() + s.getNumAcksReceived())));
ActivityMetrics.gauge(activityDef,
producerMetricsPrefix + "send_bytes_rate",
producerSafeExtractMetric(producer, ProducerStats::getSendBytesRate));
ActivityMetrics.gauge(activityDef,
producerMetricsPrefix + "send_msg_rate",
producerSafeExtractMetric(producer, ProducerStats::getSendMsgsRate));
}
catch (PulsarClientException ple) {
throw new RuntimeException("Unable to create a Pulsar producer!", ple);
}
}
return producer;
}
//
//////////////////////////////////////
// Producer Processing <-- end
//////////////////////////////////////
//////////////////////////////////////
// Consumer Processing --> start
//////////////////////////////////////
//
private static class ConsumerGaugeImpl implements Gauge<Object> {
private final Consumer<?> consumer;
private final Function<ConsumerStats, Object> valueExtractor;
ConsumerGaugeImpl(Consumer<?> consumer, Function<ConsumerStats, Object> valueExtractor) {
this.consumer = consumer;
this.valueExtractor = valueExtractor;
}
@Override
public Object getValue() {
// see Pulsar bug https://github.com/apache/pulsar/issues/10100
// - this is a bug report for producer stats.
// - assume this also applies to consumer stats.
synchronized(consumer) {
return valueExtractor.apply(consumer.getStats());
}
}
}
static Gauge<Object> consumerSafeExtractMetric(Consumer<?> consumer, Function<ConsumerStats, Object> valueExtractor) {
return new ConsumerGaugeImpl(consumer, valueExtractor);
}
private String getEffectiveSubscriptionName(String cycleSubscriptionName) {
if (!StringUtils.isBlank(cycleSubscriptionName)) {
return cycleSubscriptionName;
}
String globalSubscriptionName = pulsarNBClientConf.getConsumerSubscriptionName();
if (!StringUtils.isBlank(globalSubscriptionName)) {
return globalSubscriptionName;
}
throw new RuntimeException("Consumer::Subscription name must be set at either global level or cycle level!");
}
private String getEffectiveSubscriptionTypeStr(String cycleSubscriptionType) {
if (!StringUtils.isBlank(cycleSubscriptionType)) {
return cycleSubscriptionType;
}
String globalSubscriptionType = pulsarNBClientConf.getConsumerSubscriptionType();
if (!StringUtils.isBlank(globalSubscriptionType)) {
return globalSubscriptionType;
}
return "";
}
private SubscriptionType getEffectiveSubscriptionType(String cycleSubscriptionType) {
String effectiveSubscriptionStr = getEffectiveSubscriptionTypeStr(cycleSubscriptionType);
SubscriptionType subscriptionType = SubscriptionType.Exclusive;
if (!StringUtils.isBlank(effectiveSubscriptionStr)) {
if (!PulsarActivityUtil.isValidSubscriptionType(effectiveSubscriptionStr)) {
throw new RuntimeException("Consumer::Invalid subscription type (\"" +
effectiveSubscriptionStr + "\"). \nValid subscription types: " + PulsarActivityUtil.getValidSubscriptionTypeList());
} else {
subscriptionType = SubscriptionType.valueOf(effectiveSubscriptionStr);
}
}
return subscriptionType;
}
private String getEffectiveConsumerName(String cycleConsumerName) {
if (!StringUtils.isBlank(cycleConsumerName)) {
return cycleConsumerName;
}
String globalConsumerName = pulsarNBClientConf.getConsumerName();
if (!StringUtils.isBlank(globalConsumerName)) {
return globalConsumerName;
}
return "";
}
public Consumer<?> getConsumer(String cycleTopicName,
String cycleSubscriptionName,
String cycleSubscriptionType,
String cycleConsumerName,
String cycleKeySharedSubscriptionRanges) {
String subscriptionName = getEffectiveSubscriptionName(cycleSubscriptionName);
SubscriptionType subscriptionType = getEffectiveSubscriptionType(cycleSubscriptionType);
String consumerName = getEffectiveConsumerName(cycleConsumerName);
if (StringUtils.isAnyBlank(cycleTopicName, subscriptionName)) {
throw new RuntimeException("Consumer:: must specify a topic name and a subscription name");
}
String consumerCacheKey = PulsarActivityUtil.buildCacheKey(consumerName, subscriptionName, cycleTopicName);
Consumer<?> consumer = consumers.get(consumerCacheKey);
if (consumer == null) {
PulsarClient pulsarClient = getPulsarClient();
// Get other possible consumer settings that are set at global level
Map<String, Object> consumerConf = new HashMap<>(pulsarNBClientConf.getConsumerConfMap());
// Remove global level settings:
// - "topicNames", "topicsPattern", "subscriptionName", "subscriptionType", "consumerName"
consumerConf.remove(PulsarActivityUtil.CONSUMER_CONF_STD_KEY.topicNames.label);
consumerConf.remove(PulsarActivityUtil.CONSUMER_CONF_STD_KEY.topicsPattern.label);
consumerConf.remove(PulsarActivityUtil.CONSUMER_CONF_STD_KEY.subscriptionName.label);
consumerConf.remove(PulsarActivityUtil.CONSUMER_CONF_STD_KEY.subscriptionType.label);
consumerConf.remove(PulsarActivityUtil.CONSUMER_CONF_STD_KEY.consumerName.label);
// Remove non-standard consumer configuration properties
consumerConf.remove(PulsarActivityUtil.CONSUMER_CONF_CUSTOM_KEY.timeout.label);
try {
ConsumerBuilder<?> consumerBuilder = pulsarClient.
newConsumer(pulsarSchema).
loadConf(consumerConf).
topic(cycleTopicName).
subscriptionName(subscriptionName).
subscriptionType(subscriptionType);
if (subscriptionType == SubscriptionType.Key_Shared) {
KeySharedPolicy keySharedPolicy = KeySharedPolicy.autoSplitHashRange();
if (cycleKeySharedSubscriptionRanges != null && !cycleKeySharedSubscriptionRanges.isEmpty()) {
Range[] ranges = parseRanges(cycleKeySharedSubscriptionRanges);
logger.info("Configuring KeySharedPolicy#stickyHashRange with ranges {}", ranges);
keySharedPolicy = KeySharedPolicy.stickyHashRange().ranges(ranges);
}
consumerBuilder.keySharedPolicy(keySharedPolicy);
}
if (!StringUtils.isBlank(consumerName)) {
consumerBuilder = consumerBuilder.consumerName(consumerName);
}
consumer = consumerBuilder.subscribe();
String consumerMetricsPrefix = getPulsarAPIMetricsPrefix(
PulsarActivityUtil.PULSAR_API_TYPE.CONSUMER.label,
consumerName,
cycleTopicName);
ActivityMetrics.gauge(activityDef,
consumerMetricsPrefix + "total_bytes_recv",
consumerSafeExtractMetric(consumer, (s -> s.getTotalBytesReceived() + s.getNumBytesReceived())));
ActivityMetrics.gauge(activityDef,
consumerMetricsPrefix + "total_msg_recv",
consumerSafeExtractMetric(consumer, (s -> s.getTotalMsgsReceived() + s.getNumMsgsReceived())));
ActivityMetrics.gauge(activityDef,
consumerMetricsPrefix + "total_recv_failed",
consumerSafeExtractMetric(consumer, (s -> s.getTotalReceivedFailed() + s.getNumReceiveFailed())));
ActivityMetrics.gauge(activityDef,
consumerMetricsPrefix + "total_acks_sent",
consumerSafeExtractMetric(consumer,(s -> s.getTotalAcksSent() + s.getNumAcksSent())));
ActivityMetrics.gauge(activityDef,
consumerMetricsPrefix + "recv_bytes_rate",
consumerSafeExtractMetric(consumer, ConsumerStats::getRateBytesReceived));
ActivityMetrics.gauge(activityDef,
consumerMetricsPrefix + "recv_msg_rate",
consumerSafeExtractMetric(consumer, ConsumerStats::getRateMsgsReceived));
} catch (PulsarClientException ple) {
ple.printStackTrace();
throw new RuntimeException("Unable to create a Pulsar consumer!");
}
consumers.put(consumerCacheKey, consumer);
}
return consumer;
}
private static Range[] parseRanges(String ranges) {
if (ranges == null || ranges.isEmpty()) {
return new Range[0];
}
String[] split = ranges.split(",");
Range[] result = new Range[split.length];
for (int i = 0; i < split.length; i++) {
String range = split[i];
int pos = range.indexOf("..");
if (pos <= 0) {
throw new IllegalArgumentException("Invalid range '" + range + "'");
}
try {
int start = Integer.parseInt(range.substring(0, pos));
int end = Integer.parseInt(range.substring(pos + 2));
result[i] = Range.of(start, end);
} catch (NumberFormatException err) {
throw new IllegalArgumentException("Invalid range '" + range + "'");
}
}
return result;
}
//
//////////////////////////////////////
// Consumer Processing <-- end
//////////////////////////////////////
//////////////////////////////////////
// Multi-topic Consumer Processing --> start
//////////////////////////////////////
//
private String getEffectiveConsumerTopicNameListStr(String cycleTopicNames) {
if (!StringUtils.isBlank(cycleTopicNames)) {
return cycleTopicNames;
}
String globalTopicNames = pulsarNBClientConf.getConsumerTopicNames();
if (!StringUtils.isBlank(globalTopicNames)) {
return globalTopicNames;
}
return "";
}
private List<String> getEffectiveConsumerTopicNameList(String cycleTopicNames) {
String effectiveTopicNamesStr = getEffectiveConsumerTopicNameListStr(cycleTopicNames);
String[] names = effectiveTopicNamesStr.split("[;,]");
ArrayList<String> effectiveTopicNameList = new ArrayList<>();
for (String name : names) {
if (!StringUtils.isBlank(name))
effectiveTopicNameList.add(name.trim());
}
return effectiveTopicNameList;
}
private String getEffectiveConsumerTopicPatternStr(String cycleTopicsPattern) {
if (!StringUtils.isBlank(cycleTopicsPattern)) {
return cycleTopicsPattern;
}
String globalTopicsPattern = pulsarNBClientConf.getConsumerTopicPattern();
if (!StringUtils.isBlank(globalTopicsPattern)) {
return globalTopicsPattern;
}
return "";
}
private Pattern getEffectiveConsumerTopicPattern(String cycleTopicsPattern) {
String effectiveTopicsPatternStr = getEffectiveConsumerTopicPatternStr(cycleTopicsPattern);
Pattern topicsPattern;
try {
if (!StringUtils.isBlank(effectiveTopicsPatternStr))
topicsPattern = Pattern.compile(effectiveTopicsPatternStr);
else
topicsPattern = null;
} catch (PatternSyntaxException pse) {
topicsPattern = null;
}
return topicsPattern;
}
public Consumer<?> getMultiTopicConsumer(
String cycleTopicUri,
String cycleTopicNameList,
String cycleTopicsPattern,
String cycleSubscriptionName,
String cycleSubscriptionType,
String cycleConsumerName) {
List<String> topicNameList = getEffectiveConsumerTopicNameList(cycleTopicNameList);
String topicsPatternStr = getEffectiveConsumerTopicPatternStr(cycleTopicsPattern);
Pattern topicsPattern = getEffectiveConsumerTopicPattern(cycleTopicsPattern);
String subscriptionName = getEffectiveSubscriptionName(cycleSubscriptionName);
SubscriptionType subscriptionType = getEffectiveSubscriptionType(cycleSubscriptionType);
String consumerName = getEffectiveConsumerName(cycleConsumerName);
if ( subscriptionType.equals(SubscriptionType.Exclusive) && (activityDef.getThreads() > 1) ) {
throw new RuntimeException("Consumer:: trying to create multiple consumers of " +
"\"Exclusive\" subscription type under the same subscription name to the same topic!");
}
if (StringUtils.isBlank(cycleTopicUri) && topicNameList.isEmpty() && (topicsPattern == null)) {
throw new RuntimeException("Consumer:: \"topic_uri\", \"topic_names\" and \"topics_pattern\" parameters can't be all empty/invalid!");
}
// precedence sequence:
// topic_names (consumer statement param) >
// topics_pattern (consumer statement param) >
// topic_uri (document level param)
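// e.g. if topic_names="tp1,tp2" is set, topics_pattern and topic_uri are ignored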
String consumerTopicListString;
if (!topicNameList.isEmpty()) {
consumerTopicListString = String.join("|", topicNameList);
} else if (topicsPattern != null) {
consumerTopicListString = topicsPatternStr;
} else {
consumerTopicListString = cycleTopicUri;
}
String consumerCacheKey = PulsarActivityUtil.buildCacheKey(
consumerName,
subscriptionName,
consumerTopicListString);
Consumer<?> consumer = consumers.get(consumerCacheKey);
if (consumer == null) {
PulsarClient pulsarClient = getPulsarClient();
// Get other possible consumer settings that are set at the global level
Map<String, Object> consumerConf = new HashMap<>(pulsarNBClientConf.getConsumerConfMap());
// Remove global level settings:
// - "topicNameList", "topicsPattern", "subscriptionName", "subscriptionType", "consumerName"
consumerConf.remove(PulsarActivityUtil.CONSUMER_CONF_STD_KEY.topicNames.label);
consumerConf.remove(PulsarActivityUtil.CONSUMER_CONF_STD_KEY.topicsPattern.label);
consumerConf.remove(PulsarActivityUtil.CONSUMER_CONF_STD_KEY.subscriptionName.label);
consumerConf.remove(PulsarActivityUtil.CONSUMER_CONF_STD_KEY.subscriptionType.label);
consumerConf.remove(PulsarActivityUtil.CONSUMER_CONF_STD_KEY.consumerName.label);
// Remove non-standard consumer configuration properties
consumerConf.remove(PulsarActivityUtil.CONSUMER_CONF_CUSTOM_KEY.timeout.label);
try {
ConsumerBuilder<?> consumerBuilder = pulsarClient.newConsumer(pulsarSchema).
loadConf(consumerConf).
subscriptionName(subscriptionName).
subscriptionType(subscriptionType).
consumerName(consumerName);
if (!topicNameList.isEmpty()) {
consumerBuilder = consumerBuilder.topics(topicNameList);
} else if (topicsPattern != null) {
consumerBuilder = consumerBuilder.topicsPattern(topicsPattern);
} else {
consumerBuilder = consumerBuilder.topic(cycleTopicUri);
}
consumer = consumerBuilder.subscribe();
String consumerMetricsPrefix = getPulsarAPIMetricsPrefix(
PulsarActivityUtil.PULSAR_API_TYPE.CONSUMER.label,
consumerName,
consumerTopicListString);
ActivityMetrics.gauge(activityDef,
consumerMetricsPrefix + "totalBytesRecvd",
consumerSafeExtractMetric(consumer, (s -> s.getTotalBytesReceived() + s.getNumBytesReceived())));
ActivityMetrics.gauge(activityDef,
consumerMetricsPrefix + "totalMsgsRecvd",
consumerSafeExtractMetric(consumer, (s -> s.getTotalMsgsReceived() + s.getNumMsgsReceived())));
ActivityMetrics.gauge(activityDef,
consumerMetricsPrefix + "totalRecvdFailed",
consumerSafeExtractMetric(consumer, (s -> s.getTotalReceivedFailed() + s.getNumReceiveFailed())));
ActivityMetrics.gauge(activityDef,
consumerMetricsPrefix + "totalAcksSent",
consumerSafeExtractMetric(consumer,(s -> s.getTotalAcksSent() + s.getNumAcksSent())));
ActivityMetrics.gauge(activityDef,
consumerMetricsPrefix + "recvdBytesRate",
consumerSafeExtractMetric(consumer, ConsumerStats::getRateBytesReceived));
ActivityMetrics.gauge(activityDef,
consumerMetricsPrefix + "recvdMsgsRate",
consumerSafeExtractMetric(consumer, ConsumerStats::getRateMsgsReceived));
} catch (PulsarClientException ple) {
throw new RuntimeException("Unable to create a Pulsar consumer!", ple);
}
consumers.put(consumerCacheKey, consumer);
}
return consumer;
}
//
//////////////////////////////////////
// Multi-topic Consumer Processing <-- end
//////////////////////////////////////
//////////////////////////////////////
// Reader Processing --> Start
//////////////////////////////////////
private String getEffectiveReaderTopicName(String cycleReaderTopicName) {
if (!StringUtils.isBlank(cycleReaderTopicName)) {
return cycleReaderTopicName;
}
String globalReaderTopicName = pulsarNBClientConf.getReaderTopicName();
if (!StringUtils.isBlank(globalReaderTopicName)) {
return globalReaderTopicName;
}
throw new RuntimeException("Reader:: Reader topic name must be set at either global level or cycle level!");
}
private String getEffectiveReaderName(String cycleReaderName) {
if (!StringUtils.isBlank(cycleReaderName)) {
return cycleReaderName;
}
String globalReaderName = pulsarNBClientConf.getConsumerName();
if (!StringUtils.isBlank(globalReaderName)) {
return globalReaderName;
}
return "";
}
private String getEffectiveStartMsgPosStr(String cycleStartMsgPosStr) {
if (!StringUtils.isBlank(cycleStartMsgPosStr)) {
return cycleStartMsgPosStr;
}
String globalStartMsgPosStr = pulsarNBClientConf.getStartMsgPosStr();
if (!StringUtils.isBlank(globalStartMsgPosStr)) {
return globalStartMsgPosStr;
}
return PulsarActivityUtil.READER_MSG_POSITION_TYPE.latest.label;
}
public Reader<?> getReader(String cycleTopicName,
String cycleReaderName,
String cycleStartMsgPos) {
String topicName = getEffectiveReaderTopicName(cycleTopicName);
String readerName = getEffectiveReaderName(cycleReaderName);
String startMsgPosStr = getEffectiveStartMsgPosStr(cycleStartMsgPos);
if (!PulsarActivityUtil.isValideReaderStartPosition(startMsgPosStr)) {
throw new RuntimeException("Reader:: Invalid value for reader start message position!");
}
String readerCacheKey = PulsarActivityUtil.buildCacheKey(topicName, readerName, startMsgPosStr);
Reader<?> reader = readers.get(readerCacheKey);
if (reader == null) {
PulsarClient pulsarClient = getPulsarClient();
Map<String, Object> readerConf = pulsarNBClientConf.getReaderConfMap();
// Remove global level settings: "topicName" and "readerName"
readerConf.remove(PulsarActivityUtil.READER_CONF_STD_KEY.topicName.label);
readerConf.remove(PulsarActivityUtil.READER_CONF_STD_KEY.readerName.label);
// Remove non-standard reader configuration properties
readerConf.remove(PulsarActivityUtil.READER_CONF_CUSTOM_KEY.startMessagePos.label);
try {
ReaderBuilder<?> readerBuilder = pulsarClient.
newReader(pulsarSchema).
loadConf(readerConf).
topic(topicName).
readerName(readerName);
MessageId startMsgId = MessageId.latest;
if (startMsgPosStr.equalsIgnoreCase(PulsarActivityUtil.READER_MSG_POSITION_TYPE.earliest.label)) {
startMsgId = MessageId.earliest;
}
//TODO: custom start message position is NOT supported yet
//else if (startMsgPosStr.startsWith(PulsarActivityUtil.READER_MSG_POSITION_TYPE.custom.label)) {
// startMsgId = MessageId.latest;
//}
reader = readerBuilder.startMessageId(startMsgId).create();
} catch (PulsarClientException ple) {
throw new RuntimeException("Unable to create a Pulsar reader!", ple);
}
readers.put(readerCacheKey, reader);
}
return reader;
}
//////////////////////////////////////
// Reader Processing <-- end
//////////////////////////////////////
}

View File

@ -1,52 +0,0 @@
/*
* Copyright (c) 2022 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.nosqlbench.driver.pulsar;
import java.util.concurrent.ConcurrentHashMap;
/**
* To enable flexibility in testing methods, each object graph used within
* the Pulsar API is kept under a single umbrella called the PulsarSpace.
* This allows clients, producers, and consumers to remain connected and
* cached in a useful way.
*/
public class PulsarSpaceCache {
// TODO: Implement cache limits
// TODO: Implement variant cache eviction behaviors (halt, warn, LRU)
private final PulsarActivity activity;
private final ConcurrentHashMap<String, PulsarSpace> clientScopes = new ConcurrentHashMap<>();
public PulsarSpaceCache(PulsarActivity pulsarActivity) {
this.activity = pulsarActivity;
}
public Iterable<PulsarSpace> getAssociatedPulsarSpace() {
return clientScopes.values();
}
public PulsarActivity getAssociatedPulsarActivity() {
return activity;
}
public PulsarSpace getPulsarSpace(String name) {
return clientScopes.computeIfAbsent(name, spaceName -> new PulsarSpace(spaceName, activity));
}
public PulsarActivity getActivity() { return activity; }
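// A minimal usage sketch (hypothetical caller and client name): spaces are
// created lazily, and the same name always resolves to the cached instance.
static PulsarSpace demoGetOrCreate(PulsarSpaceCache cache) {
    return cache.getPulsarSpace("default");
}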
}

View File

@ -1,25 +0,0 @@
/*
* Copyright (c) 2022 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.nosqlbench.driver.pulsar.exception;
public class PulsarDriverUnexpectedException extends RuntimeException {
public PulsarDriverUnexpectedException(String message) {
super(message);
}
public PulsarDriverUnexpectedException(Exception e) { super(e); }
}

View File

@ -1,23 +0,0 @@
/*
* Copyright (c) 2022 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.nosqlbench.driver.pulsar.exception;
public class PulsarDriverUnsupportedOpException extends RuntimeException {
public PulsarDriverUnsupportedOpException() { super("Unsupported Pulsar driver operation type"); }
}

View File

@ -1,24 +0,0 @@
/*
* Copyright (c) 2022 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.nosqlbench.driver.pulsar.ops;
public enum EndToEndStartingTimeSource {
NONE, // no end-to-end latency calculation
MESSAGE_PUBLISH_TIME, // use message publish timestamp
MESSAGE_EVENT_TIME, // use message event timestamp
MESSAGE_PROPERTY_E2E_STARTING_TIME // use message property called "e2e_starting_time" as the timestamp
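;
// A minimal sketch (hypothetical helper, assuming the standard Pulsar Message
// API) of how each source resolves to a starting timestamp in milliseconds:
static long startingTimeMillis(EndToEndStartingTimeSource src, org.apache.pulsar.client.api.Message<?> msg) {
    switch (src) {
        case MESSAGE_PUBLISH_TIME: return msg.getPublishTime();
        case MESSAGE_EVENT_TIME: return msg.getEventTime();
        case MESSAGE_PROPERTY_E2E_STARTING_TIME: return Long.parseLong(msg.getProperty("e2e_starting_time"));
        default: return 0L; // NONE: no end-to-end latency calculation
    }
}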
}

View File

@ -1,103 +0,0 @@
/*
* Copyright (c) 2022 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.nosqlbench.driver.pulsar.ops;
import io.nosqlbench.driver.pulsar.util.PulsarActivityUtil;
import java.util.*;
import org.apache.commons.lang3.RandomUtils;
/**
* Handles adding a monotonic sequence number to message properties of sent messages
*/
class MessageSequenceNumberSendingHandler {
static final int SIMULATED_ERROR_PROBABILITY_PERCENTAGE = 10;
long number = 1;
Queue<Long> outOfOrderNumbers;
public long getNextSequenceNumber(Set<PulsarActivityUtil.SEQ_ERROR_SIMU_TYPE> simulatedErrorTypes) {
return getNextSequenceNumber(simulatedErrorTypes, SIMULATED_ERROR_PROBABILITY_PERCENTAGE);
}
long getNextSequenceNumber(Set<PulsarActivityUtil.SEQ_ERROR_SIMU_TYPE> simulatedErrorTypes, int errorProbabilityPercentage) {
simulateError(simulatedErrorTypes, errorProbabilityPercentage);
return nextNumber();
}
private void simulateError(Set<PulsarActivityUtil.SEQ_ERROR_SIMU_TYPE> simulatedErrorTypes, int errorProbabilityPercentage) {
if (!simulatedErrorTypes.isEmpty() && shouldSimulateError(errorProbabilityPercentage)) {
int selectIndex = 0;
int numberOfErrorTypes = simulatedErrorTypes.size();
if (numberOfErrorTypes > 1) {
// pick one of the simulated error type randomly
selectIndex = RandomUtils.nextInt(0, numberOfErrorTypes);
}
PulsarActivityUtil.SEQ_ERROR_SIMU_TYPE errorType = simulatedErrorTypes.stream()
.skip(selectIndex)
.findFirst()
.get();
switch (errorType) {
case OutOfOrder:
// simulate message out of order
injectMessagesOutOfOrder();
break;
case MsgDup:
// simulate message duplication
injectMessageDuplication();
break;
case MsgLoss:
// simulate message loss
injectMessageLoss();
break;
}
}
}
private boolean shouldSimulateError(int errorProbabilityPercentage) {
// Simulate error with the specified probability
return RandomUtils.nextInt(0, 100) < errorProbabilityPercentage;
}
long nextNumber() {
if (outOfOrderNumbers != null) {
long nextNumber = outOfOrderNumbers.poll();
if (outOfOrderNumbers.isEmpty()) {
outOfOrderNumbers = null;
}
return nextNumber;
}
return number++;
}
void injectMessagesOutOfOrder() {
if (outOfOrderNumbers == null) {
outOfOrderNumbers = new ArrayDeque<>(Arrays.asList(number + 2, number, number + 1));
number += 3;
}
}
void injectMessageDuplication() {
if (outOfOrderNumbers == null) {
number--;
}
}
void injectMessageLoss() {
if (outOfOrderNumbers == null) {
number++;
}
}
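// A minimal sketch (hypothetical caller): with an empty error-type set the
// handler produces a strictly monotonic sequence 1, 2, 3, ...
static void demoMonotonicSequence() {
    MessageSequenceNumberSendingHandler handler = new MessageSequenceNumberSendingHandler();
    long first = handler.getNextSequenceNumber(Collections.emptySet());
    long second = handler.getNextSequenceNumber(Collections.emptySet());
    assert first == 1 && second == 2;
}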
}

View File

@ -1,46 +0,0 @@
/*
* Copyright (c) 2022 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.nosqlbench.driver.pulsar.ops;
import io.nosqlbench.driver.pulsar.PulsarActivity;
import io.nosqlbench.driver.pulsar.PulsarSpace;
import io.nosqlbench.engine.api.templating.CommandTemplate;
import java.util.function.LongFunction;
/**
* This maps a set of specifier functions to a pulsar operation. The result contains
* enough state to define a pulsar operation such that it can be executed, measured, and possibly
* retried if needed.
*
* This function doesn't act *as* the operation. It merely maps the construction logic into
* a simple functional type, given the component functions.
*
* For additional parameterization, the command template is also provided.
*/
public abstract class PulsarAdminMapper extends PulsarOpMapper {
protected final LongFunction<Boolean> adminDelOpFunc;
protected PulsarAdminMapper(CommandTemplate cmdTpl,
PulsarSpace clientSpace,
PulsarActivity pulsarActivity,
LongFunction<Boolean> asyncApiFunc,
LongFunction<Boolean> adminDelOpFunc) {
super(cmdTpl, clientSpace, pulsarActivity, asyncApiFunc);
this.adminDelOpFunc = adminDelOpFunc;
}
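// A note on the shared contract (per-cycle semantics, as used by subclasses):
// adminDelOpFunc.apply(cycle) == true selects the delete variant of the admin
// operation for that cycle; false selects the create variant.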
}

View File

@ -1,62 +0,0 @@
/*
* Copyright (c) 2022 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.nosqlbench.driver.pulsar.ops;
import io.nosqlbench.driver.pulsar.PulsarActivity;
import io.nosqlbench.driver.pulsar.PulsarSpace;
import io.nosqlbench.engine.api.templating.CommandTemplate;
import java.util.Set;
import java.util.function.LongFunction;
/**
* This maps a set of specifier functions to a pulsar operation. The pulsar operation contains
* enough state to define a pulsar operation such that it can be executed, measured, and possibly
* retried if needed.
*
* This function doesn't act *as* the operation. It merely maps the construction logic into
* a simple functional type, given the component functions.
*
* For additional parameterization, the command template is also provided.
*/
public class PulsarAdminNamespaceMapper extends PulsarAdminMapper {
private final LongFunction<String> namespaceFunc;
public PulsarAdminNamespaceMapper(CommandTemplate cmdTpl,
PulsarSpace clientSpace,
PulsarActivity pulsarActivity,
LongFunction<Boolean> asyncApiFunc,
LongFunction<Boolean> adminDelOpFunc,
LongFunction<String> namespaceFunc)
{
super(cmdTpl, clientSpace, pulsarActivity, asyncApiFunc, adminDelOpFunc);
this.namespaceFunc = namespaceFunc;
}
@Override
public PulsarOp apply(long value) {
boolean asyncApi = asyncApiFunc.apply(value);
boolean adminDelOp = adminDelOpFunc.apply(value);
String namespace = namespaceFunc.apply(value);
return new PulsarAdminNamespaceOp(
clientSpace,
asyncApi,
adminDelOp,
namespace);
}
}

View File

@ -1,101 +0,0 @@
/*
* Copyright (c) 2022 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.nosqlbench.driver.pulsar.ops;
import io.nosqlbench.driver.pulsar.PulsarSpace;
import org.apache.commons.lang3.StringUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.pulsar.client.admin.Namespaces;
import org.apache.pulsar.client.admin.PulsarAdmin;
import org.apache.pulsar.client.admin.PulsarAdminException;
import java.util.concurrent.CompletableFuture;
public class PulsarAdminNamespaceOp extends PulsarAdminOp {
private final static Logger logger = LogManager.getLogger(PulsarAdminNamespaceOp.class);
private final String fullNsName;
public PulsarAdminNamespaceOp(PulsarSpace clientSpace,
boolean asyncApi,
boolean adminDelOp,
String fullNsName)
{
super(clientSpace, asyncApi, adminDelOp);
this.fullNsName = fullNsName;
}
@Override
public void run() {
// Do nothing if the namespace name is empty
if ( StringUtils.isBlank(fullNsName) ) return;
PulsarAdmin pulsarAdmin = clientSpace.getPulsarAdmin();
Namespaces namespaces = pulsarAdmin.namespaces();
// Admin API - create tenants and namespaces
if (!adminDelOp) {
try {
if (!asyncApi) {
namespaces.createNamespace(fullNsName);
logger.trace("Successfully created namespace \"" + fullNsName + "\" synchronously!");
} else {
CompletableFuture<Void> future = namespaces.createNamespaceAsync(fullNsName);
future.whenComplete((unused, throwable) ->
logger.trace("Successfully created namespace \"" + fullNsName + "\" asynchronously!"))
.exceptionally(ex -> {
logger.error("Failed to create namespace \"" + fullNsName + "\" asynchronously!:" + ex.getMessage());
return null;
});
}
}
catch (PulsarAdminException.ConflictException ce) {
// do nothing if the namespace already exists
}
catch (PulsarAdminException e) {
throw new RuntimeException("Unexpected error when creating pulsar namespace: " + fullNsName, e);
}
}
// Admin API - delete tenants and namespaces
else {
try {
if (!asyncApi) {
namespaces.deleteNamespace(fullNsName, true);
logger.trace("Successfully deleted namespace \"" + fullNsName + "\" synchronously!");
} else {
CompletableFuture<Void> future = namespaces.deleteNamespaceAsync(fullNsName, true);
future.whenComplete((unused, throwable) ->
logger.trace("Successfully deleted namespace \"" + fullNsName + "\" asynchronously!"))
.exceptionally(ex -> {
logger.error("Failed to delete namespace \"" + fullNsName + "\" asynchronously!");
return null;
});
}
}
catch (PulsarAdminException.NotFoundException nfe) {
// do nothing if the namespace doesn't exist
}
catch (PulsarAdminException e) {
throw new RuntimeException("Unexpected error when deleting pulsar namespace: " + fullNsName, e);
}
}
}
}

View File

@ -1,39 +0,0 @@
/*
* Copyright (c) 2022 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.nosqlbench.driver.pulsar.ops;
import io.nosqlbench.driver.pulsar.PulsarSpace;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
public abstract class PulsarAdminOp extends SyncPulsarOp {
private final static Logger logger = LogManager.getLogger(PulsarAdminOp.class);
protected final PulsarSpace clientSpace;
protected final boolean asyncApi;
protected final boolean adminDelOp;
protected PulsarAdminOp(PulsarSpace clientSpace,
boolean asyncApi,
boolean adminDelOp)
{
this.clientSpace = clientSpace;
this.asyncApi = asyncApi;
this.adminDelOp = adminDelOp;
}
}

View File

@ -1,72 +0,0 @@
/*
* Copyright (c) 2022 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.nosqlbench.driver.pulsar.ops;
import io.nosqlbench.driver.pulsar.PulsarActivity;
import io.nosqlbench.driver.pulsar.PulsarSpace;
import io.nosqlbench.engine.api.templating.CommandTemplate;
import java.util.Set;
import java.util.function.LongFunction;
/**
* This maps a set of specifier functions to a pulsar operation. The pulsar operation contains
* enough state to define a pulsar operation such that it can be executed, measured, and possibly
* retried if needed.
*
* This function doesn't act *as* the operation. It merely maps the construction logic into
* a simple functional type, given the component functions.
*
* For additional parameterization, the command template is also provided.
*/
public class PulsarAdminTenantMapper extends PulsarAdminMapper {
private final LongFunction<Set<String>> adminRolesFunc;
private final LongFunction<Set<String>> allowedClustersFunc;
private final LongFunction<String> tenantFunc;
public PulsarAdminTenantMapper(CommandTemplate cmdTpl,
PulsarSpace clientSpace,
PulsarActivity pulsarActivity,
LongFunction<Boolean> asyncApiFunc,
LongFunction<Boolean> adminDelOpFunc,
LongFunction<Set<String>> adminRolesFunc,
LongFunction<Set<String>> allowedClustersFunc,
LongFunction<String> tenantFunc)
{
super(cmdTpl, clientSpace, pulsarActivity, asyncApiFunc, adminDelOpFunc);
this.adminRolesFunc = adminRolesFunc;
this.allowedClustersFunc = allowedClustersFunc;
this.tenantFunc = tenantFunc;
}
@Override
public PulsarOp apply(long value) {
boolean asyncApi = asyncApiFunc.apply(value);
boolean adminDelOp = adminDelOpFunc.apply(value);
Set<String> adminRoleSet = adminRolesFunc.apply(value);
Set<String> allowedClusterSet = allowedClustersFunc.apply(value);
String tenant = tenantFunc.apply(value);
return new PulsarAdminTenantOp(
clientSpace,
asyncApi,
adminDelOp,
adminRoleSet,
allowedClusterSet,
tenant);
}
}

View File

@ -1,128 +0,0 @@
/*
* Copyright (c) 2022 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.nosqlbench.driver.pulsar.ops;
import io.nosqlbench.driver.pulsar.PulsarSpace;
import org.apache.commons.lang3.StringUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.pulsar.client.admin.*;
import org.apache.pulsar.common.policies.data.TenantInfo;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
public class PulsarAdminTenantOp extends PulsarAdminOp {
private final static Logger logger = LogManager.getLogger(PulsarAdminTenantOp.class);
private final Set<String> adminRoleSet;
private final Set<String> allowedClusterSet;
private final String tenant;
public PulsarAdminTenantOp(PulsarSpace clientSpace,
boolean asyncApi,
boolean adminDelOp,
Set<String> adminRoleSet,
Set<String> allowedClusterSet,
String tenant)
{
super(clientSpace, asyncApi, adminDelOp);
this.adminRoleSet = adminRoleSet;
this.allowedClusterSet = allowedClusterSet;
this.tenant = tenant;
}
@Override
public void run() {
// Do nothing if the tenant name is empty
if ( StringUtils.isBlank(tenant) ) return;
PulsarAdmin pulsarAdmin = clientSpace.getPulsarAdmin();
Tenants tenants = pulsarAdmin.tenants();
Namespaces namespaces = pulsarAdmin.namespaces();
// Admin API - create tenants and namespaces
if (!adminDelOp) {
TenantInfo tenantInfo = TenantInfo.builder()
.adminRoles(adminRoleSet)
.allowedClusters(!allowedClusterSet.isEmpty() ? allowedClusterSet : clientSpace.getPulsarClusterMetadata())
.build();
try {
if (!asyncApi) {
tenants.createTenant(tenant, tenantInfo);
if (logger.isDebugEnabled()) {
logger.debug("Successful sync creation of tenant {}", tenant);
}
} else {
CompletableFuture<Void> future = tenants.createTenantAsync(tenant, tenantInfo);
future.whenComplete((unused, throwable) -> {
if (logger.isDebugEnabled()) {
logger.debug("Successful async creation of tenant {}", tenant);
}
}).exceptionally(ex -> {
logger.error("Failed async creation of tenant {}", tenant);
return null;
});
}
}
catch (PulsarAdminException.ConflictException ce) {
// do nothing if the tenant already exists
}
catch (PulsarAdminException e) {
throw new RuntimeException("Unexpected error when creating pulsar tenant: " + tenant, e);
}
}
// Admin API - delete tenants and namespaces
else {
try {
int nsNum = namespaces.getNamespaces(tenant).size();
// Only delete a tenant when there is no underlying namespaces
if ( nsNum == 0 ) {
if (!asyncApi) {
tenants.deleteTenant(tenant);
if (logger.isDebugEnabled()) {
logger.debug("Successful sync deletion of tenant {}", tenant);
}
} else {
CompletableFuture<Void> future = tenants.deleteTenantAsync(tenant);
future.whenComplete((unused, throwable) -> {
if (logger.isDebugEnabled()) {
logger.debug("Successful async deletion of tenant {}", tenant);
}
}).exceptionally(ex -> {
if (logger.isDebugEnabled()) {
logger.error("Failed async deletion of tenant {}", tenant);
}
return null;
});
}
}
}
catch (PulsarAdminException.NotFoundException nfe) {
// do nothing if the tenant doesn't exist
}
catch (PulsarAdminException e) {
throw new RuntimeException("Unexpected error when deleting pulsar tenant: " + tenant, e);
}
}
}
}

View File

@ -1,91 +0,0 @@
/*
* Copyright (c) 2022 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.nosqlbench.driver.pulsar.ops;
import io.nosqlbench.driver.pulsar.PulsarActivity;
import io.nosqlbench.driver.pulsar.PulsarSpace;
import io.nosqlbench.engine.api.templating.CommandTemplate;
import org.apache.commons.lang3.BooleanUtils;
import org.apache.commons.lang3.StringUtils;
import java.util.function.LongFunction;
/**
* This maps a set of specifier functions to a pulsar operation. The pulsar operation contains
* enough state to define a pulsar operation such that it can be executed, measured, and possibly
* retried if needed.
*
* This function doesn't act *as* the operation. It merely maps the construction logic into
* a simple functional type, given the component functions.
*
* For additional parameterization, the command template is also provided.
*/
public class PulsarAdminTopicMapper extends PulsarAdminMapper {
private final LongFunction<String> topicUriFunc;
private final LongFunction<String> enablePartionFunc;
private final LongFunction<String> partitionNumFunc;
public PulsarAdminTopicMapper(CommandTemplate cmdTpl,
PulsarSpace clientSpace,
PulsarActivity pulsarActivity,
LongFunction<Boolean> asyncApiFunc,
LongFunction<Boolean> adminDelOpFunc,
LongFunction<String> topicUriFunc,
LongFunction<String> enablePartionFunc,
LongFunction<String> partitionNumFunc)
{
super(cmdTpl, clientSpace, pulsarActivity, asyncApiFunc, adminDelOpFunc);
this.topicUriFunc = topicUriFunc;
this.enablePartitionFunc = enablePartitionFunc;
this.partitionNumFunc = partitionNumFunc;
}
@Override
public PulsarOp apply(long value) {
String topicUri = topicUriFunc.apply(value);
String enablePartitionStr = enablePartitionFunc.apply(value);
String partitionNumStr = partitionNumFunc.apply(value);
boolean asyncApi = asyncApiFunc.apply(value);
boolean adminDelOp = adminDelOpFunc.apply(value);
if ( StringUtils.isBlank(topicUri) ) {
throw new RuntimeException("\"topic_uri\" parameter can't be empty when creating a Pulsar topic!");
}
boolean partitionTopic = BooleanUtils.toBoolean(enablePartitionStr);
boolean invalidPartStr;
int partitionNum = 0;
if ( StringUtils.isBlank(partitionNumStr) || !StringUtils.isNumeric(partitionNumStr) ) {
invalidPartStr = true;
} else {
partitionNum = Integer.parseInt(partitionNumStr);
invalidPartStr = (partitionNum <= 0);
}
if (partitionTopic && invalidPartStr) {
throw new RuntimeException("Invalid specified value for \"partition_num\" parameter when creating partitioned topic!");
}
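// e.g. enable_partition="true" with partition_num="4" yields a 4-partition topic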
return new PulsarAdminTopicOp(
clientSpace,
topicUri,
partitionTopic,
partitionNum,
asyncApi,
adminDelOp);
}
}

View File

@ -1,159 +0,0 @@
/*
* Copyright (c) 2022 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.nosqlbench.driver.pulsar.ops;
import io.nosqlbench.driver.pulsar.PulsarSpace;
import io.nosqlbench.driver.pulsar.util.PulsarActivityUtil;
import org.apache.commons.lang3.StringUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.pulsar.client.admin.PulsarAdmin;
import org.apache.pulsar.client.admin.PulsarAdminException;
import org.apache.pulsar.client.admin.Topics;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CompletableFuture;
public class PulsarAdminTopicOp extends PulsarAdminOp {
private final static Logger logger = LogManager.getLogger(PulsarAdminTopicOp.class);
private final String topicUri;
private final boolean partitionTopic;
private final int partitionNum;
private final String fullNsName;
public PulsarAdminTopicOp(PulsarSpace clientSpace,
String topicUri,
boolean partitionTopic,
int partitionNum,
boolean asyncApi,
boolean adminDelOp)
{
super(clientSpace, asyncApi, adminDelOp);
this.topicUri = topicUri;
this.partitionTopic = partitionTopic;
this.partitionNum = partitionNum;
this.fullNsName = PulsarActivityUtil.getFullNamespaceName(this.topicUri);
}
// Check whether the specified topic already exists
private boolean checkTopicExistence(Topics topics, String topicUri) {
// Check the existence of the topic
List<String> topicListWorkingArea = new ArrayList<>();
try {
if (!partitionTopic) {
topicListWorkingArea = topics.getList(fullNsName);
}
else {
topicListWorkingArea = topics.getPartitionedTopicList(fullNsName);
}
}
catch (PulsarAdminException.NotFoundException nfe) {
// do nothing
}
catch (PulsarAdminException e) {
throw new RuntimeException("Failed to retrieve topic info for pulsar namespace: " + fullNsName, e);
}
return ( !topicListWorkingArea.isEmpty() && topicListWorkingArea.contains(topicUri) );
}
@Override
public void run() {
PulsarAdmin pulsarAdmin = clientSpace.getPulsarAdmin();
Topics topics = pulsarAdmin.topics();
try {
// Create the topic
if (!adminDelOp) {
if (!partitionTopic) {
if (!asyncApi) {
topics.createNonPartitionedTopic(topicUri);
logger.trace("Successfully created non-partitioned topic \"" + topicUri + "\" synchronously!");
} else {
CompletableFuture<Void> future = topics.createNonPartitionedTopicAsync(topicUri);
future.whenComplete((unused, throwable)
-> logger.trace("Successfully created non-partitioned topic \"" + topicUri + "\" asynchronously!"))
.exceptionally(ex -> {
logger.error("Failed to create non-partitioned topic \"" + topicUri + "\" asynchronously!");
return null;
});
}
} else {
if (!asyncApi) {
topics.createPartitionedTopic(topicUri, partitionNum);
logger.trace("Successfully created partitioned topic \"" + topicUri + "\"" +
"(partition_num: " + partitionNum + ") synchronously!");
} else {
CompletableFuture<Void> future = topics.createPartitionedTopicAsync(topicUri, partitionNum);
future.whenComplete((unused, throwable)
-> logger.trace("Successfully created partitioned topic \"" + topicUri + "\"" +
"(partition_num: " + partitionNum + ") asynchronously!"))
.exceptionally(ex -> {
logger.error("Failed to create partitioned topic \"" + topicUri + "\"" +
"(partition_num: " + partitionNum + ") asynchronously!");
return null;
});
}
}
}
// Delete the topic
else {
if (!partitionTopic) {
if (!asyncApi) {
topics.delete(topicUri, true);
logger.trace("Successfully deleted non-partitioned topic \"" + topicUri + "\" synchronously!");
} else {
CompletableFuture<Void> future = topics.deleteAsync(topicUri, true);
future.whenComplete((unused, throwable)
-> logger.trace("Successfully deleted non-partitioned topic \"" + topicUri + "\" asynchronously!"))
.exceptionally(ex -> {
logger.error("Failed to delete non-partitioned topic \"" + topicUri + "\" asynchronously!");
return null;
});
}
} else {
if (!asyncApi) {
topics.deletePartitionedTopic(topicUri, true);
logger.trace("Successfully deleted partitioned topic \"" + topicUri + "\" synchronously!");
} else {
CompletableFuture<Void> future = topics.deletePartitionedTopicAsync(topicUri, true);
future.whenComplete((unused, throwable)
-> logger.trace("Successfully deleted partitioned topic \"" + topicUri + "\" asynchronously!"))
.exceptionally(ex -> {
logger.error("Failed to delete partitioned topic \"" + topicUri + "\" asynchronously!");
return null;
});
}
}
}
}
catch (PulsarAdminException e) {
String errMsg = String.format("Unexpected error when %s pulsar topic: %s (partition topic: %b; partition number: %d)",
(!adminDelOp ? "creating" : "deleting"),
topicUri,
partitionTopic,
partitionNum);
throw new RuntimeException(errMsg, e);
}
}
}

View File

@ -1,39 +0,0 @@
/*
* Copyright (c) 2022 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.nosqlbench.driver.pulsar.ops;
import io.nosqlbench.driver.pulsar.PulsarActivity;
import io.nosqlbench.driver.pulsar.PulsarSpace;
import io.nosqlbench.engine.api.templating.CommandTemplate;
import java.util.function.LongFunction;
public class PulsarBatchProducerEndMapper extends PulsarOpMapper {
public PulsarBatchProducerEndMapper(CommandTemplate cmdTpl,
PulsarSpace clientSpace,
PulsarActivity pulsarActivity,
LongFunction<Boolean> asyncApiFunc)
{
super(cmdTpl, clientSpace, pulsarActivity, asyncApiFunc);
}
@Override
public PulsarOp apply(long value) {
return new PulsarBatchProducerEndOp();
}
}

View File

@ -1,50 +0,0 @@
/*
* Copyright (c) 2022 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.nosqlbench.driver.pulsar.ops;
import io.nosqlbench.api.errors.BasicError;
import org.apache.pulsar.client.api.MessageId;
import org.apache.pulsar.client.api.Producer;
import org.apache.pulsar.common.util.FutureUtil;
import java.util.List;
import java.util.concurrent.CompletableFuture;
public class PulsarBatchProducerEndOp extends SyncPulsarOp {
@Override
public void run() {
List<CompletableFuture<MessageId>> container = PulsarBatchProducerStartOp.threadLocalBatchMsgContainer.get();
Producer<?> producer = PulsarBatchProducerStartOp.threadLocalProducer.get();
if ((container != null) && (!container.isEmpty())) {
try {
// producer.flushAsync().get();
FutureUtil.waitForAll(container).get();
} catch (Exception e) {
throw new RuntimeException("Batch Producer:: failed to send (some of) the batched messages!");
}
container.clear();
PulsarBatchProducerStartOp.threadLocalBatchMsgContainer.set(null);
}
else {
throw new BasicError("You tried to end an empty batch message container. This means you" +
" did initiate the batch container properly, or there is an error in your" +
" pulsar op sequencing and ratios.");
}
}
}

View File

@ -1,77 +0,0 @@
/*
* Copyright (c) 2022 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.nosqlbench.driver.pulsar.ops;
import io.nosqlbench.driver.pulsar.PulsarActivity;
import io.nosqlbench.driver.pulsar.PulsarSpace;
import io.nosqlbench.driver.pulsar.util.PulsarActivityUtil;
import io.nosqlbench.engine.api.templating.CommandTemplate;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import java.util.HashMap;
import java.util.Map;
import java.util.function.LongFunction;
public class PulsarBatchProducerMapper extends PulsarOpMapper {
private final static Logger logger = LogManager.getLogger(PulsarBatchProducerMapper.class);
private final LongFunction<String> keyFunc;
private final LongFunction<String> propFunc;
private final LongFunction<String> payloadFunc;
public PulsarBatchProducerMapper(CommandTemplate cmdTpl,
PulsarSpace clientSpace,
PulsarActivity pulsarActivity,
LongFunction<Boolean> asyncApiFunc,
LongFunction<String> keyFunc,
LongFunction<String> propFunc,
LongFunction<String> payloadFunc) {
super(cmdTpl, clientSpace, pulsarActivity, asyncApiFunc);
this.keyFunc = keyFunc;
this.propFunc = propFunc;
this.payloadFunc = payloadFunc;
}
@Override
public PulsarOp apply(long value) {
String msgKey = keyFunc.apply(value);
String msgPayload = payloadFunc.apply(value);
// Check if msgPropJsonStr is a valid JSON string with a collection of key/value pairs
// - if Yes, convert it to a map
// - otherwise, log an error message and ignore message properties without throwing a runtime exception
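// e.g. {"key1": "value1", "key2": "value2"} becomes a two-entry properties map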
Map<String, String> msgProperties = new HashMap<>();
String msgPropJsonStr = propFunc.apply(value);
try {
msgProperties = PulsarActivityUtil.convertJsonToMap(msgPropJsonStr);
}
catch (Exception e) {
logger.error(
"PulsarProducerMapper:: Error parsing message property JSON string {}, ignore message properties!",
msgPropJsonStr);
}
return new PulsarBatchProducerOp(
clientSpace.getPulsarSchema(),
msgKey,
msgProperties,
msgPayload
);
}
}

View File

@ -1,83 +0,0 @@
/*
* Copyright (c) 2022 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.nosqlbench.driver.pulsar.ops;
import io.nosqlbench.driver.pulsar.util.AvroUtil;
import io.nosqlbench.driver.pulsar.util.PulsarActivityUtil;
import org.apache.pulsar.client.api.MessageId;
import org.apache.pulsar.client.api.Producer;
import org.apache.pulsar.client.api.Schema;
import org.apache.pulsar.client.api.TypedMessageBuilder;
import org.apache.pulsar.client.api.schema.GenericRecord;
import org.apache.pulsar.client.impl.schema.generic.GenericAvroSchema;
import org.apache.pulsar.common.schema.SchemaType;
import java.nio.charset.StandardCharsets;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
public class PulsarBatchProducerOp extends SyncPulsarOp {
private final Schema<?> pulsarSchema;
private final String msgKey;
private final Map<String, String> msgProperties;
private final String msgPayload;
public PulsarBatchProducerOp(Schema<?> schema,
String key,
Map<String, String> msgProperties,
String payload) {
this.pulsarSchema = schema;
this.msgKey = key;
this.msgProperties = msgProperties;
this.msgPayload = payload;
}
@Override
public void run() {
if ((msgPayload == null) || msgPayload.isEmpty()) {
throw new RuntimeException("Message payload (\"msg-value\") can't be empty!");
}
List<CompletableFuture<MessageId>> container = PulsarBatchProducerStartOp.threadLocalBatchMsgContainer.get();
Producer<?> producer = PulsarBatchProducerStartOp.threadLocalProducer.get();
assert (producer != null) && (container != null);
TypedMessageBuilder typedMessageBuilder = producer.newMessage(pulsarSchema);
if ((msgKey != null) && (!msgKey.isEmpty())) {
typedMessageBuilder = typedMessageBuilder.key(msgKey);
}
if (!msgProperties.isEmpty()) {
typedMessageBuilder = typedMessageBuilder.properties(msgProperties);
}
SchemaType schemaType = pulsarSchema.getSchemaInfo().getType();
if (PulsarActivityUtil.isAvroSchemaTypeStr(schemaType.name())) {
GenericRecord payload = AvroUtil.GetGenericRecord_PulsarAvro(
(GenericAvroSchema) pulsarSchema,
pulsarSchema.getSchemaInfo().getSchemaDefinition(),
msgPayload
);
typedMessageBuilder = typedMessageBuilder.value(payload);
} else {
typedMessageBuilder = typedMessageBuilder.value(msgPayload.getBytes(StandardCharsets.UTF_8));
}
container.add(typedMessageBuilder.sendAsync());
}
}

View File

@ -1,44 +0,0 @@
/*
* Copyright (c) 2022 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.nosqlbench.driver.pulsar.ops;
import io.nosqlbench.driver.pulsar.PulsarActivity;
import io.nosqlbench.driver.pulsar.PulsarSpace;
import io.nosqlbench.engine.api.templating.CommandTemplate;
import org.apache.pulsar.client.api.Producer;
import java.util.function.LongFunction;
public class PulsarBatchProducerStartMapper extends PulsarOpMapper {
private final LongFunction<Producer<?>> batchProducerFunc;
public PulsarBatchProducerStartMapper(CommandTemplate cmdTpl,
PulsarSpace clientSpace,
PulsarActivity pulsarActivity,
LongFunction<Boolean> asyncApiFunc,
LongFunction<Producer<?>> batchProducerFunc) {
super(cmdTpl, clientSpace, pulsarActivity, asyncApiFunc);
this.batchProducerFunc = batchProducerFunc;
}
@Override
public PulsarOp apply(long value) {
Producer<?> batchProducer = batchProducerFunc.apply(value);
return new PulsarBatchProducerStartOp(batchProducer);
}
}

View File

@ -1,49 +0,0 @@
/*
* Copyright (c) 2022 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.nosqlbench.driver.pulsar.ops;
import io.nosqlbench.api.errors.BasicError;
import org.apache.commons.compress.utils.Lists;
import org.apache.pulsar.client.api.*;
import java.util.List;
import java.util.concurrent.CompletableFuture;
public class PulsarBatchProducerStartOp extends SyncPulsarOp {
// TODO: ensure sane container lifecycle management
public final transient static ThreadLocal<List<CompletableFuture<MessageId>>> threadLocalBatchMsgContainer = new ThreadLocal<>();
public final transient static ThreadLocal<Producer<?>> threadLocalProducer = new ThreadLocal<>();
public PulsarBatchProducerStartOp(Producer<?> batchProducer) {
threadLocalProducer.set(batchProducer);
}
@Override
public void run() {
List<CompletableFuture<MessageId>> container = threadLocalBatchMsgContainer.get();
if (container == null) {
container = Lists.newArrayList();
threadLocalBatchMsgContainer.set(container);
} else {
throw new BasicError("You tried to create a batch message container where one was already" +
" defined. This means you did not flush and unset the last container, or there is an error in your" +
" pulsar op sequencing and ratios.");
}
}
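// A minimal sketch of the intended per-thread op sequencing (the workload's
// op ratios must arrange this order):
//   new PulsarBatchProducerStartOp(producer).run(); // opens the thread-local container
//   ... N PulsarBatchProducerOp instances append sendAsync() futures ...
//   new PulsarBatchProducerEndOp().run();           // awaits all futures, clears the container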
}

View File

@ -1,104 +0,0 @@
/*
* Copyright (c) 2022 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.nosqlbench.driver.pulsar.ops;
import io.nosqlbench.driver.pulsar.PulsarActivity;
import io.nosqlbench.driver.pulsar.PulsarSpace;
import io.nosqlbench.engine.api.templating.CommandTemplate;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.pulsar.client.api.Consumer;
import org.apache.pulsar.client.api.transaction.Transaction;
import java.util.HashMap;
import java.util.Map;
import java.util.function.LongFunction;
import java.util.function.Supplier;
/**
* This maps a set of specifier functions to a pulsar operation. The pulsar operation contains
* enough state to define a pulsar operation such that it can be executed, measured, and possibly
* retried if needed.
*
* This function doesn't act *as* the operation. It merely maps the construction logic into
* a simple functional type, given the component functions.
*
* For additional parameterization, the command template is also provided.
*/
public class PulsarConsumerMapper extends PulsarTransactOpMapper {
private final static Logger logger = LogManager.getLogger(PulsarConsumerMapper.class);
private final LongFunction<Consumer<?>> consumerFunc;
private final EndToEndStartingTimeSource endToEndStartingTimeSource;
private final LongFunction<String> payloadRttFieldFunc;
public PulsarConsumerMapper(CommandTemplate cmdTpl,
PulsarSpace clientSpace,
PulsarActivity pulsarActivity,
LongFunction<Boolean> asyncApiFunc,
LongFunction<Boolean> useTransactionFunc,
LongFunction<Boolean> seqTrackingFunc,
LongFunction<Supplier<Transaction>> transactionSupplierFunc,
LongFunction<Consumer<?>> consumerFunc,
EndToEndStartingTimeSource endToEndStartingTimeSource,
LongFunction<String> payloadRttFieldFunc) {
super(cmdTpl, clientSpace, pulsarActivity, asyncApiFunc, useTransactionFunc, seqTrackingFunc, transactionSupplierFunc);
this.consumerFunc = consumerFunc;
this.endToEndStartingTimeSource = endToEndStartingTimeSource;
this.payloadRttFieldFunc = payloadRttFieldFunc;
}
@Override
public PulsarOp apply(long value) {
boolean seqTracking = seqTrackingFunc.apply(value);
Consumer<?> consumer = consumerFunc.apply(value);
boolean asyncApi = asyncApiFunc.apply(value);
boolean useTransaction = useTransactionFunc.apply(value);
Supplier<Transaction> transactionSupplier = transactionSupplierFunc.apply(value);
String payloadRttField = this.payloadRttFieldFunc.apply(value);
return new PulsarConsumerOp(
pulsarActivity,
asyncApi,
useTransaction,
seqTracking,
transactionSupplier,
consumer,
clientSpace.getPulsarSchema(),
clientSpace.getPulsarClientConf().getConsumerTimeoutSeconds(),
endToEndStartingTimeSource,
this::getReceivedMessageSequenceTracker,
payloadRttField);
}
private ReceivedMessageSequenceTracker getReceivedMessageSequenceTracker(String topicName) {
return receivedMessageSequenceTrackersForTopicThreadLocal.get()
.computeIfAbsent(topicName, k -> createReceivedMessageSequenceTracker());
}
private ReceivedMessageSequenceTracker createReceivedMessageSequenceTracker() {
return new ReceivedMessageSequenceTracker(pulsarActivity.getMsgErrOutOfSeqCounter(),
pulsarActivity.getMsgErrDuplicateCounter(),
pulsarActivity.getMsgErrLossCounter());
}
private final ThreadLocal<Map<String, ReceivedMessageSequenceTracker>> receivedMessageSequenceTrackersForTopicThreadLocal =
ThreadLocal.withInitial(HashMap::new);
}

View File

@ -1,303 +0,0 @@
/*
* Copyright (c) 2022 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.nosqlbench.driver.pulsar.ops;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.function.Function;
import java.util.function.Supplier;
import com.google.gson.Gson;
import com.google.gson.GsonBuilder;
import org.apache.commons.lang3.StringUtils;
import com.codahale.metrics.Counter;
import com.codahale.metrics.Histogram;
import com.codahale.metrics.Timer;
import io.nosqlbench.driver.pulsar.PulsarActivity;
import io.nosqlbench.driver.pulsar.exception.PulsarDriverUnexpectedException;
import io.nosqlbench.driver.pulsar.util.AvroUtil;
import io.nosqlbench.driver.pulsar.util.PulsarActivityUtil;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.pulsar.client.api.*;
import org.apache.pulsar.client.api.schema.GenericRecord;
import org.apache.pulsar.client.api.transaction.Transaction;
import org.apache.pulsar.common.schema.SchemaType;
public class PulsarConsumerOp implements PulsarOp {
private final static Logger logger = LogManager.getLogger(PulsarConsumerOp.class);
private final PulsarActivity pulsarActivity;
private final boolean asyncPulsarOp;
private final boolean useTransaction;
private final boolean seqTracking;
private final Supplier<Transaction> transactionSupplier;
private final Consumer<?> consumer;
private final Schema<?> pulsarSchema;
private final int timeoutSeconds;
private final EndToEndStartingTimeSource endToEndStartingTimeSource;
private final Counter bytesCounter;
private final Histogram messageSizeHistogram;
private final Timer transactionCommitTimer;
// keep track of end-to-end message latency
private final Histogram e2eMsgProcLatencyHistogram;
private final Function<String, ReceivedMessageSequenceTracker> receivedMessageSequenceTrackerForTopic;
private final Histogram payloadRttHistogram;
private final String payloadRttTrackingField;
private org.apache.avro.Schema avroSchema;
public PulsarConsumerOp(
PulsarActivity pulsarActivity,
boolean asyncPulsarOp,
boolean useTransaction,
boolean seqTracking,
Supplier<Transaction> transactionSupplier,
Consumer<?> consumer,
Schema<?> schema,
int timeoutSeconds,
EndToEndStartingTimeSource endToEndStartingTimeSource,
Function<String, ReceivedMessageSequenceTracker> receivedMessageSequenceTrackerForTopic,
String payloadRttTrackingField)
{
this.pulsarActivity = pulsarActivity;
this.asyncPulsarOp = asyncPulsarOp;
this.useTransaction = useTransaction;
this.seqTracking = seqTracking;
this.transactionSupplier = transactionSupplier;
this.consumer = consumer;
this.pulsarSchema = schema;
this.timeoutSeconds = timeoutSeconds;
this.endToEndStartingTimeSource = endToEndStartingTimeSource;
this.bytesCounter = pulsarActivity.getBytesCounter();
this.messageSizeHistogram = pulsarActivity.getMessageSizeHistogram();
this.transactionCommitTimer = pulsarActivity.getCommitTransactionTimer();
this.e2eMsgProcLatencyHistogram = pulsarActivity.getE2eMsgProcLatencyHistogram();
this.payloadRttHistogram = pulsarActivity.getPayloadRttHistogram();
this.receivedMessageSequenceTrackerForTopic = receivedMessageSequenceTrackerForTopic;
this.payloadRttTrackingField = payloadRttTrackingField;
}
private void checkAndUpdateMessageErrorCounter(Message<?> message) {
String msgSeqIdStr = message.getProperty(PulsarActivityUtil.MSG_SEQUENCE_NUMBER);
if (!StringUtils.isBlank(msgSeqIdStr)) {
long sequenceNumber = Long.parseLong(msgSeqIdStr);
ReceivedMessageSequenceTracker receivedMessageSequenceTracker = receivedMessageSequenceTrackerForTopic.apply(message.getTopicName());
receivedMessageSequenceTracker.sequenceNumberReceived(sequenceNumber);
}
}
@Override
public void run(Runnable timeTracker) {
final Transaction transaction;
if (useTransaction) {
// if you are in a transaction you cannot set the schema per-message
transaction = transactionSupplier.get();
}
else {
transaction = null;
}
if (!asyncPulsarOp) {
try {
Message<?> message;
if (timeoutSeconds <= 0) {
// wait forever
message = consumer.receive();
}
else {
message = consumer
.receive(timeoutSeconds, TimeUnit.SECONDS);
if (message == null) {
throw new TimeoutException("Did not receive a message within "+timeoutSeconds+" seconds");
}
}
handleMessage(transaction, message);
}
catch (Exception e) {
logger.error(
"Sync message receiving failed - timeout value: {} seconds ", timeoutSeconds, e);
throw new PulsarDriverUnexpectedException("" +
"Sync message receiving failed - timeout value: " + timeoutSeconds + " seconds ");
}
}
else {
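// Async mode: chain the transaction commit (if any) onto the receive future,
// then hand the message off to the shared handleMessage path below.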
try {
CompletableFuture<? extends Message<?>> msgRecvFuture = consumer.receiveAsync();
if (useTransaction) {
// add commit step
msgRecvFuture = msgRecvFuture.thenCompose(msg -> {
Timer.Context ctx = transactionCommitTimer.time();
return transaction
.commit()
.whenComplete((m,e) -> ctx.close())
.thenApply(v-> msg);
}
);
}
msgRecvFuture.thenAccept(message -> {
try {
handleMessage(transaction, message);
} catch (PulsarClientException | TimeoutException e) {
pulsarActivity.asyncOperationFailed(e);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
} catch (ExecutionException e) {
pulsarActivity.asyncOperationFailed(e.getCause());
}
}).exceptionally(ex -> {
pulsarActivity.asyncOperationFailed(ex);
return null;
});
}
catch (Exception e) {
throw new PulsarDriverUnexpectedException(e);
}
}
}
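// Shared receive path for both the sync and async branches above: acknowledge
// the message (within the transaction when one is used), then update payload-RTT,
// end-to-end latency, sequence-tracking, and message-size metrics.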
private void handleMessage(Transaction transaction, Message<?> message)
throws PulsarClientException, InterruptedException, ExecutionException, TimeoutException {
// acknowledge the message as soon as possible
if (!useTransaction) {
consumer.acknowledgeAsync(message.getMessageId())
.get(timeoutSeconds, TimeUnit.SECONDS);
} else {
consumer.acknowledgeAsync(message.getMessageId(), transaction)
.get(timeoutSeconds, TimeUnit.SECONDS);
// Known issue: the "commit" time is counted here
// inside the overall time spent executing the consume operation.
// We should refactor this operation as was done for PulsarProducerOp, and use the passed callback
// to track precisely the time spent on the operation and on the commit.
try (Timer.Context ctx = transactionCommitTimer.time()) {
transaction.commit().get();
}
}
if (logger.isDebugEnabled()) {
SchemaType schemaType = pulsarSchema.getSchemaInfo().getType();
if (PulsarActivityUtil.isAvroSchemaTypeStr(schemaType.name())) {
org.apache.avro.Schema avroSchema = getSchemaFromConfiguration();
org.apache.avro.generic.GenericRecord avroGenericRecord =
AvroUtil.GetGenericRecord_ApacheAvro(avroSchema, message.getData());
logger.debug("({}) message received: msg-key={}; msg-properties={}; msg-payload={}",
consumer.getConsumerName(),
message.getKey(),
message.getProperties(),
avroGenericRecord.toString());
}
else {
logger.debug("({}) message received: msg-key={}; msg-properties={}; msg-payload={}",
consumer.getConsumerName(),
message.getKey(),
message.getProperties(),
new String(message.getData()));
}
}
if (!payloadRttTrackingField.isEmpty()) {
Object decodedPayload = message.getValue();
Long extractedSendTime = null;
// if Pulsar is able to decode this, it is better to let it do the work,
// because Pulsar caches the Schema and handles Schema evolution
// as efficiently as possible
if (decodedPayload instanceof GenericRecord) {
GenericRecord pulsarGenericRecord = (GenericRecord) decodedPayload;
Object field = pulsarGenericRecord.getField(payloadRttTrackingField);
if (field != null) {
if (field instanceof Number) {
extractedSendTime = ((Number) field).longValue();
} else {
extractedSendTime = Long.valueOf(field.toString());
}
}
} else {
org.apache.avro.Schema avroSchema = getSchemaFromConfiguration();
org.apache.avro.generic.GenericRecord avroGenericRecord =
AvroUtil.GetGenericRecord_ApacheAvro(avroSchema, message.getData());
if (avroGenericRecord.hasField(payloadRttTrackingField)) {
extractedSendTime = (Long) avroGenericRecord.get(payloadRttTrackingField);
}
}
if (extractedSendTime != null) {
long delta = System.currentTimeMillis() - extractedSendTime;
payloadRttHistogram.update(delta);
}
}
// keep track of end-to-end message processing latency
if (endToEndStartingTimeSource != EndToEndStartingTimeSource.NONE) {
long startTimeStamp = 0L;
switch (endToEndStartingTimeSource) {
case MESSAGE_PUBLISH_TIME:
startTimeStamp = message.getPublishTime();
break;
case MESSAGE_EVENT_TIME:
startTimeStamp = message.getEventTime();
break;
case MESSAGE_PROPERTY_E2E_STARTING_TIME:
String startingTimeProperty = message.getProperty("e2e_starting_time");
startTimeStamp = startingTimeProperty != null ? Long.parseLong(startingTimeProperty) : 0L;
break;
}
if (startTimeStamp != 0L) {
long e2eMsgLatency = System.currentTimeMillis() - startTimeStamp;
e2eMsgProcLatencyHistogram.update(e2eMsgLatency);
}
}
// keep track of message errors and update error counters
if (seqTracking) checkAndUpdateMessageErrorCounter(message);
int messageSize = message.getData().length;
bytesCounter.inc(messageSize);
messageSizeHistogram.update(messageSize);
}
private org.apache.avro.Schema getSchemaFromConfiguration() {
String avroDefStr = pulsarSchema.getSchemaInfo().getSchemaDefinition();
// no need for synchronization, this is only a cache;
// in case of a race we will parse the string twice, which is not a big deal
if (avroSchema == null) {
avroSchema = AvroUtil.GetSchema_ApacheAvro(avroDefStr);
}
return avroSchema;
}
}
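Stepping back, the end-to-end latency bookkeeping in handleMessage reduces to a simple round trip: the producer stamps a wall-clock time on the message (for example via the e2e_starting_time property read above), and the consumer subtracts that stamp from its own clock on receipt, so the delta is only meaningful when the two clocks are synchronized. A self-contained sketch of that logic, using a plain map as a stand-in for the Pulsar message properties:

import java.util.HashMap;
import java.util.Map;

// Self-contained sketch of the end-to-end latency bookkeeping, with a plain
// map standing in for Pulsar message properties.
public class E2eLatencySketch {

    public static void main(String[] args) throws InterruptedException {
        // Producer side: stamp the send time as a message property.
        Map<String, String> properties = new HashMap<>();
        properties.put("e2e_starting_time", String.valueOf(System.currentTimeMillis()));

        Thread.sleep(25); // stand-in for broker and network transit time

        // Consumer side: recover the stamp and compute the latency delta.
        String startingTimeProperty = properties.get("e2e_starting_time");
        long startTimeStamp = startingTimeProperty != null
            ? Long.parseLong(startingTimeProperty) : 0L;
        if (startTimeStamp != 0L) {
            long e2eMsgLatency = System.currentTimeMillis() - startTimeStamp;
            System.out.println("e2e latency (ms): " + e2eMsgLatency); // roughly 25
        }
    }
}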

@@ -1,30 +0,0 @@
/*
* Copyright (c) 2022 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.nosqlbench.driver.pulsar.ops;
/**
* Base type of all Pulsar Operations including Producers and Consumers.
*/
public interface PulsarOp {
/**
* Execute the operation, invoking the timeTracker when the operation ends.
* The timeTracker may be invoked on a separate thread; it is used only for metrics.
* @param timeTracker callback used to record the completion time of the operation
*/
void run(Runnable timeTracker);
}
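A minimal sketch of how an implementation honors this contract may help: the op performs its work and then invokes timeTracker exactly once, possibly from an async callback thread. The interface is restated locally so the sketch is self-contained, and NoOpPulsarOp is a hypothetical example, not a driver class.

// Sketch of the PulsarOp contract described above.
public class PulsarOpSketch {

    interface PulsarOp {
        void run(Runnable timeTracker);
    }

    // Hypothetical op: does no real Pulsar work, but honors the timing contract.
    static final class NoOpPulsarOp implements PulsarOp {
        @Override
        public void run(Runnable timeTracker) {
            // ... the real Pulsar call would go here ...
            timeTracker.run(); // invoked exactly once, when the operation ends
        }
    }

    public static void main(String[] args) {
        long start = System.nanoTime();
        new NoOpPulsarOp().run(() ->
            System.out.println("op took " + (System.nanoTime() - start) + " ns"));
    }
}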

@@ -1,45 +0,0 @@
/*
* Copyright (c) 2022 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.nosqlbench.driver.pulsar.ops;
import io.nosqlbench.driver.pulsar.PulsarActivity;
import io.nosqlbench.driver.pulsar.PulsarSpace;
import io.nosqlbench.engine.api.templating.CommandTemplate;
import java.util.function.LongFunction;
public abstract class PulsarOpMapper implements LongFunction<PulsarOp> {
protected final CommandTemplate cmdTpl;
protected final PulsarSpace clientSpace;
protected final PulsarActivity pulsarActivity;
protected final LongFunction<Boolean> asyncApiFunc;
public PulsarOpMapper(CommandTemplate cmdTpl,
PulsarSpace clientSpace,
PulsarActivity pulsarActivity,
LongFunction<Boolean> asyncApiFunc)
{
this.cmdTpl = cmdTpl;
this.clientSpace = clientSpace;
this.pulsarActivity = pulsarActivity;
this.asyncApiFunc = asyncApiFunc;
}
}
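All of the concrete mappers extending this class follow the same shape: a LongFunction from the cycle number to an executable op, built by applying per-field specifier functions to that cycle value so each cycle gets deterministically derived state. A small self-contained sketch of the pattern, with purely illustrative names:

import java.util.function.LongFunction;

// Sketch of the mapper pattern: specifier functions are applied to the
// cycle number to bind per-cycle state before the op is returned.
public class MapperPatternSketch {

    interface Op {
        void run();
    }

    static final class EchoOpMapper implements LongFunction<Op> {
        private final LongFunction<String> payloadFunc;

        EchoOpMapper(LongFunction<String> payloadFunc) {
            this.payloadFunc = payloadFunc;
        }

        @Override
        public Op apply(long cycle) {
            // Bind the per-cycle value now, so the returned op is self-contained.
            String payload = payloadFunc.apply(cycle);
            return () -> System.out.println("cycle " + cycle + " -> " + payload);
        }
    }

    public static void main(String[] args) {
        EchoOpMapper mapper = new EchoOpMapper(cycle -> "payload-" + cycle);
        mapper.apply(42L).run(); // prints: cycle 42 -> payload-42
    }
}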

@@ -1,129 +0,0 @@
/*
* Copyright (c) 2022 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.nosqlbench.driver.pulsar.ops;
import io.nosqlbench.driver.pulsar.PulsarActivity;
import io.nosqlbench.driver.pulsar.PulsarSpace;
import io.nosqlbench.driver.pulsar.util.PulsarActivityUtil;
import io.nosqlbench.engine.api.templating.CommandTemplate;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.function.LongFunction;
import java.util.function.Supplier;
import org.apache.commons.lang3.StringUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.pulsar.client.api.Producer;
import org.apache.pulsar.client.api.transaction.Transaction;
/**
* This maps a set of specifier functions to a Pulsar operation. The resulting operation contains
* enough state that it can be executed, measured, and possibly
* retried if needed.
*
* This function doesn't act *as* the operation. It merely maps the construction logic into
* a simple functional type, given the component functions.
*
* For additional parameterization, the command template is also provided.
*/
public class PulsarProducerMapper extends PulsarTransactOpMapper {
private final static Logger logger = LogManager.getLogger(PulsarProducerMapper.class);
private final LongFunction<Producer<?>> producerFunc;
private final Set<PulsarActivityUtil.SEQ_ERROR_SIMU_TYPE> seqErrSimuTypes;
private final LongFunction<String> keyFunc;
private final LongFunction<String> propFunc;
private final LongFunction<String> payloadFunc;
public PulsarProducerMapper(CommandTemplate cmdTpl,
PulsarSpace clientSpace,
PulsarActivity pulsarActivity,
LongFunction<Boolean> asyncApiFunc,
LongFunction<Boolean> useTransactionFunc,
LongFunction<Boolean> seqTrackingFunc,
LongFunction<Supplier<Transaction>> transactionSupplierFunc,
LongFunction<Producer<?>> producerFunc,
Set<PulsarActivityUtil.SEQ_ERROR_SIMU_TYPE> seqErrSimuTypes,
LongFunction<String> keyFunc,
LongFunction<String> propFunc,
LongFunction<String> payloadFunc) {
super(cmdTpl, clientSpace, pulsarActivity, asyncApiFunc, useTransactionFunc, seqTrackingFunc, transactionSupplierFunc);
this.producerFunc = producerFunc;
this.seqErrSimuTypes = seqErrSimuTypes;
this.keyFunc = keyFunc;
this.propFunc = propFunc;
this.payloadFunc = payloadFunc;
}
@Override
public PulsarOp apply(long value) {
boolean asyncApi = asyncApiFunc.apply(value);
boolean useTransaction = useTransactionFunc.apply(value);
Supplier<Transaction> transactionSupplier = transactionSupplierFunc.apply(value);
Producer<?> producer = producerFunc.apply(value);
String msgKey = keyFunc.apply(value);
String msgPayload = payloadFunc.apply(value);
// Check if msgPropJsonStr is a valid JSON string holding a collection of key/value pairs
// - if yes, convert it to a map
// - otherwise, log an error message and ignore the message properties without throwing a runtime exception
Map<String, String> msgProperties = new HashMap<>();
String msgPropJsonStr = propFunc.apply(value);
if (!StringUtils.isBlank(msgPropJsonStr)) {
try {
msgProperties = PulsarActivityUtil.convertJsonToMap(msgPropJsonStr);
} catch (Exception e) {
logger.error(
"Error parsing message property JSON string {}, ignore message properties!",
msgPropJsonStr);
}
}
boolean sequenceTrackingEnabled = seqTrackingFunc.apply(value);
if (sequenceTrackingEnabled) {
long nextSequenceNumber = getMessageSequenceNumberSendingHandler(producer.getTopic())
.getNextSequenceNumber(seqErrSimuTypes);
msgProperties.put(PulsarActivityUtil.MSG_SEQUENCE_NUMBER, String.valueOf(nextSequenceNumber));
}
return new PulsarProducerOp(
pulsarActivity,
asyncApi,
useTransaction,
transactionSupplier,
producer,
clientSpace.getPulsarSchema(),
msgKey,
msgProperties,
msgPayload);
}
private MessageSequenceNumberSendingHandler getMessageSequenceNumberSendingHandler(String topicName) {
return messageSequenceNumberSendingHandlersThreadLocal.get()
.computeIfAbsent(topicName, k -> new MessageSequenceNumberSendingHandler());
}
private final ThreadLocal<Map<String, MessageSequenceNumberSendingHandler>> messageSequenceNumberSendingHandlersThreadLocal =
ThreadLocal.withInitial(HashMap::new);
}
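The sequence tracking wired up here pairs with the consumer-side checkAndUpdateMessageErrorCounter shown earlier: the producer stamps a monotonically increasing sequence number into a message property, and the consumer parses it back to detect loss, duplication, or reordering. A stripped-down sketch of that round trip; the property key below is illustrative, while the real one lives in PulsarActivityUtil.MSG_SEQUENCE_NUMBER:

import java.util.HashMap;
import java.util.Map;

// Sketch of the producer-to-consumer sequence number round trip.
public class SequenceTrackingSketch {

    // Illustrative key; the driver uses PulsarActivityUtil.MSG_SEQUENCE_NUMBER.
    static final String MSG_SEQUENCE_NUMBER = "sequence_number";

    public static void main(String[] args) {
        long nextSequenceNumber = 0;
        long expected = 0;

        for (int i = 0; i < 3; i++) {
            // Producer side: stamp the next sequence number as a message property.
            Map<String, String> msgProperties = new HashMap<>();
            msgProperties.put(MSG_SEQUENCE_NUMBER, String.valueOf(nextSequenceNumber++));

            // Consumer side: parse it back and compare against the expectation.
            long received = Long.parseLong(msgProperties.get(MSG_SEQUENCE_NUMBER));
            if (received != expected) {
                System.out.println("out of sequence: expected " + expected + ", got " + received);
            }
            expected = received + 1;
        }
        System.out.println("all " + expected + " messages arrived in order");
    }
}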

Some files were not shown because too many files have changed in this diff.