Merge branch 'main' into cleanup-includes

Jonathan Shook 2023-02-06 15:27:37 -06:00
commit 0366235f71
69 changed files with 933 additions and 1002 deletions


@@ -18,16 +18,16 @@ package io.nosqlbench.cqlgen.core;
 import com.google.gson.Gson;
 import com.google.gson.GsonBuilder;
+import io.nosqlbench.api.apps.BundledApp;
 import io.nosqlbench.api.content.Content;
 import io.nosqlbench.api.content.NBIO;
-import io.nosqlbench.api.apps.BundledApp;
+import io.nosqlbench.cqlgen.api.BindingsLibrary;
 import io.nosqlbench.cqlgen.binders.Binding;
 import io.nosqlbench.cqlgen.binders.BindingsAccumulator;
-import io.nosqlbench.cqlgen.api.BindingsLibrary;
 import io.nosqlbench.cqlgen.binders.NamingFolio;
-import io.nosqlbench.cqlgen.transformers.CGModelTransformers;
 import io.nosqlbench.cqlgen.model.*;
 import io.nosqlbench.cqlgen.parser.CqlModelParser;
+import io.nosqlbench.cqlgen.transformers.CGModelTransformers;
 import io.nosqlbench.nb.annotations.Service;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
@@ -75,14 +75,14 @@ public class CGWorkloadExporter implements BundledApp {
     private Map<String, List<String>> blockplan = Map.of();

     private final Map<String, Double> timeouts = new HashMap<String, Double>(Map.of(
         "create", 60.0,
         "truncate", 900.0,
         "drop", 900.0,
         "scan", 30.0,
         "select", 10.0,
         "insert", 10.0,
         "delete", 10.0,
         "update", 10.0
     ));

     public static void main(String[] args) {
@@ -166,7 +166,7 @@ public class CGWorkloadExporter implements BundledApp {
         this.model = CqlModelParser.parse(ddl, srcpath);
         List<String> errorlist = model.getReferenceErrors();
-        if (errorlist.size()>0) {
+        if (errorlist.size() > 0) {
             for (String error : errorlist) {
                 logger.error(error);
             }
@@ -177,12 +177,12 @@ public class CGWorkloadExporter implements BundledApp {
         String workload = getWorkloadAsYaml();
         try {
             Files.writeString(
                 target,
                 workload,
                 StandardOpenOption.CREATE, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING
             );
             logger.info("Wrote workload template as '" + target + "'. Bear in mind that this is simply one version " +
                 "of a workload using this schema, and may not be representative of actual production usage patterns.");
         } catch (IOException e) {
             String errmsg = "There was an error writing '" + target + "'.";
             logger.error(errmsg);
@@ -218,7 +218,7 @@ public class CGWorkloadExporter implements BundledApp {
         workload.put("bindings", new LinkedHashMap<String, String>());
         Map<String, Object> blocks = new LinkedHashMap<>();
         workload.put("params", new LinkedHashMap<>(
             Map.of("cl", "LOCAL_QUORUM")
         ));
         workload.put("blocks", blocks);
@@ -227,7 +227,7 @@ public class CGWorkloadExporter implements BundledApp {
             List<String> components = blocknameAndComponents.getValue();

             LinkedHashMap<String, Object> block = new LinkedHashMap<>(
                 Map.of("params", new LinkedHashMap<String, Object>())
             );
             for (String component : components) {
                 Map<String, Object> additions = switch (component) {
@@ -319,11 +319,11 @@ public class CGWorkloadExporter implements BundledApp {
         return new LinkedHashMap<>() {{
             put("default",
                 new LinkedHashMap<>() {{
-                    put("schema", "run driver=cql tags=block:schema.* threads===UNDEF cycles===UNDEF");
-                    put("rampup", "run driver=cql tags=block:rampup.* threads=auto cycles===TEMPLATE(rampup-cycles,10000)");
-                    put("main", "run driver=cql tags=block:main.* threads=auto cycles===TEMPLATE(main-cycles,10000)");
+                    put("schema", "run driver=cql tags=block:'schema-*.*' threads===UNDEF cycles===UNDEF");
+                    put("rampup", "run driver=cql tags=block:rampup threads=auto cycles===TEMPLATE(rampup-cycles,10000)");
+                    put("main", "run driver=cql tags=block:'main-*.*' threads=auto cycles===TEMPLATE(main-cycles,10000)");
                 }});
             put("main-insert", "run driver=cql tags=block:main-insert threads=auto cycles===TEMPLATE(main-cycles,10000)");
             put("main-select", "run driver=cql tags=block:main-select threads=auto cycles===TEMPLATE(main-cycles,10000)");
@@ -351,12 +351,12 @@ public class CGWorkloadExporter implements BundledApp {
                 logger.debug(() -> "skipping table " + table.getFullName() + " for scan since there are no clustering columns");
             }
             ops.put(
                 namer.nameFor(table, "optype", "scan", "blockname", blockname),
                 Map.of(
                     "prepared", genScanSyntax(table),
                     "timeout", timeouts.get("scan"),
                     "ratio", readRatioFor(table)
                 )
             );
         }
         return blockdata;
@@ -364,14 +364,14 @@ public class CGWorkloadExporter implements BundledApp {
     private String genScanSyntax(CqlTable table) {
         return """
             select * from KEYSPACE.TABLE
             where PREDICATE
             LIMIT;
             """
             .replace("KEYSPACE", table.getKeyspace().getName())
             .replace("TABLE", table.getName())
             .replace("PREDICATE", genPredicateTemplate(table, -1))
             .replace("LIMIT", genLimitSyntax(table));
     }
@@ -381,12 +381,12 @@ public class CGWorkloadExporter implements BundledApp {
         blockdata.put("ops", ops);
         for (CqlTable table : model.getTableDefs()) {
             ops.put(
                 namer.nameFor(table, "optype", "select", "blockname", blockname),
                 Map.of(
                     "prepared", genSelectSyntax(table),
                     "timeout", timeouts.get("select"),
                     "ratio", readRatioFor(table)
                 )
             );
         }
         return blockdata;
@@ -394,14 +394,14 @@ public class CGWorkloadExporter implements BundledApp {
     private String genSelectSyntax(CqlTable table) {
         return """
             select * from KEYSPACE.TABLE
             where PREDICATE
             LIMIT;
             """
             .replace("KEYSPACE", table.getKeyspace().getName())
             .replace("TABLE", table.getName())
             .replace("PREDICATE", genPredicateTemplate(table, 0))
             .replace("LIMIT", genLimitSyntax(table));
     }

     private String genLimitSyntax(CqlTable table) {
@@ -415,12 +415,12 @@ public class CGWorkloadExporter implements BundledApp {
         for (CqlTable table : model.getTableDefs()) {
             if (!isCounterTable(table)) {
                 ops.put(
                     namer.nameFor(table, "optype", "insert", "blockname", blockname),
                     Map.of(
                         "prepared", genInsertSyntax(table),
                         "timeout", timeouts.get("insert"),
                         "ratio", writeRatioFor(table)
                     )
                 );
             }
         }
@@ -433,22 +433,22 @@ public class CGWorkloadExporter implements BundledApp {
         }
         return """
             insert into KEYSPACE.TABLE
             ( FIELDNAMES )
             VALUES
             ( BINDINGS );
             """
             .replace("KEYSPACE", table.getKeyspace().getName())
             .replace("TABLE", table.getName())
             .replace("FIELDNAMES",
                 String.join(", ",
                     table.getColumnDefs().stream()
                         .map(CqlTableColumn::getName).toList()))
             .replaceAll("BINDINGS",
                 String.join(", ",
                     table.getColumnDefs().stream()
                         .map(c -> binder.forColumn(c))
                         .map(c -> "{" + c.getName() + "}").toList()));
     }
@@ -458,12 +458,12 @@ public class CGWorkloadExporter implements BundledApp {
         blockdata.put("ops", ops);
         for (CqlTable table : model.getTableDefs()) {
             ops.put(
                 namer.nameFor(table, "optype", "update", "blockname", blockname),
                 Map.of(
                     "prepared", genUpdateSyntax(table),
                     "timeout", timeouts.get("update"),
                     "ratio", writeRatioFor(table)
                 )
             );
         }
         return blockdata;
@@ -472,7 +472,7 @@ public class CGWorkloadExporter implements BundledApp {
     private boolean isCounterTable(CqlTable table) {
         return table.getColumnDefs().stream()
             .anyMatch(cd -> cd.getTrimmedTypedef().equalsIgnoreCase("counter"));
     }

     private int totalRatioFor(CqlTable table) {
@@ -540,9 +540,9 @@ public class CGWorkloadExporter implements BundledApp {
         // TODO; constraints on predicates based on valid constructions
         pkeys.stream().map(this::genPredicatePart)
             .forEach(p -> {
                 sb.append(p).append("\n AND ");
             });
         if (sb.length() > 0) {
             sb.setLength(sb.length() - "\n AND ".length());
         }
@@ -557,14 +557,14 @@ public class CGWorkloadExporter implements BundledApp {
     private String genUpdateSyntax(CqlTable table) {
         return """
             update KEYSPACE.TABLE
             set ASSIGNMENTS
             where PREDICATES;
             """
             .replaceAll("KEYSPACE", table.getKeyspace().getName())
             .replaceAll("TABLE", table.getName())
             .replaceAll("PREDICATES", genPredicateTemplate(table, 0))
             .replaceAll("ASSIGNMENTS", genAssignments(table));
     }

     private String genAssignments(CqlTable table) {
@@ -572,12 +572,12 @@ public class CGWorkloadExporter implements BundledApp {
         for (CqlTableColumn coldef : table.getNonKeyColumnDefinitions()) {
             if (coldef.isCounter()) {
                 sb.append(coldef.getName()).append("=")
                     .append(coldef.getName()).append("+").append("{").append(binder.forColumn(coldef).getName()).append("}")
                     .append(", ");
             } else {
                 sb.append(coldef.getName()).append("=")
                     .append("{").append(binder.forColumn(coldef).getName()).append("}")
                     .append(", ");
             }
         }
         if (sb.length() > 0) {
@@ -602,16 +602,16 @@ public class CGWorkloadExporter implements BundledApp {
         ((Map<String, String>) workload.get("bindings")).putAll(bindingslib.getAccumulatedBindings());

         DumpSettings dumpSettings = DumpSettings.builder()
             .setDefaultFlowStyle(FlowStyle.BLOCK)
             .setIndent(2)
             .setDefaultScalarStyle(ScalarStyle.PLAIN)
             .setMaxSimpleKeyLength(1000)
             .setWidth(100)
             .setSplitLines(true)
             .setIndentWithIndicator(true)
             .setMultiLineFlow(true)
             .setNonPrintableStyle(NonPrintableStyle.ESCAPE)
             .build();
         BaseRepresenter r;
         Dump dump = new Dump(dumpSettings);
@@ -637,11 +637,11 @@ public class CGWorkloadExporter implements BundledApp {
         dropTablesBlock.put("ops", ops);
         for (CqlTable table : model.getTableDefs()) {
             ops.put(
                 namer.nameFor(table, "optype", "drop", "blockname", blockname),
                 Map.of(
                     "simple", "drop table if exists " + table.getFullName() + ";",
                     "timeout", timeouts.get("drop")
                 )
             );
         }
         return dropTablesBlock;
@@ -653,11 +653,11 @@ public class CGWorkloadExporter implements BundledApp {
         dropTypesBlock.put("ops", ops);
         for (CqlType type : model.getTypeDefs()) {
             ops.put(
                 namer.nameFor(type, "optype", "drop-type", "blockname", blockname),
                 Map.of(
                     "simple", "drop type if exists " + type.getKeyspace() + "." + type.getName() + ";",
                     "timeout", timeouts.get("drop")
                 )
             );
         }
         return dropTypesBlock;
@@ -669,11 +669,11 @@ public class CGWorkloadExporter implements BundledApp {
         dropTypesBlock.put("ops", ops);
         for (CqlType type : model.getTypeDefs()) {
             ops.put(
                 namer.nameFor(type, "optype", "drop-keyspace", "blockname", blockname),
                 Map.of(
                     "simple", "drop keyspace if exists " + type.getKeyspace() + ";",
                     "timeout", timeouts.get("drop")
                 )
             );
         }
         return dropTypesBlock;
@@ -687,11 +687,11 @@ public class CGWorkloadExporter implements BundledApp {
         for (CqlTable table : model.getTableDefs()) {
             ops.put(
                 namer.nameFor(table, "optype", "truncate", "blockname", blockname),
                 Map.of(
                     "simple", "truncate " + table.getFullName() + ";",
                     "timeout", timeouts.get("truncate")
                 )
             );
         }
         return truncateblock;
@@ -703,11 +703,11 @@ public class CGWorkloadExporter implements BundledApp {
         for (CqlKeyspaceDef ks : model.getKeyspaceDefs()) {
             ops.put(
                 namer.nameFor(ks, "optype", "create", "blockname", blockname),
                 Map.of(
                     "simple", genKeyspaceDDL(ks),
                     "timeout", timeouts.get("create")
                 )
             );
         }
@@ -722,11 +722,11 @@ public class CGWorkloadExporter implements BundledApp {
         model.getTypeDefs().forEach(type -> {
             ops.put(
-                namer.nameFor(type,"optype","create","blockname",blockname),
+                namer.nameFor(type, "optype", "create", "blockname", blockname),
                 Map.of(
-                    "simple",genTypeDDL(type),
-                    "timeout",timeouts.get("create")
+                    "simple", genTypeDDL(type),
+                    "timeout", timeouts.get("create")
                 )
             );
         });
@@ -736,13 +736,13 @@ public class CGWorkloadExporter implements BundledApp {
     private String genKeyspaceDDL(CqlKeyspaceDef keyspace) {
         return """
             create keyspace KEYSPACE
             with replication = {REPLICATION}DURABLEWRITES?;
             """
             .replace("KEYSPACE", keyspace.getName())
             .replace("REPLICATION", keyspace.getReplicationData())
             .replace("DURABLEWRITES?", keyspace.isDurableWrites() ? "" : "\n and durable writes = false")
             ;
     }

     private Map<String, Object> genCreateTablesOpTemplates(CqlModel model, String blockname) {
@@ -751,11 +751,11 @@ public class CGWorkloadExporter implements BundledApp {
         model.getTableDefs().forEach(table -> {
             ops.put(
-                namer.nameFor(table, "optype","create","blockname",blockname),
+                namer.nameFor(table, "optype", "create", "blockname", blockname),
                 Map.of(
-                    "simple",genTableDDL(table),
-                    "timeout",timeouts.get("create")
+                    "simple", genTableDDL(table),
+                    "timeout", timeouts.get("create")
                 )
             );
         });
@@ -766,14 +766,14 @@ public class CGWorkloadExporter implements BundledApp {
     private String genTypeDDL(CqlType type) {
         return """
             create type KEYSPACE.TYPENAME (
             TYPEDEF
             );
             """
             .replace("KEYSPACE", type.getKeyspace().getName())
             .replace("TYPENAME", type.getName())
             .replace("TYPEDEF", type.getColumnDefs().stream()
                 .map(def -> def.getName() + " " + def.getTypedef()).collect(Collectors.joining(",\n")));
     }

     private Object genTableDDL(CqlTable cqltable) {
@@ -782,16 +782,16 @@ public class CGWorkloadExporter implements BundledApp {
         }
         return """
             create table if not exists KEYSPACE.TABLE (
             COLUMN_DEFS,
             primary key (PRIMARYKEY)
             )CLUSTERING;
             """
             .replace("KEYSPACE", cqltable.getKeyspace().getName())
             .replace("TABLE", cqltable.getName())
             .replace("COLUMN_DEFS", genTableColumnDDL(cqltable))
             .replace("PRIMARYKEY", genPrimaryKeyDDL(cqltable))
             .replace("CLUSTERING", genTableClusteringOrderDDL(cqltable));
     }
@@ -829,8 +829,8 @@ public class CGWorkloadExporter implements BundledApp {
     private String genTableColumnDDL(CqlTable cqltable) {
         return cqltable.getColumnDefs().stream()
             .map(cd -> cd.getName() + " " + cd.getTrimmedTypedef())
             .collect(Collectors.joining(",\n"));
     }
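All of the generator methods in this file (genScanSyntax, genInsertSyntax, genUpdateSyntax, genKeyspaceDDL, genTypeDDL, genTableDDL) share one technique: a Java text block serves as the statement template, and uppercase placeholder tokens are substituted through a replace chain. A minimal, self-contained sketch of that pattern follows; the Table record and method shape here are illustrative stand-ins, not the exporter's actual API:

public class OpTemplateSketch {

    // Illustrative stand-in for the real CqlTable model type.
    record Table(String keyspace, String name) {}

    // Uppercase tokens in the text block are replaced literally via
    // String.replace; passing "" for limit would simply drop that clause.
    static String genSelectSyntax(Table table, String predicate, String limit) {
        return """
            select * from KEYSPACE.TABLE
            where PREDICATE
            LIMIT;
            """
            .replace("KEYSPACE", table.keyspace())
            .replace("TABLE", table.name())
            .replace("PREDICATE", predicate)
            .replace("LIMIT", limit);
    }

    public static void main(String[] args) {
        // Prints a select template in the same shape the exporter emits.
        System.out.println(genSelectSyntax(
            new Table("baselines", "iot"),
            "machine_id={machine_id}",
            "limit 10"));
    }
}

One detail worth noting when reading the diff: replaceAll treats its first argument as a regular expression, so a token like DURABLEWRITES? (where ? is a regex quantifier) is only safe with the literal replace variant, which is what genKeyspaceDDL uses.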


@@ -1,33 +1,29 @@
-# nb -v run driver=cql yaml=cql-iot tags=phase:schema host=dsehost
 description: An IOT workload with more optimal settings for DSE

 scenarios:
   default:
-    schema: run driver=cql tags==phase:schema threads==1 cycles==UNDEF
-    rampup: run driver=cql tags==phase:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
-    main: run driver=cql tags==phase:main cycles===TEMPLATE(main-cycles,10000000) threads=auto
+    schema: run driver=cql tags==block:schema threads==1 cycles==UNDEF
+    rampup: run driver=cql tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
+    main: run driver=cql tags==block:"main-*.*" cycles===TEMPLATE(main-cycles,10000000) threads=auto

 bindings:
   machine_id: Mod(<<sources:10000>>); ToHashedUUID() -> java.util.UUID
   sensor_name: HashedLineToString('data/variable_words.txt')
-  time: Mul(<<timespeed:100>>L); Div(<<sources:10000>>L); ToDate()
-  cell_timestamp: Mul(<<timespeed:100>>L); Div(<<sources:10000>>L); Mul(1000L)
+  time: Mul(<<timespeed:100>>L); Div(<<sources:10000>>L); ToJavaInstant()
+  cell_timestamp: Mul(<<timespeed:100>>L); Div(<<sources:10000>>L); Mul(1000L);
   sensor_value: Normal(0.0,5.0); Add(100.0) -> double
   station_id: Div(<<sources:10000>>);Mod(<<stations:100>>); ToHashedUUID() -> java.util.UUID
   data: HashedFileExtractToString('data/lorem_ipsum_full.txt',800,1200)

 blocks:
-  - tags:
-      phase: schema
+  schema:
     params:
       prepared: false
-    statements:
-      - create-keyspace: |
+    ops:
+      create-keyspace: |
         create keyspace if not exists <<keyspace:baselines>>
         WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '<<rf:1>>'}
         AND durable_writes = true;
-        tags:
-          name: create-keyspace
-      - create-table : |
+      create-table: |
         create table if not exists <<keyspace:baselines>>.<<table:iot>> (
         machine_id UUID,     // source machine
         sensor_name text,    // sensor name
@@ -45,63 +41,46 @@ blocks:
         'compaction_window_unit': 'MINUTES',
         'split_during_flush': true
         };
-        tags:
-          name: create-table
-      - truncate-table: |
-          truncate table <<keyspace:baselines>>.<<table:iot>>;
-        tags:
-          name: truncate-table
-  - tags:
-      phase: rampup
+      truncate-table: |
+        truncate table <<keyspace:baselines>>.<<table:iot>>;
+  rampup:
     params:
       cl: <<write_cl:LOCAL_QUORUM>>
-    statements:
-      - insert-rampup: |
-          insert into <<keyspace:baselines>>.<<table:iot>>
-          (machine_id, sensor_name, time, sensor_value, station_id, data)
-          values ({machine_id}, {sensor_name}, {time}, {sensor_value}, {station_id}, {data})
-          using timestamp {cell_timestamp}
-        idempotent: true
-        tags:
-          name: insert-rampup
-  - tags:
-      phase: verify
-      type: read
+      idempotent: true
+    ops:
+      rampup-insert: |
+        insert into <<keyspace:baselines>>.<<table:iot>> (machine_id, sensor_name, time, sensor_value, station_id, data)
+        values ({machine_id}, {sensor_name}, {time}, {sensor_value}, {station_id}, {data})
+        using timestamp {cell_timestamp};
+  verify:
     params:
       ratio: 1
       cl: <<read_cl:LOCAL_QUORUM>>
-    statements:
-      - select-verify: |
-          select * from <<keyspace:baselines>>.<<table:iot>>
-          where machine_id={machine_id} and sensor_name={sensor_name} and time={time};
-        verify-fields: "*, -cell_timestamp"
-        tags:
-          name: select-verify
-  - tags:
-      phase: main
       type: read
+      verify-fields: "*, -cell_timestamp"
+    ops:
+      select-verify: |
+        select * from <<keyspace:baselines>>.<<table:iot>> where machine_id={machine_id}
+        and sensor_name={sensor_name} and time={time};
+  main-read:
     params:
       ratio: <<read_ratio:1>>
       cl: <<read_cl:LOCAL_QUORUM>>
-    statements:
-      - select-read: |
-          select * from <<keyspace:baselines>>.<<table:iot>>
-          where machine_id={machine_id} and sensor_name={sensor_name}
-          limit <<limit:10>>
-        tags:
-          name: select-read
-  - tags:
-      phase: main
-      type: write
+    ops:
+      select-read: |
+        select * from <<keyspace:baselines>>.<<table:iot>>
+        where machine_id={machine_id} and sensor_name={sensor_name}
+        limit <<limit:10>>;
+  main-write:
     params:
       ratio: <<write_ratio:9>>
       cl: <<write_cl:LOCAL_QUORUM>>
-    statements:
-      - insert-main: |
-          insert into <<keyspace:baselines>>.<<table:iot>>
-          (machine_id, sensor_name, time, sensor_value, station_id, data)
-          values ({machine_id}, {sensor_name}, {time}, {sensor_value}, {station_id}, {data})
-          using timestamp {cell_timestamp}
-        idempotent: true
-        tags:
-          name: insert-main
+      idempotent: true
+    ops:
+      insert-main: |
+        insert into <<keyspace:baselines>>.<<table:iot>>
+        (machine_id, sensor_name, time, sensor_value, station_id, data)
+        values ({machine_id}, {sensor_name}, {time}, {sensor_value}, {station_id}, {data})
+        using timestamp {cell_timestamp};


@@ -1,4 +1,3 @@
-# nb -v run driver=cql yaml=cql-iot tags=phase:schema host=dsehost
 description: |
   Time-series data model and access patterns. (use cql-timeseries instead)
   This is the same a cql-timeseries, which is the preferred name as it is


@@ -1,14 +1,15 @@
+min_version: "5.17.1"

 description: A workload with only text keys and text values

 scenarios:
   default:
-    schema: run driver=cql tags==phase:schema threads==1 cycles==UNDEF
-    rampup: run driver=cql tags==phase:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
-    main: run driver=cql tags==phase:main cycles===TEMPLATE(main-cycles,10000000) threads=auto
+    schema: run driver=cql tags==block:schema threads==1 cycles==UNDEF
+    rampup: run driver=cql tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
+    main: run driver=cql tags==block:'main-*.*' cycles===TEMPLATE(main-cycles,10000000) threads=auto
   astra:
-    schema: run driver=cql tags==phase:schema-astra threads==1 cycles==UNDEF
-    rampup: run driver=cql tags==phase:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
-    main: run driver=cql tags==phase:main cycles===TEMPLATE(main-cycles,10000000) threads=auto
+    schema: run driver=cql tags==block:schema-astra threads==1 cycles==UNDEF
+    rampup: run driver=cql tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
+    main: run driver=cql tags==block:'main-*.*' cycles===TEMPLATE(main-cycles,10000000) threads=auto

 bindings:
   seq_key: Mod(<<keycount:1000000000>>); ToString() -> String
@@ -17,80 +18,53 @@ bindings:
   rw_value: Hash(); <<valdist:Uniform(0,1000000000)->int>>; ToString() -> String

 blocks:
-  - name: schema
-    tags:
-      phase: schema
+  schema:
     params:
       prepared: false
-    statements:
-      - create-table: |
+    ops:
+      create-table: |
        create table if not exists <<keyspace:baselines>>.<<table:keyvalue>> (
         key text,
         value text,
         PRIMARY KEY (key)
        );
-        tags:
-          name: create-table
-  - name: schema-astra
-    tags:
-      phase: schema-astra
+  schema-astra:
     params:
       prepared: false
-    statements:
-      - create-table: |
+    ops:
+      create-table: |
        create table if not exists <<keyspace:baselines>>.<<table:keyvalue>> (
         key text,
         value text,
         PRIMARY KEY (key)
        );
-        tags:
-          name: create-table-astra
-  - name: rampup
-    tags:
-      phase: rampup
+  rampup:
     params:
       cl: <<write_cl:LOCAL_QUORUM>>
-    statements:
-      - rampup-insert: |
+    ops:
+      rampup-insert: |
        insert into <<keyspace:baselines>>.<<table:keyvalue>>
        (key, value)
        values ({seq_key},{seq_value});
-        tags:
-          name: rampup-insert
-  - name: verify
-    tags:
-      phase: verify
-      type: read
+  verify:
     params:
       cl: <<read_cl:LOCAL_QUORUM>>
-    statements:
-      - verify-select: |
-          select * from <<keyspace:baselines>>.<<table:keyvalue>> where key={seq_key};
-        verify-fields: key->seq_key, value->seq_value
-        tags:
-          name: verify
-  - name: main-read
-    tags:
-      phase: main
-      type: read
+      verify-fields: key->seq_key, value->seq_value
+    ops:
+      verify-select: |
+        select * from <<keyspace:baselines>>.<<table:keyvalue>> where key={seq_key};
+  main-read:
     params:
       ratio: 5
       cl: <<read_cl:LOCAL_QUORUM>>
-    statements:
-      - main-select: |
+    ops:
+      main-select: |
        select * from <<keyspace:baselines>>.<<table:keyvalue>> where key={rw_key};
-  - name: main-write
-    tags:
-      phase: main
-      type: write
+  main-write:
     params:
       ratio: 5
       cl: <<write_cl:LOCAL_QUORUM>>
-    statements:
-      - main-insert: |
-        insert into <<keyspace:baselines>>.<<table:keyvalue>>
-        (key, value) values ({rw_key}, {rw_value});
+    ops:
+      main-insert: |
+        insert into <<keyspace:baselines>>.<<table:keyvalue>> (key, value) values ({rw_key}, {rw_value});


@@ -1,14 +1,15 @@
+min_version: "5.17.1"

 description: A tabular workload with partitions, clusters, and data fields

 scenarios:
   default:
-    schema: run driver=cql tags==phase:schema threads==1 cycles==UNDEF
-    rampup: run driver=cql tags==phase:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
-    main: run driver=cql tags==phase:main cycles===TEMPLATE(main-cycles,10000000) threads=auto
+    schema: run driver=cql tags==block:schema threads==1 cycles==UNDEF
+    rampup: run driver=cql tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
+    main: run driver=cql tags==block:main-*.* cycles===TEMPLATE(main-cycles,10000000) threads=auto
   astra:
-    schema: run driver=cql tags==phase:schema-astra threads==1 cycles==UNDEF
-    rampup: run driver=cql tags==phase:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
-    main: run driver=cql tags==phase:main cycles===TEMPLATE(main-cycles,10000000) threads=auto
+    schema: run driver=cql tags==block:schema-astra threads==1 cycles==UNDEF
+    rampup: run driver=cql tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
+    main: run driver=cql tags==block:main-*.* cycles===TEMPLATE(main-cycles,10000000) threads=auto

 bindings:
   # for ramp-up and verify
@@ -25,88 +26,60 @@ bindings:
   data_write: Hash(); HashedFileExtractToString('data/lorem_ipsum_full.txt',50,150) -> String

 blocks:
-  - name: schema
-    tags:
-      phase: schema
+  schema:
     params:
       prepared: false
-    statements:
-      - create-keyspace: |
+    ops:
+      create-keyspace: |
         create keyspace if not exists <<keyspace:baselines>>
         WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '<<rf:1>>'}
         AND durable_writes = true;
-        tags:
-          name: create-keyspace
-      - create-table: |
+      create-table: |
        create table if not exists <<keyspace:baselines>>.<<table:tabular>> (
         part text,
         clust text,
         data text,
         PRIMARY KEY (part,clust)
        );
-        tags:
-          name: create-table
-  - name: schema-astra
-    tags:
-      phase: schema-astra
+  schema-astra:
     params:
       prepared: false
-    statements:
-      - create-table: |
+    ops:
+      create-table: |
        create table if not exists <<keyspace:baselines>>.<<table:tabular>> (
         part text,
         clust text,
         data text,
         PRIMARY KEY (part,clust)
        );
-        tags:
-          name: create-table-astra
-  - name: rampup
-    tags:
-      phase: rampup
+  rampup:
     params:
       cl: <<write_cl:LOCAL_QUORUM>>
-    statements:
-      - rampup-insert: |
+    ops:
+      rampup-insert: |
        insert into <<keyspace:baselines>>.<<table:tabular>>
        (part,clust,data)
        values ({part_layout},{clust_layout},{data})
-        tags:
-          name: rampup-insert
-  - name: verify
-    tags:
-      phase: verify
-      type: read
+  verify:
     params:
       cl: <<read_cl:LOCAL_QUORUM>>
-    statements:
-      - verify-select: |
-          select * from <<keyspace:baselines>>.<<table:tabular>> where part={part_layout} and clust={clust_layout}
-        tags:
-          name: verify-select
-  - name: main-read
-    tags:
-      phase: main
-      type: read
+    ops:
+      verify-select: |
+        select * from <<keyspace:baselines>>.<<table:tabular>> where part={part_layout} and clust={clust_layout};
+  main-read:
     params:
       ratio: 5
       cl: <<read_cl:LOCAL_QUORUM>>
-    statements:
-      - main-select: |
+    ops:
+      main-select: |
        select * from <<keyspace:baselines>>.<<table:tabular>> where part={part_read} limit {limit};
-        tags:
-          name: main-select
-  - name: main-write
-    tags:
-      phase: main
-      type: write
+  main-write:
     params:
       ratio: 5
       cl: <<write_cl:LOCAL_QUORUM>>
-    statements:
-      - main-write: |
+    ops:
+      main-write: |
        insert into <<keyspace:baselines>>.<<table:tabular>>
-       (part, clust, data)
-       values ({part_write},{clust_write},{data_write})
-        tags:
-          name: main-write
+       (part, clust, data) values ({part_write},{clust_write},{data_write});


@@ -1,13 +1,13 @@
-min_version: "4.17.15"
+min_version: "5.17.1"

 description: creates local graphs which resemble a wagon-wheel topology, using
   DSE Graph, version 6.8 or newer

 scenarios:
   default:
-    creategraph: run driver=cqld4 graphname=graph_wheels tags=phase:create-graph cycles===UNDEF
-    schema: run driver=cqld4 graphname=graph_wheels tags=phase:graph-schema cycles===UNDEF
-    rampup: run driver==cqld4 graphname=graph_wheels tags=phase:rampup cycles=1
+    creategraph: run driver=cqld4 graphname=graph_wheels tags=block:create-graph cycles===UNDEF
+    schema: run driver=cqld4 graphname=graph_wheels tags=block:graph-schema cycles===UNDEF
+    rampup: run driver==cqld4 graphname=graph_wheels tags=block:rampup cycles=1
     drop-graph: run driver=cqld4 graphname=graph_wheels tags=block:drop-graph cycles===UNDEF
     creategraph-classic: run driver=cqld4 graphname=graph_wheels tags=block:create-graph-classic cycles===UNDEF
     fluent: run driver=cqld4 graphname=graph_wheels tags=block:fluent cycles=10
@@ -40,16 +40,12 @@ blocks:
           .classicEngine()
           .create()
   create-graph:
-    tags:
-      phase: create-graph
     statements:
       creategraph:
         type: gremlin
         script: >-
           system.graph('<<graphname:graph_wheels>>').ifNotExists().create()
   create-schema:
-    tags:
-      phase: graph-schema
     statements:
       graph-schema:
         type: gremlin
@@ -78,7 +74,7 @@ blocks:
           .create()
   dev-mode:
     tags:
-      phase: dev-mode
+      block: dev-mode
     statements:
       dev-mode:
         type: gremlin
@@ -87,7 +83,7 @@ blocks:
           schema.config().option('graph.schema_mode').set('Development');
   prod-mode:
     tags:
-      phase: prod-mode
+      block: prod-mode
     statements:
       prod-mode:
         type: gremlin
@@ -96,7 +92,7 @@ blocks:
           schema.config().option('graph.schema_mode').set('Production');
   rampup:
     tags:
-      phase: rampup
+      block: rampup
     statements:
       main-add:
         type: gremlin


@@ -1,3 +1,4 @@
+min_version: "5.17.1"

 description: |
   This is a workload which creates an incrementally growing dataset over cycles.
@@ -26,16 +27,13 @@ description: |

 scenarios:
   default:
-    schema: run tags=phase:schema threads==1
-    # rampup: run tags=phase:rampup cycles===TEMPLATE(rampup-cycles,100000) threads=auto
-    main: run tags=block:"main.*" cycles===TEMPLATE(main-cycles,0) threads=auto
-    default-schema: run tags=block:schema threads==1
-    # default-rampup: run tags=phase:rampup cycles===TEMPLATE(rampup-cycles,100000) threads=auto
-    default-main: run tags=block:"main.* cycles===TEMPLATE(main-cycles,0) threads=auto
+    schema: run tags=block:schema.* threads==1
+    main: run tags=block:main-.*.* cycles===TEMPLATE(main-cycles,0) threads=auto
+    default-schema: run tags=block:'schema.*' threads==1
+    default-main: run tags=block:'main.*' cycles===TEMPLATE(main-cycles,0) threads=auto
   astra:
     schema: run tags=block:astra-schema threads==1
-    # rampup: run tags=phase:rampup cycles===TEMPLATE(rampup-cycles,0) threads=auto
-    main: run tags=block:"main.*" cycles===TEMPLATE(main-cycles,0) threads=auto
+    main: run tags=block:'main.*' cycles===TEMPLATE(main-cycles,0) threads=auto

 params:
   instrument: true


@@ -1,4 +1,4 @@
-min_version: "4.17.15"
+min_version: "5.17.1"

 description: |
   A workload with only text keys and text values which range in size from 50K to 150K.
@@ -83,5 +83,4 @@ blocks:
       cl: TEMPLATE(write_cl,LOCAL_QUORUM)
     statements:
       main-insert: |
-        insert into TEMPLATE(keyspace,baselines).TEMPLATE(table,keyvalue)
-        (key, value) values ({rw_key}, {rw_value});
+        insert into TEMPLATE(keyspace,baselines).TEMPLATE(table,keyvalue) (key, value) values ({rw_key}, {rw_value});


@@ -1,4 +1,4 @@
-min_version: "4.17.15"
+min_version: "5.17.1"

 description: |
   A workload with only text keys and text values.


@@ -1,4 +1,4 @@
-min_version: "4.17.15"
+min_version: "5.17.1"

 description: |
   A tabular workload with partitions, clusters, and data fields
@@ -29,11 +29,11 @@ scenarios:
   default:
     schema: run driver=cql tags==block:schema threads==1 cycles==UNDEF
     rampup: run driver=cql tags==block:rampup cycles===TEMPLATE(rampup-cycles,10B) threads=auto
-    main: run driver=cql tags==block:"main.*" cycles===TEMPLATE(main-cycles,100M) threads=auto
+    main: run driver=cql tags==block:"main-*.*" cycles===TEMPLATE(main-cycles,100M) threads=auto
   astra:
     schema: run driver=cql tags==block:schema-astra threads==1 cycles==UNDEF
     rampup: run driver=cql tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
-    main: run driver=cql tags==block:"main.*" cycles===TEMPLATE(main-cycles,10000000) threads=auto
+    main: run driver=cql tags==block:"main-*.*" cycles===TEMPLATE(main-cycles,10000000) threads=auto

 params:
   instrument: true


@@ -1,4 +1,4 @@
-min_version: "4.17.15"
+min_version: "5.17.1"

 description: |
   This workload emulates a time-series data model and access patterns.
@@ -7,11 +7,11 @@ scenarios:
   default:
     schema: run driver=cql tags==block:schema threads==1 cycles==UNDEF
     rampup: run driver=cql tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
-    main: run driver=cql tags==block:"main.*" cycles===TEMPLATE(main-cycles,10000000) threads=auto
+    main: run driver=cql tags==block:"main-*.*" cycles===TEMPLATE(main-cycles,10000000) threads=auto
   astra:
     schema: run driver=cql tags==block:schema-astra threads==1 cycles==UNDEF
     rampup: run driver=cql tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
-    main: run driver=cql tags==block:"main.*" cycles===TEMPLATE(main-cycles,10000000) threads=auto
+    main: run driver=cql tags==block:"main-*.*" cycles===TEMPLATE(main-cycles,10000000) threads=auto

 params:
   instrument: TEMPLATE(instrument,false)
@@ -82,11 +82,12 @@ blocks:
       ratio: 1
       cl: TEMPLATE(read_cl,LOCAL_QUORUM)
       instrument: TEMPLATE(instrument-reads,TEMPLATE(instrument,false))
+      verify-fields: "*, -cell_timestamp"
     ops:
       select-verify: |
         select * from TEMPLATE(keyspace,baselines).TEMPLATE(table,iot)
         where machine_id={machine_id} and sensor_name={sensor_name} and time={time};
-      verify-fields: "*, -cell_timestamp"
   main-read:
     params:
       ratio: TEMPLATE(read_ratio,1)
@@ -96,7 +97,7 @@ blocks:
       select-read: |
         select * from TEMPLATE(keyspace,baselines).TEMPLATE(table,iot)
         where machine_id={machine_id} and sensor_name={sensor_name}
-        limit TEMPLATE(limit,10)
+        limit TEMPLATE(limit,10);
   main-write:
     params:
       ratio: TEMPLATE(write_ratio,9)
@@ -108,4 +109,4 @@ blocks:
         insert into TEMPLATE(keyspace,baselines).TEMPLATE(table,iot)
         (machine_id, sensor_name, time, sensor_value, station_id, data)
         values ({machine_id}, {sensor_name}, {time}, {sensor_value}, {station_id}, {data})
-        using timestamp {cell_timestamp}
+        using timestamp {cell_timestamp};


@@ -1,9 +1,10 @@
-min_version: "4.17.15"
+min_version: "5.17.1"

 scenarios:
   default:
-    schema: run driver=cql tags==phase:schema cycles==UNDEF threads==1
-    rampup: run driver=cql tags==phase:rampup cycles=TEMPLATE(rampup-cycles,100K) threads=auto
+    schema: run driver=cql tags==block:schema cycles==UNDEF threads==1
+    rampup: run driver=cql tags==block:rampup cycles=TEMPLATE(rampup-cycles,100K) threads=auto
+    main: run driver=cql tags==block:"main" cycles===TEMPLATE(main-cycles,100K) threads=auto

 bindings:
   userid: Template('user-{}',ToString()); SaveString('userid');


@@ -1,18 +1,18 @@
 description: Auto-generated workload from source schema.

 scenarios:
   default:
-    schema: run driver=cql tags=block:schema.* threads===UNDEF cycles===UNDEF
-    rampup: run driver=cql tags=block:rampup.* threads=auto cycles===TEMPLATE(rampup-cycles,10000)
-    main: run driver=cql tags=block:main.* threads=auto cycles===TEMPLATE(main-cycles,10000)
+    schema: run driver=cql tags=block:'schema.*' threads===UNDEF cycles===UNDEF
+    rampup: run driver=cql tags=block:'rampup.*' threads=auto cycles===TEMPLATE(rampup-cycles,10000)
+    main: run driver=cql tags=block:'main.*' threads=auto cycles===TEMPLATE(main-cycles,10000)
     main-insert: run driver=cql tags=block:main-insert threads=auto cycles===TEMPLATE(main-cycles,10000)
     main-select: run driver=cql tags=block:main-select threads=auto cycles===TEMPLATE(main-cycles,10000)
     main-scan: run driver=cql tags=block:main-scan threads=auto cycles===TEMPLATE(main-cycles,10000)
     main-update: run driver=cql tags=block:main-update threads=auto cycles===TEMPLATE(main-cycles,10000)
-    truncate: run driver=cql tags=block:truncate.* threads===UNDEF cycles===UNDEF
+    truncate: run driver=cql tags=block:'truncate.*' threads===UNDEF cycles===UNDEF
     schema-keyspaces: run driver=cql tags=block:schema-keyspaces threads===UNDEF cycles===UNDEF
     schema-types: run driver=cql tags=block:schema-types threads===UNDEF cycles===UNDEF
     schema-tables: run driver=cql tags=block:schema-tables threads===UNDEF cycles===UNDEF
-    drop: run driver=cql tags=block:drop.* threads===UNDEF cycles===UNDEF
+    drop: run driver=cql tags=block:'drop.*' threads===UNDEF cycles===UNDEF
     drop-tables: run driver=cql tags=block:drop-tables threads===UNDEF cycles===UNDEF
     drop-types: run driver=cql tags=block:drop-types threads===UNDEF cycles===UNDEF
     drop-keyspaces: run driver=cql tags=block:drop-keyspaces threads===UNDEF cycles===UNDEF


@@ -1,4 +1,4 @@
-min_version: "4.17.15"
+min_version: "5.17.1"

 description: |
   A workload with only text keys and text values. This is based on the CQL keyvalue workloads as found
@@ -6,9 +6,9 @@ description: |

 scenarios:
   default:
-    schema: run driver=dynamodb tags==block:schema threads==1 cycles==UNDEF
+    schema: run driver=dynamodb tags==block:'schema.*' threads==1 cycles==UNDEF
     rampup: run driver=dynamodb tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
-    main: run driver=dynamodb tags=="block:main.*" cycles===TEMPLATE(main-cycles,10000000) threads=auto
+    main: run driver=dynamodb tags==block:'main-*.*' cycles===TEMPLATE(main-cycles,10000000) threads=auto
     read: run driver=dynamodb tags==block:main-read cycles===TEMPLATE(main-cycles,10000000) threads=auto
     write: run driver=dynamodb tags==block:main-write cycles===TEMPLATE(main-cycles,10000000) threads=auto


@@ -1,13 +1,13 @@
-min_version: "4.17.15"
+min_version: "5.17.1"

 description: |
   Run a read/write workload against DynamoDB with varying field sizes and query patterns

 scenarios:
-  schema: run driver=dynamodb tags=block:schema region=us-east-1
+  schema: run driver=dynamodb tags=block:'schema.*' region=us-east-1
   rampup: run driver=dynamodb tags=block:rampup region=us-east-1
   read: run driver=dynamodb tags=block:read region=us-east-1
-  main: run driver=dynamodb tags=block:"main.*" region=us-east-1
+  main: run driver=dynamodb tags=block:'main-*.*' region=us-east-1
   read01: run driver=dynamodb tags='name:.*main-read-01' region=us-east-1
   delete:
     table: run driver=dynamodb tags==block:delete threads==1 cycles==UNDEF


@@ -1,4 +1,4 @@
-min_version: "4.17.15"
+min_version: "5.17.1"

 description: |
   This workload emulates a time-series data model and access patterns. This is based on the
@@ -11,7 +11,7 @@ description: |
 scenarios:
   default:
-    schema: run driver=dynamodb tags==block:schema threads==1 cycles==UNDEF
+    schema: run driver=dynamodb tags==block:'schema.*' threads==1 cycles==UNDEF
     rampup: run driver=dynamodb tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
     main: run driver=dynamodb tags==block:main cycles===TEMPLATE(main-cycles,10000000) threads=auto
   delete:


@@ -1,4 +1,4 @@
-min_version: "4.17.15"
+min_version: "5.17.1"

 description: |
   This workload emulates a key-value data model and access patterns.
@@ -9,8 +9,8 @@ description: |
 scenarios:
   default:
     schema: run driver=http tags==block:schema threads==1 cycles==UNDEF
-    rampup: run driver=http tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
-    main: run driver=http tags==block:main cycles===TEMPLATE(main-cycles,10000000) threads=auto
+    rampup: run driver=http tags==block:"rampup-*.*" cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
+    main: run driver=http tags==block:"main-*.*" cycles===TEMPLATE(main-cycles,10000000) threads=auto

 bindings:
   # To enable an optional weighted set of hosts in place of a load balancer
@@ -19,11 +19,13 @@ bindings:
   # multiple hosts: restapi_host=host1,host2,host3
   # multiple weighted hosts: restapi_host=host1:3,host2:7
   weighted_hosts: WeightedStrings('<<restapi_host:stargate>>')
+  # http request id
   request_id: ToHashedUUID(); ToString();
+  request_token: ToString(); TextOfFile("TEMPLATE(stargate_tokenfile,data/stargate_token.txt)")

   seq_key: Mod(<<keycount:10000000>>); ToString() -> String
   seq_value: Hash(); Mod(<<valuecount:1000000000>>); ToString() -> String
   rw_key: <<keydist:Uniform(0,10000000)->int>>; ToString() -> String
   rw_value: Hash(); <<valdist:Uniform(0,1000000000)->int>>; ToString() -> String
@@ -35,7 +37,7 @@ blocks:
         uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/schemas/keyspaces
         Accept: "application/json"
         X-Cassandra-Request-Id: "{request_id}"
-        X-Cassandra-Token: "<<auth_token:my_auth_token>>"
+        X-Cassandra-Token: "{request_token}"
         Content-Type: "application/json"
         body: |
           {
@@ -48,7 +50,7 @@ blocks:
         uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/schemas/keyspaces/<<keyspace:baselines>>/tables/<<table:keyvalue>>
         Accept: "application/json"
         X-Cassandra-Request-Id: "{request_id}"
-        X-Cassandra-Token: "<<auth_token:my_auth_token>>"
+        X-Cassandra-Token: "{request_token}"
         Content-Type: "application/json"
         ok-status: "[2-4][0-9][0-9]"
@@ -57,7 +59,7 @@ blocks:
         uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/schemas/keyspaces/<<keyspace:baselines>>/tables
         Accept: "application/json"
         X-Cassandra-Request-Id: "{request_id}"
-        X-Cassandra-Token: "<<auth_token:my_auth_token>>"
+        X-Cassandra-Token: "{request_token}"
         Content-Type: "application/json"
         body: |
           {
@@ -87,7 +89,7 @@ blocks:
         uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/schemas/keyspaces/<<keyspace:baselines>>/tables
         Accept: "application/json"
         X-Cassandra-Request-Id: "{request_id}"
-        X-Cassandra-Token: "<<auth_token:my_auth_token>>"
+        X-Cassandra-Token: "{request_token}"
         Content-Type: "application/json"
         body: |
           {
@@ -117,7 +119,7 @@ blocks:
         uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/keyspaces/<<keyspace:baselines>>/<<table:keyvalue>>
         Accept: "application/json"
         X-Cassandra-Request-Id: "{request_id}"
-        X-Cassandra-Token: "<<auth_token:my_auth_token>>"
+        X-Cassandra-Token: "{request_token}"
         Content-Type: "application/json"
         body: |
           {
@@ -134,7 +136,7 @@ blocks:
         uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/keyspaces/<<keyspace:baselines>>/<<table:keyvalue>>/{rw_key}
         Accept: "application/json"
         X-Cassandra-Request-Id: "{request_id}"
-        X-Cassandra-Token: "<<auth_token:my_auth_token>>"
+        X-Cassandra-Token: "{request_token}"
         Content-Type: "application/json"
         ok-status: "[2-4][0-9][0-9]"
@@ -147,7 +149,7 @@ blocks:
         uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/keyspaces/<<keyspace:baselines>>/<<table:keyvalue>>
         Accept: "application/json"
         X-Cassandra-Request-Id: "{request_id}"
-        X-Cassandra-Token: "<<auth_token:my_auth_token>>"
+        X-Cassandra-Token: "{request_token}"
         Content-Type: "application/json"
         body: |
           {

View File

@@ -1,4 +1,4 @@
-min_version: "4.17.15"
+min_version: "5.17.1"
 description: |
   This workload emulates a tabular workload with partitions, clusters, and data fields.
@@ -9,9 +9,9 @@ description: |
 scenarios:
   default:
-    schema: run driver=http tags==block:schema threads==1 cycles==UNDEF
+    schema: run driver=http tags==block:'schema.*' threads==1 cycles==UNDEF
-    rampup: run driver=http tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
+    rampup: run driver=http tags==block:'rampup-*.*' cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
-    main: run driver=http tags==block:main cycles===TEMPLATE(main-cycles,10000000) threads=auto
+    main: run driver=http tags==block:'main-*.*' cycles===TEMPLATE(main-cycles,10000000) threads=auto
 bindings:
   # To enable an optional weighted set of hosts in place of a load balancer
@@ -20,16 +20,19 @@ bindings:
   # multiple hosts: restapi_host=host1,host2,host3
   # multiple weighted hosts: restapi_host=host1:3,host2:7
   weighted_hosts: WeightedStrings('<<restapi_host:stargate>>')
+  # http request id
   request_id: ToHashedUUID(); ToString();
+  request_token: ToString(); TextOfFile("TEMPLATE(stargate_tokenfile,data/stargate_token.txt)")
   # for ramp-up and verify
   part_layout: Div(<<partsize:1000000>>); ToString() -> String
   clust_layout: Mod(<<partsize:1000000>>); ToString() -> String
   data: HashedFileExtractToString('data/lorem_ipsum_full.txt',50,150); URLEncode();
   # for read
   limit: Uniform(1,10) -> int
   part_read: Uniform(0,<<partcount:100>>)->int; ToString() -> String
   clust_read: Add(1); Uniform(0,<<partsize:1000000>>)->int; ToString() -> String
   # for write
   part_write: Hash(); Uniform(0,<<partcount:100>>)->int; ToString() -> String
   clust_write: Hash(); Add(1); Uniform(0,<<partsize:1000000>>)->int; ToString() -> String
@@ -43,7 +46,7 @@ blocks:
       uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/schemas/keyspaces
       Accept: "application/json"
       X-Cassandra-Request-Id: "{request_id}"
-      X-Cassandra-Token: "<<auth_token:my_auth_token>>"
+      X-Cassandra-Token: "{request_token}"
       Content-Type: "application/json"
       body: |
         {
@@ -56,7 +59,7 @@ blocks:
       uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/schemas/keyspaces/<<keyspace:baselines>>/tables/<<table:tabular>>
       Accept: "application/json"
       X-Cassandra-Request-Id: "{request_id}"
-      X-Cassandra-Token: "<<auth_token:my_auth_token>>"
+      X-Cassandra-Token: "{request_token}"
       Content-Type: "application/json"
       ok-status: "[2-4][0-9][0-9]"
@@ -65,7 +68,7 @@ blocks:
       uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/schemas/keyspaces/<<keyspace:baselines>>/tables
       Accept: "application/json"
       X-Cassandra-Request-Id: "{request_id}"
-      X-Cassandra-Token: "<<auth_token:my_auth_token>>"
+      X-Cassandra-Token: "{request_token}"
       Content-Type: "application/json"
       body: |
         {
@@ -102,7 +105,7 @@ blocks:
       uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/keyspaces/<<keyspace:baselines>>/<<table:tabular>>
       Accept: "application/json"
       X-Cassandra-Request-Id: "{request_id}"
-      X-Cassandra-Token: "<<auth_token:my_auth_token>>"
+      X-Cassandra-Token: "{request_token}"
       Content-Type: "application/json"
       body: |
         {
@@ -120,7 +123,7 @@ blocks:
       uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/keyspaces/<<keyspace:baselines>>/<<table:tabular>>/{part_read}&page-size={limit}
       Accept: "application/json"
       X-Cassandra-Request-Id: "{request_id}"
-      X-Cassandra-Token: "<<auth_token:my_auth_token>>"
+      X-Cassandra-Token: "{request_token}"
       Content-Type: "application/json"
   main-write:
@@ -132,7 +135,7 @@ blocks:
       uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/keyspaces/<<keyspace:baselines>>/<<table:tabular>>
       Accept: "application/json"
       X-Cassandra-Request-Id: "{request_id}"
-      X-Cassandra-Token: "<<auth_token:my_auth_token>>"
+      X-Cassandra-Token: "{request_token}"
       Content-Type: "application/json"
       body: |
         {
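
As a reading aid for the rampup bindings above: part_layout and clust_layout split each cycle number into a (partition, row) pair, so a sequential cycle walk fills one partition completely before starting the next. A small worked sketch with the default partsize of 1000000:

# part_layout  = cycle / partsize   (Div)
# clust_layout = cycle % partsize   (Mod)
# cycle 0       -> partition "0", row "0"
# cycle 999999  -> partition "0", row "999999"
# cycle 1000000 -> partition "1", row "0"
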

View File

@@ -1,4 +1,4 @@
-min_version: "4.17.15"
+min_version: "5.17.1"
 description: |
   This workload emulates a time-series data model and access patterns.
@@ -12,10 +12,10 @@ description: |
 scenarios:
   default:
-    schema: run driver=cql tags==block:schema threads==1 cycles==UNDEF
+    schema: run driver=cql tags==block:'schema-*.*' threads==1 cycles==UNDEF
     schema-astra: run driver=cql tags==block:schema-astra threads==1 cycles==UNDEF
-    rampup: run driver=http tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
+    rampup: run driver=http tags==block:'rampup-*.*' cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
-    main: run driver=http tags==block:"main.*" cycles===TEMPLATE(main-cycles,10000000) threads=auto
+    main: run driver=http tags==block:'main-*.*' cycles===TEMPLATE(main-cycles,10000000) threads=auto
 bindings:
   # To enable an optional weighted set of hosts in place of a load balancer
@@ -24,8 +24,10 @@ bindings:
   # multiple hosts: restapi_host=host1,host2,host3
   # multiple weighted hosts: restapi_host=host1:3,host2:7
   weighted_hosts: WeightedStrings('<<restapi_host:stargate>>')
   # http request id
   request_id: ToHashedUUID(); ToString();
+  request_token: ToString(); TextOfFile("TEMPLATE(stargate_tokenfile,data/stargate_token.txt)")
   machine_id: Mod(<<sources:10000>>); ToHashedUUID() -> java.util.UUID
   sensor_name: HashedLineToString('data/variable_words.txt')
@@ -42,7 +44,7 @@ blocks:
       uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/schemas/keyspaces
       Accept: "application/json"
       X-Cassandra-Request-Id: "{request_id}"
-      X-Cassandra-Token: "<<auth_token:my_auth_token>>"
+      X-Cassandra-Token: "{request_token}"
       Content-Type: "application/json"
       body: |
         {
@@ -55,7 +57,7 @@ blocks:
       uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/schemas/keyspaces/<<keyspace:baselines>>/tables/<<table:iot>>
       Accept: "application/json"
       X-Cassandra-Request-Id: "{request_id}"
-      X-Cassandra-Token: "<<auth_token:my_auth_token>>"
+      X-Cassandra-Token: "{request_token}"
       Content-Type: "application/json"
       ok-status: "[2-4][0-9][0-9]"
@@ -64,7 +66,7 @@ blocks:
       uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/schemas/keyspaces/<<keyspace:baselines>>/tables
       Accept: "application/json"
       X-Cassandra-Request-Id: "{request_id}"
-      X-Cassandra-Token: "<<auth_token:my_auth_token>>"
+      X-Cassandra-Token: "{request_token}"
       Content-Type: "application/json"
       body: |
         {
@@ -123,7 +125,7 @@ blocks:
       uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/keyspaces/<<keyspace:baselines>>/<<table:iot>>
       Accept: "application/json"
       X-Cassandra-Request-Id: "{request_id}"
-      X-Cassandra-Token: "<<auth_token:my_auth_token>>"
+      X-Cassandra-Token: "{request_token}"
       Content-Type: "application/json"
       body: |
         {
@@ -144,7 +146,7 @@ blocks:
       uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/keyspaces/<<keyspace:baselines>>/<<table:iot>>?where=URLENCODE[[{"machine_id":{"$eq":"{machine_id}"},"sensor_name":{"$eq":"{sensor_name}"}}]]&page-size=<<limit:10>>
       Accept: "application/json"
       X-Cassandra-Request-Id: "{request_id}"
-      X-Cassandra-Token: "<<auth_token:my_auth_token>>"
+      X-Cassandra-Token: "{request_token}"
       Content-Type: "application/json"
   main-write:
@@ -156,7 +158,7 @@ blocks:
       uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/keyspaces/<<keyspace:baselines>>/<<table:iot>>
       Accept: "application/json"
       X-Cassandra-Request-Id: "{request_id}"
-      X-Cassandra-Token: "<<auth_token:my_auth_token>>"
+      X-Cassandra-Token: "{request_token}"
       Content-Type: "application/json"
       body: |
         {
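
The URLENCODE[[...]] wrapper in the main-read uri above URL-encodes the enclosed JSON before the request is sent, so the REST API receives a where clause equivalent to the following sketch (the concrete values are placeholders for one hypothetical cycle, not real binding outputs):

# where clause, decoded:
# {
#   "machine_id":  { "$eq": "3c8a0000-0000-4000-8000-000000000000" },  # from machine_id
#   "sensor_name": { "$eq": "example_word" }                            # from sensor_name
# }
# sent as ...?where=%7B%22machine_id%22%3A%7B%22%24eq%22...&page-size=10
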

View File

@@ -1,6 +1,4 @@
-min_version: "4.17.15"
+min_version: "5.17.1"
-# nb -v run driver=http yaml=http-docsapi-crud-basic tags=phase:schema docsapi_host=my_docsapi_host auth_token=$AUTH_TOKEN
 description: |
   This workload emulates CRUD operations for the Stargate Documents API.
@@ -9,11 +7,11 @@ description: |
 scenarios:
   default:
-    schema: run driver=http tags==block:schema threads==1 cycles==UNDEF
+    schema: run driver=http tags==block:'schema.*' threads==1 cycles==UNDEF
-    write: run driver=http tags==block:"write.*" cycles===TEMPLATE(write-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
+    write: run driver=http tags==block:'write.*' cycles===TEMPLATE(write-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
-    read: run driver=http tags==block:"read.*" cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
+    read: run driver=http tags==block:'read.*' cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
-    update: run driver=http tags==block:"update.*" cycles===TEMPLATE(update-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
+    update: run driver=http tags==block:'update.*' cycles===TEMPLATE(update-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
-    delete: run driver=http tags==block:"delete.*" cycles===TEMPLATE(delete-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
+    delete: run driver=http tags==block:'delete.*' cycles===TEMPLATE(delete-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
 bindings:
   # To enable an optional weighted set of hosts in place of a load balancer

View File

@@ -1,4 +1,4 @@
-min_version: "4.17.15"
+min_version: "5.17.1"
 description: |
   This workload emulates CRUD operations for the Stargate Documents API.
@@ -7,11 +7,11 @@ description: |
 scenarios:
   default:
-    schema: run driver=http tags==block:schema threads==1 cycles==UNDEF
+    schema: run driver=http tags==block:'schema.*' threads==1 cycles==UNDEF
-    write: run driver=http tags==name:"write.*" cycles===TEMPLATE(write-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
+    write: run driver=http tags==name:'write.*' cycles===TEMPLATE(write-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
-    read: run driver=http tags==name:"read.*" cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
+    read: run driver=http tags==name:'read.*' cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
-    update: run driver=http tags==name:"update.*" cycles===TEMPLATE(update-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
+    update: run driver=http tags==name:'update.*' cycles===TEMPLATE(update-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
-    delete: run driver=http tags==name:"delete.*" cycles===TEMPLATE(delete-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
+    delete: run driver=http tags==name:'delete.*' cycles===TEMPLATE(delete-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
 bindings:
   # To enable an optional weighted set of hosts in place of a load balancer
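
A note on the cycles expressions in the two Documents API CRUD files above: TEMPLATE(name,default) takes its value from a same-named command-line parameter, and defaults may nest, so TEMPLATE(write-cycles,TEMPLATE(docscount,10000000)) resolves innermost-last. A sketch with illustrative values (note also that the first file filters on the block tag and this one on the name tag; both are regex matches against the corresponding op tag):

# nb5 run ... docscount=500000     -> write-cycles = 500000   (inherits docscount)
# nb5 run ... write-cycles=1000    -> write-cycles = 1000     (explicit override wins)
# nb5 run ...                      -> write-cycles = 10000000 (innermost default)
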

View File

@@ -1,4 +1,4 @@
-min_version: "4.17.15"
+min_version: "5.17.1"
 description: |
   This workload emulates a key-value data model and access patterns.
@@ -10,7 +10,7 @@ description: |
 scenarios:
   default:
-    schema: run driver=http tags==block:schema threads==1 cycles==UNDEF
+    schema: run driver=http tags==block:'schema.*' threads==1 cycles==UNDEF
     rampup: run driver=http tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
     main: run driver=http tags==block:main cycles===TEMPLATE(main-cycles,10000000) threads=auto

View File

@@ -1,4 +1,4 @@
-min_version: "4.17.15"
+min_version: "5.17.1"
 description: |
   This workload emulates advanced search filter combinations for the Stargate Documents API.
@@ -15,10 +15,10 @@ description: |
 # complex2: (match1 LTE 0 OR match2 EQ "false") AND (match2 EQ "false" OR match3 EQ true)
 # complex3: (match1 LTE 0 AND match2 EQ "true") OR (match2 EQ "false" AND match3 EQ true)
 scenarios:
-  schema: run driver=http tags==phase:schema threads==<<threads:1>> cycles==UNDEF
+  schema: run driver=http tags==block:'schema.*' threads==<<threads:1>> cycles==UNDEF
   rampup:
-    write: run driver=http tags==name:"rampup-put.*" cycles===TEMPLATE(docscount,10000000) docpadding=TEMPLATE(docpadding,0) match-ratio=TEMPLATE(match-ratio,0.01) threads=<<threads:auto>> errors=timer,warn
+    write: run driver=http tags==name:'rampup-put.*' cycles===TEMPLATE(docscount,10000000) docpadding=TEMPLATE(docpadding,0) match-ratio=TEMPLATE(match-ratio,0.01) threads=<<threads:auto>> errors=timer,warn
-    read: run driver=http tags==phase:"rampup-get.*" cycles===TEMPLATE(rampup-cycles, 10000000) page-size=TEMPLATE(page-size,3) fields=TEMPLATE(fields,%5b%5d) threads=<<threads:auto>> errors=timer,warn
+    read: run driver=http tags==block:'rampup-get.*' cycles===TEMPLATE(rampup-cycles, 10000000) page-size=TEMPLATE(page-size,3) fields=TEMPLATE(fields,%5b%5d) threads=<<threads:auto>> errors=timer,warn
   main:
     all: run driver=http tags==block:main cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) page-size=TEMPLATE(page-size,3) fields=TEMPLATE(fields,%5b%5d) threads=<<threads:auto>> errors=timer,warn
     get-in: run driver=http tags==name:main-get-in,filter:in cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) page-size=TEMPLATE(page-size,3) fields=TEMPLATE(fields,%5b%5d) threads=<<threads:auto>> errors=timer,warn

View File

@@ -1,4 +1,4 @@
-min_version: "4.17.15"
+min_version: "5.17.1"
 description: |
   This workload emulates basic search operations for the Stargate Documents API.
@@ -7,10 +7,10 @@ description: |
   Note that docsapi_port should reflect the port where the Docs API is exposed (defaults to 8180).
 scenarios:
-  schema: run driver=http tags==block:schema threads==<<threads:1>> cycles==UNDEF
+  schema: run driver=http tags==block:'schema.*' threads==<<threads:1>> cycles==UNDEF
   rampup:
-    write: run driver=http tags==name:"rampup-put.*" cycles===TEMPLATE(docscount,10000000) docpadding=TEMPLATE(docpadding,0) match-ratio=TEMPLATE(match-ratio,0.01) threads=<<threads:auto>> errors=timer,warn
+    write: run driver=http tags==name:'rampup-put.*' cycles===TEMPLATE(docscount,10000000) docpadding=TEMPLATE(docpadding,0) match-ratio=TEMPLATE(match-ratio,0.01) threads=<<threads:auto>> errors=timer,warn
-    read: run driver=http tags==name:"rampup-get.*" cycles===TEMPLATE(rampup-cycles, 10000000) page-size=TEMPLATE(page-size,3) fields=TEMPLATE(fields,%5b%5d) threads=<<threads:auto>> errors=timer,warn
+    read: run driver=http tags==name:'rampup-get.*' cycles===TEMPLATE(rampup-cycles, 10000000) page-size=TEMPLATE(page-size,3) fields=TEMPLATE(fields,%5b%5d) threads=<<threads:auto>> errors=timer,warn
   main:
     all: run driver=http tags==block:main cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) page-size=TEMPLATE(page-size,3) fields=TEMPLATE(fields,%5b%5d) threads=<<threads:auto>> errors=timer,warn
     get-eq: run driver=http tags==name:main-get-eq,filter:eq cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) page-size=TEMPLATE(page-size,3) fields=TEMPLATE(fields,%5b%5d) threads=<<threads:auto>> errors=timer,warn

View File

@@ -1,4 +1,4 @@
-min_version: "4.17.15"
+min_version: "5.17.1"
 description: |
   This workload emulates a key-value data model and access patterns.
@@ -9,7 +9,7 @@ description: |
 scenarios:
   default:
-    schema: run driver=http tags==block:schema threads==1 cycles==UNDEF
+    schema: run driver=http tags==block:'schema.*' threads==1 cycles==UNDEF
     rampup: run driver=http tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
     main: run driver=http tags==block:"main.*" cycles===TEMPLATE(main-cycles,10000000) threads=auto

View File

@@ -1,4 +1,4 @@
-min_version: "4.17.15"
+min_version: "5.17.1"
 description: |
   This workload emulates a tabular workload with partitions, clusters, and data fields.
@@ -10,9 +10,9 @@ description: |
 scenarios:
   default:
-    schema: run driver=http tags==block:schema threads==1 cycles==UNDEF
+    schema: run driver=http tags==block:'schema.*' threads==1 cycles==UNDEF
     rampup: run driver=http tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
-    main: run driver=http tags==block:"main.*" cycles===TEMPLATE(main-cycles,10000000) threads=auto
+    main: run driver=http tags==block:'main.*' cycles===TEMPLATE(main-cycles,10000000) threads=auto
 bindings:
   # To enable an optional weighted set of hosts in place of a load balancer
@@ -39,7 +39,7 @@ bindings:
 blocks:
   schema:
     tags:
-      phase: schema
+      block: schema
     ops:
       create-keyspace:
        method: POST
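
The phase-to-block tag rename above is what makes the new scenario filters line up: ops inherit their enclosing block's tags, so a filter such as tags==block:schema selects every op in the schema block, and tags==block:'schema.*' matches the same tag value by regex. A minimal sketch of the matching, using only names from this file:

# blocks:
#   schema:
#     tags:
#       block: schema
#     ops:
#       create-keyspace: ...
# tags==block:schema       -> selects create-keyspace (and the block's other ops)
# tags==block:'schema.*'   -> same selection, via regex on the tag value
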

View File

@@ -1,4 +1,4 @@
-min_version: "4.17.15"
+min_version: "5.17.1"
 description: |
   This workload emulates a time-series data model and access patterns.
@@ -15,7 +15,7 @@ scenarios:
   default:
     schema: run driver=cql tags==block:schema threads==1 cycles==UNDEF
     rampup: run driver=http tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
-    main: run driver=http tags==block:"main.*" cycles===TEMPLATE(main-cycles,10000000) threads=auto
+    main: run driver=http tags==block:main cycles===TEMPLATE(main-cycles,10000000) threads=auto
 bindings:
   # To enable an optional weighted set of hosts in place of a load balancer

View File

@@ -1,4 +1,4 @@
-min_version: "4.17.15"
+min_version: "5.17.1"
 # TODO
 # - do we need a truncate schema / namespace at the end
@@ -13,9 +13,9 @@ description: |
 scenarios:
   default:
-    schema: run driver=http tags==block:schema threads==1 cycles==UNDEF
+    schema: run driver=http tags==block:'schema.*' threads==1 cycles==UNDEF
     rampup: run driver=http tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
-    main: run driver=http tags==phase:"main.*" cycles===TEMPLATE(main-cycles,10000000) threads=auto
+    main: run driver=http tags==block:'main.*' cycles===TEMPLATE(main-cycles,10000000) threads=auto
 bindings:
   # To enable an optional weighted set of hosts in place of a load balancer

View File

@@ -1,4 +1,4 @@
-min_version: "4.17.15"
+min_version: "5.17.1"
 # TODO
 # - do we need a truncate schema / namespace at the end
@@ -15,9 +15,9 @@ description: |
 scenarios:
   default:
-    schema: run driver=http tags==block:schema threads==1 cycles==UNDEF
+    schema: run driver=http tags==block:'schema.*' threads==1 cycles==UNDEF
     rampup: run driver=http tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
-    man: run driver=http tags==block:"main.*" cycles===TEMPLATE(main-cycles,10000000) threads=auto
+    man: run driver=http tags==block:'main.*' cycles===TEMPLATE(main-cycles,10000000) threads=auto
 bindings:
   # To enable an optional weighted set of hosts in place of a load balancer

View File

@@ -1,4 +1,4 @@
-min_version: "4.17.15"
+min_version: "5.17.1"
 # TODO
 # - do we need a truncate schema / namespace at the end
@@ -19,9 +19,9 @@ description: |
 scenarios:
   default:
-    schema: run driver=http tags==block:schema threads==1 cycles==UNDEF
+    schema: run driver=http tags==block:'schema.*' threads==1 cycles==UNDEF
     rampup: run driver=http tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
-    main: run driver=http tags==block:"main.*" cycles===TEMPLATE(main-cycles,10000000) threads=auto
+    main: run driver=http tags==block:'main.*' cycles===TEMPLATE(main-cycles,10000000) threads=auto
 bindings:
   # To enable an optional weighted set of hosts in place of a load balancer

View File

@@ -0,0 +1 @@
+# <<put-token-here>>
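
This new one-line file is the default target of the request_token bindings introduced above: the whole file is read at run time, so rotating a token does not require editing any workload. A sketch of how it is wired up (the token value and alternate path are illustrative only):

# data/stargate_token.txt  -- replace the placeholder comment with the literal token, e.g.
# AstraCS:exampleTokenValue...
#
# point a run at a different token file via the template parameter:
#   nb5 run driver=http workload=/path/to/workload.yaml stargate_tokenfile=/secure/path/token.txt
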

View File

@@ -1,17 +1,17 @@
 # Connection Guide: https://www.mongodb.com/docs/drivers/java/sync/current/fundamentals/connection/
-# nb5 run driver=mongodb workload=/path/to/mongodb-keyvalue2.yaml tags=block:schema connection='mongodb+srv://user:pass@sample-db.host.mongodb.net/?retryWrites=true&w=majority' database=baselines -vv --show-stacktraces
+# nb5 run driver=mongodb workload=/path/to/mongodb-keyvalue2.yaml tags=block:"schema.*" connection='mongodb+srv://user:pass@sample-db.host.mongodb.net/?retryWrites=true&w=majority' database=baselines -vv --show-stacktraces
 # nb5 run driver=mongodb workload=/path/to/mongodb-keyvalue2.yaml tags=block:rampup cycles=25 connection='mongodb+srv://user:pass@sample-db.host.mongodb.net/?retryWrites=true&w=majority' database=baselines -vv --show-stacktraces
 # nb5 run driver=mongodb workload=/path/to/mongodb-keyvalue2.yaml tags='block:main-.*' cycles=25 connection='mongodb+srv://user:pass@sample-db.host.mongodb.net/?retryWrites=true&w=majority' database=baselines -vv --show-stacktraces
-min_version: "4.17.31"
+min_version: "5.17.1"
 description: |
   This workload is analogous to the cql-keyvalue2 workload, just implemented for MongoDB.
 scenarios:
   default:
-    schema: run driver=mongodb tags==block:schema threads==1 cycles==UNDEF
+    schema: run driver=mongodb tags==block:'schema.*' threads==1 cycles==UNDEF
     rampup: run driver=mongodb tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
-    main: run driver=mongodb tags==block:"main-.*" cycles===TEMPLATE(main-cycles,10000000) threads=auto
+    main: run driver=mongodb tags==block:'main-.*' cycles===TEMPLATE(main-cycles,10000000) threads=auto
     drop: run driver=mongodb tags==block:drop-collection threads==1 cycles==UNDEF
 params:
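
Because each named step above is independently addressable by tag filter, partial runs can reuse the same file. A sketch in the style of the comment lines above (connection string shortened for brevity):

# create the schema only:
#   nb5 run driver=mongodb workload=/path/to/mongodb-keyvalue2.yaml tags=block:'schema.*' connection='mongodb://...' database=baselines
# tear the collection back down afterwards:
#   nb5 run driver=mongodb workload=/path/to/mongodb-keyvalue2.yaml tags=block:drop-collection connection='mongodb://...' database=baselines
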

View File

@@ -1,17 +1,17 @@
 # Connection Guide: https://www.mongodb.com/docs/drivers/java/sync/current/fundamentals/connection/
-# nb5 run driver=mongodb workload=/path/to/mongodb-tabular2.yaml tags=block:schema connection='mongodb+srv://user:pass@sample-db.host.mongodb.net/?retryWrites=true&w=majority' database=baselines -vv --show-stacktraces
+# nb5 run driver=mongodb workload=/path/to/mongodb-tabular2.yaml tags=block:"schema.*" connection='mongodb+srv://user:pass@sample-db.host.mongodb.net/?retryWrites=true&w=majority' database=baselines -vv --show-stacktraces
 # nb5 run driver=mongodb workload=/path/to/mongodb-tabular2.yaml tags=block:rampup cycles=25 connection='mongodb+srv://user:pass@sample-db.host.mongodb.net/?retryWrites=true&w=majority' database=baselines -vv --show-stacktraces
 # nb5 run driver=mongodb workload=/path/to/mongodb-tabular2.yaml tags='block:main.*' cycles=25 connection='mongodb+srv://user:pass@sample-db.host.mongodb.net/?retryWrites=true&w=majority' database=baselines -vv --show-stacktraces
-min_version: "4.17.31"
+min_version: "5.17.1"
 description: |
   This workload is analogous to the cql-tabular2 workload, just implemented for MongoDB.
 scenarios:
   default:
-    schema: run driver=mongodb tags==block:schema threads==1 cycles==UNDEF
+    schema: run driver=mongodb tags==block:'schema.*' threads==1 cycles==UNDEF
     rampup: run driver=mongodb tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
-    main: run driver=mongodb tags==block:"main-.*" cycles===TEMPLATE(main-cycles,10000000) threads=auto
+    main: run driver=mongodb tags==block:'main-.*' cycles===TEMPLATE(main-cycles,10000000) threads=auto
     drop: run driver=mongodb tags==block:drop-collection threads==1 cycles==UNDEF
 params:

View File

@@ -1,21 +1,21 @@
 # Connection Guide: https://www.mongodb.com/docs/drivers/java/sync/current/fundamentals/connection/
-# nb5 run driver=mongodb workload=/path/to/mongodb-timeseries2.yaml tags=block:schema connection='mongodb+srv://user:pass@sample-db.host.mongodb.net/?retryWrites=true&w=majority' database=baselines -vv --show-stacktraces
+# nb5 run driver=mongodb workload=/path/to/mongodb-timeseries2.yaml tags=block:"schema.*" connection='mongodb+srv://user:pass@sample-db.host.mongodb.net/?retryWrites=true&w=majority' database=baselines -vv --show-stacktraces
 # nb5 run driver=mongodb workload=/path/to/mongodb-timeseries2.yaml tags=block:rampup cycles=25 connection='mongodb+srv://user:pass@sample-db.host.mongodb.net/?retryWrites=true&w=majority' database=baselines -vv --show-stacktraces
 # nb5 run driver=mongodb workload=/path/to/mongodb-timeseries2.yaml tags='block:main.*' cycles=25 connection='mongodb+srv://user:pass@sample-db.host.mongodb.net/?retryWrites=true&w=majority' database=baselines -vv --show-stacktraces
 # https://www.mongodb.com/community/forums/t/how-to-store-a-uuid-with-binary-subtype-0x04-using-the-mongodb-java-driver/13184
 # https://www.mongodb.com/community/forums/t/problem-inserting-uuid-field-with-binary-subtype-via-atlas-web-ui/1071/4
 # https://www.mongodb.com/community/forums/t/timeseries-last-x-documents/186574/5
-min_version: "4.17.31"
+min_version: "5.17.1"
 description: |
   This workload is analogous to the cql-timeseries2 workload, just implemented for MongoDB.
 scenarios:
   default:
-    schema: run driver=mongodb tags==block:schema threads==1 cycles==UNDEF
+    schema: run driver=mongodb tags==block:'schema.*' threads==1 cycles==UNDEF
     rampup: run driver=mongodb tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
-    main: run driver=mongodb tags==block:"main-.*" cycles===TEMPLATE(main-cycles,10000000) threads=auto
+    main: run driver=mongodb tags==block:'main-.*' cycles===TEMPLATE(main-cycles,10000000) threads=auto
     drop: run driver=mongodb tags==block:drop-collection threads==1 cycles==UNDEF
 params:

View File

@@ -1,10 +1,9 @@
-# nb -v run driver=mongodb yaml=mongodb-basic-uuid connection=mongodb://127.0.0.1 database=testdb tags=phase:rampup
 description: An example of a basic mongo insert and find with UUID
 scenarios:
   default:
-    - run driver=mongodb tags==phase:rampup cycles===TEMPLATE(rampup-cycles,100000000) threads=auto
+    rampup: run driver=mongodb tags==block:rampup cycles===TEMPLATE(rampup-cycles,100000000) threads=auto
-    - run driver=mongodb tags==phase:main cycles===TEMPLATE(main-cycles,100000000) threads=auto
+    main: run driver=mongodb tags==block:main cycles===TEMPLATE(main-cycles,100000000) threads=auto
 bindings:
   seq_uuid: Mod(<<uuidCount:100000000>>L); ToHashedUUID() -> java.util.UUID; ToString() -> String
   rw_uuid: <<uuidDist:Uniform(0,100000000)->long>>; ToHashedUUID() -> java.util.UUID; ToString() -> String
@@ -12,69 +11,50 @@ bindings:
   seq_value: Mod(<<valueCount:1000000000>>L); <<valueSizeDist:Hash()>>; ToLong()
   rw_key: <<keyDist:Uniform(0,1000000)->long>>; ToInt()
   rw_value: <<valDist:Uniform(0,1000000000)->long>>; <<valueSizeDist:Hash()>>; ToLong()
 blocks:
-  - name: rampup
-    tags:
-      phase: rampup
-    statements:
-      - rampup-insert: |
-        {
-          insert: "<<collection:keyvalueuuid>>",
-          documents: [ { _id: UUID("{seq_uuid}"),
-                         key: {seq_key},
-                         value: NumberLong({seq_value}) } ]
-        }
-        params:
-          readPreference: primary
-        tags:
-          name: rampup-insert
-  - name: verify
-    tags:
-      phase: verify
-      type: read
-    params:
-      cl: <<read_cl:LOCAL_QUORUM>>
-    statements:
-      - verify-find: |
-        {
-          find: "<<collection:keyvalueuuid>>",
-          filter: { _id: UUID("{seq_uuid}") }
-        }
-        verify-fields: _id->seq_uuid, key->seq_key, value->seq_value
-        tags:
-          name: verify
-  - name: main-read
-    tags:
-      phase: main
-      type: read
-    params:
-      ratio: <<read_ratio:1>>
-    statements:
-      - main-find: |
-        {
-          find: "<<collection:keyvalueuuid>>",
-          filter: { _id: UUID("{rw_uuid}") }
-        }
-        params:
-          readPreference: primary
-        tags:
-          name: main-find
-  - name: main-write
-    tags:
-      phase: main
-      type: write
-    params:
-      ratio: <<write_ratio:1>>
-    statements:
-      - main-insert: |
-        {
-          insert: "<<collection:keyvalueuuid>>",
-          documents: [ { _id: UUID("{rw_uuid}")
-                         key: {rw_key},
-                         value: NumberLong({rw_value}) } ]
-        }
-        params:
-          readPreference: primary
-        tags:
-          name: main-insert
+  params:
+    readPreference: primary
+  rampup:
+    ops:
+      rampup-insert: |
+        {
+          insert: "<<collection:keyvalueuuid>>",
+          documents: [ { _id: UUID("{seq_uuid}"),
+                         key: {seq_key},
+                         value: NumberLong({seq_value}) } ]
+        }
+  verify:
+    params:
+      type: read
+      cl: <<read_cl:LOCAL_QUORUM>>
+      verify-fields: _id->seq_uuid, key->seq_key, value->seq_value
+    ops:
+      verify-find: |
+        {
+          find: "<<collection:keyvalueuuid>>",
+          filter: { _id: UUID("{seq_uuid}") }
+        }
+  main-read:
+    params:
+      ratio: <<read_ratio:1>>
+      type: read
+      readPreference: primary
+    ops:
+      main-find: |
+        {
+          find: "<<collection:keyvalueuuid>>",
+          filter: { _id: UUID("{rw_uuid}") }
+        }
+  main-write:
+    params:
+      ratio: <<write_ratio:1>>
+      type: write
+      readPreference: primary
+    ops:
+      main-insert: |
+        {
+          insert: "<<collection:keyvalueuuid>>",
+          documents: [ { _id: UUID("{rw_uuid}")
+                         key: {rw_key},
+                         value: NumberLong({rw_value}) } ]
+        }
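
To make the binding substitution concrete: seq_uuid, seq_key, and seq_value are all derived deterministically from the cycle number, so each rampup cycle issues one fully-rendered insert command. A sketch of one rendered command (the UUID and numbers are placeholders, not real hashed outputs):

# rendered rampup-insert for one cycle, values illustrative only:
# {
#   insert: "keyvalueuuid",
#   documents: [ { _id: UUID("f47ac10b-58cc-4372-a567-0e02b2c3d479"),
#                  key: 42,
#                  value: NumberLong(137438953) } ]
# }
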

View File

@@ -1,10 +1,10 @@
-# nb -v run driver=mongodb yaml=mongodb-basic connection=mongodb://127.0.0.1 database=testdb tags=phase:rampup cycles=1M
 description: An example of a basic mongo insert and find.
 scenarios:
   default:
-    - run driver=mongodb tags==phase:rampup cycles===TEMPLATE(rampup-cycles,1000000) threads=auto
+    rampup: run driver=mongodb tags==block:rampup cycles===TEMPLATE(rampup-cycles,1000000) threads=auto
-    - run driver=mongodb tags==phase:main cycles===TEMPLATE(main-cycles,1000000) threads=auto
+    main: run driver=mongodb tags==block:'main-*.*' cycles===TEMPLATE(main-cycles,10000000) threads=auto
 bindings:
   seq_key: Mod(<<keyCount:1000000>>L); ToInt()
   seq_value: Mod(<<valueCount:1000000000>>L); <<valueSizeDist:Hash()>>; ToString() -> String
@@ -12,46 +12,39 @@ bindings:
   rw_value: <<valDist:Uniform(0,1000000000)->int>>; <<valueSizeDist:Hash()>>; ToString() -> String
 blocks:
-  - name: rampup
-    tags:
-      phase: rampup
-    statements:
-      - rampup-insert: |
+  rampup:
+    ops:
+      rampup-insert: |
         {
           insert: "<<collection:keyvalue>>",
-          documents: [ { _id: {seq_key},
-                         value: {seq_value} } ]
+          documents: [ { _id: {seq_key}, value: {seq_value} } ]
         }
       params:
         readPreference: primary
       tags:
         name: rampup-insert
-  - name: main-read
-    tags:
-      phase: main
-      type: read
+  main-read:
     params:
       ratio: <<read_ratio:1>>
-    statements:
-      - main-find: |
-        {
-          find: "<<collection:keyvalue>>",
-          filter: { _id: {rw_key} }
-        }
-      params:
-        readPreference: primary
+      readPreference: primary
+      type: read
+    ops:
+      main-find: |
+        {
+          find: "<<collection:keyvalue>>",
+          filter: { _id: {rw_key} }
+        }
-  - name: main-write
-    tags:
-      phase: main
-      type: write
+  main-write:
     params:
       ratio: <<write_ratio:1>>
-    statements:
-      - main-insert: |
+      type: write
+    ops:
+      main-insert: |
         {
           insert: "<<collection:keyvalue>>",
-          documents: [ { _id: {rw_key},
-                         value: {rw_value} } ]
+          documents: [ { _id: {rw_key}, value: {rw_value} } ]
         }
       params:
         readPreference: primary
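
The read_ratio/write_ratio params above weight how often each op appears in the activity's op sequence, so the read/write mix is tunable without editing the blocks. A sketch, with the numbers illustrative:

# default: read_ratio=1, write_ratio=1  -> finds and inserts interleave evenly
# read-heavy mix:
#   nb5 run driver=mongodb workload=/path/to/workload.yaml tags=block:'main-*.*' read_ratio=9 write_ratio=1
# -> roughly nine main-find ops for every main-insert in the op sequence
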

View File

@@ -1,4 +1,3 @@
-# nb -v run driver=mongodb yaml=mongodb-crud-basic tags=phase:schema connection=mongodb://127.0.0.1 database=testdb
 description: |
   This workload emulates CRUD operations for the mongoDB.
@@ -7,11 +6,11 @@ description: |
 scenarios:
   default:
-    schema: run driver=mongodb tags==phase:schema threads==1 cycles==UNDEF
+    schema: run driver=mongodb tags==block:'schema.*' threads==1 cycles==UNDEF
-    write: run driver=mongodb tags==phase:main,type:write cycles===TEMPLATE(write-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
+    write: run driver=mongodb tags==block:main-write,type:write cycles===TEMPLATE(write-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
-    read: run driver=mongodb tags==phase:main,type:read cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
+    read: run driver=mongodb tags==block:main-read,type:read cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
-    update: run driver=mongodb tags==phase:main,type:update cycles===TEMPLATE(update-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
+    update: run driver=mongodb tags==block:main-update,type:update cycles===TEMPLATE(update-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
-    delete: run driver=mongodb tags==phase:main,type:delete cycles===TEMPLATE(delete-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
+    delete: run driver=mongodb tags==block:main-delete,type:delete cycles===TEMPLATE(delete-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
 bindings:
   seq_key: Mod(<<docscount:10000000>>); ToString() -> String
@@ -29,62 +28,93 @@ bindings:
   friend_id: Add(-1); ToHashedUUID(); ToString() -> String
 blocks:
-  - tags:
-      phase: schema
-    statements:
-      - dummy-insert: |
-        {
-          insert: "<<collection:crud_basic>>",
-          documents: [ { _id: "dummyyyy" } ]
-        }
-      - drop-collection: |
-        {
-          drop: "<<collection:crud_basic>>"
-        }
-        tags:
-          name: drop-collection
-      - create-collection: |
-        {
-          create: "<<collection:crud_basic>>"
-        }
-        tags:
-          name: create-collection
-      - create-indexes: |
-        {
-          createIndexes: "<<collection:crud_basic>>",
-          indexes: [
-            {
-              key: { user_id: 1 },
-              name: "user_id_idx",
-              unique: true
-            },
-            {
-              key: { created_on: 1 },
-              name: "created_on_idx"
-            },
-            {
-              key: { gender: 1 },
-              name: "gender_idx"
-            }
-          ]
-        }
-        tags:
-          name: create-indexes
-  - name: main-write
-    tags:
-      phase: main
-      type: write
-    statements:
-      - write-document: |
-        {
-          insert: "<<collection:crud_basic>>",
-          writeConcern: { w: "majority" },
-          documents: [
-            {
+  schema:
+    ops:
+      dummy-insert: |
+        {
+          insert: "<<collection:crud_basic>>",
+          documents: [ { _id: "dummyyyy" } ]
+        }
+      drop-collection: |
+        {
+          drop: "<<collection:crud_basic>>"
+        }
+      create-collection: |
+        {
+          create: "<<collection:crud_basic>>"
+        }
+      create-indexes: |
+        {
+          createIndexes: "<<collection:crud_basic>>",
+          indexes: [
+            {
+              key: { user_id: 1 },
+              name: "user_id_idx",
+              unique: true
+            },
+            {
+              key: { created_on: 1 },
+              name: "created_on_idx"
+            },
+            {
+              key: { gender: 1 },
+              name: "gender_idx"
+            }
+          ]
+        }
+  main-write:
+    ops:
+      write-document: |
+        {
+          insert: "<<collection:crud_basic>>",
+          writeConcern: { w: "majority" },
+          documents: [
+            {
+              "_id": "{seq_key}",
+              "user_id": "{user_id}",
+              "created_on": {created_on},
+              "gender": "{gender}",
+              "full_name": "{full_name}",
+              "married": {married},
+              "address": {
+                "primary": {
+                  "city": "{city}",
+                  "cc": "{country_code}"
+                },
+                "secondary": {}
+              },
+              "coordinates": [
+                {lat},
+                {lng}
+              ],
+              "children": [],
+              "friends": [
+                "{friend_id}"
+              ],
+              "debt": null
+            }
+          ]
+        }
+  main-read:
+    ops:
+      read-document: |
+        {
+          find: "<<collection:crud_basic>>",
+          filter: { _id: "{random_key}" }
+        }
+  main-update:
+    ops:
+      update-document: |
+        {
+          update: "<<collection:crud_basic>>",
+          writeConcern: { w: "majority" },
+          updates: [
+            {
+              q: { _id: "{random_key}" },
+              u: {
             "_id": "{seq_key}",
             "user_id": "{user_id}",
             "created_on": {created_on},
@@ -108,78 +138,19 @@ blocks:
            ],
            "debt": null
          }
-        ]
-      }
-        tags:
-          name: write-document
-  - name: main-read
-    tags:
-      phase: main
-      type: read
-    statements:
-      - read-document: |
-        {
-          find: "<<collection:crud_basic>>",
-          filter: { _id: "{random_key}" }
-        }
-        tags:
-          name: read-document
-  - name: main-update
-    tags:
-      phase: main
-      type: update
-    statements:
-      - update-document: |
-        {
-          update: "<<collection:crud_basic>>",
-          writeConcern: { w: "majority" },
-          updates: [
-            {
-              q: { _id: "{random_key}" },
-              u: {
-                "_id": "{seq_key}",
-                "user_id": "{user_id}",
-                "created_on": {created_on},
-                "gender": "{gender}",
-                "full_name": "{full_name}",
-                "married": {married},
-                "address": {
-                  "primary": {
-                    "city": "{city}",
-                    "cc": "{country_code}"
-                  },
-                  "secondary": {}
-                },
-                "coordinates": [
-                  {lat},
-                  {lng}
-                ],
-                "children": [],
-                "friends": [
-                  "{friend_id}"
-                ],
-                "debt": null
-              }
-            }
-          ]
-        }
-        tags:
-          name: update-document
-  - name: main-delete
-    tags:
-      phase: main
-      type: delete
-    statements:
-      - delete-document: |
-        {
-          delete: "<<collection:crud_basic>>",
-          deletes: [
-            {
-              q: { _id: "{seq_key}" },
-              limit: 1
-            }
-          ]
-        }
+            }
+          ]
+        }
+  main-delete:
+    ops:
+      delete-document: |
+        {
+          delete: "<<collection:crud_basic>>",
+          deletes: [
+            {
              q: { _id: "{seq_key}" },
              limit: 1
            }
          ]
        }
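
One reviewer-facing detail in the schema block above: the angle-bracket default <<collection:crud_basic>> is itself a plain parameter, so the whole CRUD cycle can be pointed at a scratch collection without touching the file. A sketch (the collection name is illustrative):

# nb5 run driver=mongodb workload=/path/to/workload.yaml tags=block:'schema.*' collection=crud_basic_test
# subsequent write/read/update/delete runs must pass the same collection=... override
# so that all five blocks address the same collection
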

View File

@@ -1,4 +1,3 @@
-# nb -v run driver=mongodb yaml=mongodb-crud-dataset tags=phase:schema connection=mongodb://127.0.0.1 database=testdb dataset_file=path/to/data.json
 description: |
   This workload emulates CRUD operations for the mongoDB.
@@ -7,110 +6,85 @@ description: |
 scenarios:
   default:
-    schema: run driver=mongodb tags==phase:schema threads==1 cycles==UNDEF
+    schema: run driver=mongodb tags==block:'schema.*' threads==1 cycles==UNDEF
-    write: run driver=mongodb tags==phase:main,type:write cycles===TEMPLATE(write-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
+    write: run driver=mongodb tags==block:main-write,type:write cycles===TEMPLATE(write-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
-    read: run driver=mongodb tags==phase:main,type:read cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
+    read: run driver=mongodb tags==block:main-read,type:read cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
-    update: run driver=mongodb tags==phase:main,type:update cycles===TEMPLATE(update-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
+    update: run driver=mongodb tags==block:main-update,type:update cycles===TEMPLATE(update-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
-    delete: run driver=mongodb tags==phase:main,type:delete cycles===TEMPLATE(delete-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
+    delete: run driver=mongodb tags==block:main-delete,type:delete cycles===TEMPLATE(delete-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
 bindings:
   seq_key: Mod(<<docscount:10000000>>); ToString() -> String
   random_key: Uniform(0,<<docscount:10000000>>); ToString() -> String
 blocks:
-  - tags:
-      phase: schema
-    statements:
-      - dummy-insert: |
-        {
-          insert: "<<collection:crud_dataset>>",
-          documents: [ { _id: "dummyyyy" } ]
-        }
-      - drop-collection: |
-        {
-          drop: "<<collection:crud_dataset>>"
-        }
-        tags:
-          name: drop-collection
-      - create-collection: |
-        {
-          create: "<<collection:crud_dataset>>"
-        }
-        tags:
-          name: create-collection
-      - create-indexes: |
-        {
-          createIndexes: "<<collection:crud_dataset>>",
-          indexes: <<indexes:[ { key: { dummy : 1 }, name: "dummy_idx", sparse: true } ]>>
-        }
-        tags:
-          name: create-indexes
-  - name: main-write
-    tags:
-      phase: main
-      type: write
-    statements:
-      - write-document: |
-        {
-          insert: "<<collection:crud_dataset>>",
-          writeConcern: { w: "majority" },
-          documents: [ { "_id": "{seq_key}", {document_json_without_id} ]
-        }
-        tags:
-          name: write-document
+  schema:
+    ops:
+      dummy-insert: |
+        {
+          insert: "<<collection:crud_dataset>>",
+          documents: [ { _id: "dummyyyy" } ]
+        }
+      drop-collection: |
+        {
+          drop: "<<collection:crud_dataset>>"
+        }
+      create-collection: |
+        {
+          create: "<<collection:crud_dataset>>"
+        }
+      create-indexes: |
+        {
+          createIndexes: "<<collection:crud_dataset>>",
+          indexes: <<indexes:[ { key: { dummy : 1 }, name: "dummy_idx", sparse: true } ]>>
+        }
+  main-write:
+    ops:
+      write-document: |
+        {
+          insert: "<<collection:crud_dataset>>",
+          writeConcern: { w: "majority" },
+          documents: [ { "_id": "{seq_key}", {document_json_without_id} ]
+        }
     bindings:
       document_json_without_id: ModuloLineToString('<<dataset_file>>'); ReplaceRegex('^\{', '')
-  - name: main-read
-    tags:
-      phase: main
-      type: read
-    statements:
-      - read-document: |
-        {
-          find: "<<collection:crud_dataset>>",
-          filter: { _id: "{random_key}" }
-        }
-        tags:
-          name: read-document
-  - name: main-update
-    tags:
-      phase: main
-      type: update
-    statements:
-      - update-document: |
-        {
-          update: "<<collection:crud_dataset>>",
-          writeConcern: { w: "majority" },
-          updates: [
-            {
-              q: { _id: "{random_key}" },
-              u: { "_id": "{random_key}", {document_json_without_id}
-            }
-          ]
-        }
-        tags:
-          name: update-document
+  main-read:
+    ops:
+      read-document: |
+        {
+          find: "<<collection:crud_dataset>>",
+          filter: { _id: "{random_key}" }
+        }
+  main-update:
+    ops:
+      update-document: |
+        {
+          update: "<<collection:crud_dataset>>",
+          writeConcern: { w: "majority" },
+          updates: [
+            {
+              q: { _id: "{random_key}" },
+              u: { "_id": "{random_key}", {document_json_without_id}
+            }
+          ]
+        }
     bindings:
       document_json_without_id: ModuloLineToString('<<dataset_file>>'); ReplaceRegex('^\{', '')
-  - name: main-delete
-    tags:
-      phase: main
-      type: delete
-    statements:
-      - delete-document: |
-        {
-          delete: "<<collection:crud_dataset>>",
-          deletes: [
-            {
-              q: { _id: "{seq_key}" },
-              limit: 1
-            }
-          ]
-        }
+  main-delete:
+    ops:
+      delete-document: |
+        {
+          delete: "<<collection:crud_dataset>>",
+          deletes: [
+            {
+              q: { _id: "{seq_key}" },
+              limit: 1
+            }
+          ]
+        }
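
The op-level bindings above are what let a raw JSON dataset line be spliced into a command that injects its own _id: ModuloLineToString picks a line from the dataset file by cycle, and ReplaceRegex strips its opening brace so the fragment composes cleanly after the generated key. A worked sketch with a hypothetical dataset line:

# dataset line (illustrative):          {"name": "ada", "score": 10}
# document_json_without_id yields:      "name": "ada", "score": 10}
# spliced into write-document:          documents: [ { "_id": "0", "name": "ada", "score": 10} ]
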

View File

@@ -1,4 +1,3 @@
-# nb -v run driver=mongodb yaml=mongodb-search-basic tags=phase:schema connection=mongodb://127.0.0.1 database=testdb
 description: |
   This workload emulates basic search operations for the mongoDB.
@@ -7,15 +6,15 @@ description: |
   It's a counterpart of the Stargate's Documents API Basic Search workflow.
 scenarios:
-  schema: run driver=mongodb tags==phase:schema threads==1 cycles==UNDEF
+  schema: run driver=mongodb tags==block:'schema.*' threads==1 cycles==UNDEF
-  rampup-write: run driver=mongodb tags==phase:rampup-write cycles===TEMPLATE(docscount,10000000) docpadding=TEMPLATE(docpadding,0) match-ratio=TEMPLATE(match-ratio,0.01) threads=auto errors=timer,warn
+  rampup-write: run driver=mongodb tags==block:rampup-write cycles===TEMPLATE(docscount,10000000) docpadding=TEMPLATE(docpadding,0) match-ratio=TEMPLATE(match-ratio,0.01) threads=auto errors=timer,warn
-  rampup-read: run driver=mongodb tags==phase:rampup-read cycles===TEMPLATE(rampup-cycles, 10000000) field-projection=TEMPLATE(fields,null) threads=<<threads:auto>> errors=timer,warn
+  rampup-read: run driver=mongodb tags==block:rampup-read cycles===TEMPLATE(rampup-cycles, 10000000) field-projection=TEMPLATE(fields,null) threads=<<threads:auto>> errors=timer,warn
-  main: run driver=mongodb tags==phase:main cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) field-projection=TEMPLATE(fields,null) threads=<<threads:auto>> errors=timer,warn
+  main: run driver=mongodb tags==block:main cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) field-projection=TEMPLATE(fields,null) threads=<<threads:auto>> errors=timer,warn
-  main-eq: run driver=mongodb tags==phase:main,filter:eq cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) field-projection=TEMPLATE(fields,null) threads=<<threads:auto>> errors=timer,warn
+  main-eq: run driver=mongodb tags==block:main-eq,filter:eq cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) field-projection=TEMPLATE(fields,null) threads=<<threads:auto>> errors=timer,warn
-  main-lt: run driver=mongodb tags==phase:main,filter:lt cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) field-projection=TEMPLATE(fields,null) threads=<<threads:auto>> errors=timer,warn
+  main-lt: run driver=mongodb tags==block:main-lt,filter:lt cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) field-projection=TEMPLATE(fields,null) threads=<<threads:auto>> errors=timer,warn
-  main-and: run driver=mongodb tags==phase:main,filter:and cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) field-projection=TEMPLATE(fields,null) threads=<<threads:auto>> errors=timer,warn
+  main-and: run driver=mongodb tags==block:main-and,filter:and cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) field-projection=TEMPLATE(fields,null) threads=<<threads:auto>> errors=timer,warn
-  main-or: run driver=mongodb tags==phase:main,filter:or cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) field-projection=TEMPLATE(fields,null) threads=<<threads:auto>> errors=timer,warn
+  main-or: run driver=mongodb tags==block:main-or,filter:or cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) field-projection=TEMPLATE(fields,null) threads=<<threads:auto>> errors=timer,warn
-  main-or-single-match: run driver=mongodb tags==phase:main,filter:or-single-match cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) field-projection=TEMPLATE(fields,null) threads=<<threads:auto>> errors=timer,warn
+  main-or-single-match: run driver=mongodb tags==block:main-or-single-match,filter:or-single-match cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) field-projection=TEMPLATE(fields,null) threads=<<threads:auto>> errors=timer,warn
 bindings:
   seq_key: Mod(<<docscount:10000000>>); ToString() -> String
@@ -34,57 +33,49 @@ bindings:
   match1: Identity(); CoinFunc(<<match-ratio>>, FixedValue(0), FixedValue(1000))
   match2: Identity(); CoinFunc(<<match-ratio>>, FixedValue("true"), FixedValue("false"))
   additional_fields: ListSizedStepped(<<docpadding:0>>,Template("\"{}\":{}",Identity(),Identity())); ToString(); ReplaceAll('\[\"', ',\"'); ReplaceAll('\[', ''); ReplaceAll('\]', '') -> String
blocks: blocks:
- tags: schema:
phase: schema ops:
statements: dummy-insert: |
- dummy-insert: | {
{ insert: "<<collection:search_basic>>",
insert: "<<collection:search_basic>>", documents: [ { _id: "dummyyyy" } ]
documents: [ { _id: "dummyyyy" } ] }
}
- drop-collection: | drop-collection: |
{ {
drop: "<<collection:search_basic>>" drop: "<<collection:search_basic>>"
} }
tags:
name: drop-collection
- create-collection: | create-collection: |
{ {
create: "<<collection:search_basic>>" create: "<<collection:search_basic>>"
} }
tags:
name: create-collection
- create-indexes: | create-indexes: |
{ {
createIndexes: "<<collection:search_basic>>", createIndexes: "<<collection:search_basic>>",
indexes: [ indexes: [
{ {
key: { user_id: 1 }, key: { user_id: 1 },
name: "user_id_idx", name: "user_id_idx",
unique: true unique: true
}, },
{ {
key: { created_on: 1 }, key: { created_on: 1 },
name: "created_on_idx" name: "created_on_idx"
}, },
{ {
key: { city: 1 }, key: { city: 1 },
name: "city_idx" name: "city_idx"
} }
] ]
} }
tags:
name: create-indexes
- name: rampup-write rampup-write:
tags: ops:
phase: rampup-write write-document:
statements:
- write-document: |
{ {
insert: "<<collection:search_basic>>", insert: "<<collection:search_basic>>",
writeConcern: { w: "majority" }, writeConcern: { w: "majority" },
@ -118,83 +109,62 @@ blocks:
} }
] ]
} }
tags: rampup-read:
name: rampup-write params:
filter: eq
- name: rampup ops:
tags: read-document: |
phase: rampup-read
filter: eq
statements:
- read-document: |
{ {
find: "<<collection:search_basic>>", find: "<<collection:search_basic>>",
filter: { match1: 0 } filter: { match1: 0 }
}, <<field-projection:null>> }, <<field-projection:null>>
tags:
name: rampup-read
- name: main-eq main-eq:
tags: params:
phase: main
filter: eq filter: eq
statements: ops:
- read-document: | read-document: |
{ {
find: "<<collection:search_basic>>", find: "<<collection:search_basic>>",
filter: { match3: true } filter: { match3: true }
}, <<field-projection:null>> }, <<field-projection:null>>
tags:
name: read-document
- name: main-lt main-lt:
tags: params:
phase: main
filter: lt filter: lt
statements: ops:
- read-document: | read-document: |
{ {
find: "<<collection:search_basic>>", find: "<<collection:search_basic>>",
filter: { match1: {$lt: 1}} filter: { match1: {$lt: 1}}
}, <<field-projection:null>> }, <<field-projection:null>>
tags:
name: read-document
- name: main-and main-and:
tags: params:
phase: main
filter: and filter: and
statements: ops:
- read-document: | read-document: |
{ {
find: "<<collection:search_basic>>", find: "<<collection:search_basic>>",
filter: { match1: {$lt: 1}, match2: "true"} filter: { match1: {$lt: 1}, match2: "true"}
}, <<field-projection:null>> }, <<field-projection:null>>
tags:
name: read-document
- name: main-or main-or:
tags: params:
phase: main
filter: or filter: or
statements: ops:
- read-document: | read-document: |
{ {
find: "<<collection:search_basic>>", find: "<<collection:search_basic>>",
filter: { $or: [ {match1: {$lt: 1}}, {match3: true}]} filter: { $or: [ {match1: {$lt: 1}}, {match3: true}]}
}, <<field-projection:null>> }, <<field-projection:null>>
tags:
name: read-document
- name: main-or-single-match main-or-single-match:
tags: params:
phase: main
filter: or-single-match filter: or-single-match
statements: ops:
- read-document: | read-document: |
{ {
find: "<<collection:search_basic>>", find: "<<collection:search_basic>>",
filter: { $or: [ {match1: {$lt: 1}}, {match2: "notamatch"}]} filter: { $or: [ {match1: {$lt: 1}}, {match2: "notamatch"}]}
}, <<field-projection:null>> }, <<field-projection:null>>
tags:
name: read-document
View File
@ -302,7 +302,7 @@ in the workload construction guide.
```yaml ```yaml
tags: tags:
phase: main block: main
``` ```
*json:* *json:*
@ -311,7 +311,7 @@ tags:
{ {
"tags": { "tags": {
"phase": "main" "block": "main"
} }
} }
``` ```
@ -331,7 +331,7 @@ Blocks are used to logically partition a workload for the purposes of grouping,
executing subsets and op sequences. Blocks can contain any of the defined elements above. executing subsets and op sequences. Blocks can contain any of the defined elements above.
Every op template within a block automatically gets a tag with the name 'block' and the value of Every op template within a block automatically gets a tag with the name 'block' and the value of
the block name. This makes it easy to select a whole block at a time with a tag filter like the block name. This makes it easy to select a whole block at a time with a tag filter like
`tags=block:schema`. `tags=block:"schema.*"`.
Blocks are not recursive. You may not put a block inside another block. Blocks are not recursive. You may not put a block inside another block.
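Since block names now drive tag selection automatically, a short sketch may help; the block, table, and op names below are hypothetical:

```yaml
# Each op template automatically receives a 'block' tag derived from the
# enclosing block name, e.g. block: schema or block: main here.
blocks:
  schema:
    ops:
      create-table: create table if not exists ks.t (k text primary key);
  main:
    ops:
      read-row: select * from ks.t where k='{key}';
```

With that auto-tagging in place, `tags=block:schema` selects only the first block, while a pattern such as `tags=block:"main.*"` selects every block whose name starts with `main`.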
View File
@ -269,7 +269,7 @@ ops:
bindings: bindings:
binding1: NumberNameToString(); binding1: NumberNameToString();
tags: tags:
phase: schema block: schema
params: params:
prepared: false prepared: false
description: This is just an example operation description: This is just an example operation
@ -292,7 +292,7 @@ ops:
"prepared": false "prepared": false
}, },
"tags": { "tags": {
"phase": "schema" "block": "schema"
} }
} }
} }
@ -317,7 +317,7 @@ ops:
"prepared": false "prepared": false
}, },
"tags": { "tags": {
"phase": "schema", "block": "schema",
"name": "block0--special-op-name", "name": "block0--special-op-name",
"block": "block0" "block": "block0"
} }
@ -351,7 +351,7 @@ blocks:
bindings: bindings:
binding1: NumberNameToString(); binding1: NumberNameToString();
tags: tags:
phase: schema block: schema
params: params:
prepared: false prepared: false
description: This is just an example operation description: This is just an example operation
@ -386,7 +386,7 @@ blocks:
"prepared": false "prepared": false
}, },
"tags": { "tags": {
"phase": "schema" "block": "schema"
}, },
"ops": { "ops": {
"op1": { "op1": {
@ -416,7 +416,7 @@ blocks:
"prepared": false "prepared": false
}, },
"tags": { "tags": {
"phase": "schema", "block": "schema",
"docleveltag": "is-tagging-everything", "docleveltag": "is-tagging-everything",
"name": "block-named-fred--special-op-name", "name": "block-named-fred--special-op-name",
"block": "block-named-fred" "block": "block-named-fred"
View File
@ -77,7 +77,7 @@ public class RawYamlTemplateLoaderTest {
assertThat(schemaOnlyScenario.keySet()) assertThat(schemaOnlyScenario.keySet())
.containsExactly("000"); .containsExactly("000");
assertThat(schemaOnlyScenario.values()) assertThat(schemaOnlyScenario.values())
.containsExactly("run driver=blah tags=phase:schema"); .containsExactly("run driver=blah tags=block:'schema.*'");
assertThat(rawOpsDoc.getName()).isEqualTo("doc1"); assertThat(rawOpsDoc.getName()).isEqualTo("doc1");
assertThat(blocks).hasSize(1); assertThat(blocks).hasSize(1);
View File
@ -7,7 +7,7 @@ scenarios:
- run driver=stdout alias=step1 - run driver=stdout alias=step1
- run driver=stdout alias=step2 - run driver=stdout alias=step2
schema-only: schema-only:
- run driver=blah tags=phase:schema - run driver=blah tags=block:'schema.*'
tags: tags:
atagname: atagvalue atagname: atagvalue
View File
@ -1,4 +1,3 @@
# nb -v run driver=cql yaml=cql-iot tags=phase:schema host=dsehost
description: | description: |
put workload description here put workload description here
scenarios: scenarios:
View File
@ -161,7 +161,7 @@ This puts NB on a footing to be "Modular Jar" compatible, which is a step toward
* auto-injected statement block and statement name tags. * auto-injected statement block and statement name tags.
- this means: You can now construct filters for specific blocks or statements simply by - this means: You can now construct filters for specific blocks or statements simply by
knowing their name: knowing their name:
- `tags=block:schema` or `tags='main-.*'` - `tags=block:"schema.*"` or `tags='main-.*'`
* safe usage of activity params and template vars is compatible, but may not be ambiguous. This * safe usage of activity params and template vars is compatible, but may not be ambiguous. This
means that if you have a template variable in myworkload.yaml, it must be distinctly named means that if you have a template variable in myworkload.yaml, it must be distinctly named
from any valid activity parameters, or an error is thrown. This eliminates a confusing source from any valid activity parameters, or an error is thrown. This eliminates a confusing source
@ -229,7 +229,7 @@ cqlgen - takes schema.cql tablestats -> workload.yaml
sstablegen sstablegen
* yaml+nb version checks * yaml+nb version checks
- `min_version: "4.17.15"` - `min_version: "5.17.1"`
View File

@ -231,6 +231,12 @@ public class NBCLIScenarioParser {
String[] namedStepPieces = cmd.split(" "); String[] namedStepPieces = cmd.split(" ");
for (String commandFragment : namedStepPieces) { for (String commandFragment : namedStepPieces) {
Matcher matcher = WordAndMaybeAssignment.matcher(commandFragment); Matcher matcher = WordAndMaybeAssignment.matcher(commandFragment);
if (commandFragment.equalsIgnoreCase("")) {
logger.debug("Command fragment discovered to be empty. Skipping this fragment for cmd: {}", cmd);
continue;
}
if (!matcher.matches()) { if (!matcher.matches()) {
throw new BasicError("Unable to recognize scenario cmd spec in '" + commandFragment + "'"); throw new BasicError("Unable to recognize scenario cmd spec in '" + commandFragment + "'");
} }
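For context on the guard above: `cmd.split(" ")` yields empty strings whenever a scenario command contains consecutive spaces, and such empty fragments previously fell through to the `BasicError` below. A hypothetical scenario entry that would have triggered it:

```yaml
scenarios:
  default:
    # note the accidental double space after 'run'; splitting on a single
    # space produces an empty fragment there, which is now skipped
    main: "run  driver=stdout cycles=10"
```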
View File
@ -153,19 +153,19 @@ public class TagFilterTest {
public void testLeadingSpaceTrimmedInQuotedTag() { public void testLeadingSpaceTrimmedInQuotedTag() {
Map<String, String> itemtags = new HashMap<>() {{ Map<String, String> itemtags = new HashMap<>() {{
put("phase", "main"); put("block", "main");
}}; }};
TagFilter tf = new TagFilter("\"phase: main\""); TagFilter tf = new TagFilter("\"block: main\"");
assertThat(tf.matches(itemtags).matched()).isTrue(); assertThat(tf.matches(itemtags).matched()).isTrue();
} }
@Test @Test
public void testAnyCondition() { public void testAnyCondition() {
Map<String, String> itemtags = Map.of("phase", "main", "truck", "car"); Map<String, String> itemtags = Map.of("block", "main", "truck", "car");
TagFilter tf = new TagFilter("any(truck:car,phase:moon)"); TagFilter tf = new TagFilter("any(truck:car,block:moon)");
assertThat(tf.matches(itemtags).matched()).isTrue(); assertThat(tf.matches(itemtags).matched()).isTrue();
TagFilter tf2 = new TagFilter("any(car:truck,phase:moon)"); TagFilter tf2 = new TagFilter("any(car:truck,block:moon)");
assertThat(tf2.matches(itemtags).matched()).isFalse(); assertThat(tf2.matches(itemtags).matched()).isFalse();
} }
} }
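These tests pin down the two filter modes: a plain comma-joined filter requires every condition to match, while `any(...)` matches as soon as one condition does. The `any(...)` form is usable anywhere a tag filter is accepted, including scenario steps; a hypothetical entry:

```yaml
scenarios:
  default:
    # selects ops tagged either block:main or block:main_mixed
    mixed: run driver=stdout tags=any(block:main,block:main_mixed) cycles=10
```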
View File
@ -144,9 +144,9 @@ naming scheme for phase control. This means that you have tagged each of
your statements or statement blocks with the appropriate phase tags from your statements or statement blocks with the appropriate phase tags from
schema, rampup, main, for example. schema, rampup, main, for example.
- `schematags=phase:schema` - The tag filter for schema statements. - `schematags=block:"schema.*"` - The tag filter for schema statements.
Findmax will run a schema phase with 1 thread by default. Findmax will run a schema phase with 1 thread by default.
- `maintags=phase:main` - The tag filter for the main workload. This is - `maintags=block:main` - The tag filter for the main workload. This is
the workload that is started and run in the background for all of the the workload that is started and run in the background for all of the
sampling windows. sampling windows.
View File
@ -48,7 +48,7 @@ schema_activitydef = params.withDefaults({
}); });
schema_activitydef.alias="findmax_schema"; schema_activitydef.alias="findmax_schema";
schema_activitydef.threads="1"; schema_activitydef.threads="1";
schema_activitydef.tags="TEMPLATE(schematags,phase:schema)"; schema_activitydef.tags="TEMPLATE(schematags,block:'schema.*')";
print("Creating schema with schematags:" + schema_activitydef.tags); print("Creating schema with schematags:" + schema_activitydef.tags);
scenario.run(schema_activitydef); scenario.run(schema_activitydef);
@ -63,7 +63,7 @@ activitydef = params.withDefaults({
activitydef.alias="findmax"; activitydef.alias="findmax";
activitydef.cycles="1000000000"; activitydef.cycles="1000000000";
activitydef.recycles="1000000000"; activitydef.recycles="1000000000";
activitydef.tags="TEMPLATE(maintags,phase:main)"; activitydef.tags="TEMPLATE(maintags,block:main)";
print("Iterating main workload with tags:" + activitydef.tags); print("Iterating main workload with tags:" + activitydef.tags);
View File
@ -16,8 +16,8 @@
package io.nosqlbench.engine.cli; package io.nosqlbench.engine.cli;
import io.nosqlbench.engine.api.scenarios.NBCLIScenarioParser;
import io.nosqlbench.api.errors.BasicError; import io.nosqlbench.api.errors.BasicError;
import io.nosqlbench.engine.api.scenarios.NBCLIScenarioParser;
import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Test;
import java.nio.file.Path; import java.nio.file.Path;
@ -31,39 +31,39 @@ public class NBCLIScenarioParserTest {
@Test @Test
public void providePathForScenario() { public void providePathForScenario() {
NBCLIOptions opts = new NBCLIOptions(new String[]{ "local/example-scenarios" }); NBCLIOptions opts = new NBCLIOptions(new String[]{"local/example-scenarios"});
List<Cmd> cmds = opts.getCommands(); List<Cmd> cmds = opts.getCommands();
} }
@Test @Test
public void defaultScenario() { public void defaultScenario() {
NBCLIOptions opts = new NBCLIOptions(new String[]{ "scenario-test" }); NBCLIOptions opts = new NBCLIOptions(new String[]{"scenario-test"});
List<Cmd> cmds = opts.getCommands(); List<Cmd> cmds = opts.getCommands();
} }
@Test @Test
public void defaultScenarioWithParams() { public void defaultScenarioWithParams() {
NBCLIOptions opts = new NBCLIOptions(new String[]{ "scenario-test", "cycles=100"}); NBCLIOptions opts = new NBCLIOptions(new String[]{"scenario-test", "cycles=100"});
List<Cmd> cmds = opts.getCommands(); List<Cmd> cmds = opts.getCommands();
assertThat(cmds.get(0).getArg("cycles")).isEqualTo("100"); assertThat(cmds.get(0).getArg("cycles")).isEqualTo("100");
} }
@Test @Test
public void namedScenario() { public void namedScenario() {
NBCLIOptions opts = new NBCLIOptions(new String[]{ "scenario-test", "schema-only"}); NBCLIOptions opts = new NBCLIOptions(new String[]{"scenario-test", "schema-only"});
List<Cmd> cmds = opts.getCommands(); List<Cmd> cmds = opts.getCommands();
} }
@Test @Test
public void namedScenarioWithParams() { public void namedScenarioWithParams() {
NBCLIOptions opts = new NBCLIOptions(new String[]{ "scenario-test", "schema-only", "cycles=100"}); NBCLIOptions opts = new NBCLIOptions(new String[]{"scenario-test", "schema-only", "cycles=100"});
List<Cmd> cmds = opts.getCommands(); List<Cmd> cmds = opts.getCommands();
assertThat(cmds.get(0).getArg("cycles")).containsOnlyOnce("100"); assertThat(cmds.get(0).getArg("cycles")).containsOnlyOnce("100");
} }
@Test @Test
public void testThatSilentFinalParametersPersist() { public void testThatSilentFinalParametersPersist() {
NBCLIOptions opts = new NBCLIOptions(new String[]{ "scenario-test", "type=foo"}); NBCLIOptions opts = new NBCLIOptions(new String[]{"scenario-test", "type=foo"});
List<Cmd> cmds = opts.getCommands(); List<Cmd> cmds = opts.getCommands();
assertThat(cmds.get(0).getArg("driver")).isEqualTo("stdout"); assertThat(cmds.get(0).getArg("driver")).isEqualTo("stdout");
} }
@ -71,25 +71,25 @@ public class NBCLIScenarioParserTest {
@Test @Test
public void testThatVerboseFinalParameterThrowsError() { public void testThatVerboseFinalParameterThrowsError() {
assertThatExceptionOfType(BasicError.class) assertThatExceptionOfType(BasicError.class)
.isThrownBy(() -> new NBCLIOptions(new String[]{ "scenario-test", "workload=canttouchthis"})); .isThrownBy(() -> new NBCLIOptions(new String[]{"scenario-test", "workload=canttouchthis"}));
} }
@Test @Test
public void testThatMissingScenarioNameThrowsError() { public void testThatMissingScenarioNameThrowsError() {
assertThatExceptionOfType(BasicError.class) assertThatExceptionOfType(BasicError.class)
.isThrownBy(() -> new NBCLIOptions(new String[]{ "scenario-test", "missing-scenario"})); .isThrownBy(() -> new NBCLIOptions(new String[]{"scenario-test", "missing-scenario"}));
} }
@Test @Test
public void testThatMultipleScenariosConcatenate() { public void testThatMultipleScenariosConcatenate() {
NBCLIOptions opts = new NBCLIOptions(new String[]{ "scenario-test", "default", "default"}); NBCLIOptions opts = new NBCLIOptions(new String[]{"scenario-test", "default", "default"});
List<Cmd> cmds = opts.getCommands(); List<Cmd> cmds = opts.getCommands();
assertThat(cmds.size()).isEqualTo(6); assertThat(cmds.size()).isEqualTo(6);
} }
@Test @Test
public void testThatTemplatesAreExpandedDefault() { public void testThatTemplatesAreExpandedDefault() {
NBCLIOptions opts = new NBCLIOptions(new String[]{ "scenario-test", "template-test"}); NBCLIOptions opts = new NBCLIOptions(new String[]{"scenario-test", "template-test"});
List<Cmd> cmds = opts.getCommands(); List<Cmd> cmds = opts.getCommands();
assertThat(cmds.size()).isEqualTo(1); assertThat(cmds.size()).isEqualTo(1);
assertThat(cmds.get(0).getArg("driver")).isEqualTo("stdout"); assertThat(cmds.get(0).getArg("driver")).isEqualTo("stdout");
@ -99,31 +99,31 @@ public class NBCLIScenarioParserTest {
@Test @Test
public void testThatTemplateParamsAreExpandedAndNotRemovedOverride() { public void testThatTemplateParamsAreExpandedAndNotRemovedOverride() {
NBCLIOptions opts = new NBCLIOptions(new String[]{ "scenario-test", "template-test", "cycles-test=20"}); NBCLIOptions opts = new NBCLIOptions(new String[]{"scenario-test", "template-test", "cycles-test=20"});
List<Cmd> cmds = opts.getCommands(); List<Cmd> cmds = opts.getCommands();
assertThat(cmds.size()).isEqualTo(1); assertThat(cmds.size()).isEqualTo(1);
assertThat(cmds.get(0).getParams()).isEqualTo(Map.of( assertThat(cmds.get(0).getParams()).isEqualTo(Map.of(
"alias","scenariotest_templatetest_withtemplate", "alias", "scenariotest_templatetest_withtemplate",
"cycles","20", "cycles", "20",
"cycles-test","20", "cycles-test", "20",
"driver","stdout", "driver", "stdout",
"workload","scenario-test" "workload", "scenario-test"
)); ));
} }
@Test @Test
public void testThatUndefValuesAreUndefined() { public void testThatUndefValuesAreUndefined() {
NBCLIOptions opts = new NBCLIOptions(new String[]{ "scenario-test", "schema-only", "cycles-test=20"}); NBCLIOptions opts = new NBCLIOptions(new String[]{"scenario-test", "schema-only", "cycles-test=20"});
List<Cmd> cmds = opts.getCommands(); List<Cmd> cmds = opts.getCommands();
assertThat(cmds.size()).isEqualTo(1); assertThat(cmds.size()).isEqualTo(1);
assertThat(cmds.get(0).getParams()).isEqualTo(Map.of( assertThat(cmds.get(0).getParams()).isEqualTo(Map.of(
"alias","scenariotest_schemaonly_000", "alias", "scenariotest_schemaonly_schema",
"cycles-test","20", "cycles-test", "20",
"driver","stdout", "driver", "stdout",
"tags","phase:schema", "tags", "block:'schema.*'",
"workload","scenario-test" "workload", "scenario-test"
)); ));
NBCLIOptions opts1 = new NBCLIOptions(new String[]{ "scenario-test", "schema-only", "doundef=20"}); NBCLIOptions opts1 = new NBCLIOptions(new String[]{"scenario-test", "schema-only", "doundef=20"});
List<Cmd> cmds1 = opts1.getCommands(); List<Cmd> cmds1 = opts1.getCommands();
assertThat(cmds1.size()).isEqualTo(1); assertThat(cmds1.size()).isEqualTo(1);
assertThat(cmds1.get(0).getArg("cycles-test")).isNull(); assertThat(cmds1.get(0).getArg("cycles-test")).isNull();
@ -140,7 +140,7 @@ public class NBCLIScenarioParserTest {
Path absolute = rel.toAbsolutePath(); Path absolute = rel.toAbsolutePath();
assertThat(absolute).exists(); assertThat(absolute).exists();
NBCLIOptions opts = new NBCLIOptions(new String[]{ absolute.toString(), "schema-only", "cycles-test=20"}); NBCLIOptions opts = new NBCLIOptions(new String[]{absolute.toString(), "schema-only", "cycles-test=20"});
List<Cmd> cmds = opts.getCommands(); List<Cmd> cmds = opts.getCommands();
assertThat(cmds.size()).isGreaterThan(0); assertThat(cmds.size()).isGreaterThan(0);
} }
@ -150,7 +150,7 @@ public class NBCLIScenarioParserTest {
//TODO: This might change? //TODO: This might change?
String urlScenario = "https://raw.githubusercontent.com/nosqlbench/nosqlbench/main/engine-cli/src/test/resources/activities/scenario-test.yaml"; String urlScenario = "https://raw.githubusercontent.com/nosqlbench/nosqlbench/main/engine-cli/src/test/resources/activities/scenario-test.yaml";
NBCLIOptions opts = new NBCLIOptions(new String[]{ urlScenario, "schema-only", "cycles-test=20"}); NBCLIOptions opts = new NBCLIOptions(new String[]{urlScenario, "schema-only", "cycles-test=20"});
List<Cmd> cmds = opts.getCommands(); List<Cmd> cmds = opts.getCommands();
assertThat(cmds.size()).isGreaterThan(0); assertThat(cmds.size()).isGreaterThan(0);
} }
@ -163,17 +163,17 @@ public class NBCLIScenarioParserTest {
@Test @Test
public void testSubStepSelection() { public void testSubStepSelection() {
NBCLIOptions opts = new NBCLIOptions(new String[]{ "scenario-test", "schema-only", "cycles-test=20"}); NBCLIOptions opts = new NBCLIOptions(new String[]{"scenario-test", "schema-only", "cycles-test=20"});
List<Cmd> cmds = opts.getCommands(); List<Cmd> cmds = opts.getCommands();
assertThat(cmds.size()).isEqualTo(1); assertThat(cmds.size()).isEqualTo(1);
assertThat(cmds.get(0).getParams()).isEqualTo(Map.of( assertThat(cmds.get(0).getParams()).isEqualTo(Map.of(
"alias","scenariotest_schemaonly_000", "alias", "scenariotest_schemaonly_schema",
"cycles-test","20", "cycles-test", "20",
"driver","stdout", "driver", "stdout",
"tags","phase:schema", "tags", "block:'schema.*'",
"workload","scenario-test" "workload", "scenario-test"
)); ));
NBCLIOptions opts1 = new NBCLIOptions(new String[]{ "local/example-scenarios", "namedsteps.one", "testparam1=testvalue2"}); NBCLIOptions opts1 = new NBCLIOptions(new String[]{"local/example-scenarios", "namedsteps.one", "testparam1=testvalue2"});
List<Cmd> cmds1 = opts1.getCommands(); List<Cmd> cmds1 = opts1.getCommands();
assertThat(cmds1.size()).isEqualTo(1); assertThat(cmds1.size()).isEqualTo(1);
assertThat(cmds1.get(0).getArg("cycles-test")).isNull(); assertThat(cmds1.get(0).getArg("cycles-test")).isNull();
View File
@ -1,6 +1,6 @@
name: alternate-format-test name: alternate-format-test
scenarios: scenarios:
default: default:
schema: run driver=cql protocol_version=v4 tags=block:schema threads==1 cycles=UNDEF schema: run driver=cql protocol_version=v4 tags=block:'schema.*' threads==1 cycles=UNDEF
rampup: run driver=cql protocol_version=v4 tags=block:rampup cycles=10000 rampup: run driver=cql protocol_version=v4 tags=block:rampup cycles=10000
main: run driver=cql protocol_version=v4 tags=block:main_mixed cycles=10000 main: run driver=cql protocol_version=v4 tags=block:main_mixed cycles=10000
View File
@ -1,12 +1,13 @@
min_version: "4.17.15" min_version: "5.17.1"
scenarios: scenarios:
default: default:
schema: run driver==stdout workload===scenario-test tags=block:schema schema: run driver==stdout workload===scenario-test tags=block:'schema.*'
rampup: run driver=stdout workload===scenario-test tags=block:rampup cycles=TEMPLATE(cycles1,10) rampup: run driver=stdout workload===scenario-test tags=block:rampup cycles=TEMPLATE(cycles1,10)
main: run driver=stdout workload===scenario-test tags=block:"main.*" cycles=TEMPLATE(cycles2,10) main: run driver=stdout workload===scenario-test tags=block:'main.*' cycles=TEMPLATE(cycles2,10)
schema-only: schema-only:
- "run driver=stdout workload=scenario-test tags=phase:schema doundef==undef" schema: run driver=stdout workload==scenario-test tags=block:'schema.*' doundef==undef
template-test: template-test:
with-template: run driver=stdout cycles=TEMPLATE(cycles-test,10) with-template: run driver=stdout cycles=TEMPLATE(cycles-test,10)
@ -22,6 +23,6 @@ blocks:
main: main:
ops: ops:
insert: | insert: |
insert into puppies (test) values (1) ; insert into puppies (test) values (1);
select: | select: |
select * from puppies; select * from puppies;
View File
@ -1,8 +1,8 @@
# example-scenarios.yaml # example-scenarios.yaml
scenarios: scenarios:
default: default:
- run cycles=3 alias=A driver=stdout one: run cycles=3 alias=A driver=stdout
- run cycles=5 alias=B driver=stdout two: run cycles=5 alias=B driver=stdout
namedsteps: namedsteps:
one: run cycles=3 alias=A driver=stdout testparam1=testvalue1 one: run cycles=3 alias=A driver=stdout testparam1=testvalue1
two: run cycles=5 alias=B driver=stdout two: run cycles=5 alias=B driver=stdout
View File
@ -74,7 +74,7 @@ public class GrafanaRegionAnalyzer implements Runnable {
//[2020-12-15T05:04:37.232Z[GMT] - 2020-12-15T05:04:37.232Z[GMT]] //[2020-12-15T05:04:37.232Z[GMT] - 2020-12-15T05:04:37.232Z[GMT]]
//span:interval //span:interval
//details: //details:
// params: ActivityDef:(4)/{keycount=5000000000L, hosts=node1, main-cycles=500, threads=1, workload=./keyvalue.yaml, cycles=2, stride=2, tags=phase:schema, password=cassandra, rf=3, pooling=16:16:500, driver=cql, rampup-cycles=5000000000, alias=keyvalue_default_schema, valuecount=5000000000L, errors=count, username=cassandra} // params: ActivityDef:(4)/{keycount=5000000000L, hosts=node1, main-cycles=500, threads=1, workload=./keyvalue.yaml, cycles=2, stride=2, tags=block:'schema.*', password=cassandra, rf=3, pooling=16:16:500, driver=cql, rampup-cycles=5000000000, alias=keyvalue_default_schema, valuecount=5000000000L, errors=count, username=cassandra}
//labels: //labels:
// layer: Activity // layer: Activity
// alias: keyvalue_default_schema // alias: keyvalue_default_schema
View File
@ -53,7 +53,7 @@
"span:interval", "span:interval",
"appname:nosqlbench" "appname:nosqlbench"
], ],
"text": "session: scenario_20201215_050302_981\n[2020-12-15T05:03:04.813Z[GMT] - 2020-12-15T05:03:04.813Z[GMT]]\nspan:interval\ndetails:\n params: ActivityDef:(4)/{keycount\u003d5B, hosts\u003dnode1, main-cycles\u003d1B, threads\u003d1, workload\u003d./keyvalue.yaml, cycles\u003d2, stride\u003d2, tags\u003dphase:schema, password\u003dcassandra, rf\u003d3, pooling\u003d16:16:500, driver\u003dcql, rampup-cycles\u003d5B, alias\u003dkeyvalue_default_schema, valuecount\u003d5B, errors\u003dcount, username\u003dcassandra}\nlabels:\n layer: Activity\n alias: keyvalue_default_schema\n driver: cql\n workload: ./keyvalue.yaml\n session: scenario_20201215_050302_981\n span: interval\n appname: nosqlbench\n", "text": "session: scenario_20201215_050302_981\n[2020-12-15T05:03:04.813Z[GMT] - 2020-12-15T05:03:04.813Z[GMT]]\nspan:interval\ndetails:\n params: ActivityDef:(4)/{keycount\u003d5B, hosts\u003dnode1, main-cycles\u003d1B, threads\u003d1, workload\u003d./keyvalue.yaml, cycles\u003d2, stride\u003d2, tags\u003dblock:"schema.*", password\u003dcassandra, rf\u003d3, pooling\u003d16:16:500, driver\u003dcql, rampup-cycles\u003d5B, alias\u003dkeyvalue_default_schema, valuecount\u003d5B, errors\u003dcount, username\u003dcassandra}\nlabels:\n layer: Activity\n alias: keyvalue_default_schema\n driver: cql\n workload: ./keyvalue.yaml\n session: scenario_20201215_050302_981\n span: interval\n appname: nosqlbench\n",
"time": 1608008584813, "time": 1608008584813,
"timeEnd": 1608008588900, "timeEnd": 1608008588900,
"updated": 1608008588918, "updated": 1608008588918,
@ -81,7 +81,7 @@
"span:interval", "span:interval",
"appname:nosqlbench" "appname:nosqlbench"
], ],
"text": "session: scenario_20201215_050355_270\n[2020-12-15T05:03:57.142Z[GMT] - 2020-12-15T05:03:57.142Z[GMT]]\nspan:interval\ndetails:\n params: ActivityDef:(4)/{keycount\u003d5000000000, hosts\u003dnode1, main-cycles\u003d5000000000, threads\u003d1, workload\u003d./keyvalue.yaml, cycles\u003d2, stride\u003d2, tags\u003dphase:schema, password\u003dcassandra, rf\u003d3, pooling\u003d16:16:500, driver\u003dcql, rampup-cycles\u003d5000000000, alias\u003dkeyvalue_default_schema, valuecount\u003d5000000000, errors\u003dcount, username\u003dcassandra}\nlabels:\n layer: Activity\n alias: keyvalue_default_schema\n driver: cql\n workload: ./keyvalue.yaml\n session: scenario_20201215_050355_270\n span: interval\n appname: nosqlbench\n", "text": "session: scenario_20201215_050355_270\n[2020-12-15T05:03:57.142Z[GMT] - 2020-12-15T05:03:57.142Z[GMT]]\nspan:interval\ndetails:\n params: ActivityDef:(4)/{keycount\u003d5000000000, hosts\u003dnode1, main-cycles\u003d5000000000, threads\u003d1, workload\u003d./keyvalue.yaml, cycles\u003d2, stride\u003d2, tags\u003dblock:"schema.*", password\u003dcassandra, rf\u003d3, pooling\u003d16:16:500, driver\u003dcql, rampup-cycles\u003d5000000000, alias\u003dkeyvalue_default_schema, valuecount\u003d5000000000, errors\u003dcount, username\u003dcassandra}\nlabels:\n layer: Activity\n alias: keyvalue_default_schema\n driver: cql\n workload: ./keyvalue.yaml\n session: scenario_20201215_050355_270\n span: interval\n appname: nosqlbench\n",
"time": 1608008637142, "time": 1608008637142,
"timeEnd": 1608008641044, "timeEnd": 1608008641044,
"updated": 1608008641063, "updated": 1608008641063,
@ -109,7 +109,7 @@
"span:interval", "span:interval",
"appname:nosqlbench" "appname:nosqlbench"
], ],
"text": "session: scenario_20201215_050435_240\n[2020-12-15T05:04:37.232Z[GMT] - 2020-12-15T05:04:37.232Z[GMT]]\nspan:interval\ndetails:\n params: ActivityDef:(4)/{keycount\u003d5000000000L, hosts\u003dnode1, main-cycles\u003d500, threads\u003d1, workload\u003d./keyvalue.yaml, cycles\u003d2, stride\u003d2, tags\u003dphase:schema, password\u003dcassandra, rf\u003d3, pooling\u003d16:16:500, driver\u003dcql, rampup-cycles\u003d5000000000, alias\u003dkeyvalue_default_schema, valuecount\u003d5000000000L, errors\u003dcount, username\u003dcassandra}\nlabels:\n layer: Activity\n alias: keyvalue_default_schema\n driver: cql\n workload: ./keyvalue.yaml\n session: scenario_20201215_050435_240\n span: interval\n appname: nosqlbench\n", "text": "session: scenario_20201215_050435_240\n[2020-12-15T05:04:37.232Z[GMT] - 2020-12-15T05:04:37.232Z[GMT]]\nspan:interval\ndetails:\n params: ActivityDef:(4)/{keycount\u003d5000000000L, hosts\u003dnode1, main-cycles\u003d500, threads\u003d1, workload\u003d./keyvalue.yaml, cycles\u003d2, stride\u003d2, tags\u003dblock:"schema.*", password\u003dcassandra, rf\u003d3, pooling\u003d16:16:500, driver\u003dcql, rampup-cycles\u003d5000000000, alias\u003dkeyvalue_default_schema, valuecount\u003d5000000000L, errors\u003dcount, username\u003dcassandra}\nlabels:\n layer: Activity\n alias: keyvalue_default_schema\n driver: cql\n workload: ./keyvalue.yaml\n session: scenario_20201215_050435_240\n span: interval\n appname: nosqlbench\n",
"time": 1608008677232, "time": 1608008677232,
"timeEnd": 1608008681038, "timeEnd": 1608008681038,
"updated": 1608008681058, "updated": 1608008681058,
@ -137,7 +137,7 @@
"span:interval", "span:interval",
"appname:nosqlbench" "appname:nosqlbench"
], ],
"text": "session: scenario_20201215_050435_240\n[2020-12-15T05:04:41.120Z[GMT] - 2020-12-15T05:04:41.120Z[GMT]]\nspan:interval\ndetails:\n params: ActivityDef:(3)/{keycount\u003d5000000000L, hosts\u003dnode1, main-cycles\u003d500, threads\u003d960, workload\u003d./keyvalue.yaml, cycles\u003d5000000000, stride\u003d1, tags\u003dphase:rampup, password\u003dcassandra, rf\u003d3, pooling\u003d16:16:500, driver\u003dcql, rampup-cycles\u003d5000000000, alias\u003dkeyvalue_default_rampup, valuecount\u003d5000000000L, errors\u003dcount, username\u003dcassandra}\nlabels:\n layer: Activity\n alias: keyvalue_default_rampup\n driver: cql\n workload: ./keyvalue.yaml\n session: scenario_20201215_050435_240\n span: interval\n appname: nosqlbench\n", "text": "session: scenario_20201215_050435_240\n[2020-12-15T05:04:41.120Z[GMT] - 2020-12-15T05:04:41.120Z[GMT]]\nspan:interval\ndetails:\n params: ActivityDef:(3)/{keycount\u003d5000000000L, hosts\u003dnode1, main-cycles\u003d500, threads\u003d960, workload\u003d./keyvalue.yaml, cycles\u003d5000000000, stride\u003d1, tags\u003dblock:rampup, password\u003dcassandra, rf\u003d3, pooling\u003d16:16:500, driver\u003dcql, rampup-cycles\u003d5000000000, alias\u003dkeyvalue_default_rampup, valuecount\u003d5000000000L, errors\u003dcount, username\u003dcassandra}\nlabels:\n layer: Activity\n alias: keyvalue_default_rampup\n driver: cql\n workload: ./keyvalue.yaml\n session: scenario_20201215_050435_240\n span: interval\n appname: nosqlbench\n",
"time": 1608008681120, "time": 1608008681120,
"timeEnd": 1608042107780, "timeEnd": 1608042107780,
"updated": 1608042107859, "updated": 1608042107859,
@ -165,7 +165,7 @@
"span:interval", "span:interval",
"appname:nosqlbench" "appname:nosqlbench"
], ],
"text": "session: scenario_20201215_050435_240\n[2020-12-15T14:21:47.918Z[GMT] - 2020-12-15T14:21:47.918Z[GMT]]\nspan:interval\ndetails:\n params: ActivityDef:(3)/{keycount\u003d5000000000L, hosts\u003dnode1, main-cycles\u003d500, threads\u003d500, workload\u003d./keyvalue.yaml, cycles\u003d500, stride\u003d10, tags\u003dphase:main, password\u003dcassandra, rf\u003d3, pooling\u003d16:16:500, driver\u003dcql, rampup-cycles\u003d5000000000, alias\u003dkeyvalue_default_main, valuecount\u003d5000000000L, errors\u003dcount, username\u003dcassandra}\nlabels:\n layer: Activity\n alias: keyvalue_default_main\n driver: cql\n workload: ./keyvalue.yaml\n session: scenario_20201215_050435_240\n span: interval\n appname: nosqlbench\n", "text": "session: scenario_20201215_050435_240\n[2020-12-15T14:21:47.918Z[GMT] - 2020-12-15T14:21:47.918Z[GMT]]\nspan:interval\ndetails:\n params: ActivityDef:(3)/{keycount\u003d5000000000L, hosts\u003dnode1, main-cycles\u003d500, threads\u003d500, workload\u003d./keyvalue.yaml, cycles\u003d500, stride\u003d10, tags\u003dblock:main, password\u003dcassandra, rf\u003d3, pooling\u003d16:16:500, driver\u003dcql, rampup-cycles\u003d5000000000, alias\u003dkeyvalue_default_main, valuecount\u003d5000000000L, errors\u003dcount, username\u003dcassandra}\nlabels:\n layer: Activity\n alias: keyvalue_default_main\n driver: cql\n workload: ./keyvalue.yaml\n session: scenario_20201215_050435_240\n span: interval\n appname: nosqlbench\n",
"time": 1608042107918, "time": 1608042107918,
"timeEnd": 1608042108099, "timeEnd": 1608042108099,
"updated": 1608042108117, "updated": 1608042108117,
@ -193,7 +193,7 @@
"span:interval", "span:interval",
"appname:nosqlbench" "appname:nosqlbench"
], ],
"text": "session: scenario_20201215_050435_240\n[2020-12-15T14:21:47.918Z[GMT] - 2020-12-15T14:21:47.918Z[GMT]]\nspan:interval\ndetails:\n params: ActivityDef:(3)/{keycount\u003d5000000000L, hosts\u003dnode1, main-cycles\u003d500, threads\u003d500, workload\u003d./keyvalue.yaml, cycles\u003d500, stride\u003d10, tags\u003dphase:main, password\u003dcassandra, rf\u003d3, pooling\u003d16:16:500, driver\u003dcql, rampup-cycles\u003d5000000000, alias\u003dkeyvalue_default_main, valuecount\u003d5000000000L, errors\u003dcount, username\u003dcassandra}\nlabels:\n layer: Activity\n alias: keyvalue_default_main\n driver: cql\n workload: ./keyvalue.yaml\n session: scenario_20201215_050435_240\n span: interval\n appname: nosqlbench\n", "text": "session: scenario_20201215_050435_240\n[2020-12-15T14:21:47.918Z[GMT] - 2020-12-15T14:21:47.918Z[GMT]]\nspan:interval\ndetails:\n params: ActivityDef:(3)/{keycount\u003d5000000000L, hosts\u003dnode1, main-cycles\u003d500, threads\u003d500, workload\u003d./keyvalue.yaml, cycles\u003d500, stride\u003d10, tags\u003dblock:main, password\u003dcassandra, rf\u003d3, pooling\u003d16:16:500, driver\u003dcql, rampup-cycles\u003d5000000000, alias\u003dkeyvalue_default_main, valuecount\u003d5000000000L, errors\u003dcount, username\u003dcassandra}\nlabels:\n layer: Activity\n alias: keyvalue_default_main\n driver: cql\n workload: ./keyvalue.yaml\n session: scenario_20201215_050435_240\n span: interval\n appname: nosqlbench\n",
"time": 1608042107918, "time": 1608042107918,
"timeEnd": 1608042108127, "timeEnd": 1608042108127,
"updated": 1608042108144, "updated": 1608042108144,
@ -221,7 +221,7 @@
"span:interval", "span:interval",
"appname:nosqlbench" "appname:nosqlbench"
], ],
"text": "session: scenario_20201215_050435_240\n[2020-12-15T05:04:41.120Z[GMT] - 2020-12-15T05:04:41.120Z[GMT]]\nspan:interval\ndetails:\n params: ActivityDef:(3)/{keycount\u003d5000000000L, hosts\u003dnode1, main-cycles\u003d500, threads\u003d960, workload\u003d./keyvalue.yaml, cycles\u003d5000000000, stride\u003d1, tags\u003dphase:rampup, password\u003dcassandra, rf\u003d3, pooling\u003d16:16:500, driver\u003dcql, rampup-cycles\u003d5000000000, alias\u003dkeyvalue_default_rampup, valuecount\u003d5000000000L, errors\u003dcount, username\u003dcassandra}\nlabels:\n layer: Activity\n alias: keyvalue_default_rampup\n driver: cql\n workload: ./keyvalue.yaml\n session: scenario_20201215_050435_240\n span: interval\n appname: nosqlbench\n", "text": "session: scenario_20201215_050435_240\n[2020-12-15T05:04:41.120Z[GMT] - 2020-12-15T05:04:41.120Z[GMT]]\nspan:interval\ndetails:\n params: ActivityDef:(3)/{keycount\u003d5000000000L, hosts\u003dnode1, main-cycles\u003d500, threads\u003d960, workload\u003d./keyvalue.yaml, cycles\u003d5000000000, stride\u003d1, tags\u003dblock:rampup, password\u003dcassandra, rf\u003d3, pooling\u003d16:16:500, driver\u003dcql, rampup-cycles\u003d5000000000, alias\u003dkeyvalue_default_rampup, valuecount\u003d5000000000L, errors\u003dcount, username\u003dcassandra}\nlabels:\n layer: Activity\n alias: keyvalue_default_rampup\n driver: cql\n workload: ./keyvalue.yaml\n session: scenario_20201215_050435_240\n span: interval\n appname: nosqlbench\n",
"time": 1608008681120, "time": 1608008681120,
"timeEnd": 1608042108127, "timeEnd": 1608042108127,
"updated": 1608042108167, "updated": 1608042108167,
@ -249,7 +249,7 @@
"span:interval", "span:interval",
"appname:nosqlbench" "appname:nosqlbench"
], ],
"text": "session: scenario_20201215_050435_240\n[2020-12-15T05:04:37.232Z[GMT] - 2020-12-15T05:04:37.232Z[GMT]]\nspan:interval\ndetails:\n params: ActivityDef:(4)/{keycount\u003d5000000000L, hosts\u003dnode1, main-cycles\u003d500, threads\u003d1, workload\u003d./keyvalue.yaml, cycles\u003d2, stride\u003d2, tags\u003dphase:schema, password\u003dcassandra, rf\u003d3, pooling\u003d16:16:500, driver\u003dcql, rampup-cycles\u003d5000000000, alias\u003dkeyvalue_default_schema, valuecount\u003d5000000000L, errors\u003dcount, username\u003dcassandra}\nlabels:\n layer: Activity\n alias: keyvalue_default_schema\n driver: cql\n workload: ./keyvalue.yaml\n session: scenario_20201215_050435_240\n span: interval\n appname: nosqlbench\n", "text": "session: scenario_20201215_050435_240\n[2020-12-15T05:04:37.232Z[GMT] - 2020-12-15T05:04:37.232Z[GMT]]\nspan:interval\ndetails:\n params: ActivityDef:(4)/{keycount\u003d5000000000L, hosts\u003dnode1, main-cycles\u003d500, threads\u003d1, workload\u003d./keyvalue.yaml, cycles\u003d2, stride\u003d2, tags\u003dblock:"schema.*", password\u003dcassandra, rf\u003d3, pooling\u003d16:16:500, driver\u003dcql, rampup-cycles\u003d5000000000, alias\u003dkeyvalue_default_schema, valuecount\u003d5000000000L, errors\u003dcount, username\u003dcassandra}\nlabels:\n layer: Activity\n alias: keyvalue_default_schema\n driver: cql\n workload: ./keyvalue.yaml\n session: scenario_20201215_050435_240\n span: interval\n appname: nosqlbench\n",
"time": 1608008677232, "time": 1608008677232,
"timeEnd": 1608042108127, "timeEnd": 1608042108127,
"updated": 1608042108190, "updated": 1608042108190,
View File
@ -33,7 +33,7 @@ You can mark statements as schema phase statements by adding this set of
tags to the statements, either directly, or by block: tags to the statements, either directly, or by block:
tags: tags:
phase: schema block: schema
## Rampup phase ## Rampup phase
@ -64,7 +64,7 @@ You can mark statements as rampup phase statements by adding this set of
tags to the statements, either directly, or by block: tags to the statements, either directly, or by block:
tags: tags:
phase: rampup block: rampup
## Main phase ## Main phase
@ -76,4 +76,4 @@ You can mark statements as main phase statements by adding this set of
tags to the statements, either directly, or by block: tags to the statements, either directly, or by block:
tags: tags:
phase: main block: main
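Putting the three phase tags together, here is a sketch of a conventionally tagged workload; the keyspace, table, and op names are hypothetical, and the explicit tags mirror the guidance above (recent versions also auto-inject a `block` tag from the block name):

```yaml
blocks:
  schema:
    tags:
      block: schema
    ops:
      create-table: create table if not exists ks.t (k text primary key, v text);
  rampup:
    tags:
      block: rampup
    ops:
      load-row: insert into ks.t (k, v) values ('{key}', '{value}');
  main:
    tags:
      block: main
    ops:
      read-row: select v from ks.t where k='{key}';
```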
View File
@ -15,7 +15,7 @@ command line, go ahead and execute the following command, replacing
the `host=<host-or-ip>` with that of one of your database nodes. the `host=<host-or-ip>` with that of one of your database nodes.
```text ```text
./nb run driver=cql workload=cql-keyvalue tags=phase:schema host=<host-or-ip> ./nb run driver=cql workload=cql-keyvalue tags=block:'schema.*' host=<host-or-ip>
``` ```
This command is creating the following schema in your database: This command is creating the following schema in your database:
@ -45,8 +45,8 @@ defines the activity.
In this example, we use `cql-keyvalue` which is a pre-built workload that In this example, we use `cql-keyvalue` which is a pre-built workload that
is packaged with nosqlbench. is packaged with nosqlbench.
`tags=phase:schema` tells nosqlbench to run the yaml block that has `tags=block:"schema.*"` tells nosqlbench to run the yaml block that has
the `phase:schema` defined as one of its tags. a `block` tag matching `schema.*`.
In this example, that is the DDL portion of the `cql-keyvalue` In this example, that is the DDL portion of the `cql-keyvalue`
workload. `host=...` tells nosqlbench how to connect to your database, workload. `host=...` tells nosqlbench how to connect to your database,
@ -68,7 +68,7 @@ statements.
Go ahead and execute the following command: Go ahead and execute the following command:
./nb run driver=stdout workload=cql-keyvalue tags=phase:rampup cycles=10 ./nb run driver=stdout workload=cql-keyvalue tags=block:rampup cycles=10
You should see 10 of the following statements in your console You should see 10 of the following statements in your console
@ -91,12 +91,12 @@ be the same from run to run.
Now we are ready to write some data to our database. Go ahead and execute Now we are ready to write some data to our database. Go ahead and execute
the following from your command line: the following from your command line:
./nb run driver=cql workload=cql-keyvalue tags=phase:rampup host=<host-or-ip> cycles=100k --progress console:1s ./nb run driver=cql workload=cql-keyvalue tags=block:rampup host=<host-or-ip> cycles=100k --progress console:1s
Note the differences between this and the command that we used to generate Note the differences between this and the command that we used to generate
the schema. the schema.
`tags=phase:rampup` is running the yaml block in `cql-keyvalue` that has `tags=block:rampup` is running the yaml block in `cql-keyvalue` that has
only INSERT statements. only INSERT statements.
`cycles=100k` will run a total of 100,000 operations, in this case, `cycles=100k` will run a total of 100,000 operations, in this case,
@ -139,7 +139,7 @@ Now that we have a base dataset of 100k rows in the database, we will now
run a mixed read / write workload, by default this runs a 50% read / 50% run a mixed read / write workload, by default this runs a 50% read / 50%
write workload. write workload.
./nb run driver=cql workload=cql-keyvalue tags=phase:main host=<host-or-ip> cycles=100k cyclerate=5000 threads=50 --progress console:1s ./nb run driver=cql workload=cql-keyvalue tags=block:main host=<host-or-ip> cycles=100k cyclerate=5000 threads=50 --progress console:1s
You should see output that looks like this: You should see output that looks like this:
@ -174,7 +174,7 @@ cql-keyvalue: 100.00%/Finished (details: min=0 cycle=100000 max=100000)
We have a few new command line options here: We have a few new command line options here:
`tags=phase:main` is using a new block in our activity's yaml that `tags=block:main` is using a new block in our activity's yaml that
contains both read and write queries. contains both read and write queries.
`threads=50` is an important one. The default for nosqlbench is to run `threads=50` is an important one. The default for nosqlbench is to run
View File
@ -103,8 +103,8 @@ semicolon, then a newline is also added immediately after.
~~~text ~~~text
./nb \ ./nb \
start driver=stdout alias=a cycles=100K workload=cql-iot tags=phase:main\ start driver=stdout alias=a cycles=100K workload=cql-iot tags=block:main\
start driver=stdout alias=b cycles=200K workload=cql-iot tags=phase:main\ start driver=stdout alias=b cycles=200K workload=cql-iot tags=block:main\
waitmillis 10000 \ waitmillis 10000 \
await one \ await one \
stop two stop two
View File
@ -46,9 +46,9 @@ built-ins.
Each built-in contains the following tags that can be used to break the workload up into uniform phases: Each built-in contains the following tags that can be used to break the workload up into uniform phases:
- schema - selected with `tags=phase:schema` - schema - selected with `tags=block:"schema.*"`
- rampup - selected with `tags=phase:rampup` - rampup - selected with `tags=block:rampup`
- main - selected with `tags=phase:main` - main - selected with `tags=block:main`
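These selectors map directly onto scenario steps; a hypothetical named scenario wiring all three phases together:

```yaml
scenarios:
  default:
    schema: run driver=cql tags=block:"schema.*" threads==1 cycles==UNDEF
    rampup: run driver=cql tags=block:rampup cycles=10000
    main: run driver=cql tags=block:main cycles=10000
```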
### Parameters ### Parameters
View File
@ -198,7 +198,7 @@
<dependency> <dependency>
<groupId>io.netty</groupId> <groupId>io.netty</groupId>
<artifactId>netty-handler</artifactId> <artifactId>netty-handler</artifactId>
<version>4.1.86.Final</version> <version>4.1.87.Final</version>
</dependency> </dependency>
<dependency> <dependency>
View File
@ -76,6 +76,7 @@ public class NBIO implements NBPathsAPI.Facets {
return Arrays.asList(split); return Arrays.asList(split);
} }
public static CSVParser readFileCSV(String filename, String... searchPaths) { public static CSVParser readFileCSV(String filename, String... searchPaths) {
return NBIO.readFileDelimCSV(filename, ',', searchPaths); return NBIO.readFileDelimCSV(filename, ',', searchPaths);
} }
View File
@ -1,4 +1,4 @@
min_version: "4.17.15" min_version: "5.17.1"
# eb sequences concat # eb sequences concat
# yields A B B C C C D D D D A B B C C C D D D D # yields A B B C C C D D D D A B B C C C D D D D
View File
@ -148,7 +148,7 @@ var yaml_file = "TEMPLATE(yaml_file,cql-iot)";
// //
// schema_activitydef.alias = "findmax_schema"; // schema_activitydef.alias = "findmax_schema";
// schema_activitydef.threads = "1"; // schema_activitydef.threads = "1";
// schema_activitydef.tags = "TEMPLATE(schematags,phase:schema)"; // schema_activitydef.tags = "TEMPLATE(schematags,block:'schema.*')";
// printf("Creating schema with schematags: %s\n",schema_activitydef.tags.toString()); // printf("Creating schema with schematags: %s\n",schema_activitydef.tags.toString());
// //
// scenario.run(schema_activitydef); // scenario.run(schema_activitydef);
@ -164,7 +164,7 @@ activitydef = params.withDefaults({
activitydef.alias = "findmax"; activitydef.alias = "findmax";
activitydef.cycles = "1000000000"; activitydef.cycles = "1000000000";
activitydef.recycles = "1000000000"; activitydef.recycles = "1000000000";
activitydef.tags = "TEMPLATE(maintags,phase:main)"; activitydef.tags = "TEMPLATE(maintags,block:main)";
function ops_s(iteration, results) { function ops_s(iteration, results) {
return results[iteration].ops_per_second; return results[iteration].ops_per_second;
View File
@ -25,7 +25,7 @@ function as_js(ref) {
} }
if (ref instanceof java.util.Map) { if (ref instanceof java.util.Map) {
let newobj = {}; let newobj = {};
for each(key in ref.keySet()) { for (let key of ref.keySet()) {
newobj[key] = Java.asJSONCompatible(ref.get(key)); newobj[key] = Java.asJSONCompatible(ref.get(key));
} }
return newobj; return newobj;
@ -112,7 +112,7 @@ schema_activitydef = params.withDefaults({
}); });
schema_activitydef.alias = "optimo_schema"; schema_activitydef.alias = "optimo_schema";
schema_activitydef.threads = "1"; schema_activitydef.threads = "1";
schema_activitydef.tags = "TEMPLATE(schematags,phase:schema)"; schema_activitydef.tags = "TEMPLATE(schematags,block:'schema.*')";
schema_activitydef.speculative = "none" schema_activitydef.speculative = "none"
print("Creating schema with schematags:" + schema_activitydef.tags); print("Creating schema with schematags:" + schema_activitydef.tags);
@ -129,7 +129,7 @@ activitydef = params.withDefaults({
activitydef.alias = "optimo"; activitydef.alias = "optimo";
activitydef.cycles = "1000000000"; activitydef.cycles = "1000000000";
activitydef.recycles = "1000000000"; activitydef.recycles = "1000000000";
activitydef.tags = "TEMPLATE(maintags,phase:main)"; activitydef.tags = "TEMPLATE(maintags,block:main)";
activitydef.speculative = "none" activitydef.speculative = "none"
print("Iterating main workload with tags:" + activitydef.tags); print("Iterating main workload with tags:" + activitydef.tags);
View File
@ -60,7 +60,7 @@ print("starting activity for stepup analysis");
var activitydef = params.withDefaults({ var activitydef = params.withDefaults({
'alias': 'stepup', 'alias': 'stepup',
'driver': driver, 'driver': driver,
'tags':'any(block:main.*,phase:main)', 'tags':'any(block:main.*,block:main)',
'workload' : 'TEMPLATE(workload)', 'workload' : 'TEMPLATE(workload)',
'cycles': '1t', 'cycles': '1t',
'stride': '1000', 'stride': '1000',
View File
@ -9,7 +9,7 @@ printf " - nosqlbench-4.17.20+\n"
printf " - nosqlbench-4.15.100+\n" printf " - nosqlbench-4.15.100+\n"
printf " FURTHER: This removes all your local tags first and then synchronizes\n" printf " FURTHER: This removes all your local tags first and then synchronizes\n"
printf " from origin. If you have any special tags only on local, it will remove them.\n" printf " from origin. If you have any special tags only on local, it will remove them.\n"
printf " If you do NOT want to do this, hit crtl-c now!\n" printf " If you do NOT want to do this, hit control-c now!\n"
read response read response
#delete all the remote tags with the pattern you're looking for ... #delete all the remote tags with the pattern you're looking for ...
View File
@ -0,0 +1,64 @@
/*
* Copyright (c) 2023 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.nosqlbench.virtdata.library.basics.shared.unary_string;
import io.nosqlbench.api.content.NBIO;
import io.nosqlbench.api.errors.BasicError;
import io.nosqlbench.virtdata.api.annotations.Categories;
import io.nosqlbench.virtdata.api.annotations.Category;
import io.nosqlbench.virtdata.api.annotations.Example;
import io.nosqlbench.virtdata.api.annotations.ThreadSafeMapper;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import java.util.List;
import java.util.function.Function;
/**
* Provides the first line of text from the provided target file.
*/
@ThreadSafeMapper
@Categories({Category.general})
public class TextOfFile implements Function<Object, String> {
private static final Logger logger = LogManager.getLogger(TextOfFile.class);
private final String text;
public String toString() {
return getClass().getSimpleName();
}
@Example({"TextOfFile()", "Provides the first line of text in the specified file."})
public TextOfFile(String targetFile) {
try {
final List<String> lines = NBIO.readLines(targetFile);
logger.info("TextOfFile() reading: {}", targetFile);
if (lines.isEmpty()) {
throw new BasicError(String.format("Unable to locate content for %s", this));
}
text = lines.get(0);
} catch (Exception ex) {
throw new BasicError(String.format("Unable to locate file %s: ", targetFile), ex);
}
}
@Override
public String apply(Object obj) {
return text;
}
}
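As a usage sketch, the new function slots into a binding recipe like any other virtdata function; the binding and file names here are hypothetical:

```yaml
bindings:
  # yields the first line of payload-sample.txt on every cycle
  static_payload: TextOfFile('payload-sample.txt')
```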
View File
@ -0,0 +1,45 @@
/*
* Copyright (c) 2023 nosqlbench
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.nosqlbench.virtdata.library.basics.shared.unary_string;
import org.junit.jupiter.api.Test;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatException;
class TextOfFileTest {
private static final String EXPECTED_CONTENTS = "test-data-entry";
private static final String NOT_EXPECTED_CONTENTS = "foozy-content";
private static final String VALID_PATH = "text-provider-sample.txt";
private static final String INVALID_PATH = "not-good.txt";
private static final String PLACEHOLDER_APPLY_INPUT = "placeholder-input";
@Test
void testValidPathAndContents() {
final TextOfFile textOfFile = new TextOfFile(VALID_PATH);
assertThat(textOfFile.apply(PLACEHOLDER_APPLY_INPUT)).isEqualTo(EXPECTED_CONTENTS);
}
@Test
void testInvalidPathAndContents() {
final TextOfFile textOfFileValid = new TextOfFile(VALID_PATH);
assertThatException().isThrownBy(() -> new TextOfFile(INVALID_PATH));
assertThat(textOfFileValid.apply(PLACEHOLDER_APPLY_INPUT)).isNotEqualTo(NOT_EXPECTED_CONTENTS);
}
}
View File
@ -0,0 +1 @@
test-data-entry