Mirror of https://github.com/nosqlbench/nosqlbench.git (synced 2025-02-25 18:55:28 -06:00)

commit ff30ae9982
Merge branch 'main' of github.com:nosqlbench/nosqlbench
@@ -18,16 +18,16 @@ package io.nosqlbench.cqlgen.core;
 import com.google.gson.Gson;
 import com.google.gson.GsonBuilder;
+import io.nosqlbench.api.apps.BundledApp;
 import io.nosqlbench.api.content.Content;
 import io.nosqlbench.api.content.NBIO;
-import io.nosqlbench.api.apps.BundledApp;
+import io.nosqlbench.cqlgen.api.BindingsLibrary;
 import io.nosqlbench.cqlgen.binders.Binding;
 import io.nosqlbench.cqlgen.binders.BindingsAccumulator;
-import io.nosqlbench.cqlgen.api.BindingsLibrary;
 import io.nosqlbench.cqlgen.binders.NamingFolio;
+import io.nosqlbench.cqlgen.transformers.CGModelTransformers;
 import io.nosqlbench.cqlgen.model.*;
 import io.nosqlbench.cqlgen.parser.CqlModelParser;
-import io.nosqlbench.cqlgen.transformers.CGModelTransformers;
 import io.nosqlbench.nb.annotations.Service;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
@@ -75,14 +75,14 @@ public class CGWorkloadExporter implements BundledApp {
     private Map<String, List<String>> blockplan = Map.of();

     private final Map<String, Double> timeouts = new HashMap<String, Double>(Map.of(
-        "create", 60.0,
-        "truncate", 900.0,
-        "drop", 900.0,
-        "scan", 30.0,
-        "select", 10.0,
-        "insert", 10.0,
-        "delete", 10.0,
-        "update", 10.0
+        "create", 60.0,
+        "truncate", 900.0,
+        "drop", 900.0,
+        "scan", 30.0,
+        "select", 10.0,
+        "insert", 10.0,
+        "delete", 10.0,
+        "update", 10.0
     ));

     public static void main(String[] args) {
@@ -166,7 +166,7 @@ public class CGWorkloadExporter implements BundledApp {

         this.model = CqlModelParser.parse(ddl, srcpath);
         List<String> errorlist = model.getReferenceErrors();
-        if (errorlist.size()>0) {
+        if (errorlist.size() > 0) {
             for (String error : errorlist) {
                 logger.error(error);
             }
@@ -177,12 +177,12 @@ public class CGWorkloadExporter implements BundledApp {
         String workload = getWorkloadAsYaml();
         try {
             Files.writeString(
-                target,
-                workload,
-                StandardOpenOption.CREATE, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING
+                target,
+                workload,
+                StandardOpenOption.CREATE, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING
             );
             logger.info("Wrote workload template as '" + target + "'. Bear in mind that this is simply one version " +
-                "of a workload using this schema, and may not be representative of actual production usage patterns.");
+                "of a workload using this schema, and may not be representative of actual production usage patterns.");
         } catch (IOException e) {
             String errmsg = "There was an error writing '" + target + "'.";
             logger.error(errmsg);
@@ -218,7 +218,7 @@ public class CGWorkloadExporter implements BundledApp {
         workload.put("bindings", new LinkedHashMap<String, String>());
         Map<String, Object> blocks = new LinkedHashMap<>();
         workload.put("params", new LinkedHashMap<>(
-            Map.of("cl", "LOCAL_QUORUM")
+            Map.of("cl", "LOCAL_QUORUM")
         ));
         workload.put("blocks", blocks);

@@ -227,7 +227,7 @@ public class CGWorkloadExporter implements BundledApp {
             List<String> components = blocknameAndComponents.getValue();

             LinkedHashMap<String, Object> block = new LinkedHashMap<>(
-                Map.of("params", new LinkedHashMap<String, Object>())
+                Map.of("params", new LinkedHashMap<String, Object>())
             );
             for (String component : components) {
                 Map<String, Object> additions = switch (component) {
@@ -319,11 +319,11 @@ public class CGWorkloadExporter implements BundledApp {
         return new LinkedHashMap<>() {{

             put("default",
-                new LinkedHashMap<>() {{
-                    put("schema", "run driver=cql tags=block:schema.* threads===UNDEF cycles===UNDEF");
-                    put("rampup", "run driver=cql tags=block:rampup.* threads=auto cycles===TEMPLATE(rampup-cycles,10000)");
-                    put("main", "run driver=cql tags=block:main.* threads=auto cycles===TEMPLATE(main-cycles,10000)");
-                }});
+                new LinkedHashMap<>() {{
+                    put("schema", "run driver=cql tags=block:'schema-*.*' threads===UNDEF cycles===UNDEF");
+                    put("rampup", "run driver=cql tags=block:rampup threads=auto cycles===TEMPLATE(rampup-cycles,10000)");
+                    put("main", "run driver=cql tags=block:'main-*.*' threads=auto cycles===TEMPLATE(main-cycles,10000)");
+                }});

             put("main-insert", "run driver=cql tags=block:main-insert threads=auto cycles===TEMPLATE(main-cycles,10000)");
             put("main-select", "run driver=cql tags=block:main-select threads=auto cycles===TEMPLATE(main-cycles,10000)");
@@ -351,12 +351,12 @@ public class CGWorkloadExporter implements BundledApp {
                 logger.debug(() -> "skipping table " + table.getFullName() + " for scan since there are no clustering columns");
             }
             ops.put(
-                namer.nameFor(table, "optype", "scan", "blockname", blockname),
-                Map.of(
-                    "prepared", genScanSyntax(table),
-                    "timeout", timeouts.get("scan"),
-                    "ratio", readRatioFor(table)
-                )
+                namer.nameFor(table, "optype", "scan", "blockname", blockname),
+                Map.of(
+                    "prepared", genScanSyntax(table),
+                    "timeout", timeouts.get("scan"),
+                    "ratio", readRatioFor(table)
+                )
             );
         }
         return blockdata;
@@ -364,14 +364,14 @@ public class CGWorkloadExporter implements BundledApp {

     private String genScanSyntax(CqlTable table) {
         return """
-            select * from KEYSPACE.TABLE
-            where PREDICATE
-            LIMIT;
-            """
-            .replace("KEYSPACE", table.getKeyspace().getName())
-            .replace("TABLE", table.getName())
-            .replace("PREDICATE", genPredicateTemplate(table, -1))
-            .replace("LIMIT", genLimitSyntax(table));
+            select * from KEYSPACE.TABLE
+            where PREDICATE
+            LIMIT;
+            """
+            .replace("KEYSPACE", table.getKeyspace().getName())
+            .replace("TABLE", table.getName())
+            .replace("PREDICATE", genPredicateTemplate(table, -1))
+            .replace("LIMIT", genLimitSyntax(table));
     }


@@ -381,12 +381,12 @@ public class CGWorkloadExporter implements BundledApp {
         blockdata.put("ops", ops);
         for (CqlTable table : model.getTableDefs()) {
             ops.put(
-                namer.nameFor(table, "optype", "select", "blockname", blockname),
-                Map.of(
-                    "prepared", genSelectSyntax(table),
-                    "timeout", timeouts.get("select"),
-                    "ratio", readRatioFor(table)
-                )
+                namer.nameFor(table, "optype", "select", "blockname", blockname),
+                Map.of(
+                    "prepared", genSelectSyntax(table),
+                    "timeout", timeouts.get("select"),
+                    "ratio", readRatioFor(table)
+                )
             );
         }
         return blockdata;
@@ -394,14 +394,14 @@ public class CGWorkloadExporter implements BundledApp {

     private String genSelectSyntax(CqlTable table) {
         return """
-            select * from KEYSPACE.TABLE
-            where PREDICATE
-            LIMIT;
-            """
-            .replace("KEYSPACE", table.getKeyspace().getName())
-            .replace("TABLE", table.getName())
-            .replace("PREDICATE", genPredicateTemplate(table, 0))
-            .replace("LIMIT", genLimitSyntax(table));
+            select * from KEYSPACE.TABLE
+            where PREDICATE
+            LIMIT;
+            """
+            .replace("KEYSPACE", table.getKeyspace().getName())
+            .replace("TABLE", table.getName())
+            .replace("PREDICATE", genPredicateTemplate(table, 0))
+            .replace("LIMIT", genLimitSyntax(table));
     }

     private String genLimitSyntax(CqlTable table) {
@@ -415,12 +415,12 @@ public class CGWorkloadExporter implements BundledApp {
         for (CqlTable table : model.getTableDefs()) {
             if (!isCounterTable(table)) {
                 ops.put(
-                    namer.nameFor(table, "optype", "insert", "blockname", blockname),
-                    Map.of(
-                        "prepared", genInsertSyntax(table),
-                        "timeout", timeouts.get("insert"),
-                        "ratio", writeRatioFor(table)
-                    )
+                    namer.nameFor(table, "optype", "insert", "blockname", blockname),
+                    Map.of(
+                        "prepared", genInsertSyntax(table),
+                        "timeout", timeouts.get("insert"),
+                        "ratio", writeRatioFor(table)
+                    )
                 );
             }
         }
@@ -433,22 +433,22 @@ public class CGWorkloadExporter implements BundledApp {
         }

         return """
-            insert into KEYSPACE.TABLE
-            ( FIELDNAMES )
-            VALUES
-            ( BINDINGS );
-            """
-            .replace("KEYSPACE", table.getKeyspace().getName())
-            .replace("TABLE", table.getName())
-            .replace("FIELDNAMES",
-                String.join(", ",
-                    table.getColumnDefs().stream()
-                        .map(CqlTableColumn::getName).toList()))
-            .replaceAll("BINDINGS",
-                String.join(", ",
-                    table.getColumnDefs().stream()
-                        .map(c -> binder.forColumn(c))
-                        .map(c -> "{" + c.getName() + "}").toList()));
+            insert into KEYSPACE.TABLE
+            ( FIELDNAMES )
+            VALUES
+            ( BINDINGS );
+            """
+            .replace("KEYSPACE", table.getKeyspace().getName())
+            .replace("TABLE", table.getName())
+            .replace("FIELDNAMES",
+                String.join(", ",
+                    table.getColumnDefs().stream()
+                        .map(CqlTableColumn::getName).toList()))
+            .replaceAll("BINDINGS",
+                String.join(", ",
+                    table.getColumnDefs().stream()
+                        .map(c -> binder.forColumn(c))
+                        .map(c -> "{" + c.getName() + "}").toList()));
     }


@@ -458,12 +458,12 @@ public class CGWorkloadExporter implements BundledApp {
         blockdata.put("ops", ops);
         for (CqlTable table : model.getTableDefs()) {
             ops.put(
-                namer.nameFor(table, "optype", "update", "blockname", blockname),
-                Map.of(
-                    "prepared", genUpdateSyntax(table),
-                    "timeout", timeouts.get("update"),
-                    "ratio", writeRatioFor(table)
-                )
+                namer.nameFor(table, "optype", "update", "blockname", blockname),
+                Map.of(
+                    "prepared", genUpdateSyntax(table),
+                    "timeout", timeouts.get("update"),
+                    "ratio", writeRatioFor(table)
+                )
             );
         }
         return blockdata;
@@ -472,7 +472,7 @@ public class CGWorkloadExporter implements BundledApp {

     private boolean isCounterTable(CqlTable table) {
         return table.getColumnDefs().stream()
-            .anyMatch(cd -> cd.getTrimmedTypedef().equalsIgnoreCase("counter"));
+            .anyMatch(cd -> cd.getTrimmedTypedef().equalsIgnoreCase("counter"));
     }

     private int totalRatioFor(CqlTable table) {
@@ -540,9 +540,9 @@ public class CGWorkloadExporter implements BundledApp {

         // TODO; constraints on predicates based on valid constructions
         pkeys.stream().map(this::genPredicatePart)
-            .forEach(p -> {
-                sb.append(p).append("\n AND ");
-            });
+            .forEach(p -> {
+                sb.append(p).append("\n AND ");
+            });
         if (sb.length() > 0) {
             sb.setLength(sb.length() - "\n AND ".length());
         }
@@ -557,14 +557,14 @@ public class CGWorkloadExporter implements BundledApp {

     private String genUpdateSyntax(CqlTable table) {
         return """
-            update KEYSPACE.TABLE
-            set ASSIGNMENTS
-            where PREDICATES;
-            """
-            .replaceAll("KEYSPACE", table.getKeyspace().getName())
-            .replaceAll("TABLE", table.getName())
-            .replaceAll("PREDICATES", genPredicateTemplate(table, 0))
-            .replaceAll("ASSIGNMENTS", genAssignments(table));
+            update KEYSPACE.TABLE
+            set ASSIGNMENTS
+            where PREDICATES;
+            """
+            .replaceAll("KEYSPACE", table.getKeyspace().getName())
+            .replaceAll("TABLE", table.getName())
+            .replaceAll("PREDICATES", genPredicateTemplate(table, 0))
+            .replaceAll("ASSIGNMENTS", genAssignments(table));
     }

     private String genAssignments(CqlTable table) {
@@ -572,12 +572,12 @@ public class CGWorkloadExporter implements BundledApp {
         for (CqlTableColumn coldef : table.getNonKeyColumnDefinitions()) {
             if (coldef.isCounter()) {
                 sb.append(coldef.getName()).append("=")
-                    .append(coldef.getName()).append("+").append("{").append(binder.forColumn(coldef).getName()).append("}")
-                    .append(", ");
+                    .append(coldef.getName()).append("+").append("{").append(binder.forColumn(coldef).getName()).append("}")
+                    .append(", ");
             } else {
                 sb.append(coldef.getName()).append("=")
-                    .append("{").append(binder.forColumn(coldef).getName()).append("}")
-                    .append(", ");
+                    .append("{").append(binder.forColumn(coldef).getName()).append("}")
+                    .append(", ");
             }
         }
         if (sb.length() > 0) {
@@ -602,16 +602,16 @@ public class CGWorkloadExporter implements BundledApp {
         ((Map<String, String>) workload.get("bindings")).putAll(bindingslib.getAccumulatedBindings());

         DumpSettings dumpSettings = DumpSettings.builder()
-            .setDefaultFlowStyle(FlowStyle.BLOCK)
-            .setIndent(2)
-            .setDefaultScalarStyle(ScalarStyle.PLAIN)
-            .setMaxSimpleKeyLength(1000)
-            .setWidth(100)
-            .setSplitLines(true)
-            .setIndentWithIndicator(true)
-            .setMultiLineFlow(true)
-            .setNonPrintableStyle(NonPrintableStyle.ESCAPE)
-            .build();
+            .setDefaultFlowStyle(FlowStyle.BLOCK)
+            .setIndent(2)
+            .setDefaultScalarStyle(ScalarStyle.PLAIN)
+            .setMaxSimpleKeyLength(1000)
+            .setWidth(100)
+            .setSplitLines(true)
+            .setIndentWithIndicator(true)
+            .setMultiLineFlow(true)
+            .setNonPrintableStyle(NonPrintableStyle.ESCAPE)
+            .build();
         BaseRepresenter r;
         Dump dump = new Dump(dumpSettings);

@@ -637,11 +637,11 @@ public class CGWorkloadExporter implements BundledApp {
         dropTablesBlock.put("ops", ops);
         for (CqlTable table : model.getTableDefs()) {
             ops.put(
-                namer.nameFor(table, "optype", "drop", "blockname", blockname),
-                Map.of(
-                    "simple", "drop table if exists " + table.getFullName() + ";",
-                    "timeout", timeouts.get("drop")
-                )
+                namer.nameFor(table, "optype", "drop", "blockname", blockname),
+                Map.of(
+                    "simple", "drop table if exists " + table.getFullName() + ";",
+                    "timeout", timeouts.get("drop")
+                )
             );
         }
         return dropTablesBlock;
@@ -653,11 +653,11 @@ public class CGWorkloadExporter implements BundledApp {
         dropTypesBlock.put("ops", ops);
         for (CqlType type : model.getTypeDefs()) {
             ops.put(
-                namer.nameFor(type, "optype", "drop-type", "blockname", blockname),
-                Map.of(
-                    "simple", "drop type if exists " + type.getKeyspace() + "." + type.getName() + ";",
-                    "timeout", timeouts.get("drop")
-                )
+                namer.nameFor(type, "optype", "drop-type", "blockname", blockname),
+                Map.of(
+                    "simple", "drop type if exists " + type.getKeyspace() + "." + type.getName() + ";",
+                    "timeout", timeouts.get("drop")
+                )
             );
         }
         return dropTypesBlock;
@@ -669,11 +669,11 @@ public class CGWorkloadExporter implements BundledApp {
         dropTypesBlock.put("ops", ops);
         for (CqlType type : model.getTypeDefs()) {
             ops.put(
-                namer.nameFor(type, "optype", "drop-keyspace", "blockname", blockname),
-                Map.of(
-                    "simple", "drop keyspace if exists " + type.getKeyspace() + ";",
-                    "timeout", timeouts.get("drop")
-                )
+                namer.nameFor(type, "optype", "drop-keyspace", "blockname", blockname),
+                Map.of(
+                    "simple", "drop keyspace if exists " + type.getKeyspace() + ";",
+                    "timeout", timeouts.get("drop")
+                )
             );
         }
         return dropTypesBlock;
@@ -687,11 +687,11 @@ public class CGWorkloadExporter implements BundledApp {

         for (CqlTable table : model.getTableDefs()) {
             ops.put(
-                namer.nameFor(table, "optype", "truncate", "blockname", blockname),
-                Map.of(
-                    "simple", "truncate " + table.getFullName() + ";",
-                    "timeout", timeouts.get("truncate")
-                )
+                namer.nameFor(table, "optype", "truncate", "blockname", blockname),
+                Map.of(
+                    "simple", "truncate " + table.getFullName() + ";",
+                    "timeout", timeouts.get("truncate")
+                )
             );
         }
         return truncateblock;
@@ -703,11 +703,11 @@ public class CGWorkloadExporter implements BundledApp {

         for (CqlKeyspaceDef ks : model.getKeyspaceDefs()) {
             ops.put(
-                namer.nameFor(ks, "optype", "create", "blockname", blockname),
-                Map.of(
-                    "simple", genKeyspaceDDL(ks),
-                    "timeout", timeouts.get("create")
-                )
+                namer.nameFor(ks, "optype", "create", "blockname", blockname),
+                Map.of(
+                    "simple", genKeyspaceDDL(ks),
+                    "timeout", timeouts.get("create")
+                )
             );
         }

@@ -722,11 +722,11 @@ public class CGWorkloadExporter implements BundledApp {

         model.getTypeDefs().forEach(type -> {
             ops.put(
-                namer.nameFor(type,"optype","create","blockname",blockname),
-                Map.of(
-                    "simple",genTypeDDL(type),
-                    "timeout",timeouts.get("create")
-                )
+                namer.nameFor(type, "optype", "create", "blockname", blockname),
+                Map.of(
+                    "simple", genTypeDDL(type),
+                    "timeout", timeouts.get("create")
+                )
             );
         });

@@ -736,13 +736,13 @@ public class CGWorkloadExporter implements BundledApp {

     private String genKeyspaceDDL(CqlKeyspaceDef keyspace) {
         return """
-            create keyspace KEYSPACE
-            with replication = {REPLICATION}DURABLEWRITES?;
-            """
-            .replace("KEYSPACE", keyspace.getName())
-            .replace("REPLICATION", keyspace.getReplicationData())
-            .replace("DURABLEWRITES?", keyspace.isDurableWrites() ? "" : "\n and durable writes = false")
-            ;
+            create keyspace KEYSPACE
+            with replication = {REPLICATION}DURABLEWRITES?;
+            """
+            .replace("KEYSPACE", keyspace.getName())
+            .replace("REPLICATION", keyspace.getReplicationData())
+            .replace("DURABLEWRITES?", keyspace.isDurableWrites() ? "" : "\n and durable writes = false")
+            ;
     }

     private Map<String, Object> genCreateTablesOpTemplates(CqlModel model, String blockname) {
@@ -751,11 +751,11 @@ public class CGWorkloadExporter implements BundledApp {

         model.getTableDefs().forEach(table -> {
             ops.put(
-                namer.nameFor(table, "optype","create","blockname",blockname),
-                Map.of(
-                    "simple",genTableDDL(table),
-                    "timeout",timeouts.get("create")
-                )
+                namer.nameFor(table, "optype", "create", "blockname", blockname),
+                Map.of(
+                    "simple", genTableDDL(table),
+                    "timeout", timeouts.get("create")
+                )
             );
         });

@@ -766,14 +766,14 @@ public class CGWorkloadExporter implements BundledApp {

     private String genTypeDDL(CqlType type) {
         return """
-            create type KEYSPACE.TYPENAME (
-            TYPEDEF
-            );
-            """
-            .replace("KEYSPACE", type.getKeyspace().getName())
-            .replace("TYPENAME", type.getName())
-            .replace("TYPEDEF", type.getColumnDefs().stream()
-                .map(def -> def.getName() + " " + def.getTypedef()).collect(Collectors.joining(",\n")));
+            create type KEYSPACE.TYPENAME (
+            TYPEDEF
+            );
+            """
+            .replace("KEYSPACE", type.getKeyspace().getName())
+            .replace("TYPENAME", type.getName())
+            .replace("TYPEDEF", type.getColumnDefs().stream()
+                .map(def -> def.getName() + " " + def.getTypedef()).collect(Collectors.joining(",\n")));
     }

     private Object genTableDDL(CqlTable cqltable) {
@@ -782,16 +782,16 @@ public class CGWorkloadExporter implements BundledApp {
         }

         return """
-            create table if not exists KEYSPACE.TABLE (
-            COLUMN_DEFS,
-            primary key (PRIMARYKEY)
-            )CLUSTERING;
-            """
-            .replace("KEYSPACE", cqltable.getKeyspace().getName())
-            .replace("TABLE", cqltable.getName())
-            .replace("COLUMN_DEFS", genTableColumnDDL(cqltable))
-            .replace("PRIMARYKEY", genPrimaryKeyDDL(cqltable))
-            .replace("CLUSTERING", genTableClusteringOrderDDL(cqltable));
+            create table if not exists KEYSPACE.TABLE (
+            COLUMN_DEFS,
+            primary key (PRIMARYKEY)
+            )CLUSTERING;
+            """
+            .replace("KEYSPACE", cqltable.getKeyspace().getName())
+            .replace("TABLE", cqltable.getName())
+            .replace("COLUMN_DEFS", genTableColumnDDL(cqltable))
+            .replace("PRIMARYKEY", genPrimaryKeyDDL(cqltable))
+            .replace("CLUSTERING", genTableClusteringOrderDDL(cqltable));

     }

@@ -829,8 +829,8 @@ public class CGWorkloadExporter implements BundledApp {

     private String genTableColumnDDL(CqlTable cqltable) {
         return cqltable.getColumnDefs().stream()
-            .map(cd -> cd.getName() + " " + cd.getTrimmedTypedef())
-            .collect(Collectors.joining(",\n"));
+            .map(cd -> cd.getName() + " " + cd.getTrimmedTypedef())
+            .collect(Collectors.joining(",\n"));
     }

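A quick aside on how the exporter above is typically exercised end to end. This is a sketch under assumptions: the bundled-app name cqlgen, the nb5 binary name, and the file names here are not taken from this commit and may differ by release.

    # generate a workload template from a CQL schema file (hypothetical file names)
    nb5 cqlgen schema.cql workload.yaml
    # then run one of the scenario steps the exporter emits, e.g. the schema phase
    nb5 workload.yaml default.schema

The scenario strings built above (run driver=cql tags=block:'schema-*.*' ...) are exactly what such an invocation dispatches to.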
@@ -1,33 +1,29 @@
 # nb -v run driver=cql yaml=cql-iot tags=phase:schema host=dsehost
 description: An IOT workload with more optimal settings for DSE

 scenarios:
   default:
-    schema: run driver=cql tags==phase:schema threads==1 cycles==UNDEF
-    rampup: run driver=cql tags==phase:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
-    main: run driver=cql tags==phase:main cycles===TEMPLATE(main-cycles,10000000) threads=auto
+    schema: run driver=cql tags==block:schema threads==1 cycles==UNDEF
+    rampup: run driver=cql tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
+    main: run driver=cql tags==block:"main-*.*" cycles===TEMPLATE(main-cycles,10000000) threads=auto

 bindings:
   machine_id: Mod(<<sources:10000>>); ToHashedUUID() -> java.util.UUID
   sensor_name: HashedLineToString('data/variable_words.txt')
-  time: Mul(<<timespeed:100>>L); Div(<<sources:10000>>L); ToDate()
-  cell_timestamp: Mul(<<timespeed:100>>L); Div(<<sources:10000>>L); Mul(1000L)
+  time: Mul(<<timespeed:100>>L); Div(<<sources:10000>>L); ToJavaInstant()
+  cell_timestamp: Mul(<<timespeed:100>>L); Div(<<sources:10000>>L); Mul(1000L);
   sensor_value: Normal(0.0,5.0); Add(100.0) -> double
   station_id: Div(<<sources:10000>>);Mod(<<stations:100>>); ToHashedUUID() -> java.util.UUID
   data: HashedFileExtractToString('data/lorem_ipsum_full.txt',800,1200)
 blocks:
-  - tags:
-      phase: schema
+  schema:
     params:
       prepared: false
-    statements:
-    - create-keyspace: |
+    ops:
+      create-keyspace: |
        create keyspace if not exists <<keyspace:baselines>>
        WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '<<rf:1>>'}
        AND durable_writes = true;
-      tags:
-        name: create-keyspace
-    - create-table : |
+
+      create-table: |
        create table if not exists <<keyspace:baselines>>.<<table:iot>> (
        machine_id UUID, // source machine
        sensor_name text, // sensor name
@@ -45,63 +41,46 @@ blocks:
        'compaction_window_unit': 'MINUTES',
        'split_during_flush': true
        };
-      tags:
-        name: create-table
-    - truncate-table: |
-        truncate table <<keyspace:baselines>>.<<table:iot>>;
-      tags:
-        name: truncate-table
-  - tags:
-      phase: rampup
+
+      truncate-table: |
+        truncate table <<keyspace:baselines>>.<<table:iot>>;
+  rampup:
     params:
       cl: <<write_cl:LOCAL_QUORUM>>
-    statements:
-    - insert-rampup: |
-        insert into <<keyspace:baselines>>.<<table:iot>>
-        (machine_id, sensor_name, time, sensor_value, station_id, data)
-        values ({machine_id}, {sensor_name}, {time}, {sensor_value}, {station_id}, {data})
-        using timestamp {cell_timestamp}
-      idempotent: true
-      tags:
-        name: insert-rampup
-  - tags:
-      phase: verify
-      type: read
+      idempotent: true
+    ops:
+      rampup-insert: |
+        insert into <<keyspace:baselines>>.<<table:iot>> (machine_id, sensor_name, time, sensor_value, station_id, data)
+        values ({machine_id}, {sensor_name}, {time}, {sensor_value}, {station_id}, {data})
+        using timestamp {cell_timestamp};
+  verify:
     params:
       ratio: 1
       cl: <<read_cl:LOCAL_QUORUM>>
-    statements:
-    - select-verify: |
-        select * from <<keyspace:baselines>>.<<table:iot>>
-        where machine_id={machine_id} and sensor_name={sensor_name} and time={time};
-      verify-fields: "*, -cell_timestamp"
-      tags:
-        name: select-verify
-  - tags:
-      phase: main
-      type: read
+      verify-fields: "*, -cell_timestamp"
+    ops:
+      select-verify: |
+        select * from <<keyspace:baselines>>.<<table:iot>> where machine_id={machine_id}
+        and sensor_name={sensor_name} and time={time};
+
+  main-read:
     params:
       ratio: <<read_ratio:1>>
       cl: <<read_cl:LOCAL_QUORUM>>
-    statements:
-    - select-read: |
-        select * from <<keyspace:baselines>>.<<table:iot>>
-        where machine_id={machine_id} and sensor_name={sensor_name}
-        limit <<limit:10>>
-      tags:
-        name: select-read
-  - tags:
-      phase: main
-      type: write
+    ops:
+      select-read: |
+        select * from <<keyspace:baselines>>.<<table:iot>>
+        where machine_id={machine_id} and sensor_name={sensor_name}
+        limit <<limit:10>>;
+  main-write:
     params:
       ratio: <<write_ratio:9>>
       cl: <<write_cl:LOCAL_QUORUM>>
-    statements:
-    - insert-main: |
-        insert into <<keyspace:baselines>>.<<table:iot>>
-        (machine_id, sensor_name, time, sensor_value, station_id, data)
-        values ({machine_id}, {sensor_name}, {time}, {sensor_value}, {station_id}, {data})
-        using timestamp {cell_timestamp}
-      idempotent: true
-      tags:
-        name: insert-main
+      idempotent: true
+    ops:
+      insert-main: |
+        insert into <<keyspace:baselines>>.<<table:iot>>
+        (machine_id, sensor_name, time, sensor_value, station_id, data)
+        values ({machine_id}, {sensor_name}, {time}, {sensor_value}, {station_id}, {data})
+        using timestamp {cell_timestamp};
@@ -1,4 +1,3 @@
-# nb -v run driver=cql yaml=cql-iot tags=phase:schema host=dsehost
 description: |
   Time-series data model and access patterns. (use cql-timeseries instead)
   This is the same a cql-timeseries, which is the preferred name as it is
@@ -1,14 +1,15 @@
+min_version: "5.17.1"
 description: A workload with only text keys and text values

 scenarios:
   default:
-    schema: run driver=cql tags==phase:schema threads==1 cycles==UNDEF
-    rampup: run driver=cql tags==phase:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
-    main: run driver=cql tags==phase:main cycles===TEMPLATE(main-cycles,10000000) threads=auto
+    schema: run driver=cql tags==block:schema threads==1 cycles==UNDEF
+    rampup: run driver=cql tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
+    main: run driver=cql tags==block:'main-*.*' cycles===TEMPLATE(main-cycles,10000000) threads=auto
   astra:
-    schema: run driver=cql tags==phase:schema-astra threads==1 cycles==UNDEF
-    rampup: run driver=cql tags==phase:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
-    main: run driver=cql tags==phase:main cycles===TEMPLATE(main-cycles,10000000) threads=auto
+    schema: run driver=cql tags==block:schema-astra threads==1 cycles==UNDEF
+    rampup: run driver=cql tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
+    main: run driver=cql tags==block:'main-*.*' cycles===TEMPLATE(main-cycles,10000000) threads=auto

 bindings:
   seq_key: Mod(<<keycount:1000000000>>); ToString() -> String
@@ -17,80 +18,53 @@ bindings:
   rw_value: Hash(); <<valdist:Uniform(0,1000000000)->int>>; ToString() -> String

 blocks:
-  - name: schema
-    tags:
-      phase: schema
+  schema:
     params:
       prepared: false
-    statements:
-    - create-table: |
+    ops:
+      create-table: |
        create table if not exists <<keyspace:baselines>>.<<table:keyvalue>> (
        key text,
        value text,
        PRIMARY KEY (key)
        );
-      tags:
-        name: create-table
-  - name: schema-astra
-    tags:
-      phase: schema-astra
+
+  schema-astra:
     params:
       prepared: false
-    statements:
-    - create-table: |
+    ops:
+      create-table: |
        create table if not exists <<keyspace:baselines>>.<<table:keyvalue>> (
        key text,
        value text,
        PRIMARY KEY (key)
        );
-      tags:
-        name: create-table-astra
-  - name: rampup
-    tags:
-      phase: rampup
+  rampup:
     params:
       cl: <<write_cl:LOCAL_QUORUM>>
-    statements:
-    - rampup-insert: |
+    ops:
+      rampup-insert: |
        insert into <<keyspace:baselines>>.<<table:keyvalue>>
        (key, value)
        values ({seq_key},{seq_value});
-      tags:
-        name: rampup-insert
-  - name: verify
-    tags:
-      phase: verify
-      type: read
+  verify:
     params:
       cl: <<read_cl:LOCAL_QUORUM>>
-    statements:
-    - verify-select: |
+      verify-fields: key->seq_key, value->seq_value
+    ops:
+      verify-select: |
        select * from <<keyspace:baselines>>.<<table:keyvalue>> where key={seq_key};
-      verify-fields: key->seq_key, value->seq_value
-      tags:
-        name: verify
-  - name: main-read
-    tags:
-      phase: main
-      type: read
+  main-read:
     params:
       ratio: 5
       cl: <<read_cl:LOCAL_QUORUM>>
-    statements:
-    - main-select: |
+    ops:
+      main-select: |
        select * from <<keyspace:baselines>>.<<table:keyvalue>> where key={rw_key};
-      tags:
-        name: main-select
-  - name: main-write
-    tags:
-      phase: main
-      type: write
+  main-write:
     params:
       ratio: 5
       cl: <<write_cl:LOCAL_QUORUM>>
-    statements:
-    - main-insert: |
-        insert into <<keyspace:baselines>>.<<table:keyvalue>>
-        (key, value) values ({rw_key}, {rw_value});
-      tags:
-        name: main-insert
+    ops:
+      main-insert: |
+        insert into <<keyspace:baselines>>.<<table:keyvalue>> (key, value) values ({rw_key}, {rw_value});
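Worth noting for the workloads in this commit: the <<name:default>> and TEMPLATE(name,default) forms are template variables with inline defaults, and they can be overridden on the command line. A minimal sketch, assuming an nb5 binary and the scenario names from the file above:

    # run the rampup step with a smaller cycle count and a non-default keyspace
    nb5 cql-keyvalue default.rampup rampup-cycles=100000 keyspace=testks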
@@ -1,14 +1,15 @@
+min_version: "5.17.1"
 description: A tabular workload with partitions, clusters, and data fields

 scenarios:
   default:
-    schema: run driver=cql tags==phase:schema threads==1 cycles==UNDEF
-    rampup: run driver=cql tags==phase:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
-    main: run driver=cql tags==phase:main cycles===TEMPLATE(main-cycles,10000000) threads=auto
+    schema: run driver=cql tags==block:schema threads==1 cycles==UNDEF
+    rampup: run driver=cql tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
+    main: run driver=cql tags==block:main-*.* cycles===TEMPLATE(main-cycles,10000000) threads=auto
   astra:
-    schema: run driver=cql tags==phase:schema-astra threads==1 cycles==UNDEF
-    rampup: run driver=cql tags==phase:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
-    main: run driver=cql tags==phase:main cycles===TEMPLATE(main-cycles,10000000) threads=auto
+    schema: run driver=cql tags==block:schema-astra threads==1 cycles==UNDEF
+    rampup: run driver=cql tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
+    main: run driver=cql tags==block:main-*.* cycles===TEMPLATE(main-cycles,10000000) threads=auto

 bindings:
   # for ramp-up and verify
@@ -25,88 +26,60 @@ bindings:
   data_write: Hash(); HashedFileExtractToString('data/lorem_ipsum_full.txt',50,150) -> String

 blocks:
-  - name: schema
-    tags:
-      phase: schema
+  schema:
     params:
       prepared: false
-    statements:
-    - create-keyspace: |
+    ops:
+      create-keyspace: |
        create keyspace if not exists <<keyspace:baselines>>
        WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '<<rf:1>>'}
        AND durable_writes = true;
-      tags:
-        name: create-keyspace
-    - create-table: |
+      create-table: |
        create table if not exists <<keyspace:baselines>>.<<table:tabular>> (
        part text,
        clust text,
        data text,
        PRIMARY KEY (part,clust)
        );
-      tags:
-        name: create-table
-  - name: schema-astra
-    tags:
-      phase: schema-astra
+  schema-astra:
     params:
       prepared: false
-    statements:
-    - create-table: |
+    ops:
+      create-table: |
        create table if not exists <<keyspace:baselines>>.<<table:tabular>> (
        part text,
        clust text,
        data text,
        PRIMARY KEY (part,clust)
        );
-      tags:
-        name: create-table-astra
-  - name: rampup
-    tags:
-      phase: rampup
+  rampup:
     params:
       cl: <<write_cl:LOCAL_QUORUM>>
-    statements:
-    - rampup-insert: |
+    ops:
+      rampup-insert: |
        insert into <<keyspace:baselines>>.<<table:tabular>>
        (part,clust,data)
        values ({part_layout},{clust_layout},{data})
-      tags:
-        name: rampup-insert
-  - name: verify
-    tags:
-      phase: verify
-      type: read
+  verify:
     params:
       cl: <<read_cl:LOCAL_QUORUM>>
-    statements:
-    - verify-select: |
-        select * from <<keyspace:baselines>>.<<table:tabular>> where part={part_layout} and clust={clust_layout}
-      tags:
-        name: verify-select
-  - name: main-read
-    tags:
-      phase: main
-      type: read
+    ops:
+      verify-select: |
+        select * from <<keyspace:baselines>>.<<table:tabular>> where part={part_layout} and clust={clust_layout};
+
+  main-read:
     params:
       ratio: 5
       cl: <<read_cl:LOCAL_QUORUM>>
-    statements:
-    - main-select: |
+    ops:
+      main-select: |
        select * from <<keyspace:baselines>>.<<table:tabular>> where part={part_read} limit {limit};
-      tags:
-        name: main-select
-  - name: main-write
-    tags:
-      phase: main
-      type: write
+
+  main-write:
     params:
       ratio: 5
       cl: <<write_cl:LOCAL_QUORUM>>
-    statements:
-    - main-write: |
+    ops:
+      main-write: |
        insert into <<keyspace:baselines>>.<<table:tabular>>
-        (part, clust, data)
-        values ({part_write},{clust_write},{data_write})
-      tags:
-        name: main-write
+        (part, clust, data) values ({part_write},{clust_write},{data_write});
@@ -1,13 +1,13 @@
-min_version: "4.17.15"
+min_version: "5.17.1"

 description: creates local graphs which resemble a wagon-wheel topology, using
   DSE Graph, version 6.8 or newer

 scenarios:
   default:
-    creategraph: run driver=cqld4 graphname=graph_wheels tags=phase:create-graph cycles===UNDEF
-    schema: run driver=cqld4 graphname=graph_wheels tags=phase:graph-schema cycles===UNDEF
-    rampup: run driver==cqld4 graphname=graph_wheels tags=phase:rampup cycles=1
+    creategraph: run driver=cqld4 graphname=graph_wheels tags=block:create-graph cycles===UNDEF
+    schema: run driver=cqld4 graphname=graph_wheels tags=block:graph-schema cycles===UNDEF
+    rampup: run driver==cqld4 graphname=graph_wheels tags=block:rampup cycles=1
     drop-graph: run driver=cqld4 graphname=graph_wheels tags=block:drop-graph cycles===UNDEF
     creategraph-classic: run driver=cqld4 graphname=graph_wheels tags=block:create-graph-classic cycles===UNDEF
     fluent: run driver=cqld4 graphname=graph_wheels tags=block:fluent cycles=10
@@ -40,16 +40,12 @@ blocks:
         .classicEngine()
         .create()
   create-graph:
-    tags:
-      phase: create-graph
     statements:
       creategraph:
         type: gremlin
         script: >-
           system.graph('<<graphname:graph_wheels>>').ifNotExists().create()
   create-schema:
-    tags:
-      phase: graph-schema
     statements:
       graph-schema:
         type: gremlin
@@ -78,7 +74,7 @@ blocks:
         .create()
   dev-mode:
     tags:
-      phase: dev-mode
+      block: dev-mode
     statements:
       dev-mode:
         type: gremlin
@@ -87,7 +83,7 @@ blocks:
           schema.config().option('graph.schema_mode').set('Development');
   prod-mode:
     tags:
-      phase: prod-mode
+      block: prod-mode
     statements:
       prod-mode:
         type: gremlin
@@ -96,7 +92,7 @@ blocks:
           schema.config().option('graph.schema_mode').set('Production');
   rampup:
     tags:
-      phase: rampup
+      block: rampup
     statements:
       main-add:
         type: gremlin
@@ -1,3 +1,4 @@
+min_version: "5.17.1"
 description: |
   This is a workload which creates an incrementally growing dataset over cycles.

@@ -26,16 +27,13 @@ description: |

 scenarios:
   default:
-    schema: run tags=phase:schema threads==1
-    # rampup: run tags=phase:rampup cycles===TEMPLATE(rampup-cycles,100000) threads=auto
-    main: run tags=block:"main.*" cycles===TEMPLATE(main-cycles,0) threads=auto
-    default-schema: run tags=block:schema threads==1
-    # default-rampup: run tags=phase:rampup cycles===TEMPLATE(rampup-cycles,100000) threads=auto
-    default-main: run tags=block:"main.* cycles===TEMPLATE(main-cycles,0) threads=auto
+    schema: run tags=block:schema.* threads==1
+    main: run tags=block:main-.*.* cycles===TEMPLATE(main-cycles,0) threads=auto
+    default-schema: run tags=block:'schema.*' threads==1
+    default-main: run tags=block:'main.*' cycles===TEMPLATE(main-cycles,0) threads=auto
   astra:
     schema: run tags=block:astra-schema threads==1
     # rampup: run tags=phase:rampup cycles===TEMPLATE(rampup-cycles,0) threads=auto
-    main: run tags=block:"main.*" cycles===TEMPLATE(main-cycles,0) threads=auto
+    main: run tags=block:'main.*' cycles===TEMPLATE(main-cycles,0) threads=auto

 params:
   instrument: true
@@ -1,4 +1,4 @@
-min_version: "4.17.15"
+min_version: "5.17.1"

 description: |
   A workload with only text keys and text values which range in size from 50K to 150K.
@@ -83,5 +83,4 @@ blocks:
       cl: TEMPLATE(write_cl,LOCAL_QUORUM)
     statements:
       main-insert: |
-        insert into TEMPLATE(keyspace,baselines).TEMPLATE(table,keyvalue)
-        (key, value) values ({rw_key}, {rw_value});
+        insert into TEMPLATE(keyspace,baselines).TEMPLATE(table,keyvalue) (key, value) values ({rw_key}, {rw_value});
@@ -1,4 +1,4 @@
-min_version: "4.17.15"
+min_version: "5.17.1"

 description: |
   A workload with only text keys and text values.
@@ -1,4 +1,4 @@
-min_version: "4.17.15"
+min_version: "5.17.1"

 description: |
   A tabular workload with partitions, clusters, and data fields
@@ -29,11 +29,11 @@ scenarios:
   default:
     schema: run driver=cql tags==block:schema threads==1 cycles==UNDEF
     rampup: run driver=cql tags==block:rampup cycles===TEMPLATE(rampup-cycles,10B) threads=auto
-    main: run driver=cql tags==block:"main.*" cycles===TEMPLATE(main-cycles,100M) threads=auto
+    main: run driver=cql tags==block:"main-*.*" cycles===TEMPLATE(main-cycles,100M) threads=auto
   astra:
     schema: run driver=cql tags==block:schema-astra threads==1 cycles==UNDEF
     rampup: run driver=cql tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
-    main: run driver=cql tags==block:"main.*" cycles===TEMPLATE(main-cycles,10000000) threads=auto
+    main: run driver=cql tags==block:"main-*.*" cycles===TEMPLATE(main-cycles,10000000) threads=auto

 params:
   instrument: true
@@ -1,4 +1,4 @@
-min_version: "4.17.15"
+min_version: "5.17.1"

 description: |
   This workload emulates a time-series data model and access patterns.
@@ -7,11 +7,11 @@ scenarios:
   default:
     schema: run driver=cql tags==block:schema threads==1 cycles==UNDEF
     rampup: run driver=cql tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
-    main: run driver=cql tags==block:"main.*" cycles===TEMPLATE(main-cycles,10000000) threads=auto
+    main: run driver=cql tags==block:"main-*.*" cycles===TEMPLATE(main-cycles,10000000) threads=auto
   astra:
     schema: run driver=cql tags==block:schema-astra threads==1 cycles==UNDEF
     rampup: run driver=cql tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
-    main: run driver=cql tags==block:"main.*" cycles===TEMPLATE(main-cycles,10000000) threads=auto
+    main: run driver=cql tags==block:"main-*.*" cycles===TEMPLATE(main-cycles,10000000) threads=auto

 params:
   instrument: TEMPLATE(instrument,false)
@@ -82,11 +82,12 @@ blocks:
       ratio: 1
       cl: TEMPLATE(read_cl,LOCAL_QUORUM)
       instrument: TEMPLATE(instrument-reads,TEMPLATE(instrument,false))
+      verify-fields: "*, -cell_timestamp"
     ops:
       select-verify: |
         select * from TEMPLATE(keyspace,baselines).TEMPLATE(table,iot)
         where machine_id={machine_id} and sensor_name={sensor_name} and time={time};
-      verify-fields: "*, -cell_timestamp"
+
   main-read:
     params:
       ratio: TEMPLATE(read_ratio,1)
@@ -96,7 +97,7 @@ blocks:
       select-read: |
         select * from TEMPLATE(keyspace,baselines).TEMPLATE(table,iot)
         where machine_id={machine_id} and sensor_name={sensor_name}
-        limit TEMPLATE(limit,10)
+        limit TEMPLATE(limit,10);
   main-write:
     params:
       ratio: TEMPLATE(write_ratio,9)
@@ -108,4 +109,4 @@ blocks:
         insert into TEMPLATE(keyspace,baselines).TEMPLATE(table,iot)
         (machine_id, sensor_name, time, sensor_value, station_id, data)
         values ({machine_id}, {sensor_name}, {time}, {sensor_value}, {station_id}, {data})
-        using timestamp {cell_timestamp}
+        using timestamp {cell_timestamp};
@@ -1,9 +1,10 @@
-min_version: "4.17.15"
+min_version: "5.17.1"

 scenarios:
   default:
-    schema: run driver=cql tags==phase:schema cycles==UNDEF threads==1
-    rampup: run driver=cql tags==phase:rampup cycles=TEMPLATE(rampup-cycles,100K) threads=auto
+    schema: run driver=cql tags==block:schema cycles==UNDEF threads==1
+    rampup: run driver=cql tags==block:rampup cycles=TEMPLATE(rampup-cycles,100K) threads=auto
     main: run driver=cql tags==block:"main" cycles===TEMPLATE(main-cycles,100K) threads=auto

 bindings:
   userid: Template('user-{}',ToString()); SaveString('userid');
@@ -1,18 +1,18 @@
 description: Auto-generated workload from source schema.
 scenarios:
   default:
-    schema: run driver=cql tags=block:schema.* threads===UNDEF cycles===UNDEF
-    rampup: run driver=cql tags=block:rampup.* threads=auto cycles===TEMPLATE(rampup-cycles,10000)
-    main: run driver=cql tags=block:main.* threads=auto cycles===TEMPLATE(main-cycles,10000)
+    schema: run driver=cql tags=block:'schema.*' threads===UNDEF cycles===UNDEF
+    rampup: run driver=cql tags=block:'rampup.*' threads=auto cycles===TEMPLATE(rampup-cycles,10000)
+    main: run driver=cql tags=block:'main.*' threads=auto cycles===TEMPLATE(main-cycles,10000)
     main-insert: run driver=cql tags=block:main-insert threads=auto cycles===TEMPLATE(main-cycles,10000)
     main-select: run driver=cql tags=block:main-select threads=auto cycles===TEMPLATE(main-cycles,10000)
     main-scan: run driver=cql tags=block:main-scan threads=auto cycles===TEMPLATE(main-cycles,10000)
     main-update: run driver=cql tags=block:main-update threads=auto cycles===TEMPLATE(main-cycles,10000)
-    truncate: run driver=cql tags=block:truncate.* threads===UNDEF cycles===UNDEF
+    truncate: run driver=cql tags=block:'truncate.*' threads===UNDEF cycles===UNDEF
     schema-keyspaces: run driver=cql tags=block:schema-keyspaces threads===UNDEF cycles===UNDEF
     schema-types: run driver=cql tags=block:schema-types threads===UNDEF cycles===UNDEF
     schema-tables: run driver=cql tags=block:schema-tables threads===UNDEF cycles===UNDEF
-    drop: run driver=cql tags=block:drop.* threads===UNDEF cycles===UNDEF
+    drop: run driver=cql tags=block:'drop.*' threads===UNDEF cycles===UNDEF
    drop-tables: run driver=cql tags=block:drop-tables threads===UNDEF cycles===UNDEF
    drop-types: run driver=cql tags=block:drop-types threads===UNDEF cycles===UNDEF
    drop-keyspaces: run driver=cql tags=block:drop-keyspaces threads===UNDEF cycles===UNDEF
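Because the generated scenarios select ops purely by block-tag globs, individual blocks can also be targeted directly. A brief sketch using names from the template above (the nb5 binary and the workload.yaml file name are assumptions):

    # run only the insert block of the main phase
    nb5 workload.yaml main-insert
    # equivalently, pass the tag filter by hand, as in the document's own examples
    nb5 run driver=cql yaml=workload.yaml tags=block:main-insert cycles=10000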
@@ -1,4 +1,4 @@
-min_version: "4.17.15"
+min_version: "5.17.1"

 description: |
   A workload with only text keys and text values. This is based on the CQL keyvalue workloads as found
@@ -6,9 +6,9 @@ description: |

 scenarios:
   default:
-    schema: run driver=dynamodb tags==block:schema threads==1 cycles==UNDEF
+    schema: run driver=dynamodb tags==block:'schema.*' threads==1 cycles==UNDEF
     rampup: run driver=dynamodb tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
-    main: run driver=dynamodb tags=="block:main.*" cycles===TEMPLATE(main-cycles,10000000) threads=auto
+    main: run driver=dynamodb tags==block:'main-*.*' cycles===TEMPLATE(main-cycles,10000000) threads=auto
     read: run driver=dynamodb tags==block:main-read cycles===TEMPLATE(main-cycles,10000000) threads=auto
     write: run driver=dynamodb tags==block:main-write cycles===TEMPLATE(main-cycles,10000000) threads=auto

@@ -1,13 +1,13 @@
-min_version: "4.17.15"
+min_version: "5.17.1"

 description: |
   Run a read/write workload against DynamoDB with varying field sizes and query patterns

 scenarios:
-  schema: run driver=dynamodb tags=block:schema region=us-east-1
+  schema: run driver=dynamodb tags=block:'schema.*' region=us-east-1
   rampup: run driver=dynamodb tags=block:rampup region=us-east-1
   read: run driver=dynamodb tags=block:read region=us-east-1
-  main: run driver=dynamodb tags=block:"main.*" region=us-east-1
+  main: run driver=dynamodb tags=block:'main-*.*' region=us-east-1
   read01: run driver=dynamodb tags='name:.*main-read-01' region=us-east-1
   delete:
     table: run driver=dynamodb tags==block:delete threads==1 cycles==UNDEF
@@ -1,4 +1,4 @@
-min_version: "4.17.15"
+min_version: "5.17.1"

 description: |
   This workload emulates a time-series data model and access patterns. This is based on the
@@ -11,7 +11,7 @@ description: |

 scenarios:
   default:
-    schema: run driver=dynamodb tags==block:schema threads==1 cycles==UNDEF
+    schema: run driver=dynamodb tags==block:'schema.*' threads==1 cycles==UNDEF
     rampup: run driver=dynamodb tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
     main: run driver=dynamodb tags==block:main cycles===TEMPLATE(main-cycles,10000000) threads=auto
   delete:
@@ -1,4 +1,4 @@
-min_version: "4.17.15"
+min_version: "5.17.1"

 description: |
   This workload emulates a key-value data model and access patterns.
@@ -9,8 +9,8 @@ description: |
 scenarios:
   default:
     schema: run driver=http tags==block:schema threads==1 cycles==UNDEF
-    rampup: run driver=http tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
-    main: run driver=http tags==block:main cycles===TEMPLATE(main-cycles,10000000) threads=auto
+    rampup: run driver=http tags==block:"rampup-*.*" cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
+    main: run driver=http tags==block:"main-*.*" cycles===TEMPLATE(main-cycles,10000000) threads=auto

 bindings:
   # To enable an optional weighted set of hosts in place of a load balancer
@@ -19,11 +19,13 @@ bindings:
   # multiple hosts: restapi_host=host1,host2,host3
   # multiple weighted hosts: restapi_host=host1:3,host2:7
   weighted_hosts: WeightedStrings('<<restapi_host:stargate>>')
-  # http request id
+
   request_id: ToHashedUUID(); ToString();
+  request_token: ToString(); TextOfFile("TEMPLATE(stargate_tokenfile,data/stargate_token.txt)")

   seq_key: Mod(<<keycount:10000000>>); ToString() -> String
   seq_value: Hash(); Mod(<<valuecount:1000000000>>); ToString() -> String

   rw_key: <<keydist:Uniform(0,10000000)->int>>; ToString() -> String
   rw_value: Hash(); <<valdist:Uniform(0,1000000000)->int>>; ToString() -> String

@@ -35,7 +37,7 @@ blocks:
       uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/schemas/keyspaces
       Accept: "application/json"
      X-Cassandra-Request-Id: "{request_id}"
-      X-Cassandra-Token: "<<auth_token:my_auth_token>>"
+      X-Cassandra-Token: "{request_token}"
       Content-Type: "application/json"
      body: |
        {
@@ -48,7 +50,7 @@ blocks:
       uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/schemas/keyspaces/<<keyspace:baselines>>/tables/<<table:keyvalue>>
       Accept: "application/json"
      X-Cassandra-Request-Id: "{request_id}"
-      X-Cassandra-Token: "<<auth_token:my_auth_token>>"
+      X-Cassandra-Token: "{request_token}"
       Content-Type: "application/json"
      ok-status: "[2-4][0-9][0-9]"

@@ -57,7 +59,7 @@ blocks:
       uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/schemas/keyspaces/<<keyspace:baselines>>/tables
       Accept: "application/json"
      X-Cassandra-Request-Id: "{request_id}"
-      X-Cassandra-Token: "<<auth_token:my_auth_token>>"
+      X-Cassandra-Token: "{request_token}"
       Content-Type: "application/json"
      body: |
        {
@@ -87,7 +89,7 @@ blocks:
       uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/schemas/keyspaces/<<keyspace:baselines>>/tables
       Accept: "application/json"
      X-Cassandra-Request-Id: "{request_id}"
-      X-Cassandra-Token: "<<auth_token:my_auth_token>>"
+      X-Cassandra-Token: "{request_token}"
       Content-Type: "application/json"
      body: |
        {
@@ -117,7 +119,7 @@ blocks:
       uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/keyspaces/<<keyspace:baselines>>/<<table:keyvalue>>
       Accept: "application/json"
      X-Cassandra-Request-Id: "{request_id}"
-      X-Cassandra-Token: "<<auth_token:my_auth_token>>"
+      X-Cassandra-Token: "{request_token}"
       Content-Type: "application/json"
      body: |
        {
@@ -134,7 +136,7 @@ blocks:
       uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/keyspaces/<<keyspace:baselines>>/<<table:keyvalue>>/{rw_key}
       Accept: "application/json"
      X-Cassandra-Request-Id: "{request_id}"
-      X-Cassandra-Token: "<<auth_token:my_auth_token>>"
+      X-Cassandra-Token: "{request_token}"
       Content-Type: "application/json"
      ok-status: "[2-4][0-9][0-9]"

@@ -147,7 +149,7 @@ blocks:
       uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/keyspaces/<<keyspace:baselines>>/<<table:keyvalue>>
       Accept: "application/json"
      X-Cassandra-Request-Id: "{request_id}"
-      X-Cassandra-Token: "<<auth_token:my_auth_token>>"
+      X-Cassandra-Token: "{request_token}"
       Content-Type: "application/json"
      body: |
        {
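One practical note on the header change running through these hunks: replacing the static <<auth_token:...>> value with the {request_token} binding means the token is read from the file named by the TextOfFile binding in the bindings section. A minimal sketch of preparing that file (the default path comes from this diff; the token value is a placeholder):

    # put a real Stargate auth token where the workload expects it
    mkdir -p data
    echo "REPLACE-WITH-REAL-TOKEN" > data/stargate_token.txt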
@@ -1,4 +1,4 @@
-min_version: "4.17.15"
+min_version: "5.17.1"

 description: |
   This workload emulates a tabular workload with partitions, clusters, and data fields.
@@ -9,9 +9,9 @@ description: |

 scenarios:
   default:
-    schema: run driver=http tags==block:schema threads==1 cycles==UNDEF
-    rampup: run driver=http tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
-    main: run driver=http tags==block:main cycles===TEMPLATE(main-cycles,10000000) threads=auto
+    schema: run driver=http tags==block:'schema.*' threads==1 cycles==UNDEF
+    rampup: run driver=http tags==block:'rampup-*.*' cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
+    main: run driver=http tags==block:'main-*.*' cycles===TEMPLATE(main-cycles,10000000) threads=auto

 bindings:
   # To enable an optional weighted set of hosts in place of a load balancer
@@ -20,16 +20,19 @@ bindings:
   # multiple hosts: restapi_host=host1,host2,host3
   # multiple weighted hosts: restapi_host=host1:3,host2:7
   weighted_hosts: WeightedStrings('<<restapi_host:stargate>>')
-  # http request id
   request_id: ToHashedUUID(); ToString();
+  request_token: ToString(); TextOfFile("TEMPLATE(stargate_tokenfile,data/stargate_token.txt)")
+
   # for ramp-up and verify
   part_layout: Div(<<partsize:1000000>>); ToString() -> String
   clust_layout: Mod(<<partsize:1000000>>); ToString() -> String
   data: HashedFileExtractToString('data/lorem_ipsum_full.txt',50,150); URLEncode();

   # for read
   limit: Uniform(1,10) -> int
   part_read: Uniform(0,<<partcount:100>>)->int; ToString() -> String
   clust_read: Add(1); Uniform(0,<<partsize:1000000>>)->int; ToString() -> String

   # for write
   part_write: Hash(); Uniform(0,<<partcount:100>>)->int; ToString() -> String
   clust_write: Hash(); Add(1); Uniform(0,<<partsize:1000000>>)->int; ToString() -> String
@@ -43,7 +46,7 @@ blocks:
       uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/schemas/keyspaces
       Accept: "application/json"
      X-Cassandra-Request-Id: "{request_id}"
-      X-Cassandra-Token: "<<auth_token:my_auth_token>>"
+      X-Cassandra-Token: "{request_token}"
       Content-Type: "application/json"
      body: |
        {
@@ -56,7 +59,7 @@ blocks:
       uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/schemas/keyspaces/<<keyspace:baselines>>/tables/<<table:tabular>>
       Accept: "application/json"
      X-Cassandra-Request-Id: "{request_id}"
-      X-Cassandra-Token: "<<auth_token:my_auth_token>>"
+      X-Cassandra-Token: "{request_token}"
       Content-Type: "application/json"
      ok-status: "[2-4][0-9][0-9]"

@@ -65,7 +68,7 @@ blocks:
       uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/schemas/keyspaces/<<keyspace:baselines>>/tables
       Accept: "application/json"
      X-Cassandra-Request-Id: "{request_id}"
-      X-Cassandra-Token: "<<auth_token:my_auth_token>>"
+      X-Cassandra-Token: "{request_token}"
       Content-Type: "application/json"
      body: |
        {
@@ -102,7 +105,7 @@ blocks:
       uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/keyspaces/<<keyspace:baselines>>/<<table:tabular>>
       Accept: "application/json"
      X-Cassandra-Request-Id: "{request_id}"
-      X-Cassandra-Token: "<<auth_token:my_auth_token>>"
+      X-Cassandra-Token: "{request_token}"
       Content-Type: "application/json"
      body: |
        {
@@ -120,7 +123,7 @@ blocks:
       uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/keyspaces/<<keyspace:baselines>>/<<table:tabular>>/{part_read}&page-size={limit}
       Accept: "application/json"
      X-Cassandra-Request-Id: "{request_id}"
-      X-Cassandra-Token: "<<auth_token:my_auth_token>>"
+      X-Cassandra-Token: "{request_token}"
       Content-Type: "application/json"

   main-write:
@ -132,7 +135,7 @@ blocks:
|
||||
uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/keyspaces/<<keyspace:baselines>>/<<table:tabular>>
|
||||
Accept: "application/json"
|
||||
X-Cassandra-Request-Id: "{request_id}"
|
||||
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
|
||||
X-Cassandra-Token: "{request_token}"
|
||||
Content-Type: "application/json"
|
||||
body: |
|
||||
{
|
||||
|
@ -1,4 +1,4 @@
min_version: "4.17.15"
min_version: "5.17.1"

description: |
This workload emulates a time-series data model and access patterns.
@ -12,10 +12,10 @@ description: |

scenarios:
default:
schema: run driver=cql tags==block:schema threads==1 cycles==UNDEF
schema: run driver=cql tags==block:'schema-*.*' threads==1 cycles==UNDEF
schema-astra: run driver=cql tags==block:schema-astra threads==1 cycles==UNDEF
rampup: run driver=http tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
main: run driver=http tags==block:"main.*" cycles===TEMPLATE(main-cycles,10000000) threads=auto
rampup: run driver=http tags==block:'rampup-*.*' cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
main: run driver=http tags==block:'main-*.*' cycles===TEMPLATE(main-cycles,10000000) threads=auto

bindings:
# To enable an optional weighted set of hosts in place of a load balancer
@ -24,8 +24,10 @@ bindings:
# multiple hosts: restapi_host=host1,host2,host3
# multiple weighted hosts: restapi_host=host1:3,host2:7
weighted_hosts: WeightedStrings('<<restapi_host:stargate>>')

# http request id
request_id: ToHashedUUID(); ToString();
request_token: ToString(); TextOfFile("TEMPLATE(stargate_tokenfile,data/stargate_token.txt)")

machine_id: Mod(<<sources:10000>>); ToHashedUUID() -> java.util.UUID
sensor_name: HashedLineToString('data/variable_words.txt')
@ -42,7 +44,7 @@ blocks:
uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/schemas/keyspaces
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
X-Cassandra-Token: "{request_token}"
Content-Type: "application/json"
body: |
{
@ -55,7 +57,7 @@ blocks:
uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/schemas/keyspaces/<<keyspace:baselines>>/tables/<<table:iot>>
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
X-Cassandra-Token: "{request_token}"
Content-Type: "application/json"
ok-status: "[2-4][0-9][0-9]"

@ -64,7 +66,7 @@ blocks:
uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/schemas/keyspaces/<<keyspace:baselines>>/tables
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
X-Cassandra-Token: "{request_token}"
Content-Type: "application/json"
body: |
{
@ -123,7 +125,7 @@ blocks:
uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/keyspaces/<<keyspace:baselines>>/<<table:iot>>
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
X-Cassandra-Token: "{request_token}"
Content-Type: "application/json"
body: |
{
@ -144,7 +146,7 @@ blocks:
uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/keyspaces/<<keyspace:baselines>>/<<table:iot>>?where=URLENCODE[[{"machine_id":{"$eq":"{machine_id}"},"sensor_name":{"$eq":"{sensor_name}"}}]]&page-size=<<limit:10>>
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
X-Cassandra-Token: "{request_token}"
Content-Type: "application/json"

main-write:
@ -156,7 +158,7 @@ blocks:
uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/keyspaces/<<keyspace:baselines>>/<<table:iot>>
Accept: "application/json"
X-Cassandra-Request-Id: "{request_id}"
X-Cassandra-Token: "<<auth_token:my_auth_token>>"
X-Cassandra-Token: "{request_token}"
Content-Type: "application/json"
body: |
{

@ -1,6 +1,4 @@
min_version: "4.17.15"

# nb -v run driver=http yaml=http-docsapi-crud-basic tags=phase:schema docsapi_host=my_docsapi_host auth_token=$AUTH_TOKEN
min_version: "5.17.1"

description: |
This workload emulates CRUD operations for the Stargate Documents API.
@ -9,11 +7,11 @@ description: |

scenarios:
default:
schema: run driver=http tags==block:schema threads==1 cycles==UNDEF
write: run driver=http tags==block:"write.*" cycles===TEMPLATE(write-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
read: run driver=http tags==block:"read.*" cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
update: run driver=http tags==block:"update.*" cycles===TEMPLATE(update-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
delete: run driver=http tags==block:"delete.*" cycles===TEMPLATE(delete-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
schema: run driver=http tags==block:'schema.*' threads==1 cycles==UNDEF
write: run driver=http tags==block:'write.*' cycles===TEMPLATE(write-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
read: run driver=http tags==block:'read.*' cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
update: run driver=http tags==block:'update.*' cycles===TEMPLATE(update-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
delete: run driver=http tags==block:'delete.*' cycles===TEMPLATE(delete-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn

bindings:
# To enable an optional weighted set of hosts in place of a load balancer

@ -1,4 +1,4 @@
min_version: "4.17.15"
min_version: "5.17.1"

description: |
This workload emulates CRUD operations for the Stargate Documents API.
@ -7,11 +7,11 @@ description: |

scenarios:
default:
schema: run driver=http tags==block:schema threads==1 cycles==UNDEF
write: run driver=http tags==name:"write.*" cycles===TEMPLATE(write-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
read: run driver=http tags==name:"read.*" cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
update: run driver=http tags==name:"update.*" cycles===TEMPLATE(update-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
delete: run driver=http tags==name:"delete.*" cycles===TEMPLATE(delete-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
schema: run driver=http tags==block:'schema.*' threads==1 cycles==UNDEF
write: run driver=http tags==name:'write.*' cycles===TEMPLATE(write-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
read: run driver=http tags==name:'read.*' cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
update: run driver=http tags==name:'update.*' cycles===TEMPLATE(update-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
delete: run driver=http tags==name:'delete.*' cycles===TEMPLATE(delete-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn

bindings:
# To enable an optional weighted set of hosts in place of a load balancer

@ -1,4 +1,4 @@
min_version: "4.17.15"
min_version: "5.17.1"

description: |
This workload emulates a key-value data model and access patterns.
@ -10,7 +10,7 @@ description: |

scenarios:
default:
schema: run driver=http tags==block:schema threads==1 cycles==UNDEF
schema: run driver=http tags==block:'schema.*' threads==1 cycles==UNDEF
rampup: run driver=http tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
main: run driver=http tags==block:main cycles===TEMPLATE(main-cycles,10000000) threads=auto

@ -1,4 +1,4 @@
min_version: "4.17.15"
min_version: "5.17.1"

description: |
This workload emulates advanced search filter combinations for the Stargate Documents API.
@ -15,10 +15,10 @@ description: |
# complex2: (match1 LTE 0 OR match2 EQ "false") AND (match2 EQ "false" OR match3 EQ true)
# complex3: (match1 LTE 0 AND match2 EQ "true") OR (match2 EQ "false" AND match3 EQ true)
scenarios:
schema: run driver=http tags==phase:schema threads==<<threads:1>> cycles==UNDEF
schema: run driver=http tags==block:'schema.*' threads==<<threads:1>> cycles==UNDEF
rampup:
write: run driver=http tags==name:"rampup-put.*" cycles===TEMPLATE(docscount,10000000) docpadding=TEMPLATE(docpadding,0) match-ratio=TEMPLATE(match-ratio,0.01) threads=<<threads:auto>> errors=timer,warn
read: run driver=http tags==phase:"rampup-get.*" cycles===TEMPLATE(rampup-cycles, 10000000) page-size=TEMPLATE(page-size,3) fields=TEMPLATE(fields,%5b%5d) threads=<<threads:auto>> errors=timer,warn
write: run driver=http tags==name:'rampup-put.*' cycles===TEMPLATE(docscount,10000000) docpadding=TEMPLATE(docpadding,0) match-ratio=TEMPLATE(match-ratio,0.01) threads=<<threads:auto>> errors=timer,warn
read: run driver=http tags==block:'rampup-get.*' cycles===TEMPLATE(rampup-cycles, 10000000) page-size=TEMPLATE(page-size,3) fields=TEMPLATE(fields,%5b%5d) threads=<<threads:auto>> errors=timer,warn
main:
all: run driver=http tags==block:main cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) page-size=TEMPLATE(page-size,3) fields=TEMPLATE(fields,%5b%5d) threads=<<threads:auto>> errors=timer,warn
get-in: run driver=http tags==name:main-get-in,filter:in cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) page-size=TEMPLATE(page-size,3) fields=TEMPLATE(fields,%5b%5d) threads=<<threads:auto>> errors=timer,warn

@ -1,4 +1,4 @@
min_version: "4.17.15"
min_version: "5.17.1"

description: |
This workload emulates basic search operations for the Stargate Documents API.
@ -7,10 +7,10 @@ description: |
Note that docsapi_port should reflect the port where the Docs API is exposed (defaults to 8180).

scenarios:
schema: run driver=http tags==block:schema threads==<<threads:1>> cycles==UNDEF
schema: run driver=http tags==block:'schema.*' threads==<<threads:1>> cycles==UNDEF
rampup:
write: run driver=http tags==name:"rampup-put.*" cycles===TEMPLATE(docscount,10000000) docpadding=TEMPLATE(docpadding,0) match-ratio=TEMPLATE(match-ratio,0.01) threads=<<threads:auto>> errors=timer,warn
read: run driver=http tags==name:"rampup-get.*" cycles===TEMPLATE(rampup-cycles, 10000000) page-size=TEMPLATE(page-size,3) fields=TEMPLATE(fields,%5b%5d) threads=<<threads:auto>> errors=timer,warn
write: run driver=http tags==name:'rampup-put.*' cycles===TEMPLATE(docscount,10000000) docpadding=TEMPLATE(docpadding,0) match-ratio=TEMPLATE(match-ratio,0.01) threads=<<threads:auto>> errors=timer,warn
read: run driver=http tags==name:'rampup-get.*' cycles===TEMPLATE(rampup-cycles, 10000000) page-size=TEMPLATE(page-size,3) fields=TEMPLATE(fields,%5b%5d) threads=<<threads:auto>> errors=timer,warn
main:
all: run driver=http tags==block:main cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) page-size=TEMPLATE(page-size,3) fields=TEMPLATE(fields,%5b%5d) threads=<<threads:auto>> errors=timer,warn
get-eq: run driver=http tags==name:main-get-eq,filter:eq cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) page-size=TEMPLATE(page-size,3) fields=TEMPLATE(fields,%5b%5d) threads=<<threads:auto>> errors=timer,warn

@ -1,4 +1,4 @@
min_version: "4.17.15"
min_version: "5.17.1"

description: |
This workload emulates a key-value data model and access patterns.
@ -9,7 +9,7 @@ description: |

scenarios:
default:
schema: run driver=http tags==block:schema threads==1 cycles==UNDEF
schema: run driver=http tags==block:'schema.*' threads==1 cycles==UNDEF
rampup: run driver=http tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
main: run driver=http tags==block:"main.*" cycles===TEMPLATE(main-cycles,10000000) threads=auto

@ -1,4 +1,4 @@
min_version: "4.17.15"
min_version: "5.17.1"

description: |
This workload emulates a tabular workload with partitions, clusters, and data fields.
@ -10,9 +10,9 @@ description: |

scenarios:
default:
schema: run driver=http tags==block:schema threads==1 cycles==UNDEF
schema: run driver=http tags==block:'schema.*' threads==1 cycles==UNDEF
rampup: run driver=http tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
main: run driver=http tags==block:"main.*" cycles===TEMPLATE(main-cycles,10000000) threads=auto
main: run driver=http tags==block:'main.*' cycles===TEMPLATE(main-cycles,10000000) threads=auto

bindings:
# To enable an optional weighted set of hosts in place of a load balancer
@ -39,7 +39,7 @@ bindings:
blocks:
schema:
tags:
phase: schema
block: schema
ops:
create-keyspace:
method: POST

@ -1,4 +1,4 @@
min_version: "4.17.15"
min_version: "5.17.1"

description: |
This workload emulates a time-series data model and access patterns.
@ -15,7 +15,7 @@ scenarios:
default:
schema: run driver=cql tags==block:schema threads==1 cycles==UNDEF
rampup: run driver=http tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
main: run driver=http tags==block:"main.*" cycles===TEMPLATE(main-cycles,10000000) threads=auto
main: run driver=http tags==block:main cycles===TEMPLATE(main-cycles,10000000) threads=auto

bindings:
# To enable an optional weighted set of hosts in place of a load balancer

@ -1,4 +1,4 @@
min_version: "4.17.15"
min_version: "5.17.1"

# TODO
# - do we need a truncate schema / namespace at the end
@ -13,9 +13,9 @@ description: |

scenarios:
default:
schema: run driver=http tags==block:schema threads==1 cycles==UNDEF
schema: run driver=http tags==block:'schema.*' threads==1 cycles==UNDEF
rampup: run driver=http tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
main: run driver=http tags==phase:"main.*" cycles===TEMPLATE(main-cycles,10000000) threads=auto
main: run driver=http tags==block:'main.*' cycles===TEMPLATE(main-cycles,10000000) threads=auto

bindings:
# To enable an optional weighted set of hosts in place of a load balancer

@ -1,4 +1,4 @@
min_version: "4.17.15"
min_version: "5.17.1"

# TODO
# - do we need a truncate schema / namespace at the end
@ -15,9 +15,9 @@ description: |

scenarios:
default:
schema: run driver=http tags==block:schema threads==1 cycles==UNDEF
schema: run driver=http tags==block:'schema.*' threads==1 cycles==UNDEF
rampup: run driver=http tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
main: run driver=http tags==block:"main.*" cycles===TEMPLATE(main-cycles,10000000) threads=auto
main: run driver=http tags==block:'main.*' cycles===TEMPLATE(main-cycles,10000000) threads=auto

bindings:
# To enable an optional weighted set of hosts in place of a load balancer

@ -1,4 +1,4 @@
min_version: "4.17.15"
min_version: "5.17.1"

# TODO
# - do we need a truncate schema / namespace at the end
@ -19,9 +19,9 @@ description: |

scenarios:
default:
schema: run driver=http tags==block:schema threads==1 cycles==UNDEF
schema: run driver=http tags==block:'schema.*' threads==1 cycles==UNDEF
rampup: run driver=http tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
main: run driver=http tags==block:"main.*" cycles===TEMPLATE(main-cycles,10000000) threads=auto
main: run driver=http tags==block:'main.*' cycles===TEMPLATE(main-cycles,10000000) threads=auto

bindings:
# To enable an optional weighted set of hosts in place of a load balancer

adapter-http/src/main/resources/data/stargate_token.txt
Normal file
@ -0,0 +1 @@
# <<put-token-here>>

@ -1,17 +1,17 @@
# Connection Guide: https://www.mongodb.com/docs/drivers/java/sync/current/fundamentals/connection/
# nb5 run driver=mongodb workload=/path/to/mongodb-keyvalue2.yaml tags=block:schema connection='mongodb+srv://user:pass@sample-db.host.mongodb.net/?retryWrites=true&w=majority' database=baselines -vv --show-stacktraces
# nb5 run driver=mongodb workload=/path/to/mongodb-keyvalue2.yaml tags=block:"schema.*" connection='mongodb+srv://user:pass@sample-db.host.mongodb.net/?retryWrites=true&w=majority' database=baselines -vv --show-stacktraces
# nb5 run driver=mongodb workload=/path/to/mongodb-keyvalue2.yaml tags=block:rampup cycles=25 connection='mongodb+srv://user:pass@sample-db.host.mongodb.net/?retryWrites=true&w=majority' database=baselines -vv --show-stacktraces
# nb5 run driver=mongodb workload=/path/to/mongodb-keyvalue2.yaml tags='block:main-.*' cycles=25 connection='mongodb+srv://user:pass@sample-db.host.mongodb.net/?retryWrites=true&w=majority' database=baselines -vv --show-stacktraces
min_version: "4.17.31"
min_version: "5.17.1"

description: |
This workload is analogous to the cql-keyvalue2 workload, just implemented for MongoDB.

scenarios:
default:
schema: run driver=mongodb tags==block:schema threads==1 cycles==UNDEF
schema: run driver=mongodb tags==block:'schema.*' threads==1 cycles==UNDEF
rampup: run driver=mongodb tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
main: run driver=mongodb tags==block:"main-.*" cycles===TEMPLATE(main-cycles,10000000) threads=auto
main: run driver=mongodb tags==block:'main-.*' cycles===TEMPLATE(main-cycles,10000000) threads=auto
drop: run driver=mongodb tags==block:drop-collection threads==1 cycles==UNDEF

params:

@ -1,17 +1,17 @@
# Connection Guide: https://www.mongodb.com/docs/drivers/java/sync/current/fundamentals/connection/
# nb5 run driver=mongodb workload=/path/to/mongodb-tabular2.yaml tags=block:schema connection='mongodb+srv://user:pass@sample-db.host.mongodb.net/?retryWrites=true&w=majority' database=baselines -vv --show-stacktraces
# nb5 run driver=mongodb workload=/path/to/mongodb-tabular2.yaml tags=block:"schema.*" connection='mongodb+srv://user:pass@sample-db.host.mongodb.net/?retryWrites=true&w=majority' database=baselines -vv --show-stacktraces
# nb5 run driver=mongodb workload=/path/to/mongodb-tabular2.yaml tags=block:rampup cycles=25 connection='mongodb+srv://user:pass@sample-db.host.mongodb.net/?retryWrites=true&w=majority' database=baselines -vv --show-stacktraces
# nb5 run driver=mongodb workload=/path/to/mongodb-tabular2.yaml tags='block:main.*' cycles=25 connection='mongodb+srv://user:pass@sample-db.host.mongodb.net/?retryWrites=true&w=majority' database=baselines -vv --show-stacktraces
min_version: "4.17.31"
min_version: "5.17.1"

description: |
This workload is analogous to the cql-tabular2 workload, just implemented for MongoDB.

scenarios:
default:
schema: run driver=mongodb tags==block:schema threads==1 cycles==UNDEF
schema: run driver=mongodb tags==block:'schema.*' threads==1 cycles==UNDEF
rampup: run driver=mongodb tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
main: run driver=mongodb tags==block:"main-.*" cycles===TEMPLATE(main-cycles,10000000) threads=auto
main: run driver=mongodb tags==block:'main-.*' cycles===TEMPLATE(main-cycles,10000000) threads=auto
drop: run driver=mongodb tags==block:drop-collection threads==1 cycles==UNDEF

params:

@ -1,21 +1,21 @@
# Connection Guide: https://www.mongodb.com/docs/drivers/java/sync/current/fundamentals/connection/
# nb5 run driver=mongodb workload=/path/to/mongodb-timeseries2.yaml tags=block:schema connection='mongodb+srv://user:pass@sample-db.host.mongodb.net/?retryWrites=true&w=majority' database=baselines -vv --show-stacktraces
# nb5 run driver=mongodb workload=/path/to/mongodb-timeseries2.yaml tags=block:"schema.*" connection='mongodb+srv://user:pass@sample-db.host.mongodb.net/?retryWrites=true&w=majority' database=baselines -vv --show-stacktraces
# nb5 run driver=mongodb workload=/path/to/mongodb-timeseries2.yaml tags=block:rampup cycles=25 connection='mongodb+srv://user:pass@sample-db.host.mongodb.net/?retryWrites=true&w=majority' database=baselines -vv --show-stacktraces
# nb5 run driver=mongodb workload=/path/to/mongodb-timeseries2.yaml tags='block:main.*' cycles=25 connection='mongodb+srv://user:pass@sample-db.host.mongodb.net/?retryWrites=true&w=majority' database=baselines -vv --show-stacktraces

# https://www.mongodb.com/community/forums/t/how-to-store-a-uuid-with-binary-subtype-0x04-using-the-mongodb-java-driver/13184
# https://www.mongodb.com/community/forums/t/problem-inserting-uuid-field-with-binary-subtype-via-atlas-web-ui/1071/4
# https://www.mongodb.com/community/forums/t/timeseries-last-x-documents/186574/5
min_version: "4.17.31"
min_version: "5.17.1"

description: |
This workload is analogous to the cql-timeseries2 workload, just implemented for MongoDB.

scenarios:
default:
schema: run driver=mongodb tags==block:schema threads==1 cycles==UNDEF
schema: run driver=mongodb tags==block:'schema.*' threads==1 cycles==UNDEF
rampup: run driver=mongodb tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
main: run driver=mongodb tags==block:"main-.*" cycles===TEMPLATE(main-cycles,10000000) threads=auto
main: run driver=mongodb tags==block:'main-.*' cycles===TEMPLATE(main-cycles,10000000) threads=auto
drop: run driver=mongodb tags==block:drop-collection threads==1 cycles==UNDEF

params:

@ -1,10 +1,9 @@
# nb -v run driver=mongodb yaml=mongodb-basic-uuid connection=mongodb://127.0.0.1 database=testdb tags=phase:rampup
description: An example of a basic mongo insert and find with UUID

scenarios:
default:
- run driver=mongodb tags==phase:rampup cycles===TEMPLATE(rampup-cycles,100000000) threads=auto
- run driver=mongodb tags==phase:main cycles===TEMPLATE(main-cycles,100000000) threads=auto
rampup: run driver=mongodb tags==block:rampup cycles===TEMPLATE(rampup-cycles,100000000) threads=auto
main: run driver=mongodb tags==block:main cycles===TEMPLATE(main-cycles,100000000) threads=auto
bindings:
seq_uuid: Mod(<<uuidCount:100000000>>L); ToHashedUUID() -> java.util.UUID; ToString() -> String
rw_uuid: <<uuidDist:Uniform(0,100000000)->long>>; ToHashedUUID() -> java.util.UUID; ToString() -> String
@ -12,69 +11,50 @@ bindings:
seq_value: Mod(<<valueCount:1000000000>>L); <<valueSizeDist:Hash()>>; ToLong()
rw_key: <<keyDist:Uniform(0,1000000)->long>>; ToInt()
rw_value: <<valDist:Uniform(0,1000000000)->long>>; <<valueSizeDist:Hash()>>; ToLong()

blocks:
- name: rampup
tags:
phase: rampup
statements:
- rampup-insert: |
{
insert: "<<collection:keyvalueuuid>>",
documents: [ { _id: UUID("{seq_uuid}"),
key: {seq_key},
value: NumberLong({seq_value}) } ]
}
params:
readPreference: primary
tags:
name: rampup-insert
- name: verify
tags:
phase: verify
type: read
params:
readPreference: primary
rampup:
ops:
rampup-insert: |
{
insert: "<<collection:keyvalueuuid>>",
documents: [ { _id: UUID("{seq_uuid}"),
key: {seq_key},
value: NumberLong({seq_value}) } ]
}
verify:
params:
cl: <<read_cl:LOCAL_QUORUM>>
statements:
- verify-find: |
{
find: "<<collection:keyvalueuuid>>",
filter: { _id: UUID("{seq_uuid}") }
}
verify-fields: _id->seq_uuid, key->seq_key, value->seq_value
tags:
name: verify
- name: main-read
tags:
phase: main
type: read
cl: <<read_cl:LOCAL_QUORUM>>
verify-fields: _id->seq_uuid, key->seq_key, value->seq_value
ops:
verify-find: |
{
find: "<<collection:keyvalueuuid>>",
filter: { _id: UUID("{seq_uuid}") }
}
main-read:
params:
ratio: <<read_ratio:1>>
statements:
- main-find: |
{
find: "<<collection:keyvalueuuid>>",
filter: { _id: UUID("{rw_uuid}") }
}
params:
readPreference: primary
tags:
name: main-find
- name: main-write
tags:
phase: main
type: write
type: read
readPreference: primary
ops:
main-find: |
{
find: "<<collection:keyvalueuuid>>",
filter: { _id: UUID("{rw_uuid}") }
}
main-write:
params:
ratio: <<write_ratio:1>>
statements:
- main-insert: |
{
insert: "<<collection:keyvalueuuid>>",
documents: [ { _id: UUID("{rw_uuid}"),
key: {rw_key},
value: NumberLong({rw_value}) } ]
}
params:
readPreference: primary
tags:
name: main-insert
type: write
readPreference: primary
ops:
main-insert: |
{
insert: "<<collection:keyvalueuuid>>",
documents: [ { _id: UUID("{rw_uuid}"),
key: {rw_key},
value: NumberLong({rw_value}) } ]
}

@ -1,10 +1,10 @@
# nb -v run driver=mongodb yaml=mongodb-basic connection=mongodb://127.0.0.1 database=testdb tags=phase:rampup cycles=1M
description: An example of a basic mongo insert and find.

scenarios:
default:
- run driver=mongodb tags==phase:rampup cycles===TEMPLATE(rampup-cycles,1000000) threads=auto
- run driver=mongodb tags==phase:main cycles===TEMPLATE(main-cycles,1000000) threads=auto
rampup: run driver=mongodb tags==block:rampup cycles===TEMPLATE(rampup-cycles,1000000) threads=auto
main: run driver=mongodb tags==block:'main-*.*' cycles===TEMPLATE(main-cycles,10000000) threads=auto

bindings:
seq_key: Mod(<<keyCount:1000000>>L); ToInt()
seq_value: Mod(<<valueCount:1000000000>>L); <<valueSizeDist:Hash()>>; ToString() -> String
@ -12,46 +12,39 @@ bindings:
rw_value: <<valDist:Uniform(0,1000000000)->int>>; <<valueSizeDist:Hash()>>; ToString() -> String

blocks:
- name: rampup
tags:
phase: rampup
statements:
- rampup-insert: |
rampup:
ops:
rampup-insert: |
{
insert: "<<collection:keyvalue>>",
documents: [ { _id: {seq_key},
value: {seq_value} } ]
documents: [ { _id: {seq_key}, value: {seq_value} } ]
}
params:
readPreference: primary
tags:
name: rampup-insert
- name: main-read
tags:
phase: main
type: read

main-read:
params:
ratio: <<read_ratio:1>>
statements:
- main-find: |
{
find: "<<collection:keyvalue>>",
filter: { _id: {rw_key} }
}
params:
readPreference: primary
- name: main-write
tags:
phase: main
type: write
readPreference: primary
type: read
ops:
main-find: |
{
find: "<<collection:keyvalue>>",
filter: { _id: {rw_key} }
}

main-write:
params:
ratio: <<write_ratio:1>>
statements:
- main-insert: |
type: write
ops:
main-insert: |
{
insert: "<<collection:keyvalue>>",
documents: [ { _id: {rw_key},
value: {rw_value} } ]
documents: [ { _id: {rw_key}, value: {rw_value} } ]
}
params:
readPreference: primary
readPreference: primary

@ -1,4 +1,3 @@
# nb -v run driver=mongodb yaml=mongodb-crud-basic tags=phase:schema connection=mongodb://127.0.0.1 database=testdb

description: |
This workload emulates CRUD operations for the mongoDB.
@ -7,11 +6,11 @@ description: |

scenarios:
default:
schema: run driver=mongodb tags==phase:schema threads==1 cycles==UNDEF
write: run driver=mongodb tags==phase:main,type:write cycles===TEMPLATE(write-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
read: run driver=mongodb tags==phase:main,type:read cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
update: run driver=mongodb tags==phase:main,type:update cycles===TEMPLATE(update-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
delete: run driver=mongodb tags==phase:main,type:delete cycles===TEMPLATE(delete-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
schema: run driver=mongodb tags==block:'schema.*' threads==1 cycles==UNDEF
write: run driver=mongodb tags==block:main-write,type:write cycles===TEMPLATE(write-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
read: run driver=mongodb tags==block:main-read,type:read cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
update: run driver=mongodb tags==block:main-update,type:update cycles===TEMPLATE(update-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
delete: run driver=mongodb tags==block:main-delete,type:delete cycles===TEMPLATE(delete-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn

bindings:
seq_key: Mod(<<docscount:10000000>>); ToString() -> String
@ -29,62 +28,93 @@ bindings:
friend_id: Add(-1); ToHashedUUID(); ToString() -> String

blocks:
- tags:
phase: schema
statements:
- dummy-insert: |
{
insert: "<<collection:crud_basic>>",
documents: [ { _id: "dummyyyy" } ]
}
schema:
ops:
dummy-insert: |
{
insert: "<<collection:crud_basic>>",
documents: [ { _id: "dummyyyy" } ]
}
drop-collection: |
{
drop: "<<collection:crud_basic>>"
}
create-collection: |
{
create: "<<collection:crud_basic>>"
}
create-indexes: |
{
createIndexes: "<<collection:crud_basic>>",
indexes: [
{
key: { user_id: 1 },
name: "user_id_idx",
unique: true
},
{
key: { created_on: 1 },
name: "created_on_idx"
},
{
key: { gender: 1 },
name: "gender_idx"
}
]
}

- drop-collection: |
{
drop: "<<collection:crud_basic>>"
}
tags:
name: drop-collection

- create-collection: |
{
create: "<<collection:crud_basic>>"
}
tags:
name: create-collection

- create-indexes: |
{
createIndexes: "<<collection:crud_basic>>",
indexes: [
{
key: { user_id: 1 },
name: "user_id_idx",
unique: true
main-write:
ops:
write-document: |
{
insert: "<<collection:crud_basic>>",
writeConcern: { w: "majority" },
documents: [
{
"_id": "{seq_key}",
"user_id": "{user_id}",
"created_on": {created_on},
"gender": "{gender}",
"full_name": "{full_name}",
"married": {married},
"address": {
"primary": {
"city": "{city}",
"cc": "{country_code}"
},
"secondary": {}
},
{
key: { created_on: 1 },
name: "created_on_idx"
},
{
key: { gender: 1 },
name: "gender_idx"
}
]
}
tags:
name: create-indexes
"coordinates": [
{lat},
{lng}
],
"children": [],
"friends": [
"{friend_id}"
],
"debt": null
}
]
}

- name: main-write
tags:
phase: main
type: write
statements:
- write-document: |
{
insert: "<<collection:crud_basic>>",
writeConcern: { w: "majority" },
documents: [
{
main-read:
ops:
read-document: |
{
find: "<<collection:crud_basic>>",
filter: { _id: "{random_key}" }
}

main-update:
ops:
update-document: |
{
update: "<<collection:crud_basic>>",
writeConcern: { w: "majority" },
updates: [
{
q: { _id: "{random_key}" },
u: {
"_id": "{seq_key}",
"user_id": "{user_id}",
"created_on": {created_on},
@ -108,78 +138,19 @@ blocks:
],
"debt": null
}
]
}
tags:
name: write-document
}
]
}

- name: main-read
tags:
phase: main
type: read
statements:
- read-document: |
{
find: "<<collection:crud_basic>>",
filter: { _id: "{random_key}" }
}
tags:
name: read-document

- name: main-update
tags:
phase: main
type: update
statements:
- update-document: |
{
update: "<<collection:crud_basic>>",
writeConcern: { w: "majority" },
updates: [
{
q: { _id: "{random_key}" },
u: {
"_id": "{seq_key}",
"user_id": "{user_id}",
"created_on": {created_on},
"gender": "{gender}",
"full_name": "{full_name}",
"married": {married},
"address": {
"primary": {
"city": "{city}",
"cc": "{country_code}"
},
"secondary": {}
},
"coordinates": [
{lat},
{lng}
],
"children": [],
"friends": [
"{friend_id}"
],
"debt": null
}
}
]
}
tags:
name: update-document

- name: main-delete
tags:
phase: main
type: delete
statements:
- delete-document: |
{
delete: "<<collection:crud_basic>>",
deletes: [
{
q: { _id: "{seq_key}" },
limit: 1
}
]
}
main-delete:
ops:
delete-document: |
{
delete: "<<collection:crud_basic>>",
deletes: [
{
q: { _id: "{seq_key}" },
limit: 1
}
]
}

@ -1,4 +1,3 @@
# nb -v run driver=mongodb yaml=mongodb-crud-dataset tags=phase:schema connection=mongodb://127.0.0.1 database=testdb dataset_file=path/to/data.json

description: |
This workload emulates CRUD operations for the mongoDB.
@ -7,110 +6,85 @@ description: |

scenarios:
default:
schema: run driver=mongodb tags==phase:schema threads==1 cycles==UNDEF
write: run driver=mongodb tags==phase:main,type:write cycles===TEMPLATE(write-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
read: run driver=mongodb tags==phase:main,type:read cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
update: run driver=mongodb tags==phase:main,type:update cycles===TEMPLATE(update-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
delete: run driver=mongodb tags==phase:main,type:delete cycles===TEMPLATE(delete-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
schema: run driver=mongodb tags==block:'schema.*' threads==1 cycles==UNDEF
write: run driver=mongodb tags==block:main-write,type:write cycles===TEMPLATE(write-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
read: run driver=mongodb tags==block:main-read,type:read cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
update: run driver=mongodb tags==block:main-update,type:update cycles===TEMPLATE(update-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn
delete: run driver=mongodb tags==block:main-delete,type:delete cycles===TEMPLATE(delete-cycles,TEMPLATE(docscount,10000000)) threads=auto errors=timer,warn

bindings:
seq_key: Mod(<<docscount:10000000>>); ToString() -> String
random_key: Uniform(0,<<docscount:10000000>>); ToString() -> String

blocks:
- tags:
phase: schema
statements:
- dummy-insert: |
{
insert: "<<collection:crud_dataset>>",
documents: [ { _id: "dummyyyy" } ]
}
schema:
ops:
dummy-insert: |
{
insert: "<<collection:crud_dataset>>",
documents: [ { _id: "dummyyyy" } ]
}

- drop-collection: |
{
drop: "<<collection:crud_dataset>>"
}
tags:
name: drop-collection
drop-collection: |
{
drop: "<<collection:crud_dataset>>"
}

- create-collection: |
{
create: "<<collection:crud_dataset>>"
}
tags:
name: create-collection
create-collection: |
{
create: "<<collection:crud_dataset>>"
}

- create-indexes: |
{
createIndexes: "<<collection:crud_dataset>>",
indexes: <<indexes:[ { key: { dummy : 1 }, name: "dummy_idx", sparse: true } ]>>
}
tags:
name: create-indexes
create-indexes: |
{
createIndexes: "<<collection:crud_dataset>>",
indexes: <<indexes:[ { key: { dummy : 1 }, name: "dummy_idx", sparse: true } ]>>
}

- name: main-write
tags:
phase: main
type: write
statements:
- write-document: |
{
insert: "<<collection:crud_dataset>>",
writeConcern: { w: "majority" },
documents: [ { "_id": "{seq_key}", {document_json_without_id} ]
}
tags:
name: write-document
main-write:
ops:
write-document: |
{
insert: "<<collection:crud_dataset>>",
writeConcern: { w: "majority" },
documents: [ { "_id": "{seq_key}", {document_json_without_id} ]
}
bindings:
document_json_without_id: ModuloLineToString('<<dataset_file>>'); ReplaceRegex('^\{', '')

- name: main-read
tags:
phase: main
type: read
statements:
- read-document: |
{
find: "<<collection:crud_dataset>>",
filter: { _id: "{random_key}" }
}
tags:
name: read-document
main-read:
ops:
read-document: |
{
find: "<<collection:crud_dataset>>",
filter: { _id: "{random_key}" }
}

- name: main-update
tags:
phase: main
type: update
statements:
- update-document: |
{
update: "<<collection:crud_dataset>>",
writeConcern: { w: "majority" },
updates: [
{
q: { _id: "{random_key}" },
u: { "_id": "{random_key}", {document_json_without_id}
}
]
}
tags:
name: update-document
main-update:
ops:
update-document: |
{
update: "<<collection:crud_dataset>>",
writeConcern: { w: "majority" },
updates: [
{
q: { _id: "{random_key}" },
u: { "_id": "{random_key}", {document_json_without_id}
}
]
}
bindings:
document_json_without_id: ModuloLineToString('<<dataset_file>>'); ReplaceRegex('^\{', '')

- name: main-delete
tags:
phase: main
type: delete
statements:
- delete-document: |
{
delete: "<<collection:crud_dataset>>",
deletes: [
{
q: { _id: "{seq_key}" },
limit: 1
}
]
}
main-delete:
ops:
delete-document: |
{
delete: "<<collection:crud_dataset>>",
deletes: [
{
q: { _id: "{seq_key}" },
limit: 1
}
]
}

@ -1,4 +1,3 @@
# nb -v run driver=mongodb yaml=mongodb-search-basic tags=phase:schema connection=mongodb://127.0.0.1 database=testdb

description: |
This workload emulates basic search operations for the mongoDB.
@ -7,15 +6,15 @@ description: |
It's a counterpart of the Stargate's Documents API Basic Search workflow.

scenarios:
schema: run driver=mongodb tags==phase:schema threads==1 cycles==UNDEF
rampup-write: run driver=mongodb tags==phase:rampup-write cycles===TEMPLATE(docscount,10000000) docpadding=TEMPLATE(docpadding,0) match-ratio=TEMPLATE(match-ratio,0.01) threads=auto errors=timer,warn
rampup-read: run driver=mongodb tags==phase:rampup-read cycles===TEMPLATE(rampup-cycles, 10000000) field-projection=TEMPLATE(fields,null) threads=<<threads:auto>> errors=timer,warn
main: run driver=mongodb tags==phase:main cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) field-projection=TEMPLATE(fields,null) threads=<<threads:auto>> errors=timer,warn
main-eq: run driver=mongodb tags==phase:main,filter:eq cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) field-projection=TEMPLATE(fields,null) threads=<<threads:auto>> errors=timer,warn
main-lt: run driver=mongodb tags==phase:main,filter:lt cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) field-projection=TEMPLATE(fields,null) threads=<<threads:auto>> errors=timer,warn
main-and: run driver=mongodb tags==phase:main,filter:and cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) field-projection=TEMPLATE(fields,null) threads=<<threads:auto>> errors=timer,warn
main-or: run driver=mongodb tags==phase:main,filter:or cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) field-projection=TEMPLATE(fields,null) threads=<<threads:auto>> errors=timer,warn
main-or-single-match: run driver=mongodb tags==phase:main,filter:or-single-match cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) field-projection=TEMPLATE(fields,null) threads=<<threads:auto>> errors=timer,warn
schema: run driver=mongodb tags==block:'schema.*' threads==1 cycles==UNDEF
rampup-write: run driver=mongodb tags==block:rampup-write cycles===TEMPLATE(docscount,10000000) docpadding=TEMPLATE(docpadding,0) match-ratio=TEMPLATE(match-ratio,0.01) threads=auto errors=timer,warn
rampup-read: run driver=mongodb tags==block:rampup-read cycles===TEMPLATE(rampup-cycles, 10000000) field-projection=TEMPLATE(fields,null) threads=<<threads:auto>> errors=timer,warn
main: run driver=mongodb tags==block:main cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) field-projection=TEMPLATE(fields,null) threads=<<threads:auto>> errors=timer,warn
main-eq: run driver=mongodb tags==block:main-eq,filter:eq cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) field-projection=TEMPLATE(fields,null) threads=<<threads:auto>> errors=timer,warn
main-lt: run driver=mongodb tags==block:main-lt,filter:lt cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) field-projection=TEMPLATE(fields,null) threads=<<threads:auto>> errors=timer,warn
main-and: run driver=mongodb tags==block:main-and,filter:and cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) field-projection=TEMPLATE(fields,null) threads=<<threads:auto>> errors=timer,warn
main-or: run driver=mongodb tags==block:main-or,filter:or cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) field-projection=TEMPLATE(fields,null) threads=<<threads:auto>> errors=timer,warn
main-or-single-match: run driver=mongodb tags==block:main-or-single-match,filter:or-single-match cycles===TEMPLATE(read-cycles,TEMPLATE(docscount,10000000)) field-projection=TEMPLATE(fields,null) threads=<<threads:auto>> errors=timer,warn

bindings:
seq_key: Mod(<<docscount:10000000>>); ToString() -> String
@ -34,57 +33,49 @@ bindings:
match1: Identity(); CoinFunc(<<match-ratio>>, FixedValue(0), FixedValue(1000))
match2: Identity(); CoinFunc(<<match-ratio>>, FixedValue("true"), FixedValue("false"))
additional_fields: ListSizedStepped(<<docpadding:0>>,Template("\"{}\":{}",Identity(),Identity())); ToString(); ReplaceAll('\[\"', ',\"'); ReplaceAll('\[', ''); ReplaceAll('\]', '') -> String

blocks:
- tags:
phase: schema
statements:
- dummy-insert: |
{
insert: "<<collection:search_basic>>",
documents: [ { _id: "dummyyyy" } ]
}
schema:
ops:
dummy-insert: |
{
insert: "<<collection:search_basic>>",
documents: [ { _id: "dummyyyy" } ]
}

- drop-collection: |
{
drop: "<<collection:search_basic>>"
}
tags:
name: drop-collection
drop-collection: |
{
drop: "<<collection:search_basic>>"
}

- create-collection: |
{
create: "<<collection:search_basic>>"
}
tags:
name: create-collection
create-collection: |
{
create: "<<collection:search_basic>>"
}

- create-indexes: |
{
createIndexes: "<<collection:search_basic>>",
indexes: [
{
key: { user_id: 1 },
name: "user_id_idx",
unique: true
},
{
key: { created_on: 1 },
name: "created_on_idx"
},
{
key: { city: 1 },
name: "city_idx"
}
]
}
tags:
name: create-indexes
create-indexes: |
{
createIndexes: "<<collection:search_basic>>",
indexes: [
{
key: { user_id: 1 },
name: "user_id_idx",
unique: true
},
{
key: { created_on: 1 },
name: "created_on_idx"
},
{
key: { city: 1 },
name: "city_idx"
}
]
}

- name: rampup-write
tags:
phase: rampup-write
statements:
- write-document: |
rampup-write:
ops:
write-document:
{
insert: "<<collection:search_basic>>",
writeConcern: { w: "majority" },
@ -118,83 +109,62 @@ blocks:
}
]
}
tags:
name: rampup-write

- name: rampup
tags:
phase: rampup-read
filter: eq
statements:
- read-document: |
rampup-read:
params:
filter: eq
ops:
read-document: |
{
find: "<<collection:search_basic>>",
filter: { match1: 0 }
}, <<field-projection:null>>
tags:
name: rampup-read

- name: main-eq
tags:
phase: main
main-eq:
params:
filter: eq
statements:
- read-document: |
ops:
read-document: |
{
find: "<<collection:search_basic>>",
filter: { match3: true }
}, <<field-projection:null>>
tags:
name: read-document
}, <<field-projection:null>>

- name: main-lt
tags:
phase: main
main-lt:
params:
filter: lt
statements:
- read-document: |
ops:
read-document: |
{
find: "<<collection:search_basic>>",
filter: { match1: {$lt: 1}}
}, <<field-projection:null>>
tags:
name: read-document
}, <<field-projection:null>>

- name: main-and
tags:
phase: main
main-and:
params:
filter: and
statements:
- read-document: |
{
find: "<<collection:search_basic>>",
filter: { match1: {$lt: 1}, match2: "true"}
}, <<field-projection:null>>
tags:
name: read-document
ops:
read-document: |
{
find: "<<collection:search_basic>>",
filter: { match1: {$lt: 1}, match2: "true"}
}, <<field-projection:null>>

- name: main-or
tags:
phase: main
main-or:
params:
filter: or
statements:
- read-document: |
ops:
read-document: |
{
find: "<<collection:search_basic>>",
filter: { $or: [ {match1: {$lt: 1}}, {match3: true}]}
}, <<field-projection:null>>
tags:
name: read-document
}, <<field-projection:null>>

- name: main-or-single-match
tags:
phase: main
main-or-single-match:
params:
filter: or-single-match
statements:
- read-document: |
{
find: "<<collection:search_basic>>",
filter: { $or: [ {match1: {$lt: 1}}, {match2: "notamatch"}]}
}, <<field-projection:null>>
tags:
name: read-document
ops:
read-document: |
{
find: "<<collection:search_basic>>",
filter: { $or: [ {match1: {$lt: 1}}, {match2: "notamatch"}]}
}, <<field-projection:null>>

@ -302,7 +302,7 @@ in the workload construction guide.

```yaml
tags:
phase: main
block: main
```

*json:*
@ -311,7 +311,7 @@ tags:

{
"tags": {
"phase": "main"
"block": "main"
}
}
```
@ -331,7 +331,7 @@ Blocks are used to logically partition a workload for the purposes of grouping,
executing subsets and op sequences. Blocks can contain any of the defined elements above.
Every op template within a block automatically gets a tag with the name 'block' and the value of
the block name. This makes it easy to select a whole block at a time with a tag filter like
`tags=block:schema`.
`tags=block:"schema.*"`.

Blocks are not recursive. You may not put a block inside another block.

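As a hedged sketch of the auto-injected block tag described here (the block name, op name, and op body are illustrative, not taken from this changeset):

```yaml
blocks:
  schema:
    ops:
      create-keyspace: "example op body"
# every op template in the 'schema' block is implicitly tagged block:schema,
# so the whole block can be selected with a filter such as:
# nb5 run workload=myworkload.yaml tags=block:'schema.*'
```
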
@ -269,7 +269,7 @@ ops:
bindings:
binding1: NumberNameToString();
tags:
phase: schema
block: schema
params:
prepared: false
description: This is just an example operation
@ -292,7 +292,7 @@ ops:
"prepared": false
},
"tags": {
"phase": "schema"
"block": "schema"
}
}
}
@ -317,7 +317,7 @@ ops:
"prepared": false
},
"tags": {
"phase": "schema",
"block": "schema",
"name": "block0--special-op-name",
"block": "block0"
}
@ -351,7 +351,7 @@ blocks:
bindings:
binding1: NumberNameToString();
tags:
phase: schema
block: schema
params:
prepared: false
description: This is just an example operation
@ -386,7 +386,7 @@ blocks:
"prepared": false
},
"tags": {
"phase": "schema"
"block": "schema"
},
"ops": {
"op1": {
@ -416,7 +416,7 @@ blocks:
"prepared": false
},
"tags": {
"phase": "schema",
"block": "schema",
"docleveltag": "is-tagging-everything",
"name": "block-named-fred--special-op-name",
"block": "block-named-fred"

@ -77,7 +77,7 @@ public class RawYamlTemplateLoaderTest {
assertThat(schemaOnlyScenario.keySet())
.containsExactly("000");
assertThat(schemaOnlyScenario.values())
.containsExactly("run driver=blah tags=phase:schema");
.containsExactly("run driver=blah tags=block:'schema.*'");

assertThat(rawOpsDoc.getName()).isEqualTo("doc1");
assertThat(blocks).hasSize(1);

@ -7,7 +7,7 @@ scenarios:
- run driver=stdout alias=step1
- run driver=stdout alias=step2
schema-only:
- run driver=blah tags=phase:schema
- run driver=blah tags=block:'schema.*'

tags:
atagname: atagvalue

@ -1,4 +1,3 @@
# nb -v run driver=cql yaml=cql-iot tags=phase:schema host=dsehost
description: |
put workload descript here
scenarios:

@ -161,7 +161,7 @@ This puts NB on a footing to be "Modular Jar" compatible, which is a step toward
* auto-injected statement block and statement name tags.
- this means: You can now construct filters for specific blocks or statements simply by
knowing their name:
- `tags=block:schema` or `tags='main-.*'`
- `tags=block:"schema.*"` or `tags='main-.*'`
* safe usage of activity params and template vars is compatible, but must not be ambiguous. This
means that if you have a template variable in myworkload.yaml, it must be distinctly named
from any valid activity parameters, or an error is thrown. This eliminates a confusing source
@ -229,7 +229,7 @@ cqlgen - takes schema.cql tablestats -> workload.yaml
sstablegen

* yaml+nb version checks
- `min_version: "4.17.15"`
- `min_version: "5.17.1"`


* Mac M1 support

@ -230,6 +230,12 @@ public class NBCLIScenarioParser {
String[] namedStepPieces = cmd.split(" ");
for (String commandFragment : namedStepPieces) {
Matcher matcher = WordAndMaybeAssignment.matcher(commandFragment);

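// an empty fragment (e.g. from consecutive spaces in the cmd) can never match the pattern below; log and skip it instead of failing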
if (commandFragment.equalsIgnoreCase("")) {
logger.debug("Command fragment discovered to be empty. Skipping this fragment for cmd: {}", cmd);
continue;
}

if (!matcher.matches()) {
throw new BasicError("Unable to recognize scenario cmd spec in '" + commandFragment + "'");
}

@ -153,19 +153,19 @@ public class TagFilterTest {
public void testLeadingSpaceTrimmedInQuotedTag() {

Map<String, String> itemtags = new HashMap<>() {{
put("phase", "main");
put("block", "main");
}};

TagFilter tf = new TagFilter("\"phase: main\"");
TagFilter tf = new TagFilter("\"block: main\"");
assertThat(tf.matches(itemtags).matched()).isTrue();
}

@Test
public void testAnyCondition() {
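// note: any(...) is disjunctive; the filter matches when at least one listed tag condition matches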
Map<String, String> itemtags = Map.of("phase", "main", "truck", "car");
|
||||
TagFilter tf = new TagFilter("any(truck:car,phase:moon)");
|
||||
Map<String, String> itemtags = Map.of("block", "main", "truck", "car");
|
||||
TagFilter tf = new TagFilter("any(truck:car,block:moon)");
|
||||
assertThat(tf.matches(itemtags).matched()).isTrue();
|
||||
TagFilter tf2 = new TagFilter("any(car:truck,phase:moon)");
|
||||
TagFilter tf2 = new TagFilter("any(car:truck,block:moon)");
|
||||
assertThat(tf2.matches(itemtags).matched()).isFalse();
|
||||
}
|
||||
}
|
||||
|
@ -144,9 +144,9 @@ naming scheme for phase control. This means that you have tagged each of
your statements or statement blocks with the appropriate phase tags from
schema, rampup, main, for example.

- `schematags=phase:schema` - The tag filter for schema statements.
- `schematags=block:"schema.*"` - The tag filter for schema statements.
Findmax will run a schema phase with 1 thread by default.
- `maintags=phase:main` - The tag filter for the main workload. This is
- `maintags=block:main` - The tag filter for the main workload. This is
the workload that is started and run in the background for all of the
sampling windows.

|
@ -48,7 +48,7 @@ schema_activitydef = params.withDefaults({
});
schema_activitydef.alias="findmax_schema";
schema_activitydef.threads="1";
schema_activitydef.tags="TEMPLATE(schematags,phase:schema)";
schema_activitydef.tags="TEMPLATE(schematags,block:'schema.*')";
print("Creating schema with schematags:" + schema_activitydef.tags);

scenario.run(schema_activitydef);
@ -63,7 +63,7 @@ activitydef = params.withDefaults({
activitydef.alias="findmax";
activitydef.cycles="1000000000";
activitydef.recycles="1000000000";
activitydef.tags="TEMPLATE(maintags,phase:main)";
activitydef.tags="TEMPLATE(maintags,block:main)";
print("Iterating main workload with tags:" + activitydef.tags);
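The `TEMPLATE(name,default)` macro in these scripts resolves to its default unless the named template variable is supplied at invocation time, so the `schematags=...`/`maintags=...` parameters documented above are simply overrides. An illustrative invocation, assuming the script is addressable by name as its docs suggest:

```text
# use the defaults baked into the script:
./nb findmax
# or override the tag filters explicitly:
./nb findmax schematags=block:'schema.*' maintags=block:main
```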
@ -16,8 +16,8 @@

package io.nosqlbench.engine.cli;

import io.nosqlbench.engine.api.scenarios.NBCLIScenarioParser;
import io.nosqlbench.api.errors.BasicError;
import io.nosqlbench.engine.api.scenarios.NBCLIScenarioParser;
import org.junit.jupiter.api.Test;

import java.nio.file.Path;
@ -31,39 +31,39 @@ public class NBCLIScenarioParserTest {

@Test
public void providePathForScenario() {
    NBCLIOptions opts = new NBCLIOptions(new String[]{ "local/example-scenarios" });
    NBCLIOptions opts = new NBCLIOptions(new String[]{"local/example-scenarios"});
    List<Cmd> cmds = opts.getCommands();
}

@Test
public void defaultScenario() {
    NBCLIOptions opts = new NBCLIOptions(new String[]{ "scenario-test" });
    NBCLIOptions opts = new NBCLIOptions(new String[]{"scenario-test"});
    List<Cmd> cmds = opts.getCommands();
}

@Test
public void defaultScenarioWithParams() {
    NBCLIOptions opts = new NBCLIOptions(new String[]{ "scenario-test", "cycles=100"});
    NBCLIOptions opts = new NBCLIOptions(new String[]{"scenario-test", "cycles=100"});
    List<Cmd> cmds = opts.getCommands();
    assertThat(cmds.get(0).getArg("cycles")).isEqualTo("100");
}

@Test
public void namedScenario() {
    NBCLIOptions opts = new NBCLIOptions(new String[]{ "scenario-test", "schema-only"});
    NBCLIOptions opts = new NBCLIOptions(new String[]{"scenario-test", "schema-only"});
    List<Cmd> cmds = opts.getCommands();
}

@Test
public void namedScenarioWithParams() {
    NBCLIOptions opts = new NBCLIOptions(new String[]{ "scenario-test", "schema-only", "cycles=100"});
    NBCLIOptions opts = new NBCLIOptions(new String[]{"scenario-test", "schema-only", "cycles=100"});
    List<Cmd> cmds = opts.getCommands();
    assertThat(cmds.get(0).getArg("cycles")).containsOnlyOnce("100");
}

@Test
public void testThatSilentFinalParametersPersist() {
    NBCLIOptions opts = new NBCLIOptions(new String[]{ "scenario-test", "type=foo"});
    NBCLIOptions opts = new NBCLIOptions(new String[]{"scenario-test", "type=foo"});
    List<Cmd> cmds = opts.getCommands();
    assertThat(cmds.get(0).getArg("driver")).isEqualTo("stdout");
}
@ -71,25 +71,25 @@ public class NBCLIScenarioParserTest {
@Test
public void testThatVerboseFinalParameterThrowsError() {
    assertThatExceptionOfType(BasicError.class)
        .isThrownBy(() -> new NBCLIOptions(new String[]{ "scenario-test", "workload=canttouchthis"}));
        .isThrownBy(() -> new NBCLIOptions(new String[]{"scenario-test", "workload=canttouchthis"}));
}

@Test
public void testThatMissingScenarioNameThrowsError() {
    assertThatExceptionOfType(BasicError.class)
        .isThrownBy(() -> new NBCLIOptions(new String[]{ "scenario-test", "missing-scenario"}));
        .isThrownBy(() -> new NBCLIOptions(new String[]{"scenario-test", "missing-scenario"}));
}

@Test
public void testThatMultipleScenariosConcatenate() {
    NBCLIOptions opts = new NBCLIOptions(new String[]{ "scenario-test", "default", "default"});
    NBCLIOptions opts = new NBCLIOptions(new String[]{"scenario-test", "default", "default"});
    List<Cmd> cmds = opts.getCommands();
    assertThat(cmds.size()).isEqualTo(6);
}

@Test
public void testThatTemplatesAreExpandedDefault() {
    NBCLIOptions opts = new NBCLIOptions(new String[]{ "scenario-test", "template-test"});
    NBCLIOptions opts = new NBCLIOptions(new String[]{"scenario-test", "template-test"});
    List<Cmd> cmds = opts.getCommands();
    assertThat(cmds.size()).isEqualTo(1);
    assertThat(cmds.get(0).getArg("driver")).isEqualTo("stdout");
@ -99,31 +99,31 @@ public class NBCLIScenarioParserTest {

@Test
public void testThatTemplateParamsAreExpandedAndNotRemovedOverride() {
    NBCLIOptions opts = new NBCLIOptions(new String[]{ "scenario-test", "template-test", "cycles-test=20"});
    NBCLIOptions opts = new NBCLIOptions(new String[]{"scenario-test", "template-test", "cycles-test=20"});
    List<Cmd> cmds = opts.getCommands();
    assertThat(cmds.size()).isEqualTo(1);
    assertThat(cmds.get(0).getParams()).isEqualTo(Map.of(
        "alias","scenariotest_templatetest_withtemplate",
        "cycles","20",
        "cycles-test","20",
        "driver","stdout",
        "workload","scenario-test"
        "alias", "scenariotest_templatetest_withtemplate",
        "cycles", "20",
        "cycles-test", "20",
        "driver", "stdout",
        "workload", "scenario-test"
    ));
}

@Test
public void testThatUndefValuesAreUndefined() {
    NBCLIOptions opts = new NBCLIOptions(new String[]{ "scenario-test", "schema-only", "cycles-test=20"});
    NBCLIOptions opts = new NBCLIOptions(new String[]{"scenario-test", "schema-only", "cycles-test=20"});
    List<Cmd> cmds = opts.getCommands();
    assertThat(cmds.size()).isEqualTo(1);
    assertThat(cmds.get(0).getParams()).isEqualTo(Map.of(
        "alias","scenariotest_schemaonly_000",
        "cycles-test","20",
        "driver","stdout",
        "tags","phase:schema",
        "workload","scenario-test"
        "alias", "scenariotest_schemaonly_schema",
        "cycles-test", "20",
        "driver", "stdout",
        "tags", "block:'schema.*'",
        "workload", "scenario-test"
    ));
    NBCLIOptions opts1 = new NBCLIOptions(new String[]{ "scenario-test", "schema-only", "doundef=20"});
    NBCLIOptions opts1 = new NBCLIOptions(new String[]{"scenario-test", "schema-only", "doundef=20"});
    List<Cmd> cmds1 = opts1.getCommands();
    assertThat(cmds1.size()).isEqualTo(1);
    assertThat(cmds1.get(0).getArg("cycles-test")).isNull();
@ -140,7 +140,7 @@ public class NBCLIScenarioParserTest {
    Path absolute = rel.toAbsolutePath();
    assertThat(absolute).exists();

    NBCLIOptions opts = new NBCLIOptions(new String[]{ absolute.toString(), "schema-only", "cycles-test=20"});
    NBCLIOptions opts = new NBCLIOptions(new String[]{absolute.toString(), "schema-only", "cycles-test=20"});
    List<Cmd> cmds = opts.getCommands();
    assertThat(cmds.size()).isGreaterThan(0);
}
@ -150,7 +150,7 @@ public class NBCLIScenarioParserTest {
    //TODO: This might change?
    String urlScenario = "https://raw.githubusercontent.com/nosqlbench/nosqlbench/main/engine-cli/src/test/resources/activities/scenario-test.yaml";

    NBCLIOptions opts = new NBCLIOptions(new String[]{ urlScenario, "schema-only", "cycles-test=20"});
    NBCLIOptions opts = new NBCLIOptions(new String[]{urlScenario, "schema-only", "cycles-test=20"});
    List<Cmd> cmds = opts.getCommands();
    assertThat(cmds.size()).isGreaterThan(0);
}
@ -163,17 +163,17 @@ public class NBCLIScenarioParserTest {

@Test
public void testSubStepSelection() {
    NBCLIOptions opts = new NBCLIOptions(new String[]{ "scenario-test", "schema-only", "cycles-test=20"});
    NBCLIOptions opts = new NBCLIOptions(new String[]{"scenario-test", "schema-only", "cycles-test=20"});
    List<Cmd> cmds = opts.getCommands();
    assertThat(cmds.size()).isEqualTo(1);
    assertThat(cmds.get(0).getParams()).isEqualTo(Map.of(
        "alias","scenariotest_schemaonly_000",
        "cycles-test","20",
        "driver","stdout",
        "tags","phase:schema",
        "workload","scenario-test"
        "alias", "scenariotest_schemaonly_schema",
        "cycles-test", "20",
        "driver", "stdout",
        "tags", "block:'schema.*'",
        "workload", "scenario-test"
    ));
    NBCLIOptions opts1 = new NBCLIOptions(new String[]{ "local/example-scenarios", "namedsteps.one", "testparam1=testvalue2"});
    NBCLIOptions opts1 = new NBCLIOptions(new String[]{"local/example-scenarios", "namedsteps.one", "testparam1=testvalue2"});
    List<Cmd> cmds1 = opts1.getCommands();
    assertThat(cmds1.size()).isEqualTo(1);
    assertThat(cmds1.get(0).getArg("cycles-test")).isNull();
@ -1,6 +1,6 @@
name: alternate-format-test
scenarios:
  default:
    schema: run driver=cql protocol_version=v4 tags=block:schema threads==1 cycles=UNDEF
    schema: run driver=cql protocol_version=v4 tags=block:'schema.*' threads==1 cycles=UNDEF
    rampup: run driver=cql protocol_version=v4 tags=block:rampup cycles=10000
    main: run driver=cql protocol_version=v4 tags=block:main_mixed cycles=10000
@ -1,12 +1,13 @@
min_version: "4.17.15"
min_version: "5.17.1"

scenarios:
  default:
    schema: run driver==stdout workload===scenario-test tags=block:schema
    schema: run driver==stdout workload===scenario-test tags=block:'schema.*'
    rampup: run driver=stdout workload===scenario-test tags=block:rampup cycles=TEMPLATE(cycles1,10)
    main: run driver=stdout workload===scenario-test tags=block:"main.*" cycles=TEMPLATE(cycles2,10)
    main: run driver=stdout workload===scenario-test tags=block:'main.*' cycles=TEMPLATE(cycles2,10)
  schema-only:
    - "run driver=stdout workload=scenario-test tags=phase:schema doundef==undef"
    schema: run driver=stdout workload==scenario-test tags=block:'schema.*' doundef==undef

  template-test:
    with-template: run driver=stdout cycles=TEMPLATE(cycles-test,10)
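A note on the assignment operators in these steps, which the parser tests above exercise: a single `=` sets an ordinary, overridable parameter, `==` silently locks it (later overrides are quietly ignored, as in `testThatSilentFinalParametersPersist`), and `===` verbosely locks it (later overrides raise a `BasicError`, as in `testThatVerboseFinalParameterThrowsError`). In sketch form:

```text
driver==stdout              # silently locked: "type=foo" on the CLI leaves driver=stdout
workload===scenario-test    # verbosely locked: "workload=canttouchthis" throws a BasicError
cycles=TEMPLATE(cycles1,10) # plain assignment, freely overridable
```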
@ -22,6 +23,6 @@ blocks:
  main:
    ops:
      insert: |
        insert into puppies (test) values (1) ;
        insert into puppies (test) values (1);
      select: |
        select * from puppies;
@ -1,8 +1,8 @@
# example-scenarios.yaml
scenarios:
  default:
    - run cycles=3 alias=A driver=stdout
    - run cycles=5 alias=B driver=stdout
    one: run cycles=3 alias=A driver=stdout
    two: run cycles=5 alias=B driver=stdout
  namedsteps:
    one: run cycles=3 alias=A driver=stdout testparam1=testvalue1
    two: run cycles=5 alias=B driver=stdout
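Naming the steps (rather than listing them) is what enables the dotted sub-step selection exercised in `testSubStepSelection` above; for example:

```text
# run only step "one" of the "namedsteps" scenario, overriding its test param:
./nb local/example-scenarios namedsteps.one testparam1=testvalue2
```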
@ -74,7 +74,7 @@ public class GrafanaRegionAnalyzer implements Runnable {
//[2020-12-15T05:04:37.232Z[GMT] - 2020-12-15T05:04:37.232Z[GMT]]
//span:interval
//details:
// params: ActivityDef:(4)/{keycount=5000000000L, hosts=node1, main-cycles=500, threads=1, workload=./keyvalue.yaml, cycles=2, stride=2, tags=phase:schema, password=cassandra, rf=3, pooling=16:16:500, driver=cql, rampup-cycles=5000000000, alias=keyvalue_default_schema, valuecount=5000000000L, errors=count, username=cassandra}
// params: ActivityDef:(4)/{keycount=5000000000L, hosts=node1, main-cycles=500, threads=1, workload=./keyvalue.yaml, cycles=2, stride=2, tags=block:'schema.*', password=cassandra, rf=3, pooling=16:16:500, driver=cql, rampup-cycles=5000000000, alias=keyvalue_default_schema, valuecount=5000000000L, errors=count, username=cassandra}
//labels:
// layer: Activity
// alias: keyvalue_default_schema
@ -53,7 +53,7 @@
        "span:interval",
        "appname:nosqlbench"
      ],
      "text": "session: scenario_20201215_050302_981\n[2020-12-15T05:03:04.813Z[GMT] - 2020-12-15T05:03:04.813Z[GMT]]\nspan:interval\ndetails:\n params: ActivityDef:(4)/{keycount\u003d5B, hosts\u003dnode1, main-cycles\u003d1B, threads\u003d1, workload\u003d./keyvalue.yaml, cycles\u003d2, stride\u003d2, tags\u003dphase:schema, password\u003dcassandra, rf\u003d3, pooling\u003d16:16:500, driver\u003dcql, rampup-cycles\u003d5B, alias\u003dkeyvalue_default_schema, valuecount\u003d5B, errors\u003dcount, username\u003dcassandra}\nlabels:\n layer: Activity\n alias: keyvalue_default_schema\n driver: cql\n workload: ./keyvalue.yaml\n session: scenario_20201215_050302_981\n span: interval\n appname: nosqlbench\n",
      "text": "session: scenario_20201215_050302_981\n[2020-12-15T05:03:04.813Z[GMT] - 2020-12-15T05:03:04.813Z[GMT]]\nspan:interval\ndetails:\n params: ActivityDef:(4)/{keycount\u003d5B, hosts\u003dnode1, main-cycles\u003d1B, threads\u003d1, workload\u003d./keyvalue.yaml, cycles\u003d2, stride\u003d2, tags\u003dblock:"schema.*", password\u003dcassandra, rf\u003d3, pooling\u003d16:16:500, driver\u003dcql, rampup-cycles\u003d5B, alias\u003dkeyvalue_default_schema, valuecount\u003d5B, errors\u003dcount, username\u003dcassandra}\nlabels:\n layer: Activity\n alias: keyvalue_default_schema\n driver: cql\n workload: ./keyvalue.yaml\n session: scenario_20201215_050302_981\n span: interval\n appname: nosqlbench\n",
      "time": 1608008584813,
      "timeEnd": 1608008588900,
      "updated": 1608008588918,
@ -81,7 +81,7 @@
        "span:interval",
        "appname:nosqlbench"
      ],
      "text": "session: scenario_20201215_050355_270\n[2020-12-15T05:03:57.142Z[GMT] - 2020-12-15T05:03:57.142Z[GMT]]\nspan:interval\ndetails:\n params: ActivityDef:(4)/{keycount\u003d5000000000, hosts\u003dnode1, main-cycles\u003d5000000000, threads\u003d1, workload\u003d./keyvalue.yaml, cycles\u003d2, stride\u003d2, tags\u003dphase:schema, password\u003dcassandra, rf\u003d3, pooling\u003d16:16:500, driver\u003dcql, rampup-cycles\u003d5000000000, alias\u003dkeyvalue_default_schema, valuecount\u003d5000000000, errors\u003dcount, username\u003dcassandra}\nlabels:\n layer: Activity\n alias: keyvalue_default_schema\n driver: cql\n workload: ./keyvalue.yaml\n session: scenario_20201215_050355_270\n span: interval\n appname: nosqlbench\n",
      "text": "session: scenario_20201215_050355_270\n[2020-12-15T05:03:57.142Z[GMT] - 2020-12-15T05:03:57.142Z[GMT]]\nspan:interval\ndetails:\n params: ActivityDef:(4)/{keycount\u003d5000000000, hosts\u003dnode1, main-cycles\u003d5000000000, threads\u003d1, workload\u003d./keyvalue.yaml, cycles\u003d2, stride\u003d2, tags\u003dblock:"schema.*", password\u003dcassandra, rf\u003d3, pooling\u003d16:16:500, driver\u003dcql, rampup-cycles\u003d5000000000, alias\u003dkeyvalue_default_schema, valuecount\u003d5000000000, errors\u003dcount, username\u003dcassandra}\nlabels:\n layer: Activity\n alias: keyvalue_default_schema\n driver: cql\n workload: ./keyvalue.yaml\n session: scenario_20201215_050355_270\n span: interval\n appname: nosqlbench\n",
      "time": 1608008637142,
      "timeEnd": 1608008641044,
      "updated": 1608008641063,
@ -109,7 +109,7 @@
        "span:interval",
        "appname:nosqlbench"
      ],
      "text": "session: scenario_20201215_050435_240\n[2020-12-15T05:04:37.232Z[GMT] - 2020-12-15T05:04:37.232Z[GMT]]\nspan:interval\ndetails:\n params: ActivityDef:(4)/{keycount\u003d5000000000L, hosts\u003dnode1, main-cycles\u003d500, threads\u003d1, workload\u003d./keyvalue.yaml, cycles\u003d2, stride\u003d2, tags\u003dphase:schema, password\u003dcassandra, rf\u003d3, pooling\u003d16:16:500, driver\u003dcql, rampup-cycles\u003d5000000000, alias\u003dkeyvalue_default_schema, valuecount\u003d5000000000L, errors\u003dcount, username\u003dcassandra}\nlabels:\n layer: Activity\n alias: keyvalue_default_schema\n driver: cql\n workload: ./keyvalue.yaml\n session: scenario_20201215_050435_240\n span: interval\n appname: nosqlbench\n",
      "text": "session: scenario_20201215_050435_240\n[2020-12-15T05:04:37.232Z[GMT] - 2020-12-15T05:04:37.232Z[GMT]]\nspan:interval\ndetails:\n params: ActivityDef:(4)/{keycount\u003d5000000000L, hosts\u003dnode1, main-cycles\u003d500, threads\u003d1, workload\u003d./keyvalue.yaml, cycles\u003d2, stride\u003d2, tags\u003dblock:"schema.*", password\u003dcassandra, rf\u003d3, pooling\u003d16:16:500, driver\u003dcql, rampup-cycles\u003d5000000000, alias\u003dkeyvalue_default_schema, valuecount\u003d5000000000L, errors\u003dcount, username\u003dcassandra}\nlabels:\n layer: Activity\n alias: keyvalue_default_schema\n driver: cql\n workload: ./keyvalue.yaml\n session: scenario_20201215_050435_240\n span: interval\n appname: nosqlbench\n",
      "time": 1608008677232,
      "timeEnd": 1608008681038,
      "updated": 1608008681058,
@ -137,7 +137,7 @@
        "span:interval",
        "appname:nosqlbench"
      ],
      "text": "session: scenario_20201215_050435_240\n[2020-12-15T05:04:41.120Z[GMT] - 2020-12-15T05:04:41.120Z[GMT]]\nspan:interval\ndetails:\n params: ActivityDef:(3)/{keycount\u003d5000000000L, hosts\u003dnode1, main-cycles\u003d500, threads\u003d960, workload\u003d./keyvalue.yaml, cycles\u003d5000000000, stride\u003d1, tags\u003dphase:rampup, password\u003dcassandra, rf\u003d3, pooling\u003d16:16:500, driver\u003dcql, rampup-cycles\u003d5000000000, alias\u003dkeyvalue_default_rampup, valuecount\u003d5000000000L, errors\u003dcount, username\u003dcassandra}\nlabels:\n layer: Activity\n alias: keyvalue_default_rampup\n driver: cql\n workload: ./keyvalue.yaml\n session: scenario_20201215_050435_240\n span: interval\n appname: nosqlbench\n",
      "text": "session: scenario_20201215_050435_240\n[2020-12-15T05:04:41.120Z[GMT] - 2020-12-15T05:04:41.120Z[GMT]]\nspan:interval\ndetails:\n params: ActivityDef:(3)/{keycount\u003d5000000000L, hosts\u003dnode1, main-cycles\u003d500, threads\u003d960, workload\u003d./keyvalue.yaml, cycles\u003d5000000000, stride\u003d1, tags\u003dblock:rampup, password\u003dcassandra, rf\u003d3, pooling\u003d16:16:500, driver\u003dcql, rampup-cycles\u003d5000000000, alias\u003dkeyvalue_default_rampup, valuecount\u003d5000000000L, errors\u003dcount, username\u003dcassandra}\nlabels:\n layer: Activity\n alias: keyvalue_default_rampup\n driver: cql\n workload: ./keyvalue.yaml\n session: scenario_20201215_050435_240\n span: interval\n appname: nosqlbench\n",
      "time": 1608008681120,
      "timeEnd": 1608042107780,
      "updated": 1608042107859,
@ -165,7 +165,7 @@
        "span:interval",
        "appname:nosqlbench"
      ],
      "text": "session: scenario_20201215_050435_240\n[2020-12-15T14:21:47.918Z[GMT] - 2020-12-15T14:21:47.918Z[GMT]]\nspan:interval\ndetails:\n params: ActivityDef:(3)/{keycount\u003d5000000000L, hosts\u003dnode1, main-cycles\u003d500, threads\u003d500, workload\u003d./keyvalue.yaml, cycles\u003d500, stride\u003d10, tags\u003dphase:main, password\u003dcassandra, rf\u003d3, pooling\u003d16:16:500, driver\u003dcql, rampup-cycles\u003d5000000000, alias\u003dkeyvalue_default_main, valuecount\u003d5000000000L, errors\u003dcount, username\u003dcassandra}\nlabels:\n layer: Activity\n alias: keyvalue_default_main\n driver: cql\n workload: ./keyvalue.yaml\n session: scenario_20201215_050435_240\n span: interval\n appname: nosqlbench\n",
      "text": "session: scenario_20201215_050435_240\n[2020-12-15T14:21:47.918Z[GMT] - 2020-12-15T14:21:47.918Z[GMT]]\nspan:interval\ndetails:\n params: ActivityDef:(3)/{keycount\u003d5000000000L, hosts\u003dnode1, main-cycles\u003d500, threads\u003d500, workload\u003d./keyvalue.yaml, cycles\u003d500, stride\u003d10, tags\u003dblock:main, password\u003dcassandra, rf\u003d3, pooling\u003d16:16:500, driver\u003dcql, rampup-cycles\u003d5000000000, alias\u003dkeyvalue_default_main, valuecount\u003d5000000000L, errors\u003dcount, username\u003dcassandra}\nlabels:\n layer: Activity\n alias: keyvalue_default_main\n driver: cql\n workload: ./keyvalue.yaml\n session: scenario_20201215_050435_240\n span: interval\n appname: nosqlbench\n",
      "time": 1608042107918,
      "timeEnd": 1608042108099,
      "updated": 1608042108117,
@ -193,7 +193,7 @@
        "span:interval",
        "appname:nosqlbench"
      ],
      "text": "session: scenario_20201215_050435_240\n[2020-12-15T14:21:47.918Z[GMT] - 2020-12-15T14:21:47.918Z[GMT]]\nspan:interval\ndetails:\n params: ActivityDef:(3)/{keycount\u003d5000000000L, hosts\u003dnode1, main-cycles\u003d500, threads\u003d500, workload\u003d./keyvalue.yaml, cycles\u003d500, stride\u003d10, tags\u003dphase:main, password\u003dcassandra, rf\u003d3, pooling\u003d16:16:500, driver\u003dcql, rampup-cycles\u003d5000000000, alias\u003dkeyvalue_default_main, valuecount\u003d5000000000L, errors\u003dcount, username\u003dcassandra}\nlabels:\n layer: Activity\n alias: keyvalue_default_main\n driver: cql\n workload: ./keyvalue.yaml\n session: scenario_20201215_050435_240\n span: interval\n appname: nosqlbench\n",
      "text": "session: scenario_20201215_050435_240\n[2020-12-15T14:21:47.918Z[GMT] - 2020-12-15T14:21:47.918Z[GMT]]\nspan:interval\ndetails:\n params: ActivityDef:(3)/{keycount\u003d5000000000L, hosts\u003dnode1, main-cycles\u003d500, threads\u003d500, workload\u003d./keyvalue.yaml, cycles\u003d500, stride\u003d10, tags\u003dblock:main, password\u003dcassandra, rf\u003d3, pooling\u003d16:16:500, driver\u003dcql, rampup-cycles\u003d5000000000, alias\u003dkeyvalue_default_main, valuecount\u003d5000000000L, errors\u003dcount, username\u003dcassandra}\nlabels:\n layer: Activity\n alias: keyvalue_default_main\n driver: cql\n workload: ./keyvalue.yaml\n session: scenario_20201215_050435_240\n span: interval\n appname: nosqlbench\n",
      "time": 1608042107918,
      "timeEnd": 1608042108127,
      "updated": 1608042108144,
@ -221,7 +221,7 @@
        "span:interval",
        "appname:nosqlbench"
      ],
      "text": "session: scenario_20201215_050435_240\n[2020-12-15T05:04:41.120Z[GMT] - 2020-12-15T05:04:41.120Z[GMT]]\nspan:interval\ndetails:\n params: ActivityDef:(3)/{keycount\u003d5000000000L, hosts\u003dnode1, main-cycles\u003d500, threads\u003d960, workload\u003d./keyvalue.yaml, cycles\u003d5000000000, stride\u003d1, tags\u003dphase:rampup, password\u003dcassandra, rf\u003d3, pooling\u003d16:16:500, driver\u003dcql, rampup-cycles\u003d5000000000, alias\u003dkeyvalue_default_rampup, valuecount\u003d5000000000L, errors\u003dcount, username\u003dcassandra}\nlabels:\n layer: Activity\n alias: keyvalue_default_rampup\n driver: cql\n workload: ./keyvalue.yaml\n session: scenario_20201215_050435_240\n span: interval\n appname: nosqlbench\n",
      "text": "session: scenario_20201215_050435_240\n[2020-12-15T05:04:41.120Z[GMT] - 2020-12-15T05:04:41.120Z[GMT]]\nspan:interval\ndetails:\n params: ActivityDef:(3)/{keycount\u003d5000000000L, hosts\u003dnode1, main-cycles\u003d500, threads\u003d960, workload\u003d./keyvalue.yaml, cycles\u003d5000000000, stride\u003d1, tags\u003dblock:rampup, password\u003dcassandra, rf\u003d3, pooling\u003d16:16:500, driver\u003dcql, rampup-cycles\u003d5000000000, alias\u003dkeyvalue_default_rampup, valuecount\u003d5000000000L, errors\u003dcount, username\u003dcassandra}\nlabels:\n layer: Activity\n alias: keyvalue_default_rampup\n driver: cql\n workload: ./keyvalue.yaml\n session: scenario_20201215_050435_240\n span: interval\n appname: nosqlbench\n",
      "time": 1608008681120,
      "timeEnd": 1608042108127,
      "updated": 1608042108167,
@ -249,7 +249,7 @@
        "span:interval",
        "appname:nosqlbench"
      ],
      "text": "session: scenario_20201215_050435_240\n[2020-12-15T05:04:37.232Z[GMT] - 2020-12-15T05:04:37.232Z[GMT]]\nspan:interval\ndetails:\n params: ActivityDef:(4)/{keycount\u003d5000000000L, hosts\u003dnode1, main-cycles\u003d500, threads\u003d1, workload\u003d./keyvalue.yaml, cycles\u003d2, stride\u003d2, tags\u003dphase:schema, password\u003dcassandra, rf\u003d3, pooling\u003d16:16:500, driver\u003dcql, rampup-cycles\u003d5000000000, alias\u003dkeyvalue_default_schema, valuecount\u003d5000000000L, errors\u003dcount, username\u003dcassandra}\nlabels:\n layer: Activity\n alias: keyvalue_default_schema\n driver: cql\n workload: ./keyvalue.yaml\n session: scenario_20201215_050435_240\n span: interval\n appname: nosqlbench\n",
      "text": "session: scenario_20201215_050435_240\n[2020-12-15T05:04:37.232Z[GMT] - 2020-12-15T05:04:37.232Z[GMT]]\nspan:interval\ndetails:\n params: ActivityDef:(4)/{keycount\u003d5000000000L, hosts\u003dnode1, main-cycles\u003d500, threads\u003d1, workload\u003d./keyvalue.yaml, cycles\u003d2, stride\u003d2, tags\u003dblock:"schema.*", password\u003dcassandra, rf\u003d3, pooling\u003d16:16:500, driver\u003dcql, rampup-cycles\u003d5000000000, alias\u003dkeyvalue_default_schema, valuecount\u003d5000000000L, errors\u003dcount, username\u003dcassandra}\nlabels:\n layer: Activity\n alias: keyvalue_default_schema\n driver: cql\n workload: ./keyvalue.yaml\n session: scenario_20201215_050435_240\n span: interval\n appname: nosqlbench\n",
      "time": 1608008677232,
      "timeEnd": 1608042108127,
      "updated": 1608042108190,
@ -33,7 +33,7 @@ You can mark statements as schema phase statements by adding this set of
tags to the statements, either directly, or by block:

    tags:
      phase: schema
      block: schema

## Rampup phase

@ -64,7 +64,7 @@ You can mark statements as rampup phase statements by adding this set of
tags to the statements, either directly, or by block:

    tags:
      phase: rampup
      block: rampup

## Main phase

@ -76,4 +76,4 @@ You can mark statement as schema phase statements by adding this set of
tags to the statements, either directly, or by block:

    tags:
      phase: main
      block: main
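Concretely, such a tag lands under a block in the workload YAML, where a filter like `tags=block:rampup` can select it. An illustrative sketch, reusing the op layout from the puppies example above:

```text
blocks:
  rampup:
    tags:
      block: rampup
    ops:
      insert: |
        insert into puppies (test) values (1);
```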
@ -15,7 +15,7 @@ command line, go ahead and execute the following command, replacing
the `host=<host-or-ip>` with that of one of your database nodes.

```text
./nb run driver=cql workload=cql-keyvalue tags=phase:schema host=<host-or-ip>
./nb run driver=cql workload=cql-keyvalue tags=block:'schema.*' host=<host-or-ip>
```

This command is creating the following schema in your database:
@ -45,8 +45,8 @@ defines the activity.
In this example, we use `cql-keyvalue` which is a pre-built workload that
is packaged with nosqlbench.

`tags=phase:schema` tells nosqlbench to run the yaml block that has
the `phase:schema` defined as one of its tags.
`tags=block:"schema.*"` tells nosqlbench to run the yaml block that has
the `block:"schema.*"` defined as one of its tags.

In this example, that is the DDL portion of the `cql-keyvalue`
workload. `host=...` tells nosqlbench how to connect to your database,
@ -68,7 +68,7 @@ statements.

Go ahead and execute the following command:

    ./nb run driver=stdout workload=cql-keyvalue tags=phase:rampup cycles=10
    ./nb run driver=stdout workload=cql-keyvalue tags=block:rampup cycles=10

You should see 10 of the following statements in your console
@ -91,12 +91,12 @@ be the same from run to run.
Now we are ready to write some data to our database. Go ahead and execute
the following from your command line:

    ./nb run driver=cql workload=cql-keyvalue tags=phase:rampup host=<host-or-ip> cycles=100k --progress console:1s
    ./nb run driver=cql workload=cql-keyvalue tags=block:rampup host=<host-or-ip> cycles=100k --progress console:1s

Note the differences between this and the command that we used to generate
the schema.

`tags=phase:rampup` is running the yaml block in `cql-keyvalue` that has
`tags=block:rampup` is running the yaml block in `cql-keyvalue` that has
only INSERT statements.

`cycles=100k` will run a total of 100,000 operations, in this case,
@ -139,7 +139,7 @@ Now that we have a base dataset of 100k rows in the database, we will now
run a mixed read / write workload; by default this runs a 50% read / 50%
write workload.

    ./nb run driver=cql workload=cql-keyvalue tags=phase:main host=<host-or-ip> cycles=100k cyclerate=5000 threads=50 --progress console:1s
    ./nb run driver=cql workload=cql-keyvalue tags=block:main host=<host-or-ip> cycles=100k cyclerate=5000 threads=50 --progress console:1s

You should see output that looks like this:
@ -174,7 +174,7 @@ cql-keyvalue: 100.00%/Finished (details: min=0 cycle=100000 max=100000)

We have a few new command line options here:

`tags=phase:main` is using a new block in our activity's yaml that
`tags=block:main` is using a new block in our activity's yaml that
contains both read and write queries.

`threads=50` is an important one. The default for nosqlbench is to run
@ -103,8 +103,8 @@ semicolon, then a newline is also added immediately after.

~~~text
./nb \
start driver=stdout alias=a cycles=100K workload=cql-iot tags=phase:main\
start driver=stdout alias=b cycles=200K workload=cql-iot tags=phase:main\
start driver=stdout alias=a cycles=100K workload=cql-iot tags=block:main\
start driver=stdout alias=b cycles=200K workload=cql-iot tags=block:main\
waitmillis 10000 \
await one \
stop two
@ -46,9 +46,9 @@ built-ins.

Each built-in contains the following tags that can be used to break the workload up into uniform phases:

- schema - selected with `tags=phase:schema`
- rampup - selected with `tags=phase:rampup`
- main - selected with `tags=phase:main`
- schema - selected with `tags=block:"schema.*"`
- rampup - selected with `tags=block:rampup`
- main - selected with `tags=block:main`

### Parameters
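Putting those three filters together, the phases of a built-in can be driven one at a time from the command line, in the same shape as the tutorial commands earlier in this changeset (host and workload values are illustrative):

```text
./nb run driver=cql workload=cql-iot tags=block:"schema.*" host=<host-or-ip>
./nb run driver=cql workload=cql-iot tags=block:rampup host=<host-or-ip>
./nb run driver=cql workload=cql-iot tags=block:main host=<host-or-ip>
```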
@ -198,7 +198,7 @@
<dependency>
  <groupId>io.netty</groupId>
  <artifactId>netty-handler</artifactId>
  <version>4.1.86.Final</version>
  <version>4.1.87.Final</version>
</dependency>

<dependency>
@ -76,6 +76,7 @@ public class NBIO implements NBPathsAPI.Facets {
    return Arrays.asList(split);
}


public static CSVParser readFileCSV(String filename, String... searchPaths) {
    return NBIO.readFileDelimCSV(filename, ',', searchPaths);
}
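The new `readFileCSV` overload simply fixes the delimiter to a comma and delegates to `readFileDelimCSV`. A usage sketch (the file name and search path are hypothetical; `CSVParser`/`CSVRecord` are the Apache Commons CSV types):

```java
import io.nosqlbench.api.content.NBIO;
import org.apache.commons.csv.CSVParser;
import org.apache.commons.csv.CSVRecord;

public class ReadFileCsvDemo {
    public static void main(String[] args) throws Exception {
        // Resolve data.csv against an "activities" search path and iterate
        // its records via the comma-delimited convenience form.
        CSVParser parser = NBIO.readFileCSV("data.csv", "activities");
        for (CSVRecord record : parser) {
            System.out.println(record.get(0));
        }
    }
}
```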
@ -1,4 +1,4 @@
min_version: "4.17.15"
min_version: "5.17.1"

# eb sequences concat
# yields A B B C C C D D D D A B B C C C D D D D
@ -148,7 +148,7 @@ var yaml_file = "TEMPLATE(yaml_file,cql-iot)";
//
// schema_activitydef.alias = "findmax_schema";
// schema_activitydef.threads = "1";
// schema_activitydef.tags = "TEMPLATE(schematags,phase:schema)";
// schema_activitydef.tags = "TEMPLATE(schematags,block:'schema.*')";
// printf("Creating schema with schematags: %s\n",schema_activitydef.tags.toString());
//
// scenario.run(schema_activitydef);
@ -164,7 +164,7 @@ activitydef = params.withDefaults({
activitydef.alias = "findmax";
activitydef.cycles = "1000000000";
activitydef.recycles = "1000000000";
activitydef.tags = "TEMPLATE(maintags,phase:main)";
activitydef.tags = "TEMPLATE(maintags,block:main)";

function ops_s(iteration, results) {
    return results[iteration].ops_per_second;
@ -25,7 +25,7 @@ function as_js(ref) {
}
if (ref instanceof java.util.Map) {
    let newobj = {};
    for each(key in ref.keySet()) {
    for (let key in ref.keySet()) {
        newobj[key] = Java.asJSONCompatible(ref.get(key));
    }
    return newobj;
@ -112,7 +112,7 @@ schema_activitydef = params.withDefaults({
});
schema_activitydef.alias = "optimo_schema";
schema_activitydef.threads = "1";
schema_activitydef.tags = "TEMPLATE(schematags,phase:schema)";
schema_activitydef.tags = "TEMPLATE(schematags,block:'schema.*')";
schema_activitydef.speculative = "none"
print("Creating schema with schematags:" + schema_activitydef.tags);
@ -129,7 +129,7 @@ activitydef = params.withDefaults({
activitydef.alias = "optimo";
activitydef.cycles = "1000000000";
activitydef.recycles = "1000000000";
activitydef.tags = "TEMPLATE(maintags,phase:main)";
activitydef.tags = "TEMPLATE(maintags,block:main)";
activitydef.speculative = "none"

print("Iterating main workload with tags:" + activitydef.tags);
@ -60,7 +60,7 @@ print("starting activity for stepup analysis");
var activitydef = params.withDefaults({
    'alias': 'stepup',
    'driver': driver,
    'tags':'any(block:main.*,phase:main)',
    'tags':'any(block:main.*,block:main)',
    'workload' : 'TEMPLATE(workload)',
    'cycles': '1t',
    'stride': '1000',
@ -0,0 +1,64 @@
/*
 * Copyright (c) 2023 nosqlbench
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.nosqlbench.virtdata.library.basics.shared.unary_string;

import io.nosqlbench.api.content.NBIO;
import io.nosqlbench.api.errors.BasicError;
import io.nosqlbench.virtdata.api.annotations.Categories;
import io.nosqlbench.virtdata.api.annotations.Category;
import io.nosqlbench.virtdata.api.annotations.Example;
import io.nosqlbench.virtdata.api.annotations.ThreadSafeMapper;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

import java.util.List;
import java.util.function.Function;

/**
 * Provides a single line of text from a target file provided.
 */
@ThreadSafeMapper
@Categories({Category.general})
public class TextOfFile implements Function<Object, String> {
    private static final Logger logger = LogManager.getLogger(TextOfFile.class);
    private final String text;

    public String toString() {
        return getClass().getSimpleName();
    }

    @Example({"TextOfFile()", "Provides the first line of text in the specified file."})
    public TextOfFile(String targetFile) {

        try {
            final List<String> lines = NBIO.readLines(targetFile);
            logger.info("TextOfFile() reading: {}", targetFile);
            if (lines.isEmpty()) {
                throw new BasicError(String.format("Unable to locate content for %s", this));
            }
            text = lines.get(0);
        } catch (Exception ex) {
            throw new BasicError(String.format("Unable to locate file %s: ", targetFile), ex);
        }
    }

    @Override
    public String apply(Object obj) {
        return text;
    }

}
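Since `TextOfFile` is a plain `Function<Object, String>`, it can be exercised directly; a minimal sketch mirroring the unit test that follows (the class and the sample resource both come from this commit, and would need to be on the classpath):

```java
public class TextOfFileDemo {
    public static void main(String[] args) {
        // The input value is ignored; the function always yields the file's
        // first line, here "test-data-entry" from the sample resource.
        TextOfFile firstLine = new TextOfFile("text-provider-sample.txt");
        System.out.println(firstLine.apply(42L));
    }
}
```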
@ -0,0 +1,45 @@
/*
 * Copyright (c) 2023 nosqlbench
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.nosqlbench.virtdata.library.basics.shared.unary_string;

import org.junit.jupiter.api.Test;

import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatException;

class TextOfFileTest {

    private static final String EXPECTED_CONTENTS = "test-data-entry";
    private static final String NOT_EXPECTED_CONTENTS = "foozy-content";
    private static final String VALID_PATH = "text-provider-sample.txt";
    private static final String INVALID_PATH = "not-good.txt";
    private static final String PLACEHOLDER_APPLY_INPUT = "placeholder-input";


    @Test
    void testValidPathAndContents() {
        final TextOfFile TextOfFile = new TextOfFile(VALID_PATH);
        assertThat(TextOfFile.apply(PLACEHOLDER_APPLY_INPUT)).isEqualTo(EXPECTED_CONTENTS);
    }

    @Test
    void testInvalidPathAndContents() {
        final TextOfFile textOfFileValid = new TextOfFile(VALID_PATH);
        assertThatException().isThrownBy(() -> new TextOfFile(INVALID_PATH));
        assertThat(textOfFileValid.apply(PLACEHOLDER_APPLY_INPUT)).isNotEqualTo(NOT_EXPECTED_CONTENTS);
    }

}
@ -0,0 +1 @@
test-data-entry