Mirror of https://github.com/nosqlbench/nosqlbench.git (synced 2025-02-25 18:55:28 -06:00)

Commit 68f5fefd3d — renaming workloads part2
Parent: 222fbbbafd
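The hunks below all apply the same mechanical change: block and op names inside the workload YAML files move from hyphenated to snake_case spelling (create-keyspace becomes create_keyspace, main-read becomes main_read, and so on). A minimal sketch of the pattern, using illustrative names drawn from the diff rather than any one complete file:

    # before: hyphenated block and op names
    blocks:
      main-read:
        ops:
          select-read: |
            select * from baselines.keyvalue where key={rw_key};

    # after: snake_case block and op names
    blocks:
      main_read:
        ops:
          select_read: |
            select * from baselines.keyvalue where key={rw_key};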
@@ -5,7 +5,7 @@ bindings:
 params:

 blocks:
-  msg-recv-block:
+  msg_recv_block:
     ops:
       op1:
         AmqpMsgReceiver: ""
@@ -25,7 +25,7 @@ params:


 blocks:
-  msg-send-block:
+  msg_send_block:
     ops:
       op1:
         AmqpMsgSender: ""
@@ -35,11 +35,11 @@ blocks:
     params:
       prepared: false
     ops:
-      create-keyspace: |
+      create_keyspace: |
        create keyspace if not exists <<keyspace:baselines>>
        WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '<<rf:1>>'}
        AND durable_writes = true;
-      create-table: |
+      create_table: |
        create table if not exists <<keyspace:baselines>>.<<table:iot>> (
        machine_id UUID,     // source machine
        sensor_name text,    // sensor name
@@ -59,13 +59,13 @@ blocks:
     params:
       prepared: false
     ops:
-      truncate-table: |
+      truncate_table: |
        truncate table <<keyspace:baselines>>.<<table:iot>>;
-  schema-astra:
+  schema_astra:
     params:
       prepared: false
     ops:
-      create-table-astra: |
+      create_table_astra: |
        create table if not exists <<keyspace:baselines>>.<<table:iot>> (
        machine_id UUID,     // source machine
        sensor_name text,    // sensor name
@@ -81,7 +81,7 @@ blocks:
       instrument: TEMPLATE(instrument-writes,TEMPLATE(instrument,false))
       idempotent: true
     ops:
-      insert-rampup: |
+      insert_rampup: |
        insert into <<keyspace:baselines>>.<<table:iot>>
        (machine_id, sensor_name, time, sensor_value, station_id, data)
        values ({machine_id}, {sensor_name}, {time}, {sensor_value}, {station_id}, {data})
@@ -93,27 +93,27 @@ blocks:
       verify-fields: "*, -cell_timestamp"
       instrument: TEMPLATE(instrument-reads,TEMPLATE(instrument,false))
     ops:
-      select-verify: |
+      select_verify: |
        select * from <<keyspace:baselines>>.<<table:iot>>
        where machine_id={machine_id} and sensor_name={sensor_name} and time={time};
-  main-read:
+  main_read:
     params:
       ratio: <<read_ratio:1>>
       cl: <<read_cl:LOCAL_QUORUM>>
       instrument: TEMPLATE(instrument-reads,TEMPLATE(instrument,false))
     ops:
-      select-read: |
+      select_read: |
        select * from <<keyspace:baselines>>.<<table:iot>>
        where machine_id={machine_id} and sensor_name={sensor_name}
        limit <<limit:10>>
-  main-write:
+  main_write:
     params:
       ratio: <<write_ratio:9>>
       cl: <<write_cl:LOCAL_QUORUM>>
       idempotent: true
       instrument: TEMPLATE(instrument-writes,TEMPLATE(instrument,false))
     ops:
-      insert-main: |
+      insert_main: |
        insert into <<keyspace:baselines>>.<<table:iot>>
        (machine_id, sensor_name, time, sensor_value, station_id, data)
        values ({machine_id}, {sensor_name}, {time}, {sensor_value}, {station_id}, {data})
@@ -24,12 +24,12 @@ blocks:
     params:
       prepared: false
     ops:
-      create-keyspace: |
+      create_keyspace: |
        create keyspace if not exists <<keyspace:baselines>>
        WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '<<rf:1>>'}
        AND durable_writes = true;

-      create-table: |
+      create_table: |
        create table if not exists <<keyspace:baselines>>.<<table:iot>> (
        machine_id UUID,     // source machine
        sensor_name text,    // sensor name
@@ -51,14 +51,14 @@ blocks:
     params:
       prepared: false
     ops:
-      truncate-table: |
+      truncate_table: |
        truncate table TEMPLATE(keyspace, baselines).TEMPLATE(table:iot);
   rampup:
     params:
       cl: <<write_cl:LOCAL_QUORUM>>
       idempotent: true
     ops:
-      rampup-insert: |
+      rampup_insert: |
        insert into <<keyspace:baselines>>.<<table:iot>> (machine_id, sensor_name, time, sensor_value, station_id, data)
        values ({machine_id}, {sensor_name}, {time}, {sensor_value}, {station_id}, {data})
        using timestamp {cell_timestamp};
@@ -69,26 +69,26 @@ blocks:
       type: read
       verify-fields: "*, -cell_timestamp"
     ops:
-      select-verify: |
+      select_verify: |
        select * from <<keyspace:baselines>>.<<table:iot>> where machine_id={machine_id}
        and sensor_name={sensor_name} and time={time};

-  main-read:
+  main_read:
     params:
       ratio: <<read_ratio:1>>
       cl: <<read_cl:LOCAL_QUORUM>>
     ops:
-      select-read: |
+      select_read: |
        select * from <<keyspace:baselines>>.<<table:iot>>
        where machine_id={machine_id} and sensor_name={sensor_name}
        limit <<limit:10>>;
-  main-write:
+  main_write:
     params:
       ratio: <<write_ratio:9>>
       cl: <<write_cl:LOCAL_QUORUM>>
       idempotent: true
     ops:
-      insert-main: |
+      insert_main: |
        insert into <<keyspace:baselines>>.<<table:iot>>
        (machine_id, sensor_name, time, sensor_value, station_id, data)
        values ({machine_id}, {sensor_name}, {time}, {sensor_value}, {station_id}, {data})
@@ -31,21 +31,21 @@ bindings:
 blocks:
   schema:
     ops:
-      create-keyspace:
+      create_keyspace:
         raw: |
          create keyspace if not exists TEMPLATE(keyspace,baselines)
          WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 'TEMPLATE(rf,1)'}
          AND durable_writes = true;
-      create-table:
+      create_table:
        raw: |
          create table if not exists TEMPLATE(keyspace,baselines).TEMPLATE(table,keyvalue) (
          key text,
          value text,
          PRIMARY KEY (key)
          );
-  schema-astra:
+  schema_astra:
     ops:
-      create-table:
+      create_table:
        raw: |
          create table if not exists TEMPLATE(keyspace,baselines).TEMPLATE(table,keyvalue) (
          key text,
@@ -56,7 +56,7 @@ blocks:
     params:
       cl: TEMPLATE(write_cl,LOCAL_QUORUM)
     ops:
-      rampup-insert:
+      rampup_insert:
        prepared: |
          insert into TEMPLATE(keyspace,baselines).TEMPLATE(table,keyvalue)
          (key, value)
@@ -65,24 +65,24 @@ blocks:
 #    params:
 #      cl: <<read_cl:LOCAL_QUORUM>>
 #    ops:
-#      verify-select:
+#      verify_select:
 #        prepared: |
 #          select * from <<keyspace:baselines>>.<<table:keyvalue>> where key={seq_key};
 #        verify-fields: key->seq_key, value->seq_value
-  main-read:
+  main_read:
     params:
       ratio: 5
       cl: TEMPLATE(read_cl,LOCAL_QUORUM)
     ops:
-      main-select:
+      main_select:
        prepared: |
          select * from TEMPLATE(keyspace,baselines).TEMPLATE(table,keyvalue) where key={rw_key};
-  main-write:
+  main_write:
     params:
       ratio: 5
       cl: TEMPLATE(write_cl,LOCAL_QUORUM)
     ops:
-      main-insert:
+      main_insert:
        prepared: |
          insert into TEMPLATE(keyspace,baselines).TEMPLATE(table,keyvalue)
          (key, value) values ({rw_key}, {rw_value});
@@ -35,11 +35,11 @@ blocks:
     params:
       prepared: false
     ops:
-      create-keyspace: |
+      create_keyspace: |
        create keyspace if not exists <<keyspace:starter>>
        WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '<<rf:1>>'}
        AND durable_writes = true;
-      create-table: |
+      create_table: |
        create table if not exists <<keyspace:starter>>.<<table:cqlstarter>> (
        machine_id UUID,
        message text,
@@ -47,11 +47,11 @@ blocks:
        PRIMARY KEY ((machine_id), time)
        ) WITH CLUSTERING ORDER BY (time DESC);

-  schema-astra:
+  schema_astra:
     params:
       prepared: false
     ops:
-      create-table-astra: |
+      create_table_astra: |
        create table if not exists <<keyspace:starter>>.<<table:cqlstarter>> (
        machine_id UUID,
        message text,
@@ -64,29 +64,29 @@ blocks:
       cl: <<write_cl:LOCAL_QUORUM>>
       idempotent: true
     ops:
-      insert-rampup: |
+      insert_rampup: |
        insert into <<keyspace:starter>>.<<table:cqlstarter>> (machine_id, message, time)
        values ({machine_id}, {rampup_message}, {time}) using timestamp {ts};

   rampdown:
     ops:
-      truncate-table: |
+      truncate_table: |
        truncate table <<keyspace:starter>>.<<table:cqlstarter>>;

-  main-read:
+  main_read:
     params:
       ratio: <<read_ratio:1>>
       cl: <<read_cl:LOCAL_QUORUM>>
     ops:
-      select-read: |
+      select_read: |
        select * from <<keyspace:starter>>.<<table:cqlstarter>>
        where machine_id={machine_id};
-  main-write:
+  main_write:
     params:
       ratio: <<write_ratio:9>>
       cl: <<write_cl:LOCAL_QUORUM>>
       idempotent: true
     ops:
-      insert-main: |
+      insert_main: |
        insert into <<keyspace:starter>>.<<table:cqlstarter>>
        (machine_id, message, time) values ({machine_id}, {message}, {time}) using timestamp {ts};
@@ -34,22 +34,22 @@ blocks:
     params:
       prepared: false
     ops:
-      create-keyspace: |
+      create_keyspace: |
        create keyspace if not exists <<keyspace:baselines>>
        WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '<<rf:1>>'}
        AND durable_writes = true;
-      create-table: |
+      create_table: |
        create table if not exists <<keyspace:baselines>>.<<table:tabular>> (
        part text,
        clust text,
        data text,
        PRIMARY KEY (part,clust)
        );
-  schema-astra:
+  schema_astra:
     params:
       prepared: false
     ops:
-      create-table: |
+      create_table: |
        create table if not exists <<keyspace:baselines>>.<<table:tabular>> (
        part text,
        clust text,
@@ -60,7 +60,7 @@ blocks:
     params:
       cl: <<write_cl:LOCAL_QUORUM>>
     ops:
-      rampup-insert: |
+      rampup_insert: |
        insert into <<keyspace:baselines>>.<<table:tabular>>
        (part,clust,data)
        values ({part_layout},{clust_layout},{data})
@@ -68,22 +68,22 @@ blocks:
     params:
       cl: <<read_cl:LOCAL_QUORUM>>
     ops:
-      verify-select: |
+      verify_select: |
        select * from <<keyspace:baselines>>.<<table:tabular>> where part={part_layout} and clust={clust_layout};

-  main-read:
+  main_read:
     params:
       ratio: 5
       cl: <<read_cl:LOCAL_QUORUM>>
     ops:
-      main-select: |
+      main_select: |
        select * from <<keyspace:baselines>>.<<table:tabular>> where part={part_read} limit {limit};

-  main-write:
+  main_write:
     params:
       ratio: 5
       cl: <<write_cl:LOCAL_QUORUM>>
     ops:
-      main-write: |
+      main_write: |
        insert into <<keyspace:baselines>>.<<table:tabular>>
        (part, clust, data) values ({part_write},{clust_write},{data_write});
@@ -17,14 +17,14 @@ scenarios:
   default:
     schema: run driver=cql tags==block:schema threads==1 cycles==UNDEF
     rampup: run driver=cql tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
-    main-write: run driver=cql tags==block:main-write cycles===TEMPLATE(main-cycles,10000000) threads=auto
-    main-read-ann: run driver=cql tags==block:main-read-ann cycles===TEMPLATE(main-cycles,10000000) threads=auto
-    main-read-pk-ann: run driver=cql tags==block:main-read-pk-ann cycles===TEMPLATE(main-cycles,10000000) threads=auto
-    main-read: run driver=cql tags==block:"main-read.*" cycles===TEMPLATE(main-cycles,10000000) threads=auto
+    main_write: run driver=cql tags==block:main-write cycles===TEMPLATE(main-cycles,10000000) threads=auto
+    main_read_ann: run driver=cql tags==block:main_read_ann cycles===TEMPLATE(main-cycles,10000000) threads=auto
+    main_read_pk_ann: run driver=cql tags==block:main_read_pk_ann cycles===TEMPLATE(main-cycles,10000000) threads=auto
+    main_read: run driver=cql tags==block:"main-read.*" cycles===TEMPLATE(main-cycles,10000000) threads=auto
     main: run driver=cql tags==block:"main.*" cycles===TEMPLATE(main-cycles,10000000) threads=auto
   astra:
     schema: run driver=cql tags==block:schema-astra threads==1 cycles==UNDEF
-    drop-tables: run driver=cql tags==block:drop-tables threads==1 cycles==UNDEF
+    drop_tables: run driver=cql tags==block:drop-tables threads==1 cycles==UNDEF
     truncate: run driver=cql tags==block:truncate-tables cycles===1 threads=1
     reads: run driver=cql tags==block:main-read cycles===TEMPLATE(main-cycles,10000000) threads=auto

@@ -36,24 +36,24 @@ bindings:
   vector_value: CqlVector(ListSizedHashed(<<dimensions:5>>,HashRange(0.0f,100.0f))); NormalizeCqlVector();

 blocks:
-  drop-tables:
+  drop_tables:
     ops:
-      drop-table-vectors:
+      drop_tables_vectors:
        raw: |
          DROP TABLE IF EXISTS TEMPLATE(keyspace,baselines).TEMPLATE(table,vectors);

-  schema-astra:
+  schema_astra:
     params:
       prepared: false
     ops:
-      create-table:
+      create_table:
        raw: |
          CREATE TABLE IF NOT EXISTS TEMPLATE(keyspace,baselines).TEMPLATE(table,vectors) (
          key TEXT,
          value vector<float,<<dimensions:5>>>,
          PRIMARY KEY (key)
          );
-      create-sai-index:
+      create_sai_index:
        raw: |
          CREATE CUSTOM INDEX IF NOT EXISTS ON TEMPLATE(keyspace,baselines).TEMPLATE(table,vectors) (value) USING 'StorageAttachedIndex';

@@ -61,26 +61,26 @@ blocks:
     params:
       prepared: false
     ops:
-      create-keyspace:
+      create_keyspace:
        raw: |
          CREATE KEYSPACE IF NOT EXISTS TEMPLATE(keyspace,baselines)
          WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '<<rf:1>>'};
-      create-table:
+      create_table:
        raw: |
          CREATE TABLE IF NOT EXISTS TEMPLATE(keyspace,baselines).TEMPLATE(table,vectors) (
          key TEXT,
          value vector<float,<<dimensions:5>>>,
          PRIMARY KEY (key)
          );
-      create-sai-index:
+      create_sai_index:
        raw: |
          CREATE CUSTOM INDEX IF NOT EXISTS ON TEMPLATE(keyspace,baselines).TEMPLATE(table,vectors) (value) USING 'StorageAttachedIndex';

-  truncate-tables:
+  truncate_tables:
     params:
       prepared: false
     ops:
-      truncate-vectors:
+      truncate_vectors:
        raw: |
          TRUNCATE TABLE TEMPLATE(keyspace,baselines).TEMPLATE(table,vectors);

@@ -88,38 +88,38 @@ blocks:
     params:
       cl: TEMPLATE(write_cl,LOCAL_QUORUM)
     ops:
-      rampup-insert:
+      rampup_insert:
        prepared: |
          INSERT INTO TEMPLATE(keyspace,baselines).TEMPLATE(table,vectors)
          (key, value) VALUES ({seq_key},{vector_value});

-  main-read-ann:
+  main_read_ann:
     params:
       ratio: TEMPLATE(read_ratio,90)
       cl: TEMPLATE(read_cl,LOCAL_QUORUM)
       instrument: true
     ops:
-      main-select-ann-limit:
+      main_select_ann_limit:
        prepared: |
          SELECT * FROM TEMPLATE(keyspace,baselines).TEMPLATE(table,vectors) ORDER BY value ANN OF {vector_value} LIMIT TEMPLATE(select_limit,2);

-  main-read-pk-ann:
+  main_read_pk_ann:
     params:
       ratio: TEMPLATE(read_ratio,90)
       cl: TEMPLATE(read_cl,LOCAL_QUORUM)
       instrument: true
     ops:
-      main-select-pk-ann-limit:
+      main_select_pk_ann_limit:
        prepared: |
          SELECT * FROM TEMPLATE(keyspace,baselines).TEMPLATE(table,vectors) WHERE KEY={rw_key} ORDER BY value ANN OF {vector_value} LIMIT TEMPLATE(select_limit,2);

-  main-write:
+  main_write:
     params:
       ratio: TEMPLATE(write_ratio,10)
       cl: TEMPLATE(write_cl,LOCAL_QUORUM)
       instrument: true
     ops:
-      main-insert:
+      main_insert:
        prepared: |
          INSERT INTO TEMPLATE(keyspace,baselines).TEMPLATE(table,vectors)
          (key, value) VALUES ({rw_key}, {vector_value});
@@ -14,7 +14,7 @@ scenarios:
     schema: run driver=cql tags==block:schema threads==1 cycles==UNDEF
     rampup: run driver=cql tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=100
     main: run driver=cql tags=='block:main.*' cycles===TEMPLATE(main-cycles,10000000) threads=5
-  drop-tables:
+  drop_tables:
     schema: run driver=cql tags==block:drop-tables threads==1 cycles==UNDEF
     truncate: run driver=cql tags==block:truncate-tables cycles===1 threads=1
     reads: run driver=cql tags==block:main-read cycles===TEMPLATE(main-cycles,10000000) threads=auto
@@ -27,9 +27,9 @@ bindings:
   vector_value: CqlVector(ListSizedHashed(<<dimensions:5>>,HashRange(0.0f,100.0f)); NormalizeCqlVector();

 blocks:
-  drop-tables:
+  drop_tables:
     ops:
-      drop-table-vectors:
+      drop_tables_vectors:
        raw: |
          DROP TABLE IF EXISTS TEMPLATE(keyspace,baselines).TEMPLATE(table,vectors);

@@ -37,26 +37,26 @@ blocks:
     params:
       prepared: false
     ops:
-      create-keyspace:
+      create_keyspace:
        raw: |
          CREATE KEYSPACE IF NOT EXISTS TEMPLATE(keyspace,baselines)
          WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '<<rf:1>>'};
-      create-table:
+      create_table:
        raw: |
          CREATE TABLE IF NOT EXISTS TEMPLATE(keyspace,baselines).TEMPLATE(table,vectors) (
          key TEXT,
          value vector<float,<<dimensions:5>>>,
          PRIMARY KEY (key)
          );
-      create-sai-index:
+      create_sai_index:
        raw: |
          CREATE CUSTOM INDEX IF NOT EXISTS ON TEMPLATE(keyspace,baselines).TEMPLATE(table,vectors) (value) USING 'StorageAttachedIndex';

-  truncate-tables:
+  truncate_tables:
     params:
       prepared: false
     ops:
-      truncate-vectors:
+      truncate_vectors:
        raw: |
          TRUNCATE TABLE TEMPLATE(keyspace,baselines).TEMPLATE(table,vectors);

@@ -64,31 +64,31 @@ blocks:
     params:
       cl: TEMPLATE(write_cl,LOCAL_QUORUM)
     ops:
-      rampup-insert:
+      rampup_insert:
        prepared: |
          INSERT INTO TEMPLATE(keyspace,baselines).TEMPLATE(table,vectors)
          (key, value) VALUES ({seq_key},{vector_value});

-  main-read:
+  main_read:
     params:
       ratio: TEMPLATE(read_ratio,90)
       cl: TEMPLATE(read_cl,LOCAL_QUORUM)
       instrument: true
     ops:
-      main-select-ann-limit:
+      main_select_ann_limit:
        prepared: |
          SELECT * FROM TEMPLATE(keyspace,baselines).TEMPLATE(table,vectors) ORDER BY value ANN OF {vector_value} LIMIT TEMPLATE(select_limit,2);
-      main-select-pk-ann-limit:
+      main_select_pk_ann_limit:
        prepared: |
          SELECT * FROM TEMPLATE(keyspace,baselines).TEMPLATE(table,vectors) WHERE KEY={rw_key} ORDER BY value ANN OF {vector_value} LIMIT TEMPLATE(select_limit,2);

-  main-write:
+  main_write:
     params:
       ratio: TEMPLATE(write_ratio,10)
       cl: TEMPLATE(write_cl,LOCAL_QUORUM)
       instrument: true
     ops:
-      main-insert:
+      main_insert:
        prepared: |
          INSERT INTO TEMPLATE(keyspace,baselines).TEMPLATE(table,vectors)
          (key, value) VALUES ({rw_key}, {vector_value});
@@ -8,8 +8,8 @@ scenarios:
   creategraph: run driver=cqld4 graphname=graph_wheels tags=block:create-graph cycles===UNDEF
   schema: run driver=cqld4 graphname=graph_wheels tags=block:graph-schema cycles===UNDEF
   rampup: run driver==cqld4 graphname=graph_wheels tags=block:rampup cycles=1
-  drop-graph: run driver=cqld4 graphname=graph_wheels tags=block:drop-graph cycles===UNDEF
-  creategraph-classic: run driver=cqld4 graphname=graph_wheels tags=block:create-graph-classic cycles===UNDEF
+  drop_graph: run driver=cqld4 graphname=graph_wheels tags=block:drop-graph cycles===UNDEF
+  creategraph_classic: run driver=cqld4 graphname=graph_wheels tags=block:create_graph_classic cycles===UNDEF
   fluent: run driver=cqld4 graphname=graph_wheels tags=block:fluent cycles=10
   devmode: run driver=cqld4 graphname=graph_wheels tags=name:dev-mode
   prodmode: run driver=cqld4 graphname=graph_wheels tags=name:prod-mode
@@ -26,12 +26,12 @@ bindings:
   diag_one_pct: WeightedLongs('1:1;0:99')

 blocks:
-  drop-graph:
+  drop_graph:
     statements:
-      drop-graph:
+      drop_graph:
        type: gremlin
        script: "system.graph('<<graphname:graph_wheels>>').ifExists().drop();"
-  create-graph-classic:
+  create_graph_classic:
     statements:
       creategraph:
        type: gremlin
@@ -39,15 +39,15 @@ blocks:
        system.graph('<<graphname:graph_wheels>>')
          .classicEngine()
          .create()
-  create-graph:
+  create_graph:
     statements:
       creategraph:
        type: gremlin
        script: >-
          system.graph('<<graphname:graph_wheels>>').ifNotExists().create()
-  create-schema:
+  create_schema:
     statements:
-      graph-schema:
+      graph_schema:
        type: gremlin
        graphname: <<graphname:graph_wheels>>
        script: >-
@@ -72,20 +72,20 @@ blocks:
          .from('session')
          .to('device')
          .create()
-  dev-mode:
+  dev_mode:
     tags:
       block: dev-mode
     statements:
-      dev-mode:
+      dev_mode:
        type: gremlin
        graphname: <<graphname:graph_wheels>>
        script: >-
          schema.config().option('graph.schema_mode').set('Development');
-  prod-mode:
+  prod_mode:
     tags:
       block: prod-mode
     statements:
-      prod-mode:
+      prod_mode:
        type: gremlin
        graphname: <<graphname:graph_wheels>>
        script: >-
@@ -94,7 +94,7 @@ blocks:
     tags:
       block: rampup
     statements:
-      main-add:
+      main_add:
        type: gremlin
        diag: "{diag_one_pct}"
        graphname: <<graphname:graph_wheels>>
@@ -29,8 +29,8 @@ scenarios:
   default:
     schema: run tags=block:schema.* threads==1
     main: run tags=block:main-.*.* cycles===TEMPLATE(main-cycles,0) threads=auto
-    default-schema: run tags=block:"schema.*" threads==1
-    default-main: run tags=block:"main.*" cycles===TEMPLATE(main-cycles,0) threads=auto
+    default_schema: run tags=block:"schema.*" threads==1
+    default_main: run tags=block:"main.*" cycles===TEMPLATE(main-cycles,0) threads=auto
   astra:
     schema: run tags=block:astra-schema threads==1
     main: run tags=block:"main.*" cycles===TEMPLATE(main-cycles,0) threads=auto
@@ -50,21 +50,21 @@ bindings:
 blocks:
   schema:
     ops:
-      create-keyspace: |
+      create_keyspace: |
        create keyspace if not exists TEMPLATE(keyspace,baselines)
        WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 'TEMPLATE(rf,1)'}
        AND durable_writes = true;
-      create-table: |
+      create_table: |
        create table if not exists TEMPLATE(keyspace,baselines).TEMPLATE(table,incremental) (
        key text,
        value text,
        PRIMARY KEY (key)
        );
-  schema-astra:
+  schema_astra:
     params:
       prepared: false
     ops:
-      create-table: |
+      create_table: |
        create table if not exists TEMPLATE(keyspace,baselines).TEMPLATE(table,incremental) (
        key text,
        value text,
@@ -74,23 +74,23 @@ blocks:
     params:
       cl: TEMPLATE(write_cl,LOCAL_QUORUM)
     ops:
-      rampup-insert: |
+      rampup_insert: |
        insert into TEMPLATE(keyspace,baselines).TEMPLATE(table,incremental)
        (key, value)
        values ({rampup_key},{rampup_value});
-  main-read:
+  main_read:
     params:
       ratio: 1
       cl: TEMPLATE(read_cl,LOCAL_QUORUM)
     ops:
-      main-select: |
+      main_select: |
        select * from TEMPLATE(keyspace,baselines).TEMPLATE(table,incremental) where key={read_key};
-  main-write:
+  main_write:
     params:
       ratio: 1
       cl: TEMPLATE(write_cl,LOCAL_QUORUM)
     ops:
-      main-insert: |
+      main_insert: |
        insert into TEMPLATE(keyspace,baselines).TEMPLATE(table,incremental)
        (key, value) values ({write_key}, {write_value});
@@ -34,21 +34,21 @@ blocks:
     params:
       prepared: false
     ops:
-      create-keyspace: |
+      create_keyspace: |
        create keyspace if not exists TEMPLATE(keyspace,baselines)
        WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 'TEMPLATE(rf:1)'}
        AND durable_writes = true;
-      create-table: |
+      create_table: |
        create table if not exists TEMPLATE(keyspace,baselines).TEMPLATE(table,keyvalue) (
        key text,
        value text,
        PRIMARY KEY (key)
        );
-  schema-astra:
+  schema_astra:
     params:
       prepared: false
     statements:
-      create-table: |
+      create_table: |
        create table if not exists TEMPLATE(keyspace,baselines).TEMPLATE(table,keyvalue) (
        key text,
        value text,
@@ -58,7 +58,7 @@ blocks:
     params:
       cl: TEMPLATE(write_cl,LOCAL_QUORUM)
     ops:
-      rampup-insert: |
+      rampup_insert: |
        insert into TEMPLATE(keyspace,baselines).TEMPLATE(table,keyvalue)
        (key, value)
        values ({seq_key},{seq_value});
@@ -66,21 +66,21 @@ blocks:
     params:
       cl: TEMPLATE(read_cl,LOCAL_QUORUM)
     ops:
-      verify-select: |
+      verify_select: |
        select * from TEMPLATE(keyspace,baselines).TEMPLATE(table,keyvalue) where key={seq_key};
        verify-fields: key->seq_key, value->seq_value
-  main-read:
+  main_read:
     params:
       ratio: 5
       cl: TEMPLATE(read_cl,LOCAL_QUORUM)
     statements:
-      main-select: |
+      main_select: |
        select * from TEMPLATE(keyspace,baselines).TEMPLATE(table,keyvalue) where key={rw_key};
-  main-write:
+  main_write:
     params:
       ratio: 5
       cl: TEMPLATE(write_cl,LOCAL_QUORUM)
     statements:
-      main-insert: |
+      main_insert: |
        insert into TEMPLATE(keyspace,baselines).TEMPLATE(table,keyvalue)
        (key, value) values ({rw_key}, {rw_value});
@@ -39,21 +39,21 @@ blocks:
     params:
       prepared: false
     ops:
-      create-keyspace: |
+      create_keyspace: |
        create keyspace if not exists TEMPLATE(keyspace,baselines)
        WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 'TEMPLATE(rf,1)'}
        AND durable_writes = true;
-      create-table: |
+      create_table: |
        create table if not exists TEMPLATE(keyspace,baselines).TEMPLATE(table,keyvalue) (
        key text,
        value text,
        PRIMARY KEY (key)
        );
-  schema-astra:
+  schema_astra:
     params:
       prepared: false
     statements:
-      create-table: |
+      create_table: |
        create table if not exists TEMPLATE(keyspace,baselines).TEMPLATE(table,keyvalue) (
        key text,
        value text,
@@ -63,7 +63,7 @@ blocks:
     params:
       cl: TEMPLATE(write_cl,LOCAL_QUORUM)
     ops:
-      rampup-insert: |
+      rampup_insert: |
        insert into TEMPLATE(keyspace,baselines).TEMPLATE(table,keyvalue)
        (key, value)
        values ({seq_key},{seq_value});
@@ -71,20 +71,20 @@ blocks:
     params:
       cl: TEMPLATE(read_cl,LOCAL_QUORUM)
     ops:
-      verify-select: |
+      verify_select: |
        select * from TEMPLATE(keyspace,baselines).TEMPLATE(table,keyvalue) where key={seq_key};
        verify-fields: key->seq_key, value->seq_value
-  main-read:
+  main_read:
     params:
       ratio: 5
       cl: TEMPLATE(read_cl,LOCAL_QUORUM)
     statements:
-      main-select: |
+      main_select: |
        select * from TEMPLATE(keyspace,baselines).TEMPLATE(table,keyvalue) where key={rw_key};
-  main-write:
+  main_write:
     params:
       ratio: 5
       cl: TEMPLATE(write_cl,LOCAL_QUORUM)
     statements:
-      main-insert: |
+      main_insert: |
        insert into TEMPLATE(keyspace,baselines).TEMPLATE(table,keyvalue) (key, value) values ({rw_key}, {rw_value});
@@ -73,11 +73,11 @@ blocks:
     params:
       prepared: false
     ops:
-      create-keyspace: |
+      create_keyspace: |
        create keyspace if not exists TEMPLATE(keyspace,baselines)
        WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 'TEMPLATE(rf,1)'}
        AND durable_writes = true;
-      create-table: |
+      create_table: |
        create table if not exists TEMPLATE(keyspace,baselines).TEMPLATE(table,tabular) (
        part text,
        clust text,
@@ -85,11 +85,11 @@ blocks:
        data4 text, data5 text, data6 text, data7 text,
        PRIMARY KEY (part,clust)
        );
-  schema-astra:
+  schema_astra:
     params:
       prepared: false
     ops:
-      create-table: |
+      create_table: |
        create table if not exists TEMPLATE(keyspace,baselines).TEMPLATE(table,tabular) (
        part text,
        clust text,
@@ -101,7 +101,7 @@ blocks:
     params:
       cl: TEMPLATE(write_cl,LOCAL_QUORUM)
     ops:
-      rampup-insert: |
+      rampup_insert: |
        insert into TEMPLATE(keyspace,baselines).TEMPLATE(table,tabular)
        (part,clust,data0,data1,data2,data3,data4,data5,data6,data7)
        values ({part_layout},{clust_layout},{data0},{data1},{data2},{data3},{data4},{data5},{data6},{data7});
@@ -109,35 +109,35 @@ blocks:
     params:
       cl: TEMPLATE(read_cl,LOCAL_QUORUM)
     ops:
-      verify-select: |
+      verify_select: |
        select * from TEMPLATE(keyspace,baselines).TEMPLATE(table,tabular) where part={part_layout} and clust={clust_layout};
-  main-read:
+  main_read:
     params:
       ratio: 1
       cl: TEMPLATE(read_cl,LOCAL_QUORUM)
     ops:
-      main-select-all: |
+      main_select_all: |
        select * from TEMPLATE(keyspace,baselines).TEMPLATE(table,tabular) where part={part_read} limit {limit};
-      main-select-01: |
+      main_select_01: |
        select data0,data1 from TEMPLATE(keyspace,baselines).TEMPLATE(table,tabular) where part={part_read} limit {limit};
-      main-select-0246: |
+      main_select_0246: |
        select data0,data2,data4,data6 from TEMPLATE(keyspace,baselines).TEMPLATE(table,tabular) where part={part_read} limit {limit};
-      main-select-1357: |
+      main_select_1357: |
        select data1,data3,data5,data7 from TEMPLATE(keyspace,baselines).TEMPLATE(table,tabular) where part={part_read} limit {limit};
-      main-select-0123: |
+      main_select_0123: |
        select data0,data1,data2,data3 from TEMPLATE(keyspace,baselines).TEMPLATE(table,tabular) where part={part_read} limit {limit};
-      main-select-4567: |
+      main_select_4567: |
        select data4,data5,data6,data7 from TEMPLATE(keyspace,baselines).TEMPLATE(table,tabular) where part={part_read} limit {limit};
-      main-select-67: |
+      main_select_67: |
        select data6,data7 from TEMPLATE(keyspace,baselines).TEMPLATE(table,tabular) where part={part_read} limit {limit};
-      main-select: |
+      main_select: |
        select data0,data1,data2,data3,data4,data5,data6,data7 from TEMPLATE(keyspace,baselines).TEMPLATE(table,tabular) where part={part_read} limit {limit};
-  main-write:
+  main_write:
     params:
       ratio: 8
       cl: TEMPLATE(write_cl,LOCAL_QUORUM)
     ops:
-      main-write: |
+      main_write: |
        insert into TEMPLATE(keyspace,baselines).TEMPLATE(table,tabular)
        (part, clust, data0,data1,data2,data3,data4,data5,data6,data7)
        values ({part_write},{clust_write},{data0},{data1},{data2},{data3},{data4},{data5},{data6},{data7})
@@ -36,11 +36,11 @@ blocks:
     params:
       prepared: false
     ops:
-      create-keyspace: |
+      create_keyspace: |
        create keyspace if not exists TEMPLATE(keyspace,baselines)
        WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 'TEMPLATE(rf,1)'}
        AND durable_writes = true;
-      create-table: |
+      create_table: |
        create table if not exists TEMPLATE(keyspace,baselines).TEMPLATE(table,iot) (
        machine_id UUID,     // source machine
        sensor_name text,    // sensor name
@@ -61,13 +61,13 @@ blocks:
     params:
       prepared: false
     ops:
-      truncate-table: |
+      truncate_table: |
        truncate table TEMPLATE(keyspace,baselines).TEMPLATE(table,iot);
-  schema-astra:
+  schema_astra:
     params:
       prepared: false
     ops:
-      create-table-astra: |
+      create_table_astra: |
        create table if not exists TEMPLATE(keyspace,baselines).TEMPLATE(table,iot) (
        machine_id UUID,     // source machine
        sensor_name text,    // sensor name
@@ -83,7 +83,7 @@ blocks:
       idempotent: true
       instrument: TEMPLATE(instrument-writes,TEMPLATE(instrument,false))
     ops:
-      insert-rampup: |
+      insert_rampup: |
        insert into TEMPLATE(keyspace,baselines).TEMPLATE(table,iot)
        (machine_id, sensor_name, time, sensor_value, station_id, data)
        values ({machine_id}, {sensor_name}, {time}, {sensor_value}, {station_id}, {data})
@@ -95,28 +95,28 @@ blocks:
       instrument: TEMPLATE(instrument-reads,TEMPLATE(instrument,false))
       verify-fields: "*, -cell_timestamp"
     ops:
-      select-verify: |
+      select_verify: |
        select * from TEMPLATE(keyspace,baselines).TEMPLATE(table,iot)
        where machine_id={machine_id} and sensor_name={sensor_name} and time={time};

-  main-read:
+  main_read:
     params:
       ratio: TEMPLATE(read_ratio,1)
       cl: TEMPLATE(read_cl,LOCAL_QUORUM)
       instrument: TEMPLATE(instrument-reads,TEMPLATE(instrument,false))
     ops:
-      select-read: |
+      select_read: |
        select * from TEMPLATE(keyspace,baselines).TEMPLATE(table,iot)
        where machine_id={machine_id} and sensor_name={sensor_name}
        limit TEMPLATE(limit,10);
-  main-write:
+  main_write:
     params:
       ratio: TEMPLATE(write_ratio,9)
       cl: TEMPLATE(write_cl,LOCAL_QUORUM)
       instrument: TEMPLATE(instrument-writes,TEMPLATE(instrument,false))
       idempotent: true
     ops:
-      insert-main: |
+      insert_main: |
        insert into TEMPLATE(keyspace,baselines).TEMPLATE(table,iot)
        (machine_id, sensor_name, time, sensor_value, station_id, data)
        values ({machine_id}, {sensor_name}, {time}, {sensor_value}, {station_id}, {data})
@@ -1,11 +1,11 @@
 blocks:
   schema:
     ops:
-      create-keyspace: |
+      create_keyspace: |
        create keyspace if not exists <<keyspace:baselines>>
        WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 'TEMPLATE(rf,1)'}
        AND durable_writes = true;
-      create-table: |
+      create_table: |
        CREATE TABLE baselines.alltypes (
        id text PRIMARY KEY,
        f_bigint bigint,
@@ -3,9 +3,9 @@ description: |
 scenarios:
   default: run driver=stdout cycles=10 format=readout
   simple: run driver=stdout bindings='simple.*' cycles=10 format=readout
-  hof-four: run driver=stdout bindings=hof_vector cycles=10 format=readout
-  hof-vary: run driver=stdout bindings='hof_vary.*' cycles=10 format=readout
-  hof-tenunit: run driver=stdout bindings='"'hof_ten.*' cycles=10 format=readout
+  hof_four: run driver=stdout bindings=hof_vector cycles=10 format=readout
+  hof_vary: run driver=stdout bindings='hof_vary.*' cycles=10 format=readout
+  hof_tenunit: run driver=stdout bindings='"'hof_ten.*' cycles=10 format=readout

 bindings:
   # default provides a 5-component vector, with unit-interval values. (Not normalized)
@@ -13,15 +13,15 @@ bindings:
 blocks:
   schema:
     ops:
-      create-keyspace: |
+      create_keyspace: |
        create KEYSPACE if not exists TEMPLATE(keyspace,examples)
        WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'}
        AND durable_writes = 'true';
-      create-users-table: |
+      create_users_table: |
        create table if not exists TEMPLATE(keyspace,examples).users (
        userid text PRIMARY KEY
        );
-      create-interests-table: |
+      create_interests_table: |
        create table if not exists TEMPLATE(keyspace,examples).interests (
        userid text,
        interest text,
@@ -29,9 +29,9 @@ blocks:
        );
   rampup:
     ops:
-      insert-users: |
+      insert_users: |
        insert into TEMPLATE(keyspace,examples).users (userid) VALUES ({userid});
-      insert-interests: |
+      insert_interests: |
        insert into TEMPLATE(keyspace,examples).interests(
        interest, userid
        ) VALUES (
@@ -39,9 +39,9 @@ blocks:
        );
   main:
     ops:
-      read-user: |
+      read_user: |
        select * from TEMPLATE(keyspace,examples).users
        where userid={userid};
-      read interests: |
+      read_interests: |
        select * from TEMPLATE(keyspace,examples).interests
        where interest={interest};
@@ -4,8 +4,8 @@ scenarios:
   schema: run driver=cql tags=block:"schema.*" threads===UNDEF cycles===UNDEF
   rampup: run driver=cql tags=block:"rampup.*" threads=auto cycles===TEMPLATE(rampup-cycles,10000)
   main: run driver=cql tags=block:"main.*" threads=auto cycles===TEMPLATE(main-cycles,10000)
-  main-insert: run driver=cql tags=block:main-insert threads=auto cycles===TEMPLATE(main-cycles,10000)
-  main-select: run driver=cql tags=block:main-select threads=auto cycles===TEMPLATE(main-cycles,10000)
+  main_insert: run driver=cql tags=block:main-insert threads=auto cycles===TEMPLATE(main-cycles,10000)
+  main_select: run driver=cql tags=block:main-select threads=auto cycles===TEMPLATE(main-cycles,10000)
   main-scan: run driver=cql tags=block:main-scan threads=auto cycles===TEMPLATE(main-cycles,10000)
   main-update: run driver=cql tags=block:main-update threads=auto cycles===TEMPLATE(main-cycles,10000)
   truncate: run driver=cql tags=block:'truncate.*' threads===UNDEF cycles===UNDEF
@@ -13,7 +13,7 @@ scenarios:
   schema-types: run driver=cql tags=block:schema-types threads===UNDEF cycles===UNDEF
   schema-tables: run driver=cql tags=block:schema-tables threads===UNDEF cycles===UNDEF
   drop: run driver=cql tags=block:'drop.*' threads===UNDEF cycles===UNDEF
-  drop-tables: run driver=cql tags=block:drop-tables threads===UNDEF cycles===UNDEF
+  drop_tables: run driver=cql tags=block:drop-tables threads===UNDEF cycles===UNDEF
   drop-types: run driver=cql tags=block:drop-types threads===UNDEF cycles===UNDEF
   drop-keyspaces: run driver=cql tags=block:drop-keyspaces threads===UNDEF cycles===UNDEF
 bindings:
@@ -91,7 +91,7 @@ blocks:
     }
     ops: {
     }
-  truncate-tables:
+  truncate_tables:
     params:
       timeout: 900.0
     ops:
@@ -102,7 +102,7 @@ blocks:
     }
     ops: {
     }
-  drop-tables:
+  drop_tables:
     params:
       timeout: 900.0
     ops:
@@ -124,7 +124,7 @@ blocks:
        VALUES
        ( {text}, {bigint}, {blob}, {boolean}, {date}, {decimal}, {double}, {duration}, {float}, {frozen<list<int>>}, {list<text>}, {map<text,text>}, {set<text>}, {smallint}, {text}, {time}, {timestamp}, {timeuuid}, {tinyint}, {uuid}, {text}, {varint}, {ascii}, {inet}, {int} );
        ratio: 1
-  main-insert:
+  main_insert:
     params:
       timeout: 10.0
     ops:
@@ -135,7 +135,7 @@ blocks:
        VALUES
        ( {text}, {bigint}, {blob}, {boolean}, {date}, {decimal}, {double}, {duration}, {float}, {frozen<list<int>>}, {list<text>}, {map<text,text>}, {set<text>}, {smallint}, {text}, {time}, {timestamp}, {timeuuid}, {tinyint}, {uuid}, {text}, {varint}, {ascii}, {inet}, {int} );
        ratio: 1
-  main-select:
+  main_select:
     params:
       timeout: 10.0
     ops:
@@ -24,7 +24,7 @@ bindings:
 blocks:
   schema:
     ops:
-      create-table:
+      create_table:
        CreateTable: TEMPLATE(table,keyvalue)
        Keys:
          partkey: HASH
@@ -42,7 +42,7 @@ blocks:
          "partkey": "{seq_key}",
          "value": "{seq_value}"
        }
-  main-write:
+  main_write:
     ops:
       main-putitem:
        PutItem: TEMPLATE(table,keyvalue)
@@ -51,7 +51,7 @@ blocks:
          "partkey": "{rw_key}",
          "value": "{rw_value}"
        }
-  main-read:
+  main_read:
     ops:
       main-getitem:
        GetItem: TEMPLATE(table,keyvalue)
@@ -43,7 +43,7 @@ params:
 blocks:
   schema:
     ops:
-      create-table:
+      create_table:
        CreateTable: TEMPLATE(table,tabular)
        Keys:
          part: HASH
@@ -57,7 +57,7 @@ blocks:
 #        BillingMode: PAY_PER_REQUEST
   rampup:
     ops:
-      put-items:
+      put_items:
        PutItem: TEMPLATE(table,tabular)
        json: |
          {
@@ -76,14 +76,14 @@ blocks:
     params:
       ratio: 1
     ops:
-      read-all:
+      read_all:
        GetItem: TEMPLATE(table,tabular)
        key:
          part: "{part_read}"
          clust: "{clust_read}"
   main:
     ops:
-      write-all:
+      write_all:
        params:
          ratio: 8
        PutItem: TEMPLATE(table,tabular)
@@ -100,7 +100,7 @@ blocks:
          "data6": "{data6}",
          "data7": "{data7}"
        }
-      main-read-all:
+      main_read_all:
        Query: TEMPLATE(table,tabular)
        key:
          part: "{part_read}"
@@ -109,7 +109,7 @@ blocks:
        Limit: "{limit}"
        # no attributes means "all" implicitly

-      main-read-01:
+      main_read_all01:
        Query: TEMPLATE(table,tabular)
        key:
          part: "{part_read}"
@@ -118,7 +118,7 @@ blocks:
        projection: data0, data1
        Limit: "{limit}"

-      main-read-0246:
+      main_read_0246:
        Query: TEMPLATE(table,tabular)
        key:
          part: "{part_read}"
@@ -127,7 +127,7 @@ blocks:
        projection: data0, data2, data4, data6
        Limit: "{limit}"

-      main-read-1357:
+      main_read_1357:
        Query: TEMPLATE(table,tabular)
        key:
          part: "{part_read}"
@@ -136,7 +136,7 @@ blocks:
        projection: data1, data3, data5, data7
        Limit: "{limit}"

-      main-read-0123:
+      main_read_0123:
        Query: TEMPLATE(table,tabular)
        key:
          part: "{part_read}"
@@ -145,7 +145,7 @@ blocks:
        projection: data0, data1, data2, data3
        Limit: "{limit}"

-      main-read-4567:
+      main_read_4567:
        Query: TEMPLATE(table,tabular)
        key:
          part: "{part_read}"
@@ -154,7 +154,7 @@ blocks:
        projection: data4, data5, data6, data7
        Limit: "{limit}"

-      main-read-67:
+      main_read_67:
        Query: TEMPLATE(table,tabular)
        key:
          part: "{part_read}"
@@ -163,7 +163,7 @@ blocks:
        projection: data6, data7
        Limit: "{limit}"

-      main-read-01234567:
+      main_read_01234567:
        Query: TEMPLATE(table,tabular)
        key:
          part: "{part_read}"
@@ -174,5 +174,5 @@ blocks:

   delete:
     ops:
-      delete-table:
+      delete_table:
        DeleteTable: TEMPLATE(table,tabular)
@@ -32,7 +32,7 @@ bindings:
 blocks:
   schema:
     ops:
-      create-table:
+      create_table:
        CreateTable: TEMPLATE(table,timeseries)
        Keys:
          machine_id_sensor_name: HASH
@@ -28,7 +28,7 @@ bindings:
 blocks:
   schema:
     ops:
-      create-keyspace:
+      create_keyspace:
        method: POST
        uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/schemas/keyspaces
        Accept: "application/json"
@@ -49,7 +49,7 @@ blocks:
        Content-Type: "application/json"
        ok-status: "[2-4][0-9][0-9]"

-      create-table:
+      create_table:
        method: POST
        uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/schemas/keyspaces/<<keyspace:baselines>>/tables
        Accept: "application/json"
@@ -77,9 +77,9 @@ blocks:
          "ifNotExists": true
        }

-  schema-astra:
+  schema_astra:
     ops:
-      create-table-astra:
+      create_table_astra:
        method: POST
        uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/schemas/keyspaces/<<keyspace:baselines>>/tables
        Accept: "application/json"
@@ -108,7 +108,7 @@ blocks:
        }
   rampup:
     ops:
-      rampup-insert:
+      rampup_insert:
        method: POST
        uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/keyspaces/<<keyspace:baselines>>/<<table:keyvalue>>
        Accept: "application/json"
@@ -121,11 +121,11 @@ blocks:
          "value": "{seq_value}"
        }

-  main-read:
+  main_read:
     params:
       ratio: <<read_ratio:5>>
     ops:
-      main-select:
+      main_select:
        method: GET
        uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/keyspaces/<<keyspace:baselines>>/<<table:keyvalue>>/{rw_key}
        Accept: "application/json"
@@ -134,11 +134,11 @@ blocks:
        Content-Type: "application/json"
        ok-status: "[2-4][0-9][0-9]"

-  main-write:
+  main_write:
     params:
       ratio: <<write_ratio:5>>
     ops:
-      main-write:
+      main_write:
        method: POST
        uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/keyspaces/<<keyspace:baselines>>/<<table:keyvalue>>
        Accept: "application/json"
@@ -149,4 +149,4 @@ blocks:
          {
            "key": "{rw_key}",
            "value": "{rw_value}"
-          }
+          }
@@ -27,7 +27,7 @@ bindings:
 blocks:
   schema:
     ops:
-      create-keyspace:
+      create_keyspace:
        method: POST
        uri: http://<<stargate_host>>:8082/v2/schemas/keyspaces
        Accept: "application/json"
@@ -49,7 +49,7 @@ blocks:
        Content-Type: "application/json"
        ok-status: "[2-4][0-9][0-9]"

-      create-table:
+      create_table:
        method: POST
        uri: http://<<stargate_host>>:8082/v2/schemas/keyspaces/starter/tables
        Accept: "application/json"
@@ -79,7 +79,7 @@ blocks:

   rampup:
     ops:
-      rampup-insert:
+      rampup_insert:
        method: POST
        uri: http://<<stargate_host>>:8082/v2/keyspaces/starter/http_rest_starter
        Accept: "application/json"
@@ -92,11 +92,11 @@ blocks:
          "value": "{seq_value}"
        }

-  main-read:
+  main_read:
     params:
       ratio: 5
     ops:
-      main-select:
+      main_select:
        method: GET
        uri: http://<<stargate_host>>:8082/v2/keyspaces/starter/http_rest_starter/{rw_key}
        Accept: "application/json"
@@ -105,11 +105,11 @@ blocks:
        Content-Type: "application/json"
        ok-status: "[2-4][0-9][0-9]"

-  main-write:
+  main_write:
     params:
       ratio: 5
     ops:
-      main-write:
+      main_write:
        method: POST
        uri: http://<<stargate_host>>:8082/v2/keyspaces/starter/http_rest_starter
        Accept: "application/json"
@@ -120,4 +120,4 @@ blocks:
          {
            "key": "{rw_key}",
            "value": "{rw_value}"
-          }
+          }
@@ -37,7 +37,7 @@ bindings:
 blocks:
   schema:
     ops:
-      create-keyspace:
+      create_keyspace:
        method: POST
        uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/schemas/keyspaces
        Accept: "application/json"
@@ -59,7 +59,7 @@ blocks:
        Content-Type: "application/json"
        ok-status: "[2-4][0-9][0-9]"

-      create-table:
+      create_table:
        method: POST
        uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/schemas/keyspaces/<<keyspace:baselines>>/tables
        Accept: "application/json"
@@ -96,7 +96,7 @@ blocks:

   rampup:
     ops:
-      rampup-insert:
+      rampup_insert:
        method: POST
        uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/keyspaces/<<keyspace:baselines>>/<<table:tabular>>
        Accept: "application/json"
@@ -110,11 +110,11 @@ blocks:
          "data": "{data}"
        }

-  main-read:
+  main_read:
     params:
       ratio: 5
     ops:
-      main-select:
+      main_select:
        method: GET
        uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/keyspaces/<<keyspace:baselines>>/<<table:tabular>>/{part_read}&page-size={limit}
        Accept: "application/json"
@@ -122,11 +122,11 @@ blocks:
        X-Cassandra-Token: "{token}"
        Content-Type: "application/json"

-  main-write:
+  main_write:
     params:
       ratio: 5
     ops:
-      main-write:
+      main_write:
        method: POST
        uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/keyspaces/<<keyspace:baselines>>/<<table:tabular>>
        Accept: "application/json"
@@ -3,7 +3,7 @@ min_version: "5.17.3"
 description: >2
   NOTE: THIS VERSION IS NOT FUNCTIONING FULLY AND HAS AN OPEN TICKET TO INVESTIGATE:
   See here: https://github.com/nosqlbench/nosqlbench/issues/1148

   This workload emulates a time-series data model and access patterns.
   This should be identical to the cql variant except for:
   - We can't specify the write timestamp to make the write idempotent like we can with cql.
@@ -34,7 +34,7 @@ bindings:
 blocks:
   rampup:
     ops:
-      rampup-insert:
+      rampup_insert:
        method: POST
        uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/keyspaces/<<keyspace:baselines>>/<<table:iot>>
        Accept: "application/json"
@@ -53,7 +53,7 @@ blocks:

   schema:
     ops:
-      create-keyspace:
+      create_keyspace:
        method: POST
        uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/schemas/keyspaces
        Accept: "application/json"
@@ -75,7 +75,7 @@ blocks:
        Content-Type: "application/json"
        ok-status: "[2-4][0-9][0-9]"

-      create-table:
+      create_table:
        method: POST
        uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/schemas/keyspaces/<<keyspace:baselines>>/tables
        Accept: "application/json"
@@ -137,11 +137,11 @@ blocks:
          "ifNotExists": true
        }

-  main-read:
+  main_read:
     params:
       ratio: <<read_ratio:1>>
     ops:
-      main-select:
+      main_select:
        method: GET
        uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/keyspaces/<<keyspace:baselines>>/<<table:iot>>?where=URLENCODE[[{"machine_id":{"$eq":"{machine_id}"},"sensor_name":{"$eq":"{sensor_name}"}}]]&page-size=<<limit:10>>
        Accept: "application/json"
@@ -149,11 +149,11 @@ blocks:
        X-Cassandra-Token: "{token}"
        Content-Type: "application/json"

-  main-write:
+  main_write:
     params:
       ratio: <<write_ratio:9>>
     ops:
-      main-write:
+      main_write:
        method: POST
        uri: <<protocol:http>>://{weighted_hosts}:<<restapi_port:8082>><<path_prefix:>>/v2/keyspaces/<<keyspace:baselines>>/<<table:iot>>
        Accept: "application/json"
@@ -168,4 +168,4 @@ blocks:
          "sensor_value": "{sensor_value}",
          "station_id": "{station_id}",
          "data": "{data}"
-          }
+          }
@@ -10,7 +10,7 @@ scenarios:
 #  rampup: run driver=http tags==block:rampup cycles===47341 threads=1
 #  rampup: run driver=http tags==block:rampup cycles===1183514 threads=10

-  drop-tables:
+  drop_tables:
     schema: run driver=http tags==block:drop-tables threads==1 cycles==UNDEF
     truncate: run driver=http tags==block:truncate-tables cycles===1 threads=1
 #  reads: run driver=http tags==block:main-read cycles===TEMPLATE(read-cycles,100) threads=100
@@ -171,7 +171,7 @@ blocks:
        {"index": {} }
        {"value": {train_vector25},"key": {rw_key25}}

-#    - rampup-insert: |
+#    - rampup_insert: |
 #        POST TEMPLATE(url, https://TODO.com)/TEMPLATE(index,vector)//_doc?refresh=true
 #        Authorization: ApiKey TEMPLATE(apikey, required)
 #        Content-Type: application/json
@@ -180,12 +180,12 @@ blocks:
 #          "value": {train_vector},
 #          "key": {rw_key}
 #        }
-  main-read:
+  main_read:
     params:
       ratio: TEMPLATE(read_ratio,90)
       instrument: true
     ops:
-      - main-select-ann-limit:
+      - main_select_ann_limit:
          op: |
            POST TEMPLATE(url, https://TODO.com)/TEMPLATE(index,vector)/_search
            Authorization: ApiKey TEMPLATE(apikey, required)
@@ -217,14 +217,14 @@ blocks:
            relevancy.accept({relevant_indices},actual_indices);
            return true;

-  main-write:
+  main_write:
     params:
       ratio: TEMPLATE(write_ratio,10)
       cl: TEMPLATE(write_cl,LOCAL_QUORUM)
       instrument: true
       prepared: true
     ops:
-      - main-insert: |
+      - main_insert: |
          POST TEMPLATE(url, https://TODO.com)/TEMPLATE(index,vector)/_doc?refresh=true
          Authorization: ApiKey TEMPLATE(apikey, required)
          Content-Type: application/json
@@ -62,7 +62,7 @@ blocks:

   rampup:
     ops:
-      rampup-insert:
+      rampup_insert:
        method: PUT
        uri: <<protocol:http>>://{weighted_hosts}:<<docsapi_port:8180>><<path_prefix:>>/v2/namespaces/<<namespace:docs_keyvalue>>/collections/<<collection:docs_collection>>/{seq_key}
        Accept: "application/json"
@@ -78,7 +78,7 @@ blocks:
     params:
       ratio: <<read_ratio:5>>
     ops:
-      main-select:
+      main_select:
        method: GET
        uri: <<protocol:http>>://{weighted_hosts}:<<docsapi_port:8180>><<path_prefix:>>/v2/namespaces/<<namespace:docs_keyvalue>>/collections/<<collection:docs_collection>>/{rw_key}
        Accept: "application/json"
@@ -86,7 +86,7 @@ blocks:
        X-Cassandra-Token: "{token}"
        ok-status: "[2-4][0-9][0-9]"

-  main-write:
+  main_write:
        method: PUT
        uri: <<protocol:http>>://{weighted_hosts}:<<docsapi_port:8180>><<path_prefix:>>/v2/namespaces/<<namespace:docs_keyvalue>>/collections/<<collection:docs_collection>>/{rw_key}
        Accept: "application/json"
@@ -96,4 +96,4 @@ blocks:
        body: >2
          {
            "{rw_key}":"{rw_value}"
-          }
+          }
@ -26,7 +26,7 @@ bindings:
blocks:
schema:
ops:
create-keyspace:
create_keyspace:
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<graphql_port:8080>><<path_prefix:>>/graphql-schema
Accept: "application/json"
@ -36,7 +36,7 @@ blocks:
body: >2
{"query":"mutation {\n createKeyspace(name:\"<<keyspace:gqlcf_keyvalue>>\", replicas: <<rf:1>>, ifNotExists: true)\n}"}

create-table:
create_table:
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<graphql_port:8080>><<path_prefix:>>/graphql-schema
Accept: "application/json"
@ -48,7 +48,7 @@ blocks:

rampup:
ops:
rampup-insert:
rampup_insert:
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<graphql_port:8080>><<path_prefix:>>/graphql/<<keyspace:gqlcf_keyvalue>>
Accept: "application/json"
@ -58,11 +58,11 @@ blocks:
body: |
{"query":"mutation {\n insert<<table:keyvalue>>( value: {key: \"{seq_key}\", value: \"{seq_value}\",}) {value {key, value}}}"}

main-read:
main_read:
params:
ratio: <<read_ratio:5>>
ops:
main-select:
main_select:
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<graphql_port:8080>><<path_prefix:>>/graphql/<<keyspace:gqlcf_keyvalue>>
Accept: "application/json"
@ -72,11 +72,11 @@ blocks:
body: |
{"query":"{<<table:keyvalue>>(value: {key: \"{rw_key}\"}) {values {key, value}}}"}

main-write:
main_write:
params:
ratio: <<write_ratio:5>>
ops:
main-write:
main_write:
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<graphql_port:8080>><<path_prefix:>>/graphql/<<keyspace:gqlcf_keyvalue>>
Accept: "application/json"

@ -11,9 +11,9 @@ description: |
scenarios:
default:
schema: run driver=http tags==block:"schema.*" threads==1 cycles==UNDEF
rampup-insert: run driver=http tags==block:"rampup-insert.*" cycles===TEMPLATE(rampup-cycles,12) threads=auto
main-read: run driver=http tags==block:"main-read.*" cycles===TEMPLATE(main-cycles,12) threads=auto
main-write: run driver=http tags==block:"main-write.*" cycles===TEMPLATE(main-cycles,12) threads=auto
rampup_insert: run driver=http tags==block:"rampup-insert.*" cycles===TEMPLATE(rampup-cycles,12) threads=auto
main_read: run driver=http tags==block:"main-read.*" cycles===TEMPLATE(main-cycles,12) threads=auto
main_write: run driver=http tags==block:"main-write.*" cycles===TEMPLATE(main-cycles,12) threads=auto

bindings:
weighted_hosts: WeightedStrings('<<graphql_host:stargate>>')
@ -35,7 +35,7 @@ bindings:
blocks:
schema:
ops:
create-keyspace:
create_keyspace:
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<graphql_port:8080>><<path_prefix:>>/graphql-schema
Accept: "application/json"
@ -45,7 +45,7 @@ blocks:
body: >2
{"query":"mutation {\n createKeyspace(name:\"<<keyspace:gqlcf_tabular>>\", replicas: <<rf:1>>, ifNotExists: true)\n}"}

create-table:
create_table:
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<graphql_port:8080>><<path_prefix:>>/graphql-schema
Accept: "application/json"
@ -55,7 +55,7 @@ blocks:
body: >2
{"query":"mutation {\n createTable(\n keyspaceName: \"<<keyspace:gqlcf_tabular>>\"\n tableName: \"<<table:tabular>>\"\n partitionKeys: [{ name: \"part\", type: { basic: TEXT } }]\n clusteringKeys: [{ name: \"clust\", type: { basic: TEXT } }]\n values: [{ name: \"data\", type: { basic: TEXT } }]\n ifNotExists: true\n )\n}\n"}

rampup-insert:
rampup_insert:
ops:
action:
method: POST
@ -67,7 +67,7 @@ blocks:
body: >2
{"query":"mutation {\n insert<<table:tabular>>( value: {part: \"{part_layout}\", clust: \"{clust_layout}\", data: \"{data}\"}) {value {part, clust, data}}}"}

main-read:
main_read:
params:
ratio: 5
ops:
@ -80,9 +80,9 @@ blocks:
Content-Type: "application/json"
body: >2
{"query":"{<<table:tabular>>(value: {part: \"{part_read}\"}, options: { pageSize: <<limit:10>> }) {values {part, clust, data}}}"}


main-write:

main_write:
params:
ratio: 5
ops:
@ -94,4 +94,4 @@ blocks:
X-Cassandra-Token: "{token}"
Content-Type: "application/json"
body: >2
{"query":"mutation {\n insert<<table:tabular>>( value: {part: \"{part_write}\", clust: \"{clust_write}\", data: \"{data_write}\"}) {value {part, clust, data}}}"}
{"query":"mutation {\n insert<<table:tabular>>( value: {part: \"{part_write}\", clust: \"{clust_write}\", data: \"{data_write}\"}) {value {part, clust, data}}}"}

@ -15,8 +15,8 @@ scenarios:
default:
schema: run driver=http tags==block:schema threads==1 cycles==UNDEF
rampup: run driver=http tags==block:rampup cycles===TEMPLATE(rampup-cycles,10) threads=auto
main-read: run driver=http tags==block:"main-read.*" cycles===TEMPLATE(main-cycles,10) threads=auto
main-write: run driver=http tags==block:"main-write.*" cycles===TEMPLATE(main-cycles,10) threads=auto
main_read: run driver=http tags==block:"main-read.*" cycles===TEMPLATE(main-cycles,10) threads=auto
main_write: run driver=http tags==block:"main-write.*" cycles===TEMPLATE(main-cycles,10) threads=auto


bindings:
@ -36,7 +36,7 @@ blocks:
params:
prepared: false
ops:
create-keyspace:
create_keyspace:
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<graphql_port:8080>><<path_prefix:>>/graphql-schema
Accept: "application/json"
@ -46,7 +46,7 @@ blocks:
body: >2
{"query":"mutation {\n createKeyspace(name:\"<<keyspace:gqlcf_iot>>\", replicas: <<rf:1>>, ifNotExists: true)\n}"}

create-table:
create_table:
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<graphql_port:8080>><<path_prefix:>>/graphql-schema
Accept: "application/json"
@ -58,7 +58,7 @@ blocks:

rampup:
ops:
rampup-insert:
rampup_insert:
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<graphql_port:8080>><<path_prefix:>>/graphql/<<keyspace:gqlcf_iot>>
Accept: "application/json"
@ -68,11 +68,11 @@ blocks:
body: >2
{"query":"mutation insertReading {\n reading: insert<<table:iot>>( value: {machine_id: \"{machine_id}\", sensor_name: \"{sensor_name}\", time: \"{time}\", data: \"{data}\", sensor_value: {sensor_value}, station_id: \"{station_id}\"}) {value {machine_id, sensor_name, time, data, sensor_value, station_id}}}"}

main-read:
main_read:
params:
ratio: <<read_ratio:1>>
ops:
main-select:
main_select:
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<graphql_port:8080>><<path_prefix:>>/graphql/<<keyspace:gqlcf_iot>>
Accept: "application/json"
@ -82,11 +82,11 @@ blocks:
body: >2
{"query":"query readings {<<table:iot>>(value: {machine_id: \"{machine_id}\",sensor_name: \"{sensor_name}\"}, options: { pageSize: <<limit:10>> }) {values {machine_id, sensor_name, time, data, sensor_value, station_id}}}"}

main-write:
main_write:
params:
ratio: <<write_ratio:9>>
ops:
main-write:
main_write:
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<graphql_port:8080>><<path_prefix:>>/graphql/<<keyspace:gqlcf_iot>>
Accept: "application/json"

@ -31,7 +31,7 @@ bindings:
blocks:
schema:
ops:
create-keyspace:
create_keyspace:
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<graphql_port:8080>><<path_prefix:>>/graphql-schema
Accept: "application/json"
@ -57,7 +57,7 @@ blocks:

rampup:
ops:
rampup-insert:
rampup_insert:
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<graphql_port:8080>><<path_prefix:>>/graphql/<<keyspace:gqlsf_keyvalue>>
Accept: "application/json"
@ -69,11 +69,11 @@ blocks:
"query":"mutation {\n insertKeyValue(keyValue: {key: \"{seq_key}\", value: \"{seq_value}\"}) {\n key\n value\n }\n}\n"
}

main-read:
main_read:
params:
ratio: <<read_ratio:1>>
ops:
main-select:
main_select:
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<graphql_port:8080>><<path_prefix:>>/graphql/<<keyspace:gqlsf_keyvalue>>
Accept: "application/json"
@ -85,11 +85,11 @@ blocks:
"query":"{\n getKeyValue(key: \"rw_key\") {\n key\n value\n }\n}\n"
}

main-write:
main_write:
params:
ratio: <<write_ratio:9>>
ops:
main-write:
main_write:
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<graphql_port:8080>><<path_prefix:>>/graphql/<<keyspace:gqlsf_keyvalue>>
Accept: "application/json"

@ -39,7 +39,7 @@ bindings:
blocks:
schema:
ops:
create-keyspace:
create_keyspace:
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<graphql_port:8080>><<path_prefix:>>/graphql-schema
Accept: "application/json"
@ -65,7 +65,7 @@ blocks:

rampup:
ops:
rampup-insert:
rampup_insert:
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<graphql_port:8080>><<path_prefix:>>/graphql/<<keyspace:gqlsf_tabular>>
Accept: "application/json"
@ -77,11 +77,11 @@ blocks:
"query":"mutation {\n insertTabular(tabular: {part: \"{part_layout}\", clust: \"{clust_layout}\", data: \"{data}\"}) {\n part\n clust\n data\n }\n}\n"
}

main-read:
main_read:
params:
ratio: <<read_ratio:1>>
ops:
main-select:
main_select:
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<graphql_port:8080>><<path_prefix:>>/graphql/<<keyspace:gqlsf_tabular>>
Accept: "application/json"
@ -93,11 +93,11 @@ blocks:
"query":"{\n getTabulars(part: \"{part_read}\", clust: \"{clust_read}\") {\n data {\n part\n clust\n data\n }\n pagingState\n }\n}\n"
}

main-write:
main_write:
params:
ratio: <<write_ratio:9>>
ops:
main-write:
main_write:
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<graphql_port:8080>><<path_prefix:>>/graphql/<<keyspace:gqlsf_tabular>>
Accept: "application/json"

@ -39,7 +39,7 @@ bindings:
blocks:
schema:
ops:
create-keyspace:
create_keyspace:
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<graphql_port:8080>><<path_prefix:>>/graphql-schema
Accept: "application/json"
@ -65,7 +65,7 @@ blocks:

rampup:
ops:
rampup-insert:
rampup_insert:
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<graphql_port:8080>><<path_prefix:>>/graphql/<<keyspace:gqlsf_timeseries>>
Accept: "application/json"
@ -77,11 +77,11 @@ blocks:
"query":"mutation {\n insertIot(iot: {machine_id: \"{machine_id}\", sensor_name: \"{sensor_name}\", time: \"{time}\", sensor_value: {sensor_value}, station_id: \"{station_id}\", data: \"{data}\"}) {\n machine_id\n sensor_name\n time\n sensor_value\n station_id\n data\n }\n}\n"
}

main-read:
main_read:
params:
ratio: <<read_ratio:1>>
ops:
main-select:
main_select:
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<graphql_port:8080>><<path_prefix:>>/graphql/<<keyspace:gqlsf_timeseries>>
Accept: "application/json"
@ -93,11 +93,11 @@ blocks:
"query":"{\n getIots(machine_id: \"{machine_id}\", sensor_name: \"{sensor_name}\") {\n data {\n machine_id\n sensor_name\n time\n sensor_value\n station_id\n data\n }\n }\n}\n"
}

main-write:
main_write:
params:
ratio: <<write_ratio:9>>
ops:
main-write:
main_write:
method: POST
uri: <<protocol:http>>://{weighted_hosts}:<<graphql_port:8080>><<path_prefix:>>/graphql/<<keyspace:gqlsf_timeseries>>
Accept: "application/json"

@ -82,7 +82,7 @@ blocks:
}
rampup:
ops:
rampup-insert: |
rampup_insert: |
{
insert: "TEMPLATE(collection,keyvalue)",
documents: [
@ -93,22 +93,22 @@ blocks:
],
comment: "Insert documents into keyvalue collection."
}
main-read:
main_read:
params:
ratio: 5
ops:
main-select: |
main_select: |
{
find: "TEMPLATE(collection,keyvalue)",
filter: { key: { $eq: "{rw_key}" } },
readConcern: { level: "majority" },
comment: "Find the value for the given 'key'."
}
main-write:
main_write:
params:
ratio: 5
ops:
main-insert: |
main_insert: |
{
insert: "TEMPLATE(collection,keyvalue)",
documents: [

@ -133,7 +133,7 @@ blocks:
}
rampup:
ops:
rampup-insert: |
rampup_insert: |
{
insert: "TEMPLATE(collection,tabular)",
documents: [
@ -153,7 +153,7 @@ blocks:
writeConcern: { w: "majority" },
comment: "Insert documents into tabular collection."
}
main-read:
main_read:
params:
ratio: TEMPLATE(read_ratio,1)
ops:
@ -233,7 +233,7 @@ blocks:
comment: "Find the data67 value for the given 'part'."
}
ratio: TEMPLATE(read_ratio,1)
main-select:
main_select:
statement: |
{
find: "TEMPLATE(collection,tabular)",
@ -244,11 +244,11 @@ blocks:
comment: "Find the data01234567 value for the given 'part'."
}
ratio: TEMPLATE(read_ratio,1)
main-write:
main_write:
params:
ratio: TEMPLATE(write_ratio,8)
ops:
main-insert:
main_insert:
statement: |
{
insert: "TEMPLATE(collection,tabular)",
@ -269,7 +269,7 @@ blocks:
comment: "Insert documents into tabular collection."
}
ratio: TEMPLATE(write_ratio,8)


# The below drop-collection blocks expects the collection to exist or else this will fail
drop-collection:
ops:

@ -184,7 +184,7 @@ blocks:
rampup:
ops:
# time: BinData(4, "{time}"),
rampup-insert: |
rampup_insert: |
{
insert: "TEMPLATE(collection,timeseries)",
documents: [
@ -208,11 +208,11 @@ blocks:
writeConcern: { w: "majority" },
comment: "Insert documents into a timeseries collection."
}
main-read:
main_read:
params:
ratio: TEMPLATE(read_ratio,1)
ops:
select-read:
select_read:
statement: |
{
find: "TEMPLATE(collection,timeseries)",
@ -222,11 +222,11 @@ blocks:
comment: "Find the value for the given 'machine_id' and 'sensor_name'."
}
ratio: TEMPLATE(read_ratio,1)
main-write:
main_write:
params:
ratio: TEMPLATE(write_ratio,9)
ops:
main-insert:
main_insert:
statement: |
{
insert: "TEMPLATE(collection,timeseries)",
@ -252,7 +252,7 @@ blocks:
comment: "Insert documents into a timeseries collection."
}
ratio: TEMPLATE(write_ratio,9)


# The below drop-collection blocks expects the collection to exist or else this will fail
drop-collection:
ops:

@ -109,7 +109,7 @@ blocks:
}
rampup:
ops:
rampup-insert: |
rampup_insert: |
{
insert: "TEMPLATE(collection,keyvalue)",
documents: [
@ -120,7 +120,7 @@ blocks:
],
comment: "Insert documents into keyvalue collection."
}
main-read:
main_read:
params:
ratio: TEMPLATE(read_ratio,5)
ops:
@ -167,11 +167,11 @@ blocks:
actual_indices=MongoDbUtils.getFieldFromResults("key",result);
relevancy.accept({relevant_indices},actual_indices);
return true;
main-write:
main_write:
params:
ratio: TEMPLATE(write_ratio,5)
ops:
main-insert: |
main_insert: |
{
insert: "TEMPLATE(collection,keyvalue)",
documents: [

@ -16,7 +16,7 @@ blocks:
readPreference: primary
rampup:
ops:
rampup-insert: |
rampup_insert: |
{
insert: "<<collection:keyvalueuuid>>",
documents: [ { _id: UUID("{seq_uuid}"),
@ -34,7 +34,7 @@ blocks:
find: "<<collection:keyvalueuuid>>",
filter: { _id: UUID("{seq_uuid}") }
}
main-read:
main_read:
params:
ratio: <<read_ratio:1>>
type: read
@ -45,16 +45,16 @@ blocks:
find: "<<collection:keyvalueuuid>>",
filter: { _id: UUID("{rw_uuid}") }
}
main-write:
main_write:
params:
ratio: <<write_ratio:1>>
type: write
readPreference: primary
ops:
main-insert: |
main_insert: |
{
insert: "<<collection:keyvalueuuid>>",
documents: [ { _id: UUID("{rw_uuid}")
key: {rw_key},
value: NumberLong({rw_value}) } ]
}
}

@ -18,7 +18,7 @@ params:
durable_topic: "false"

blocks:
msg-consume-block:
msg_consume_block:
ops:
op1:
## The value represents the destination (queue or topic) name)

@ -13,7 +13,7 @@ params:
async_api: "true"

blocks:
msg-produce-block:
msg_produce_block:
ops:
op1:
## The value represents the destination (queue or topic) name

@ -6,7 +6,7 @@ scenarios:
default:
- run driver=stdout alias=step1
- run driver=stdout alias=step2
schema-only:
schema_only:
- run driver=blah tags=block:"schema.*"

tags:

@ -5,16 +5,16 @@ scenarios:
schema: run driver==stdout workload===scenario-test tags=block:"schema.*"
rampup: run driver=stdout workload===scenario-test tags=block:rampup cycles=TEMPLATE(cycles1,10)
main: run driver=stdout workload===scenario-test tags=block:"main.*" cycles=TEMPLATE(cycles2,10)
schema-only:
schema_only:
schema: run driver=stdout workload==scenario-test tags=block:"schema.*" doundef==undef

template-test:
with-template: run driver=stdout cycles=TEMPLATE(cycles-test,10)
template_test:
with_template: run driver=stdout cycles=TEMPLATE(cycles-test,10)

blocks:
schema:
ops:
create-keyspace: |
create_keyspace: |
create keyspace if not exists puppies
rampup:
ops:
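
Taken together, the hunks above apply one consistent rename across the workload files: hyphenated block, op, and scenario names become underscored (main-write -> main_write, rampup-insert -> rampup_insert, schema-only -> schema_only, and so on). As a hedged sketch only, abbreviated from the renamed blocks above rather than copied from any single file, a renamed block now reads roughly as follows; the names and TEMPLATE defaults come from the diffs, while the document fields inside the op body are assumed purely for illustration.

blocks:
  main_write:            # was: main-write
    params:
      ratio: TEMPLATE(write_ratio,5)
    ops:
      main_insert: |     # was: main-insert
        {
          insert: "TEMPLATE(collection,keyvalue)",
          documents: [ { key: "{seq_key}", value: "{seq_value}" } ]
        }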