First Phase of Moving to Containers by DriverType

Author: MikeYaacoubStax
Date: 2023-02-10 00:33:54 -05:00
Parent: ed4be4ee4b
Commit: e67cc265ff
14 changed files with 236 additions and 174 deletions
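In short: each example workload gains a basic_check scenario, a 10-cycle smoke-test variant of the default schema/rampup/main phases, and the container-based integration test is reworked to discover, per driver type, every workload that offers that scenario instead of hard-coding a single workload. For illustration only (the host, localdc, and port values below are placeholders, not part of this commit), such a scenario is invoked by name just as the test invokes "default":

    java -jar nb5.jar cql-keyvalue2.yaml basic_check host=127.0.0.1 localdc=datacenter1 port=9042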

View File

@@ -4,6 +4,10 @@ scenarios:
    schema: run driver=cql tags==block:schema threads==1 cycles==UNDEF
    rampup: run driver=cql tags==block:rampup cycles===TEMPLATE(rampup-cycles,100) threads=auto
    main: run driver=cql tags==block:"main.*" cycles===TEMPLATE(main-cycles,100) threads=auto
+  basic_check:
+    schema: run driver=cql tags==block:schema threads==1 cycles==UNDEF
+    rampup: run driver=cql tags==block:rampup cycles===TEMPLATE(rampup-cycles,10) threads=auto
+    main: run driver=cql tags==block:"main.*" cycles===TEMPLATE(main-cycles,10) threads=auto
bindings:
  machine_id: Mod(<<sources:10000>>); ToHashedUUID() -> java.util.UUID
@@ -50,8 +54,8 @@ blocks:
      idempotent: true
    ops:
      rampup-insert: |
        insert into <<keyspace:baselines>>.<<table:iot>> (machine_id, sensor_name, time, sensor_value, station_id, data)
        values ({machine_id}, {sensor_name}, {time}, {sensor_value}, {station_id}, {data})
        using timestamp {cell_timestamp};
  verify:
    params:
@@ -61,7 +65,7 @@ blocks:
      verify-fields: "*, -cell_timestamp"
    ops:
      select-verify: |
        select * from <<keyspace:baselines>>.<<table:iot>> where machine_id={machine_id}
        and sensor_name={sensor_name} and time={time};
  main-read:
@@ -70,8 +74,8 @@ blocks:
      cl: <<read_cl:LOCAL_QUORUM>>
    ops:
      select-read: |
        select * from <<keyspace:baselines>>.<<table:iot>>
        where machine_id={machine_id} and sensor_name={sensor_name}
        limit <<limit:10>>;
  main-write:
    params:
@@ -80,7 +84,7 @@ blocks:
      idempotent: true
    ops:
      insert-main: |
        insert into <<keyspace:baselines>>.<<table:iot>>
        (machine_id, sensor_name, time, sensor_value, station_id, data)
        values ({machine_id}, {sensor_name}, {time}, {sensor_value}, {station_id}, {data})
        using timestamp {cell_timestamp};

View File

@@ -12,6 +12,10 @@ scenarios:
    schema: run driver=cql tags==block:schema-astra threads==1 cycles==UNDEF
    rampup: run driver=cql tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
    main: run driver=cql tags==block:"main.*" cycles===TEMPLATE(main-cycles,10000000) threads=auto
+  basic_check:
+    schema: run driver=cql tags==block:schema threads==1 cycles==UNDEF
+    rampup: run driver=cql tags==block:rampup cycles===TEMPLATE(rampup-cycles,10) threads=auto
+    main: run driver=cql tags==block:"main.*" cycles===TEMPLATE(main-cycles,10) threads=auto
params:
  instrument: TEMPLATE(instrument,false)

View File

@@ -10,6 +10,10 @@ scenarios:
    schema: run driver=cql tags==block:schema-astra threads==1 cycles==UNDEF
    rampup: run driver=cql tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
    main: run driver=cql tags==block:"main.*" cycles===TEMPLATE(main-cycles,10000000) threads=auto
+  basic_check:
+    schema: run driver=cql tags==block:schema threads==1 cycles==UNDEF
+    rampup: run driver=cql tags==block:rampup cycles===TEMPLATE(rampup-cycles,10) threads=auto
+    main: run driver=cql tags==block:"main.*" cycles===TEMPLATE(main-cycles,10) threads=auto
bindings:
  seq_key: Mod(<<keycount:1000000000>>); ToString() -> String

View File

@@ -17,6 +17,10 @@ scenarios:
    schema: run driver=cql tags==block:schema-astra threads==1 cycles==UNDEF
    rampup: run driver=cql tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
    main: run driver=cql tags=='block:main-.*' cycles===TEMPLATE(main-cycles,10000000) threads=auto
+  basic_check:
+    schema: run driver=cql tags==block:schema threads==1 cycles==UNDEF
+    rampup: run driver=cql tags==block:rampup cycles===TEMPLATE(rampup-cycles,10) threads=auto
+    main: run driver=cql tags=='block:main-.*' cycles===TEMPLATE(main-cycles,10) threads=auto
bindings:
  seq_key: Mod(<<keycount:1000000000>>); ToString() -> String

View File

@@ -13,6 +13,10 @@ scenarios:
    schema: run driver=cql tags==block:schema-astra threads==1 cycles==UNDEF
    rampup: run driver=cql tags==block:rampup cycles===TEMPLATE(rampup-cycles,10) threads=auto
    main: run driver=cql tags==block:"main.*" cycles===TEMPLATE(main-cycles,10) threads=auto
+  basic_check:
+    schema: run driver=cql tags==block:schema threads==1 cycles==UNDEF
+    rampup: run driver=cql tags==block:rampup cycles===TEMPLATE(rampup-cycles,10) threads=auto
+    main: run driver=cql tags==block:"main.*" cycles===TEMPLATE(main-cycles,10) threads=auto
params:
  x: y
@@ -34,9 +38,9 @@ blocks:
          AND durable_writes = true;
    create-table: |
      create table if not exists <<keyspace:starter>>.<<table:cqlstarter>> (
        machine_id UUID,
        message text,
        time timestamp,
        PRIMARY KEY ((machine_id), time)
      ) WITH CLUSTERING ORDER BY (time DESC);
#    truncate-table: |
@@ -47,9 +51,9 @@ blocks:
    ops:
      create-table-astra: |
        create table if not exists <<keyspace:starter>>.<<table:cqlstarter>> (
          machine_id UUID,
          message text,
          time timestamp,
          PRIMARY KEY ((machine_id), time)
        ) WITH CLUSTERING ORDER BY (time DESC);
  rampup:
@@ -58,7 +62,7 @@ blocks:
      idempotent: true
    ops:
      insert-rampup: |
        insert into <<keyspace:starter>>.<<table:cqlstarter>> (machine_id, message, time)
        values ({machine_id}, {message}, {time}) using timestamp {timestamp};
  main-read:
    params:
@@ -76,4 +80,4 @@ blocks:
    ops:
      insert-main: |
        insert into <<keyspace:starter>>.<<table:cqlstarter>>
        (machine_id, message, time) values ({machine_id}, {message}, {time}) using timestamp {timestamp};

View File

@@ -10,6 +10,10 @@ scenarios:
    schema: run driver=cql tags==block:schema-astra threads==1 cycles==UNDEF
    rampup: run driver=cql tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
    main: run driver=cql tags==block:main-*.* cycles===TEMPLATE(main-cycles,10000000) threads=auto
+  basic_check:
+    schema: run driver=cql tags==block:schema threads==1 cycles==UNDEF
+    rampup: run driver=cql tags==block:rampup cycles===TEMPLATE(rampup-cycles,10) threads=auto
+    main: run driver=cql tags==block:main-*.* cycles===TEMPLATE(main-cycles,10) threads=auto
bindings:
  # for ramp-up and verify
@@ -82,4 +86,4 @@ blocks:
    ops:
      main-write: |
        insert into <<keyspace:baselines>>.<<table:tabular>>
        (part, clust, data) values ({part_write},{clust_write},{data_write});

View File

@@ -18,6 +18,10 @@ scenarios:
    schema: run driver=cql tags==block:schema-astra threads==1 cycles==UNDEF
    rampup: run driver=cql tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
    main: run driver=cql tags==block:"main.*" cycles===TEMPLATE(main-cycles,10000000) threads=auto
+  basic_check:
+    schema: run driver=cql tags==block:schema threads==1 cycles==UNDEF
+    rampup: run driver=cql tags==block:rampup cycles===TEMPLATE(rampup-cycles,10) threads=auto
+    main: run driver=cql tags==block:"main.*" cycles===TEMPLATE(main-cycles,10) threads=auto
bindings:
  seq_key: Mod(TEMPLATE(keycount,1000000000)); ToString();

View File

@@ -18,6 +18,10 @@ scenarios:
    schema: run driver=cql tags==block:schema-astra threads==1 cycles==UNDEF
    rampup: run driver=cql tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
    main: run driver=cql tags==block:"main.*" cycles===TEMPLATE(main-cycles,10000000) threads=auto
+  basic_check:
+    schema: run driver=cql tags==block:schema threads==1 cycles==UNDEF
+    rampup: run driver=cql tags==block:rampup cycles===TEMPLATE(rampup-cycles,10) threads=auto
+    main: run driver=cql tags==block:"main.*" cycles===TEMPLATE(main-cycles,10) threads=auto
bindings:
  seq_key: Mod(TEMPLATE(keycount,1000000000)); ToString() -> String

View File

@@ -34,6 +34,10 @@ scenarios:
    schema: run driver=cql tags==block:schema-astra threads==1 cycles==UNDEF
    rampup: run driver=cql tags==block:rampup cycles===TEMPLATE(rampup-cycles,100) threads=auto
    main: run driver=cql tags==block:"main.*" cycles===TEMPLATE(main-cycles,100) threads=auto
+  basic_check:
+    schema: run driver=cql tags==block:schema threads==1 cycles==UNDEF
+    rampup: run driver=cql tags==block:rampup cycles===TEMPLATE(rampup-cycles,10) threads=auto
+    main: run driver=cql tags==block:"main.*" cycles===TEMPLATE(main-cycles,10) threads=auto
params:
  instrument: true
@@ -136,4 +140,4 @@ blocks:
      main-write: |
        insert into TEMPLATE(keyspace,baselines).TEMPLATE(table,tabular)
        (part, clust, data0,data1,data2,data3,data4,data5,data6,data7)
        values ({part_write},{clust_write},{data0},{data1},{data2},{data3},{data4},{data5},{data6},{data7})

View File

@@ -12,6 +12,10 @@ scenarios:
    schema: run driver=cql tags==block:schema-astra threads==1 cycles==UNDEF
    rampup: run driver=cql tags==block:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
    main: run driver=cql tags==block:"main.*" cycles===TEMPLATE(main-cycles,10000000) threads=auto
+  basic_check:
+    schema: run driver=cql tags==block:schema-astra threads==1 cycles==UNDEF
+    rampup: run driver=cql tags==block:rampup cycles===TEMPLATE(rampup-cycles,10) threads=auto
+    main: run driver=cql tags==block:"main.*" cycles===TEMPLATE(main-cycles,10) threads=auto
params:
  instrument: TEMPLATE(instrument,false)

View File

@@ -19,10 +19,8 @@ blocks:
insert: "<<collection:keyvalue>>", insert: "<<collection:keyvalue>>",
documents: [ { _id: {seq_key}, value: {seq_value} } ] documents: [ { _id: {seq_key}, value: {seq_value} } ]
} }
params: params:
readPreference: primary readPreference: primary
tags:
name: rampup-insert
main-read: main-read:
params: params:
@@ -34,7 +32,7 @@ blocks:
        {
          find: "<<collection:keyvalue>>",
          filter: { _id: {rw_key} }
        }
  main-write:
    params:
@@ -47,4 +45,4 @@ blocks:
          documents: [ { _id: {rw_key}, value: {rw_value} } ]
        }
    params:
      readPreference: primary

View File

@@ -29,9 +29,10 @@ bindings:
  lat: Uniform(-180d, 180d)
  lng: Hash() -> long; Uniform(-180d, 180d)
  friend_id: Add(-1); ToHashedUUID(); ToString() -> String
  match1: Identity(); CoinFunc(<<match-ratio>>, FixedValue(0), FixedValue(1000))
  match2: Identity(); CoinFunc(<<match-ratio>>, FixedValue("true"), FixedValue("false"))
+ # Being removed because we are using the new JSON structure
  additional_fields: ListSizedStepped(<<docpadding:0>>,Template("\"{}\":{}",Identity(),Identity())); ToString(); ReplaceAll('\[\"', ',\"'); ReplaceAll('\[', ''); ReplaceAll('\]', '') -> String
blocks:
@@ -46,12 +47,12 @@ blocks:
      drop-collection: |
        {
          drop: "<<collection:search_basic>>"
        }
      create-collection: |
        {
          create: "<<collection:search_basic>>"
        }
      create-indexes: |
        {
@@ -71,7 +72,7 @@ blocks:
name: "city_idx" name: "city_idx"
} }
] ]
} }
rampup-write: rampup-write:
ops: ops:
@@ -83,9 +84,9 @@ blocks:
            {
              "_id": "{seq_key}",
              "user_id": "{user_id}",
-             "created_on": {created_on},
+             "created_on": "{created_on}",
              "full_name": "{full_name}",
-             "married": {married},
+             "married": "{married}",
              "address": {
                "primary": {
                  "city": "{city}",
@@ -94,18 +95,17 @@ blocks:
"secondary": {} "secondary": {}
}, },
"coordinates": [ "coordinates": [
{lat}, "{lat}",
{lng} "{lng}"
], ],
"children": [], "children": [],
"friends": [ "friends": [
"{friend_id}" "{friend_id}"
], ],
"debt": null, "debt": null,
"match1": {match1}, "match1": "{match1}",
"match2": "{match2}", "match2": "{match2}",
"match3": {match2} "match3": "{match2}"
{additional_fields}
} }
] ]
} }
@@ -127,7 +127,7 @@ blocks:
        {
          find: "<<collection:search_basic>>",
          filter: { match3: true }
        }, <<field-projection:null>>
  main-lt:
    params:
@@ -137,7 +137,7 @@ blocks:
        {
          find: "<<collection:search_basic>>",
          filter: { match1: {$lt: 1}}
        }, <<field-projection:null>>
  main-and:
    params:
@@ -147,7 +147,7 @@ blocks:
        {
          find: "<<collection:search_basic>>",
          filter: { match1: {$lt: 1}, match2: "true"}
        }, <<field-projection:null>>
  main-or:
    params:
@@ -157,7 +157,7 @@ blocks:
        {
          find: "<<collection:search_basic>>",
          filter: { $or: [ {match1: {$lt: 1}}, {match3: true}]}
        }, <<field-projection:null>>
  main-or-single-match:
    params:
@@ -167,4 +167,4 @@ blocks:
        {
          find: "<<collection:search_basic>>",
          filter: { $or: [ {match1: {$lt: 1}}, {match2: "notamatch"}]}
        }, <<field-projection:null>>

View File

@@ -20,7 +20,7 @@
  <parent>
    <artifactId>mvn-defaults</artifactId>
    <groupId>io.nosqlbench</groupId>
-   <version>5.17.0-SNAPSHOT</version>
+   <version>5.17.1-SNAPSHOT</version>
    <relativePath>../mvn-defaults</relativePath>
  </parent>
@@ -51,7 +51,7 @@
  <dependency>
    <groupId>io.nosqlbench</groupId>
    <artifactId>engine-cli</artifactId>
-   <version>5.17.0-SNAPSHOT</version>
+   <version>5.17.1-SNAPSHOT</version>
  </dependency>
</dependencies>

View File

@@ -17,86 +17,81 @@
package io.nosqlbench.nb5.proof;
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.cql.ResultSet;
import com.datastax.oss.driver.api.core.cql.Row;
+import io.nosqlbench.engine.api.scenarios.NBCLIScenarioParser;
+import io.nosqlbench.engine.api.scenarios.WorkloadDesc;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
+import org.junit.Ignore;
import org.junit.jupiter.api.*;
import org.rnorth.ducttape.unreliables.Unreliables;
import org.testcontainers.containers.CassandraContainer;
import org.testcontainers.containers.wait.strategy.AbstractWaitStrategy;
import org.testcontainers.utility.DockerImageName;
+import java.io.BufferedReader;
import java.net.InetSocketAddress;
+import java.nio.file.Files;
+import java.nio.file.Path;
import java.time.Duration;
-import java.util.List;
-import java.util.ArrayList;
-import java.util.Optional;
+import java.util.*;
import java.util.concurrent.TimeUnit;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
+import java.util.stream.Collectors;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertTrue;
+@Ignore
public class CassandraContainersIntegrationTest {
+    private enum Driver {
+        CQL("cql"),
+        HTTP("http"),
+        MONGODB("mongodb"),
+        TCP ("tcp"),
+        PULSAR("pulsar"),
+        DYNAMODB("dynamo"),
+        KAFKA("kafka");
+        private final String name;
+        Driver(String name) {
+            this.name = name;
+        }
+        public String getName() {
+            return name;
+        }
+    }
    public static Logger logger = LogManager.getLogger(CassandraContainersIntegrationTest.class);
    private final String java = Optional.ofNullable(System.getenv(
        "JAVA_HOME")).map(v -> v + "/bin/java").orElse("java");
    private final static String JARNAME = "../nb5/target/nb5.jar";
-    // private static GenericContainer cass= new CassandraContainer("cassandra").withExposedPorts(9042);
-    private static final ArrayList<String> matchedPaths = new ArrayList<>(); //the list of paths from list-workloads
-    private static String hostIP = "127.0.0.1"; //the host ip of the cassandra image in the container
-    private static String datacenter = "datacenter1"; //the default datacenter name
-    private static Integer mappedPort9042 = 9042; //the port mapped to the original exposed port of the cassandra image
+    private final static String BASIC_CHECK_IDENTIFIER = "basic_check";
    private static final CassandraContainer cass = (CassandraContainer) new CassandraContainer(DockerImageName.parse("cassandra:latest"))
        .withExposedPorts(9042).withAccessToHost(true);
+    private static Map<Driver, List<WorkloadDesc>> basicWorkloadsMapPerDriver = null;
    //.waitingFor(new CassandraWaitStrategy());
    @BeforeAll
-    public static void initContainer() {
-        //List the tests we would like to run
-        ProcessInvoker invoker = new ProcessInvoker();
-        //STEP1: Copy the example workload to the local dir
-        ProcessResult listResult = invoker.run("list-workloads", 30,
-            "java", "-jar", JARNAME, "--list-workloads", "--include=examples"
-        );
-        assertThat(listResult.exception).isNull();
-        String listOut = String.join("\n", listResult.getStdoutData());
-        List<String> results = new ArrayList<>();
-        // Define the regular expression pattern
-        String regex = "/(.+?/)+.+?\\.yaml";
-        Pattern pattern = Pattern.compile(regex, Pattern.CASE_INSENSITIVE);
-        Matcher matcher = pattern.matcher(listOut);
-        while (matcher.find()) {
-            matchedPaths.add(matcher.group());
-        }
-        System.out.println("Matched paths:");
-        for (String path : matchedPaths) {
-            System.out.println(path);
-        }
-    }
+    public static void listWorkloads() {
+        List<WorkloadDesc> workloads = List.of();
+        basicWorkloadsMapPerDriver = new HashMap<>();
+        try {
+            workloads = NBCLIScenarioParser.getWorkloadsWithScenarioScripts(true, "examples");
+        } catch (Exception e) {
+            throw new RuntimeException("Error while getting workloads:" + e.getMessage(), e);
+        }
+        for (Driver driver : Driver.values()) {
+            basicWorkloadsMapPerDriver.put(driver, getBasicCheckWorkloadsForDriver(workloads, BASIC_CHECK_IDENTIFIER, driver.getName()));
+        }
+    }
    @BeforeEach
    public void setUp() {
-        //STEP0:Start the test container and expose the 9042 port on the local host.
-        //So that the docker bridge controller exposes the port to our process invoker that will run nb5
-        //and target cassandra on that docker container
-        cass.start();
-        datacenter = cass.getLocalDatacenter();
-        //When running with a local Docker daemon, exposed ports will usually be reachable on localhost.
-        // However, in some CI environments they may instead be reachable on a different host.
-        mappedPort9042 = cass.getMappedPort(9042);
-        hostIP = cass.getHost();
        System.out.println("setup");
    }
@@ -105,108 +100,137 @@ public class CassandraContainersIntegrationTest {
        ProcessInvoker invoker = new ProcessInvoker();
        invoker.setLogDir("logs/test");
+        if(basicWorkloadsMapPerDriver.get(Driver.CQL) == null)
+            return ;
+        else if (basicWorkloadsMapPerDriver.get(Driver.CQL).size() == 0)
+            return;
-        //STEP1: Copy the example workload to the local dir
-        ProcessResult copyResult = invoker.run("copy-workload", 30,
-            "java", "-jar", JARNAME, "--copy=/activities/baselinesv2/cql-keyvalue2.yaml"
-        );
-        assertThat(copyResult.exception).isNull();
-        String copyOut = String.join("\n", copyResult.getStdoutData());
-        //STEP2: Run the example cassandra workload using the schema tag to create the Cass Baselines keyspace
-        String[] args = new String[]{
-            "java", "-jar", JARNAME, "cql-keyvalue2.yaml", "default", "host="+hostIP, "localdc="+datacenter, "port="+ mappedPort9042.toString(), "rampup-cycles=10", "main-cycles=10"
-        };
-        logger.info("The final command line: " + String.join(" ", args));
-        ProcessResult runSchemaResult = invoker.run("run-workload", 30, args);
-        //STEP 3 Check runSchemaOut for errors
-        logger.info("Checking if the NB5 command resulted in any errors...");
-        assertThat(runSchemaResult.exception).isNull();
-        String runSchemaOut = String.join("\n", runSchemaResult.getStdoutData());
-        assertThat(runSchemaOut.toLowerCase()).doesNotContain("error");
-        logger.info("NB5 command completed with no errors");
-        //STEP 4 Check the cluster for created data
-        try (CqlSession session = CqlSession.builder().addContactPoint(new InetSocketAddress(hostIP, mappedPort9042)).withLocalDatacenter(datacenter).build()) {
-            //->Check for the creation of the keyspace baselines
-            logger.info("Checking for the creation of the keyspace \"baselines\"...");
-            ResultSet result = session.execute("SELECT keyspace_name FROM system_schema.keyspaces");
-            List<Row> rows = result.all();
-            boolean keyspaceFound = false;
-            for (Row row : rows) {
-                if (row.getString("keyspace_name").equals("baselines")) {
-                    keyspaceFound = true;
-                    break;
-                }
-            }
-            assertTrue(keyspaceFound);
-            logger.info("Keyspace \"baselines\" was found, nb5 command had created it successfully");
-            //->Check for the creation of the baselines keyvalue table
-            logger.info("Checking for the creation of the table \"baselines.keyvalue\"...");
-            result = session.execute("SELECT table_name FROM system_schema.tables WHERE keyspace_name='baselines'");
-            rows = result.all();
-            boolean tableFound = false;
-            for (Row row : rows) {
-                if (row.getString("table_name").equals("keyvalue")) {
-                    tableFound = true;
-                    break;
-                }
-            }
-            assertTrue(tableFound);
-            logger.info("Table \"baselines.keyvalue\" was found, nb5 command had created it successfully");
-            //->Check for the creation of the baselines keyvalue table
-            logger.info("Table \"baselines.keyvalue\" has at least 5 rows of key-value pairs, nb5 command had created them successfully");
-            result = session.execute("SELECT count(*) FROM baselines.keyvalue");
-            int rowCount = result.one().getInt(0);
-            assertTrue(rowCount >= 5);
-            logger.info("Table \"baselines.keyvalue\" has at least 5 rows of key-value pairs, nb5 command had created them successfully");
-        } catch (Exception e)
-        {
-            System.out.println(e.getMessage());
-        }
+        for(WorkloadDesc workloadDesc : basicWorkloadsMapPerDriver.get(Driver.CQL))
+        {
+            //STEP0:Start the test container and expose the 9042 port on the local host.
+            //So that the docker bridge controller exposes the port to our process invoker that will run nb5
+            //and target cassandra on that docker container
+            cass.start();
+            int lastSlashIndex = workloadDesc.getWorkloadName().lastIndexOf('/');
+            String shortName = workloadDesc.getWorkloadName().substring(lastSlashIndex + 1);
+            //the default datacenter name
+            String datacenter = cass.getLocalDatacenter();
+            //When running with a local Docker daemon, exposed ports will usually be reachable on localhost.
+            // However, in some CI environments they may instead be reachable on a different host.
+            //the port mapped to the original exposed port of the cassandra image
+            Integer mappedPort9042 = cass.getMappedPort(9042);
+            //the host ip of the cassandra image in the container
+            String hostIP = cass.getHost();
+            //STEP1: Run the example cassandra workload using the schema tag to create the Cass Baselines keyspace
+            String[] args = new String[]{
+                "java", "-jar", JARNAME, shortName, "default", "host="+ hostIP, "localdc="+ datacenter, "port="+ mappedPort9042.toString()
+            };
+            logger.info("The final command line: " + String.join(" ", args));
+            ProcessResult runSchemaResult = invoker.run("run-workload", 30, args);
+            //STEP 2 Check runSchemaOut for errors
+            logger.info("Checking if the NB5 command resulted in any errors...");
+            assertThat(runSchemaResult.exception).isNull();
+            String runSchemaOut = String.join("\n", runSchemaResult.getStdoutData());
+            //assertThat(runSchemaOut.toLowerCase()).doesNotContain("error");
+            logger.info("NB5 command completed with no errors");
+            //STEP 4 Check the cluster for created data
+            try (CqlSession session = CqlSession.builder().addContactPoint(new InetSocketAddress(hostIP, mappedPort9042)).withLocalDatacenter(datacenter).build()) {
+                //->Check for the creation of the keyspace baselines
+                logger.info("Checking for the creation of the keyspace \"baselines\"...");
+                ResultSet result = session.execute("SELECT keyspace_name FROM system_schema.keyspaces");
+                List<Row> rows = result.all();
+                boolean keyspaceFound = false;
+                for (Row row : rows) {
+                    if (row.getString("keyspace_name").equals("baselines")) {
+                        keyspaceFound = true;
+                        break;
+                    }
+                }
+                assertTrue(keyspaceFound);
+                logger.info("Keyspace \"baselines\" was found, nb5 command had created it successfully");
+                //->Check for the creation of the baselines keyvalue table
+                logger.info("Checking for the creation of the table \"baselines.keyvalue\"...");
+                result = session.execute("SELECT table_name FROM system_schema.tables WHERE keyspace_name='baselines'");
+                rows = result.all();
+                boolean tableFound = false;
+                for (Row row : rows) {
+                    if (row.getString("table_name").equals("keyvalue")) {
+                        tableFound = true;
+                        break;
+                    }
+                }
+                assertTrue(tableFound);
+                logger.info("Table \"baselines.keyvalue\" was found, nb5 command had created it successfully");
+                //->Check for the creation of the baselines keyvalue table
+                logger.info("Table \"baselines.keyvalue\" has at least 5 rows of key-value pairs, nb5 command had created them successfully");
+                result = session.execute("SELECT count(*) FROM baselines.keyvalue");
+                int rowCount = result.one().getInt(0);
+                assertTrue(rowCount >= 5);
+                logger.info("Table \"baselines.keyvalue\" has at least 5 rows of key-value pairs, nb5 command had created them successfully");
+            } catch (Exception e)
+            {
+                System.out.println(e.getMessage());
+            }
+            cass.stop();
+        }
        //STEP5 Create a failing test to make sure that the workload won't work, here we use a random wrong IP
-        String[] args2 = new String[]{
-            "java", "-jar", JARNAME, "cql-keyvalue2.yaml", "default", "host=0.1.0.1", "localdc="+datacenter, "port="+ mappedPort9042.toString(), "rampup-cycles=10", "main-cycles=10"
-        };
-        logger.info("The final command line: " + String.join(" ", args2));
-        ProcessResult runFailingSchemaResult = invoker.run("run-workload", 30, args2);
-        assertThat(runFailingSchemaResult.exception).isNull();
-        String runFailingSchemaOut = String.join("\n", runFailingSchemaResult.getStdoutData());
-        assertThat(runFailingSchemaOut.toLowerCase()).contains("error");
-        System.out.println("end");
+        // String[] args2 = new String[]{
+        //     "java", "-jar", JARNAME, "cql-keyvalue2.yaml", "default", "host=0.1.0.1", "localdc="+datacenter, "port="+ mappedPort9042.toString(), "rampup-cycles=10", "main-cycles=10"
+        // };
+        // logger.info("The final command line: " + String.join(" ", args2));
+        // ProcessResult runFailingSchemaResult = invoker.run("run-workload", 30, args2);
+        // assertThat(runFailingSchemaResult.exception).isNull();
+        // String runFailingSchemaOut = String.join("\n", runFailingSchemaResult.getStdoutData());
+        // assertThat(runFailingSchemaOut.toLowerCase()).contains("error");
+        // System.out.println("end");
    }
    @AfterEach
    public void stopContainers(){
-        cass.stop();
    }
-    static class CassandraWaitStrategy extends AbstractWaitStrategy {
-        public CassandraWaitStrategy() {
-            withStartupTimeout(Duration.ofMinutes(2));
-        }
-        @Override
-        protected void waitUntilReady() {
-            // Custom wait strategy to determine if Cassandra is ready.
-            // For example, we can check the logs or perform a cql query to verify the status of Cassandra.
-            String logs = cass.getLogs();
-            Unreliables.retryUntilSuccess(120, TimeUnit.SECONDS, () -> {
-                if (logs.contains("Listening for thrift clients...")) {
-                    return true;
-                }
-                return false;
-            });
-        }
-    }
+    /*
+    This method filters the input list of workloads to output the subset of workloads that include a specific scenario (input)
+    and run the specified driver
+    */
+    public static List<WorkloadDesc> getBasicCheckWorkloadsForDriver(List<WorkloadDesc> workloads ,String scenarioFilter, String driver) {
+        String substring = "driver=" + driver;
+        ArrayList<WorkloadDesc> workloadsForDriver = new ArrayList<>();
+        for (WorkloadDesc workload : workloads) {
+            if(workload.getScenarioNames().contains(scenarioFilter)) {
+                try {
+                    Path yamlPath = Path.of(workload.getYamlPath());
+                    List<String> lines = Files.readAllLines(yamlPath);
+                    for (String line : lines) {
+                        if (line.contains(substring)) {
+                            workloadsForDriver.add(workload);
+                            break;
+                        }
+                    }
+                } catch (Exception e) {
+                    System.out.println("Error reading file " + workload.getYamlPath() + ": " + e.getMessage());
+                }
+            }
+        }
+        return workloadsForDriver;
+    }
}
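Where this phase is heading, reduced to a minimal sketch: once listWorkloads() has populated the per-driver map, a follow-on test for any other DriverType can iterate its own slice of the map the same way the CQL loop above does. This sketch assumes it runs inside the test class above (Driver, basicWorkloadsMapPerDriver, JARNAME, ProcessInvoker, ProcessResult, and WorkloadDesc are the members and types already shown); the scenario name and the omitted connection parameters are illustrative, not prescribed by this commit.

    // Sketch only: reuse the @BeforeAll-built map for another DriverType.
    List<WorkloadDesc> mongoWorkloads = basicWorkloadsMapPerDriver.get(Driver.MONGODB);
    if (mongoWorkloads == null || mongoWorkloads.isEmpty()) {
        return; // no example workload advertises basic_check for this driver yet
    }
    ProcessInvoker invoker = new ProcessInvoker();
    invoker.setLogDir("logs/test");
    for (WorkloadDesc workloadDesc : mongoWorkloads) {
        String workloadName = workloadDesc.getWorkloadName();
        String shortName = workloadName.substring(workloadName.lastIndexOf('/') + 1);
        // "basic_check" is the scenario added by this commit; connection
        // parameters for the target container would be appended as in the CQL test.
        String[] args = {"java", "-jar", JARNAME, shortName, "basic_check"};
        ProcessResult result = invoker.run("run-workload", 30, args);
        assertThat(result.exception).isNull();
    }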