diff --git a/activitytype-cql/pom.xml b/activitytype-cql/pom.xml new file mode 100644 index 000000000..1c1e352b1 --- /dev/null +++ b/activitytype-cql/pom.xml @@ -0,0 +1,140 @@ + + 4.0.0 + + + io.nosqlbench + mvn-defaults + 3.12.2-SNAPSHOT + ../mvn-defaults + + + at-cql + jar + ${project.artifactId} + + + A CQL ActivityType driver for http://nosqlbench.io/ + + + + UTF-8 + 1.9.0 + + + + + + + + io.nosqlbench + engine-api + 3.12.2-SNAPSHOT + + + + + + + + + + + + + + + + + + com.datastax.dse + dse-java-driver-core + ${dse-driver-version} + + + com.datastax.dse + dse-java-driver-extras + ${dse-driver-version} + + + com.datastax.dse + dse-java-driver-mapping + ${dse-driver-version} + + + + + org.lz4 + lz4-java + 1.4.1 + + + + + org.xerial.snappy + snappy-java + 1.1.2.6 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + org.testng + testng + 6.13.1 + test + + + + org.assertj + assertj-core-java8 + 1.0.0m1 + test + + + + + + + shade + + true + + + + + maven-shade-plugin + + ${project.artifactId} + + + + + + + + diff --git a/activitytype-cql/src/main/java/com/datastax/driver/core/M3PTokenFilter.java b/activitytype-cql/src/main/java/com/datastax/driver/core/M3PTokenFilter.java new file mode 100644 index 000000000..fd942c546 --- /dev/null +++ b/activitytype-cql/src/main/java/com/datastax/driver/core/M3PTokenFilter.java @@ -0,0 +1,47 @@ +package com.datastax.driver.core; + +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.List; +import java.util.OptionalLong; +import java.util.Set; + +public class M3PTokenFilter { + private final TokenRange[] ranges; + private final ProtocolVersion protocolVersion; + private final CodecRegistry codecRegistry; + private final Metadata clusterMetadata; + private final Token.Factory factory; + + public M3PTokenFilter(Set ranges, Cluster cluster) { + protocolVersion = cluster.getConfiguration().getProtocolOptions().getProtocolVersion(); + codecRegistry = cluster.getConfiguration().getCodecRegistry(); + 
clusterMetadata = cluster.getMetadata(); + factory = Token.getFactory(clusterMetadata.partitioner); + List rangeList = new ArrayList<>(); + for (TokenRange range : ranges) { + if (!range.getStart().getType().equals(DataType.bigint())) { + throw new RuntimeException("This filter only works with bigint valued token types"); + } + rangeList.add(range); + } + this.ranges=rangeList.toArray(new TokenRange[0]); + if (this.ranges.length<1) { + throw new RuntimeException("There were no tokens found. Please check your keyspace and cluster settings."); + } + } + + public OptionalLong matches(Statement statement) { + ByteBuffer routingKey = statement.getRoutingKey(protocolVersion, codecRegistry); + Token token = factory.hash(routingKey); + + for (TokenRange range : ranges) { + if (range.contains(token)) { + return OptionalLong.of((long)token.getValue()); + } + } + return OptionalLong.empty(); + } + + +} diff --git a/activitytype-cql/src/main/java/com/datastax/driver/core/TokenRangeStmtFilter.java b/activitytype-cql/src/main/java/com/datastax/driver/core/TokenRangeStmtFilter.java new file mode 100644 index 000000000..73d90b628 --- /dev/null +++ b/activitytype-cql/src/main/java/com/datastax/driver/core/TokenRangeStmtFilter.java @@ -0,0 +1,60 @@ +package com.datastax.driver.core; + +import io.nosqlbench.activitytype.cql.ebdrivers.cql.api.StatementFilter; + +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.stream.Collectors; + +public class TokenRangeStmtFilter implements StatementFilter { + + private final Metadata clusterMetadata; + private final ProtocolVersion protocolVersion; + private final CodecRegistry codecRegistry; + private final Token.Factory factory; + private TokenRange[] ranges; + + public TokenRangeStmtFilter(Cluster cluster, String rangesSpec) { + clusterMetadata = cluster.getMetadata(); + protocolVersion = cluster.getConfiguration().getProtocolOptions().getProtocolVersion(); + 
codecRegistry = cluster.getConfiguration().getCodecRegistry(); + factory = Token.getFactory(clusterMetadata.partitioner); + ranges = parseRanges(factory, rangesSpec); + } + + private TokenRange[] parseRanges(Token.Factory factory, String rangesStr) { + String[] ranges = rangesStr.split(","); + List tr = new ArrayList<>(); + + for (String range : ranges) { + String[] interval = range.split(":"); + Token start = factory.fromString(interval[0]); + Token end = factory.fromString(interval[1]); + TokenRange tokenRange = new TokenRange(start, end, factory); + tr.add(tokenRange); + } + return tr.toArray(new TokenRange[tr.size()]); + } + + @Override + public boolean matches(Statement statement) { + ByteBuffer routingKey = statement.getRoutingKey(protocolVersion, codecRegistry); + Token token = factory.hash(routingKey); + for (TokenRange range : ranges) { + if (range.contains(token)) { + return true; + } + } + return false; + } + + @Override + public String toString() { + return "including token ranges: " + + Arrays.stream(ranges) + .map(String::valueOf) + .collect(Collectors.joining(",")); + } +} diff --git a/activitytype-cql/src/main/java/com/datastax/driver/core/TokenRangeUtil.java b/activitytype-cql/src/main/java/com/datastax/driver/core/TokenRangeUtil.java new file mode 100644 index 000000000..3f4f23e0e --- /dev/null +++ b/activitytype-cql/src/main/java/com/datastax/driver/core/TokenRangeUtil.java @@ -0,0 +1,71 @@ +package com.datastax.driver.core; + +import java.io.BufferedWriter; +import java.io.FileWriter; +import java.io.IOException; +import java.util.Comparator; +import java.util.Set; + +public class TokenRangeUtil { + + private final Metadata clusterMetadata; + private final ProtocolVersion protocolVersion; + private final CodecRegistry codecRegistry; + private final Token.Factory factory; + private final Cluster cluster; + + public TokenRangeUtil(Cluster cluster) { + this.cluster= cluster; + clusterMetadata = cluster.getMetadata(); + protocolVersion = 
cluster.getConfiguration().getProtocolOptions().getProtocolVersion(); + codecRegistry = cluster.getConfiguration().getCodecRegistry(); + factory = Token.getFactory(clusterMetadata.partitioner); + } + + public Set getTokenRangesFor(String keyspace, String hostaddress) { + Host host=null; + if (hostaddress.matches("\\d+")) { + int hostenum = Integer.parseInt(hostaddress); + host = clusterMetadata.getAllHosts().stream() + .sorted(Comparator.comparing(h -> h.getAddress().toString())) + .skip(hostenum) + .findFirst() + .orElseThrow(); + } else if (!hostaddress.isEmpty()) { + host = clusterMetadata.getAllHosts().stream() + .filter(h -> h.getAddress().toString().replaceAll("/","").equals(hostaddress)) + .findFirst() + .orElseThrow(); + } else { + throw new RuntimeException("You must specify a host enum in order or a host address."); + } + return clusterMetadata.getTokenRanges(keyspace,host); + } + + + public void printRanges(String tokensks) { + Set hosts = clusterMetadata.getAllHosts(); + + for (Host host : hosts) { + String address = host.getAddress().toString().substring(1); + BufferedWriter writer = null; + try { + writer = new BufferedWriter(new FileWriter("ranges-"+address)); + String ranges = getTokenRangesFor(tokensks, address).toString(); + writer.write(ranges); + + writer.close(); + } catch (IOException e) { + e.printStackTrace(); + throw new RuntimeException("Can't write token range files"); + } + } + + } + + + public M3PTokenFilter getFilterFor(Set ranges) { + return new M3PTokenFilter(ranges, this.cluster); + } + +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/codecsupport/CQLUserTypeNames.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/codecsupport/CQLUserTypeNames.java new file mode 100644 index 000000000..9d92aa59f --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/codecsupport/CQLUserTypeNames.java @@ -0,0 +1,12 @@ +package io.nosqlbench.activitytype.cql.codecsupport; + 
+import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +@Retention(RetentionPolicy.RUNTIME) +@Target(ElementType.TYPE) +public @interface CQLUserTypeNames { + String[] value(); +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/codecsupport/UDTCodecClasses.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/codecsupport/UDTCodecClasses.java new file mode 100644 index 000000000..ff845a247 --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/codecsupport/UDTCodecClasses.java @@ -0,0 +1,12 @@ +package io.nosqlbench.activitytype.cql.codecsupport; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +@Retention(RetentionPolicy.RUNTIME) +@Target(ElementType.TYPE) +public @interface UDTCodecClasses { + Class[] value(); +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/codecsupport/UDTCodecInjector.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/codecsupport/UDTCodecInjector.java new file mode 100644 index 000000000..c2fb720c9 --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/codecsupport/UDTCodecInjector.java @@ -0,0 +1,33 @@ +package io.nosqlbench.activitytype.cql.codecsupport; + +import com.datastax.driver.core.CodecRegistry; +import com.datastax.driver.core.Session; +import com.datastax.driver.core.UserType; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.ArrayList; +import java.util.List; +import java.util.ServiceLoader; + +public class UDTCodecInjector { + private final static Logger logger = LoggerFactory.getLogger(UDTCodecInjector.class); + + private List codecProviders = new ArrayList<>(); + private List userTypes = new ArrayList<>(); + + public void 
injectUserProvidedCodecs(Session session, boolean allowAcrossKeyspaces) { + + CodecRegistry registry = session.getCluster().getConfiguration().getCodecRegistry(); + + ServiceLoader codecLoader = ServiceLoader.load(UserCodecProvider.class); + + for (UserCodecProvider userCodecProvider : codecLoader) { + codecProviders.add(userCodecProvider); + } + + for (UserCodecProvider codecProvider : codecProviders) { + codecProvider.registerCodecsForCluster(session,true); + } + } +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/codecsupport/UDTJavaType.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/codecsupport/UDTJavaType.java new file mode 100644 index 000000000..8cc74b8e5 --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/codecsupport/UDTJavaType.java @@ -0,0 +1,12 @@ +package io.nosqlbench.activitytype.cql.codecsupport; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +@Retention(RetentionPolicy.RUNTIME) +@Target(ElementType.TYPE) +public @interface UDTJavaType { + Class value(); +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/codecsupport/UDTTransformCodec.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/codecsupport/UDTTransformCodec.java new file mode 100644 index 000000000..0328a18f9 --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/codecsupport/UDTTransformCodec.java @@ -0,0 +1,22 @@ +package io.nosqlbench.activitytype.cql.codecsupport; + +import com.datastax.driver.core.TypeCodec; +import com.datastax.driver.core.UDTValue; +import com.datastax.driver.core.UserType; +import com.datastax.driver.extras.codecs.MappingCodec; + +public abstract class UDTTransformCodec extends MappingCodec { + + protected UserType userType; + + public UDTTransformCodec(UserType userType, Class javaType) { + 
super(TypeCodec.userType(userType), javaType); + this.userType = userType; + } + + public UserType getUserType() { + return userType; + } + + +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/codecsupport/UserCodecProvider.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/codecsupport/UserCodecProvider.java new file mode 100644 index 000000000..3a9cb4efa --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/codecsupport/UserCodecProvider.java @@ -0,0 +1,138 @@ +package io.nosqlbench.activitytype.cql.codecsupport; + +import com.datastax.driver.core.*; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.lang.reflect.Constructor; +import java.util.*; +import java.util.stream.Collectors; + +public abstract class UserCodecProvider { + + private final static Logger logger = LoggerFactory.getLogger(UserCodecProvider.class); + + public List registerCodecsForCluster( + Session session, + boolean allowAcrossKeyspaces + ) { + List typeCodecs = new ArrayList<>(); + + List ksMetas = new ArrayList<>(session.getCluster().getMetadata().getKeyspaces()); + + for (KeyspaceMetadata keyspace : ksMetas) { + + List keyspaceCodecs = registerCodecsForKeyspace(session, keyspace.getName()); + + for (UDTTransformCodec typeCodec : keyspaceCodecs) { + if (typeCodecs.contains(typeCodec) && !allowAcrossKeyspaces) { + throw new RuntimeException("codec " + typeCodec + " could be registered" + + "in multiple keyspaces, but this is not allowed."); + } + typeCodecs.add(typeCodec); + logger.debug("Found user-provided codec for ks:" + keyspace + ", udt:" + typeCodec); + } + } + return typeCodecs; + } + + public List registerCodecsForKeyspace(Session session, String keyspace) { + + CodecRegistry registry = session.getCluster().getConfiguration().getCodecRegistry(); + + List codecsForKeyspace = new ArrayList<>(); + + KeyspaceMetadata ksMeta = session.getCluster().getMetadata().getKeyspace(keyspace); + if 
(ksMeta==null) { + logger.warn("No metadata for " + keyspace); + return Collections.emptyList(); + } + Collection typesInKeyspace = ksMeta.getUserTypes(); + + List> providedCodecClasses = getUDTCodecClasses(); + + Map> codecMap = new HashMap<>(); + + for (Class providedCodecClass : providedCodecClasses) { + Class udtCodecClass = (Class) providedCodecClass; + + List targetUDTTypes = getUDTTypeNames(udtCodecClass); + for (UserType keyspaceUserType : typesInKeyspace) { + String ksTypeName = keyspaceUserType.getTypeName(); + String globalTypeName = (ksTypeName.contains(".") ? ksTypeName.split("\\.",2)[1] : ksTypeName); + if (targetUDTTypes.contains(ksTypeName) || targetUDTTypes.contains(globalTypeName)) { + codecMap.put(keyspaceUserType, udtCodecClass); + } + } + } + + for (UserType userType : codecMap.keySet()) { + Class codecClass = codecMap.get(userType); + Class udtJavaType = getUDTJavaType(codecClass); + UDTTransformCodec udtCodec = instantiate(userType, codecClass, udtJavaType); + codecsForKeyspace.add(udtCodec); + registry.register(udtCodec); + logger.info("registered codec:" + udtCodec); + } + + return codecsForKeyspace; + + } + + private UDTTransformCodec instantiate(UserType key, Class codecClass, Class javaType) { + try { + Constructor ctor = codecClass.getConstructor(UserType.class, Class.class); + UDTTransformCodec typeCodec = ctor.newInstance(key, javaType); + return typeCodec; + } catch (Exception e) { + e.printStackTrace(); + throw new RuntimeException(e); + } + } + + private List> getUDTCodecClasses() { + UDTCodecClasses[] annotationsByType = this.getClass().getAnnotationsByType(UDTCodecClasses.class); + List> codecClasses = Arrays.stream(annotationsByType) + .map(UDTCodecClasses::value) + .flatMap(Arrays::stream) + .collect(Collectors.toList()); + return codecClasses; + } + + /** + * Allows simple annotation of implementations of this class to use + * {@code @CQLUserTypeNames({"type1","type2",...}} + * + * @param codecClass the UDTTransformCode class 
which is to be inspected + * @return THe list of target UDT type names, as defined in CQL + */ + private List getUDTTypeNames(Class codecClass) { + CQLUserTypeNames[] annotationsByType = codecClass.getAnnotationsByType(CQLUserTypeNames.class); + List cqlTypeNames = new ArrayList<>(); + + for (CQLUserTypeNames cqlUserTypeNames : annotationsByType) { + cqlTypeNames.addAll(Arrays.asList(cqlUserTypeNames.value())); + } + return cqlTypeNames; + } + + /** + * Allows simple annotation of implementations of this class to use + * {@code @UDTJavaType(POJOType.class)} + * + * @param codecClass the UDTTransformCode class which is to be inspected + * @return The class type of the POJO which this codec maps to and from + */ + private Class getUDTJavaType(Class codecClass) { + UDTJavaType[] annotationsByType = codecClass.getAnnotationsByType(UDTJavaType.class); + Class javaType = Arrays.stream(annotationsByType) + .map(UDTJavaType::value) + .findFirst() + .orElseThrow( + () -> new RuntimeException("Unable to find UDTJavaType annotation for " + codecClass.getCanonicalName()) + ); + return (Class) javaType; + } + + +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/datamappers/functions/collectionclobs/StringMapClob.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/datamappers/functions/collectionclobs/StringMapClob.java new file mode 100644 index 000000000..7ad2b24ac --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/datamappers/functions/collectionclobs/StringMapClob.java @@ -0,0 +1,124 @@ +package io.nosqlbench.activitytype.cql.datamappers.functions.collectionclobs; + +import io.nosqlbench.virtdata.annotations.Categories; +import io.nosqlbench.virtdata.annotations.Category; +import io.nosqlbench.virtdata.annotations.Example; +import io.nosqlbench.virtdata.annotations.ThreadSafeMapper; + +import java.util.function.LongFunction; +import java.util.function.LongToIntFunction; + +/** + * Create a {@code Map} 
from a long input + * based on three functions, + * the first to determine the map size, and the second to populate + * the map with key objects, and the third to populate the map with + * value objects. The long input fed to the second and third functions + * is incremented between entries. Regardless of the object type provided + * by the second and third functions, {@link Object#toString()} + * is used to determine the key and value to add to the map. + * + * To create Maps of any key and value types, simply use + * {@link java.util.Map} with + * an specific key and value mapping functions. + */ + +@Categories({Category.collections}) +@ThreadSafeMapper +public class StringMapClob implements LongFunction { + + private final static ThreadLocal tl_sb = ThreadLocal.withInitial(StringBuilder::new); + + private final LongToIntFunction sizeFunc; + private final LongFunction[] keyFuncs; + private final LongFunction[] valueFuncs; + private final Mode mode; + private final static String BEFORE_RESULT = "{"; + private final static String AFTER_RESULT = "}"; + private final static String KEY_QUOTE ="'"; + private final static String VAL_QUOTE = "'"; + private final static String ASSIGNMENT = ": "; + private final static String BETWEEN_ENTRIES = ", "; + + @Example({"StringMap(HashRange(3,7),NumberNameToString(),HashRange(1300,1700))", + "create a map of size 3-7 entries, with a key of type " + + "string and a value of type int (Integer by autoboxing)"}) + public StringMapClob(LongToIntFunction sizeFunc, + LongFunction keyFunc, + LongFunction valueFunc) { + this.mode = Mode.VarSized; + + this.sizeFunc = sizeFunc; + this.keyFuncs = new LongFunction[1]; + keyFuncs[0] = keyFunc; + this.valueFuncs = new LongFunction[1]; + valueFuncs[0] = valueFunc; + } + + @Example({"StringMapClob(NumberNameToString(),HashRange(1300,1700),NumberNameToString(),HashRange(3,7))", + "create a map of size 2, with a specific function for each key and each value"}) + @SafeVarargs + public 
StringMapClob(LongFunction... objfuncs) { + this.mode = Mode.Tuples; + if ((objfuncs.length % 2) != 0) { + throw new RuntimeException("An even number of functions must be provided."); + } + int size = objfuncs.length / 2; + sizeFunc = (l) -> size; + keyFuncs = new LongFunction[size]; + valueFuncs = new LongFunction[size]; + for (int i = 0; i < size; i++) { + keyFuncs[i] = objfuncs[i << 1]; + valueFuncs[i] = objfuncs[(i << 1) + 1]; + } + } + + + @Override + public String apply(long value) { + + // "{key='value',key='value'}" + + StringBuilder sb = tl_sb.get(); + sb.setLength(0); + sb.append(BEFORE_RESULT); + + int size = sizeFunc.applyAsInt(value); + + switch (mode) { + case VarSized: + for (int i = 0; i < size; i++) { + Object keyObject = keyFuncs[0].apply(value + i); + Object valueObject = valueFuncs[0].apply(value + i); + + sb.append(KEY_QUOTE).append(keyObject).append(KEY_QUOTE); + sb.append(ASSIGNMENT); + sb.append(VAL_QUOTE).append(valueObject).append(VAL_QUOTE); + sb.append(BETWEEN_ENTRIES); + } + break; + case Tuples: + for (int i = 0; i < keyFuncs.length; i++) { + Object keyObject = keyFuncs[i].apply(value + i); + Object valueObject = valueFuncs[i].apply(value + i); + + sb.append(KEY_QUOTE).append(keyObject).append(KEY_QUOTE); + sb.append(ASSIGNMENT); + sb.append(VAL_QUOTE).append(valueObject).append(VAL_QUOTE); + sb.append(BETWEEN_ENTRIES); + } + break; + } + sb.setLength(sb.length()-BETWEEN_ENTRIES.length()); + + sb.append(AFTER_RESULT); + return sb.toString(); + } + + private enum Mode { + VarSized, + Tuples + } + + +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/datamappers/functions/collections/ListMapper.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/datamappers/functions/collections/ListMapper.java new file mode 100644 index 000000000..8c21a50cc --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/datamappers/functions/collections/ListMapper.java @@ -0,0 +1,41 @@ 
+package io.nosqlbench.activitytype.cql.datamappers.functions.collections; + +import io.nosqlbench.virtdata.annotations.Example; +import io.nosqlbench.virtdata.annotations.ThreadSafeMapper; +import io.nosqlbench.virtdata.api.DataMapper; +import io.nosqlbench.virtdata.api.VirtData; + +import java.util.ArrayList; +import java.util.List; +import java.util.function.LongFunction; + +/** + * This is an example of a mapping function that can create a list of objects + * from another internal mapping function. + * + * The input value for each function is incremented by one from the initial input value + * this this overall function. + * + */ +@ThreadSafeMapper +public class ListMapper implements LongFunction> { + + private int size; + private DataMapper elementMapper; + + @Example({"ListMapper(5,NumberNameToString())","creates a list of number names"}) + public ListMapper(int size, String genSpec) { + this.size = size; + elementMapper = VirtData.getMapper(genSpec,String.class); + } + + @Override + public List apply(long value) { + List list = new ArrayList<>(size); + for (int listpos = 0; listpos < size; listpos++) { + Object o = elementMapper.get(value + listpos); + list.add(o); + } + return list; + } +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/datamappers/functions/contrib/WrappedClustering.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/datamappers/functions/contrib/WrappedClustering.java new file mode 100644 index 000000000..59bcf2207 --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/datamappers/functions/contrib/WrappedClustering.java @@ -0,0 +1,23 @@ +package io.nosqlbench.activitytype.cql.datamappers.functions.contrib; + + +import io.nosqlbench.virtdata.annotations.ThreadSafeMapper; + +import java.util.function.IntUnaryOperator; + +@ThreadSafeMapper +public class WrappedClustering implements IntUnaryOperator { + + @Override + public int applyAsInt(int operand) { + long longOperand 
= operand; + long longOperandTimes15 = longOperand * 15; + long integerMax = Integer.MAX_VALUE + 1; + long integerMin = Integer.MIN_VALUE; + long sign = (long) Math.pow((-1), longOperandTimes15/integerMax); + if (sign > 0) + return (int) (sign * (longOperandTimes15 % integerMax)); + else + return (int) (integerMin - (sign * (longOperandTimes15 % integerMax))); + } +} \ No newline at end of file diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/datamappers/functions/diagnostics/ToCqlType.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/datamappers/functions/diagnostics/ToCqlType.java new file mode 100644 index 000000000..afce8c7a7 --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/datamappers/functions/diagnostics/ToCqlType.java @@ -0,0 +1,114 @@ +package io.nosqlbench.activitytype.cql.datamappers.functions.diagnostics; + +import com.datastax.driver.core.DataType; +import com.datastax.driver.core.LocalDate; +import com.datastax.driver.core.TupleValue; +import com.datastax.driver.core.UDTValue; +import io.nosqlbench.virtdata.annotations.ThreadSafeMapper; + +import java.math.BigDecimal; +import java.math.BigInteger; +import java.net.InetAddress; +import java.nio.ByteBuffer; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.function.Function; + +/** + * Shows the compatible CQL type most associated with the incoming Java type. 
+ */ +@ThreadSafeMapper +public class ToCqlType implements Function { + + private final static Map typemap = new HashMap() {{ + put("a", "b"); + put(String.class.getCanonicalName(), DataType.text().getName().toString() + + " or " + DataType.ascii().getName().toString() + + " or " + DataType.varchar().getName().toString()); + put(Long.class.getCanonicalName(), DataType.bigint().getName().toString() + + " or " + DataType.time().getName().toString() + + " or " + DataType.counter().getName().toString()); + put(long.class.getCanonicalName(), DataType.bigint().getName().toString() + + " or " + DataType.counter().getName().toString()); + put(ByteBuffer.class.getCanonicalName(), DataType.blob().getName().toString() + + ",CUSTOM"); + put(Boolean.class.getCanonicalName(), DataType.cboolean().getName().toString()); + put(boolean.class.getCanonicalName(), DataType.cboolean().getName().toString()); + put(BigDecimal.class.getCanonicalName(), DataType.decimal().getName().toString()); + put(Double.class.getCanonicalName(),DataType.cdouble().getName().toString()); + put(double.class.getCanonicalName(),DataType.cdouble().getName().toString()); + put(Float.class.getCanonicalName(), DataType.cfloat().getName().toString()); + put(float.class.getCanonicalName(), DataType.cfloat().getName().toString()); + put(InetAddress.class.getCanonicalName(), DataType.inet().getName().toString()); + put(Integer.class.getCanonicalName(),DataType.cint().getName().toString()); + put(int.class.getCanonicalName(),DataType.cint().getName().toString()); + put(java.util.Date.class.getCanonicalName(),DataType.timestamp().getName().toString()); + put(java.util.UUID.class.getCanonicalName(),DataType.timeuuid().getName().toString()+" or "+DataType.uuid().getName().toString()); + put(BigInteger.class.getCanonicalName(),DataType.varint().getName().toString()); + put(Short.class.getCanonicalName(), DataType.smallint().getName().toString()); + put(short.class.getCanonicalName(), 
DataType.smallint().getName().toString()); + put(Byte.class.getCanonicalName(), DataType.tinyint().getName().toString()); + put(byte.class.getCanonicalName(), DataType.tinyint().getName().toString()); + put(LocalDate.class.getCanonicalName(), DataType.date().getName().toString()); + put(UDTValue.class.getCanonicalName(), ""); + put(TupleValue.class.getCanonicalName(),""); + }}; + + private final ThreadLocal tlsb = ThreadLocal.withInitial(StringBuilder::new); + + @Override + public String apply(Object o) { + String canonicalName = o.getClass().getCanonicalName(); + String cqlTypeName = typemap.get(canonicalName); + StringBuilder sb = tlsb.get(); + sb.setLength(0); + if (cqlTypeName!=null) { + return sb.append(canonicalName).append(" -> ").append(cqlTypeName).toString(); + } + return findAlternates(o,canonicalName); + } + + private String findAlternates(Object o, String canonicalName) { + StringBuilder sb = tlsb.get(); + + if (List.class.isAssignableFrom(o.getClass())) { + sb.append(canonicalName).append("<"); + + if (((List)o).size()>0) { + Object o1 = ((List) o).get(0); + String elementType = o1.getClass().getCanonicalName(); + sb.append(elementType).append("> -> List<"); + sb.append(typemap.getOrDefault(elementType,"UNKNOWN")).append(">"); + return sb.toString(); + } + return sb.append("?> -> List").toString(); + } + if (Map.class.isAssignableFrom(o.getClass())) { + sb.append(canonicalName).append("<"); + if (((Map)o).size()>0) { + Map.Entry next = (Map.Entry) ((Map) o).entrySet().iterator().next(); + String keyType = next.getKey().getClass().getCanonicalName(); + String valType = next.getValue().getClass().getCanonicalName(); + sb.append(keyType).append(",").append(valType).append("> -> Map<"); + sb.append(typemap.getOrDefault(keyType,"UNKNOWN")).append(","); + sb.append(typemap.getOrDefault(valType,"UNKNOWN")).append(">"); + return sb.toString(); + } + return sb.append("?,?> -> Map").toString(); + } + if (Set.class.isAssignableFrom(o.getClass())) { + 
sb.append(canonicalName).append("<"); + if (((Set)o).size()>0) { + Object o1=((Set)o).iterator().next(); + String elementType = o1.getClass().getCanonicalName(); + sb.append(elementType).append("> -> Set<"); + sb.append(typemap.getOrDefault(elementType,"UNKNOWN")).append(">"); + return sb.toString(); + } + return sb.append("?> -> Set").toString(); + } + return typemap.getOrDefault(o.getClass().getSuperclass().getCanonicalName(), "UNKNOWN"); + } +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/datamappers/functions/geometry/Distance.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/datamappers/functions/geometry/Distance.java new file mode 100644 index 000000000..1121621f0 --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/datamappers/functions/geometry/Distance.java @@ -0,0 +1,66 @@ +package io.nosqlbench.activitytype.cql.datamappers.functions.geometry; + +import com.datastax.driver.dse.geometry.Point; +import io.nosqlbench.virtdata.annotations.ThreadSafeMapper; + +import java.util.function.LongFunction; +import java.util.function.LongToDoubleFunction; + +/** + * Create a Distance generator which produces + * com.datastax.driver.dse.geometry.Distance objects. 
+ */ +@ThreadSafeMapper +public class Distance implements LongFunction { + + private final io.nosqlbench.activitytype.cql.datamappers.functions.geometry.Point pointfunc; + private final LongToDoubleFunction rfunc; + + public Distance(LongToDoubleFunction xfunc, LongToDoubleFunction yfunc, LongToDoubleFunction rfunc) { + pointfunc = new io.nosqlbench.activitytype.cql.datamappers.functions.geometry.Point(xfunc,yfunc); + this.rfunc = rfunc; + } + + public Distance(double x, LongToDoubleFunction yfunc, LongToDoubleFunction rfunc) { + pointfunc = new io.nosqlbench.activitytype.cql.datamappers.functions.geometry.Point((u)->x,yfunc); + this.rfunc = rfunc; + } + + public Distance(LongToDoubleFunction xfunc, double y, LongToDoubleFunction rfunc) { + pointfunc = new io.nosqlbench.activitytype.cql.datamappers.functions.geometry.Point(xfunc,(v)->y); + this.rfunc = rfunc; + } + + public Distance(double x, double y, LongToDoubleFunction rfunc) { + pointfunc = new io.nosqlbench.activitytype.cql.datamappers.functions.geometry.Point((u)->x,(v)->y); + this.rfunc = rfunc; + } + + public Distance(LongToDoubleFunction xfunc, LongToDoubleFunction yfunc, double r) { + pointfunc = new io.nosqlbench.activitytype.cql.datamappers.functions.geometry.Point(xfunc,yfunc); + this.rfunc = (w) -> r; + } + + public Distance(double x, LongToDoubleFunction yfunc, double r) { + pointfunc = new io.nosqlbench.activitytype.cql.datamappers.functions.geometry.Point((u)->x,yfunc); + this.rfunc = (w) -> r; + } + + public Distance(LongToDoubleFunction xfunc, double y, double r) { + pointfunc = new io.nosqlbench.activitytype.cql.datamappers.functions.geometry.Point(xfunc,(v)->y); + this.rfunc = (w) -> r; + } + + public Distance(double x, double y, double r) { + pointfunc = new io.nosqlbench.activitytype.cql.datamappers.functions.geometry.Point((u) -> x, (v) -> y); + this.rfunc = (w) -> r; + } + + + @Override + public com.datastax.driver.dse.geometry.Distance apply(long value) { + Point apoint = 
pointfunc.apply(value); + double aradius = rfunc.applyAsDouble(value); + return new com.datastax.driver.dse.geometry.Distance(apoint,aradius); + } +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/datamappers/functions/geometry/LineString.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/datamappers/functions/geometry/LineString.java new file mode 100644 index 000000000..fd7c7c077 --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/datamappers/functions/geometry/LineString.java @@ -0,0 +1,46 @@ +package io.nosqlbench.activitytype.cql.datamappers.functions.geometry; + +//import com.datastax.driver.dse.geometry.Point; + +import io.nosqlbench.virtdata.annotations.ThreadSafeMapper; + +import java.util.function.LongFunction; +import java.util.function.LongToDoubleFunction; +import java.util.function.LongToIntFunction; + +@SuppressWarnings("Duplicates") +@ThreadSafeMapper +public class LineString implements LongFunction { + + private final LongFunction pointfunc; + private final LongToIntFunction lenfunc; + + public LineString(LongToIntFunction lenfunc, LongFunction pointfunc) { + this.pointfunc = pointfunc; + this.lenfunc = lenfunc; + } + + public LineString(LongToIntFunction lenfunc, LongToDoubleFunction xfunc, LongToDoubleFunction yfunc) { + this.lenfunc = lenfunc; + this.pointfunc=new Point(xfunc,yfunc); + } + + public LineString(int len, LongFunction pointfunc) { + this.lenfunc = (i) -> len; + this.pointfunc = pointfunc; + } + + @Override + public com.datastax.driver.dse.geometry.LineString apply(long value) { + int linelen = Math.max(lenfunc.applyAsInt(value),2); + com.datastax.driver.dse.geometry.Point p0 = pointfunc.apply(value); + com.datastax.driver.dse.geometry.Point p1 = pointfunc.apply(value+1); + + com.datastax.driver.dse.geometry.Point[] points = new com.datastax.driver.dse.geometry.Point[linelen-2]; + + for (int i = 2; i < linelen; i++) { + points[i-2]=pointfunc.apply(value+i); + 
} + return new com.datastax.driver.dse.geometry.LineString(p0,p1,points); + } +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/datamappers/functions/geometry/Point.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/datamappers/functions/geometry/Point.java new file mode 100644 index 000000000..27d08695d --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/datamappers/functions/geometry/Point.java @@ -0,0 +1,44 @@ +package io.nosqlbench.activitytype.cql.datamappers.functions.geometry; + +import io.nosqlbench.virtdata.annotations.ThreadSafeMapper; + +import java.util.function.LongFunction; +import java.util.function.LongToDoubleFunction; + +/** + * Create a Point generator which generates com.datastax.driver.dse.geometry.Point + * objects. + */ +@ThreadSafeMapper +public class Point implements LongFunction { + + private final LongToDoubleFunction xfunc; + private final LongToDoubleFunction yfunc; + + + public Point(double x, double y) { + this.xfunc = (u) -> x; + this.yfunc = (v) -> y; + } + + public Point(double x, LongToDoubleFunction yfunc) { + this.xfunc = (u) -> x; + this.yfunc = yfunc; + } + + public Point(LongToDoubleFunction xfunc, double y) { + this.xfunc = xfunc; + this.yfunc = (v) -> y; + } + + public Point(LongToDoubleFunction xfunc, LongToDoubleFunction yfunc) { + this.xfunc = xfunc; + this.yfunc = yfunc; + } + + + @Override + public com.datastax.driver.dse.geometry.Point apply(long value) { + return new com.datastax.driver.dse.geometry.Point(xfunc.applyAsDouble(value), yfunc.applyAsDouble(value)); + } +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/datamappers/functions/geometry/Polygon.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/datamappers/functions/geometry/Polygon.java new file mode 100644 index 000000000..3dd4635df --- /dev/null +++ 
b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/datamappers/functions/geometry/Polygon.java @@ -0,0 +1,45 @@ +package io.nosqlbench.activitytype.cql.datamappers.functions.geometry; + +import com.datastax.driver.dse.geometry.Point; +import io.nosqlbench.virtdata.annotations.ThreadSafeMapper; + +import java.util.function.LongFunction; +import java.util.function.LongToDoubleFunction; +import java.util.function.LongToIntFunction; + +@SuppressWarnings("ALL") +@ThreadSafeMapper +public class Polygon implements LongFunction { + + private final LongFunction pointfunc; + private final LongToIntFunction lenfunc; + + public Polygon(LongToIntFunction lenfunc, LongFunction pointfunc) { + this.pointfunc = pointfunc; + this.lenfunc = lenfunc; + } + + public Polygon(LongToIntFunction lenfunc, LongToDoubleFunction xfunc, LongToDoubleFunction yfunc) { + this.lenfunc = lenfunc; + this.pointfunc=new io.nosqlbench.activitytype.cql.datamappers.functions.geometry.Point(xfunc,yfunc); + } + + public Polygon(int len, LongFunction pointfunc) { + this.lenfunc = (i) -> len; + this.pointfunc = pointfunc; + } + + @Override + public com.datastax.driver.dse.geometry.Polygon apply(long value) { + int linelen = Math.max(lenfunc.applyAsInt(value),3); + Point p0 = pointfunc.apply(value); + Point p1 = pointfunc.apply(value+1); + Point p2 = pointfunc.apply(value+2); + Point[] points = new Point[linelen-3]; + + for (int i = 3; i < linelen; i++) { + points[i-3]=pointfunc.apply(value+i); + } + return new com.datastax.driver.dse.geometry.Polygon(p0,p1,p2,points); + } +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/datamappers/functions/geometry/PolygonOnGrid.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/datamappers/functions/geometry/PolygonOnGrid.java new file mode 100644 index 000000000..381d7c659 --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/datamappers/functions/geometry/PolygonOnGrid.java @@ -0,0 
// ----- File: PolygonOnGrid.java -----
package io.nosqlbench.activitytype.cql.datamappers.functions.geometry;

import com.datastax.driver.dse.geometry.Point;
import com.datastax.driver.dse.geometry.Polygon;
import io.nosqlbench.virtdata.annotations.Example;
import io.nosqlbench.virtdata.annotations.ThreadSafeMapper;
import io.nosqlbench.virtdata.library.curves4.discrete.long_int.Uniform;

import java.util.function.LongFunction;

/**
 * This function will return a polygon in the form of a rectangle from the specified
 * grid system. The coordinates define the top left and bottom right coordinates in
 * (x1,y1),(x2,y2) order, while the number of rows and columns divides these ranges
 * into the unit-length for each square.
 * x1 must be less than x2. y1 must be greater than y2.
 * (Doc fix: the constraints previously stated here were inverted relative to the
 * constructor's actual argument checks.)
 *
 * This grid system can be used to construct a set of overlapping grids such that the
 * likelihood of overlap is somewhat easy to reason about. For example, if you create
 * one grid system as a reference grid, then attempt to map another grid system which
 * half overlaps the original grid, you can easily determine that half the time, a
 * random rectangle selected from the second grid will overlap a rectangle from the
 * first, for simple even-numbered grids and the expected uniform sampling on the
 * internal coordinate selector functions.
 */
@SuppressWarnings("ALL")
@ThreadSafeMapper
public class PolygonOnGrid implements LongFunction<Polygon> {

    private final double rows;
    private final double columns;
    private final double x_topleft;
    private final double y_topleft;
    private final double x_bottomright;
    private final double y_bottomright;
    private final Uniform rowfunc;
    private final Uniform colfunc;
    private final double xwidth;
    private final double yheight;

    @Example({"PolygonOnGrid(1, 11, 11, 1, 10, 10)","Create a 10x10 grid with cells 1x1, spaced one off the y=0 and x=0 axes"})
    public PolygonOnGrid(double x_topleft, double y_topleft, double x_bottomright, double y_bottomright, int rows, int columns) {

        if (x_topleft >= x_bottomright) {
            throw new RuntimeException("x_topleft should be less than x_bottomright");
        }
        if (y_topleft <= y_bottomright) {
            throw new RuntimeException("y_topleft should be more than y_bottomright");
        }

        this.x_topleft = x_topleft;
        this.y_topleft = y_topleft;
        this.x_bottomright = x_bottomright;
        this.y_bottomright = y_bottomright;

        this.rows = rows;
        this.columns = columns;

        this.xwidth = (x_bottomright - x_topleft) / columns;
        // BUG FIX: the cell height was previously divided by the column count,
        // which yielded distorted, non-grid-aligned cells whenever
        // rows != columns. Height must be divided by the number of rows.
        this.yheight = (y_topleft - y_bottomright) / rows;

        this.rowfunc = new Uniform(0, rows - 1);
        this.colfunc = new Uniform(0, columns - 1);
    }

    /** Select a cell uniformly and return its rectangle as a 4-vertex polygon. */
    @Override
    public Polygon apply(long value) {
        int row = rowfunc.applyAsInt(value);
        // offset the column selector input so row and column vary independently
        int column = colfunc.applyAsInt(value + 33);

        double left = x_topleft + (column * xwidth);
        double top = y_topleft - (row * yheight);
        double right = left + xwidth;
        double bottom = top - yheight;

        return new Polygon(
            new Point(left, bottom),
            new Point(left, top),
            new Point(right, top),
            new Point(right, bottom)
        );
    }
}
b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/datamappers/functions/long_localdate/EpochMillisToCqlLocalDate.java new file mode 100644 index 000000000..8cd48667b --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/datamappers/functions/long_localdate/EpochMillisToCqlLocalDate.java @@ -0,0 +1,25 @@ +package io.nosqlbench.activitytype.cql.datamappers.functions.long_localdate; + +import com.datastax.driver.core.LocalDate; +import io.nosqlbench.virtdata.annotations.Example; +import io.nosqlbench.virtdata.annotations.ThreadSafeMapper; + +import java.util.function.LongFunction; + +/** + * Converts epoch millis to a + * com.datastax.driver.core.{@link LocalDate} object, as + * the number of milliseconds since January 1st, 1970 GMT. + */ +@ThreadSafeMapper +public class EpochMillisToCqlLocalDate implements LongFunction { + + @Example({"EpochMillisToJavaLocalDate()", "Yields the LocalDate for the millis in GMT"}) + public EpochMillisToCqlLocalDate() { + } + + @Override + public LocalDate apply(long value) { + return LocalDate.fromMillisSinceEpoch(value); + } +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/datamappers/functions/long_localdate/EpochMillisToJavaLocalDate.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/datamappers/functions/long_localdate/EpochMillisToJavaLocalDate.java new file mode 100644 index 000000000..0c4b3a39b --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/datamappers/functions/long_localdate/EpochMillisToJavaLocalDate.java @@ -0,0 +1,43 @@ +package io.nosqlbench.activitytype.cql.datamappers.functions.long_localdate; + +import io.nosqlbench.virtdata.annotations.Example; +import io.nosqlbench.virtdata.annotations.ThreadSafeMapper; + +import java.time.Instant; +import java.time.LocalDate; +import java.time.ZoneId; +import java.util.function.LongFunction; + +/** + * Converts epoch millis to a java.time.{@link LocalDate} 
object, + * using either the system + * default timezone or the timezone provided. If the specified ZoneId is not + * the same as the time base of the epoch millis instant, then conversion + * errors will occur. + * + * Short form ZoneId values like 'CST' can be used, although US Domestic names + * which specify the daylight savings hours are not supported. The full list of + * short Ids at @see JavaSE ZoneId Ids + * + * Any timezone specifier may be used which can be read by {@link ZoneId#of(String)} + */ +@ThreadSafeMapper +public class EpochMillisToJavaLocalDate implements LongFunction { + + ZoneId timezone; + + @Example({"EpochMillisToJavaLocalDate()","Yields the LocalDate for the system default ZoneId"}) + public EpochMillisToJavaLocalDate() { + this.timezone = ZoneId.systemDefault(); + } + + @Example({"EpochMillisToJavaLocalDate('ECT')","Yields the LocalDate for the ZoneId entry for 'Europe/Paris'"}) + public EpochMillisToJavaLocalDate(String zoneid) { + this.timezone = ZoneId.of(zoneid); + } + + @Override + public LocalDate apply(long value) { + return Instant.ofEpochMilli(value).atZone(timezone).toLocalDate(); + } +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/datamappers/functions/long_localdate/EpochMillisToJavaLocalDateTime.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/datamappers/functions/long_localdate/EpochMillisToJavaLocalDateTime.java new file mode 100644 index 000000000..40eeaa2c2 --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/datamappers/functions/long_localdate/EpochMillisToJavaLocalDateTime.java @@ -0,0 +1,43 @@ +package io.nosqlbench.activitytype.cql.datamappers.functions.long_localdate; + +import io.nosqlbench.virtdata.annotations.Example; +import io.nosqlbench.virtdata.annotations.ThreadSafeMapper; + +import java.time.Instant; +import java.time.LocalDateTime; +import java.time.ZoneId; +import java.util.function.LongFunction; + +/** + * Converts epoch 
millis to a + * java.time.{@link LocalDateTime} object, using either the system + * default timezone or the timezone provided. If the specified ZoneId is not + * the same as the time base of the epoch millis instant, then conversion + * errors will occur. + * + * Short form ZoneId values like 'CST' can be used, although US Domestic names + * which specify the daylight savings hours are not supported. The full list of + * short Ids at @see JavaSE ZoneId Ids + * + * Any timezone specifier may be used which can be read by {@link ZoneId#of(String)} + */ +@ThreadSafeMapper +public class EpochMillisToJavaLocalDateTime implements LongFunction { + + ZoneId timezone; + + @Example({"EpochMillisToJavaLocalDateTime()","Yields the LocalDateTime for the system default ZoneId"}) + public EpochMillisToJavaLocalDateTime() { + this.timezone = ZoneId.systemDefault(); + } + + @Example({"EpochMillisToJavaLocalDateTime('ECT')","Yields the LocalDateTime for the ZoneId entry for 'Europe/Paris'"}) + public EpochMillisToJavaLocalDateTime(String zoneid) { + this.timezone = ZoneId.of(zoneid); + } + + @Override + public LocalDateTime apply(long value) { + return Instant.ofEpochMilli(value).atZone(timezone).toLocalDateTime(); + } +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/datamappers/functions/long_localdate/LongToLocalDateDays.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/datamappers/functions/long_localdate/LongToLocalDateDays.java new file mode 100644 index 000000000..c72f958ba --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/datamappers/functions/long_localdate/LongToLocalDateDays.java @@ -0,0 +1,22 @@ +package io.nosqlbench.activitytype.cql.datamappers.functions.long_localdate; + +import com.datastax.driver.core.LocalDate; +import io.nosqlbench.virtdata.annotations.Example; +import io.nosqlbench.virtdata.annotations.ThreadSafeMapper; + +import java.util.function.LongFunction; + +/** + * Days since 
Jan 1st 1970 + */ +@ThreadSafeMapper +public class LongToLocalDateDays implements LongFunction { + @Override + public LocalDate apply(long value) { + return LocalDate.fromDaysSinceEpoch((int) value % Integer.MAX_VALUE); + } + + @Example({"LongToLocalDateDays()","take the cycle number and turn it into a LocalDate based on days since 1970"}) + public LongToLocalDateDays (){ + } +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/datamappers/functions/long_string/ModuloCSVLineToString.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/datamappers/functions/long_string/ModuloCSVLineToString.java new file mode 100644 index 000000000..601b97a3c --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/datamappers/functions/long_string/ModuloCSVLineToString.java @@ -0,0 +1,69 @@ +/* + * + * Copyright 2015 Jonathan Shook + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package io.nosqlbench.activitytype.cql.datamappers.functions.long_string; + +import io.nosqlbench.virtdata.annotations.Example; +import io.nosqlbench.virtdata.annotations.ThreadSafeMapper; +import io.nosqlbench.virtdata.api.VirtDataResources; +import io.nosqlbench.virtdata.library.basics.shared.from_long.to_string.ModuloLineToString; +import org.apache.commons.csv.CSVParser; +import org.apache.commons.csv.CSVRecord; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.ArrayList; +import java.util.List; +import java.util.function.LongFunction; + +/** + * Select a value from a CSV file line by modulo division against the number + * of lines in the file. The second parameter is the field name, and this must + * be provided in the CSV header line as written. + */ +@ThreadSafeMapper +public class ModuloCSVLineToString implements LongFunction { + private final static Logger logger = LoggerFactory.getLogger(ModuloLineToString.class); + + private List lines = new ArrayList<>(); + + private String filename; + + @Example({"ModuloCSVLineToString('data/myfile.csv','lat')","load values for 'lat' from the CSV file myfile.csv."}) + public ModuloCSVLineToString(String filename, String fieldname) { + this.filename = filename; + CSVParser csvp = VirtDataResources.readFileCSV(filename); + int column = csvp.getHeaderMap().get(fieldname); + for (CSVRecord strings : csvp) { + lines.add(strings.get(column)); + } + } + + @Override + public String apply(long input) { + int itemIdx = (int) (input % lines.size()) % Integer.MAX_VALUE; + String item = lines.get(itemIdx); + return item; + } + + public String toString() { + return getClass().getSimpleName() + ":" + filename; + } + + +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/datamappers/functions/long_string/ModuloCSVLineToUUID.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/datamappers/functions/long_string/ModuloCSVLineToUUID.java new file mode 100644 
// ----- File: ModuloCSVLineToUUID.java -----
/*
 *
 * Copyright 2015 Jonathan Shook
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package io.nosqlbench.activitytype.cql.datamappers.functions.long_string;

import io.nosqlbench.virtdata.annotations.Example;
import io.nosqlbench.virtdata.annotations.ThreadSafeMapper;
import io.nosqlbench.virtdata.api.VirtDataResources;
import org.apache.commons.csv.CSVParser;
import org.apache.commons.csv.CSVRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.ArrayList;
import java.util.List;
import java.util.UUID;
import java.util.function.LongFunction;

/**
 * Select a value from a CSV file line by modulo division against the number
 * of lines in the file, and parse it as a {@link UUID}. The second parameter
 * is the field name, and this must be provided in the CSV header line as written.
 */
@ThreadSafeMapper
public class ModuloCSVLineToUUID implements LongFunction<UUID> {
    // BUG FIX: the logger was previously bound to the unrelated
    // ModuloLineToString class, misattributing all log output.
    private final static Logger logger = LoggerFactory.getLogger(ModuloCSVLineToUUID.class);

    private final List<String> lines = new ArrayList<>();

    private final String filename;

    /**
     * @param filename the CSV file to load
     * @param fieldname the header name of the column to select UUID values from
     */
    @Example({"ModuloCSVLineToUUID('data/myfile.csv','lat')","load values for 'lat' from the CSV file myfile.csv."})
    public ModuloCSVLineToUUID(String filename, String fieldname) {
        this.filename = filename;
        CSVParser csvp = VirtDataResources.readFileCSV(filename);
        // Fail with a clear message rather than an NPE when the field is absent.
        Integer column = csvp.getHeaderMap().get(fieldname);
        if (column == null) {
            throw new RuntimeException("Unable to find column '" + fieldname + "' in CSV file '" + filename + "'");
        }
        for (CSVRecord record : csvp) {
            lines.add(record.get(column));
        }
        // Guard against modulo-by-zero in apply() for an empty file.
        if (lines.isEmpty()) {
            throw new RuntimeException("CSV file '" + filename + "' contained no data rows for field '" + fieldname + "'");
        }
    }

    @Override
    public UUID apply(long input) {
        // BUG FIX: the original trailing `% Integer.MAX_VALUE` was a no-op on an
        // already-int value and has been removed.
        int itemIdx = (int) (input % lines.size());
        return UUID.fromString(lines.get(itemIdx));
    }

    public String toString() {
        return getClass().getSimpleName() + ":" + filename;
    }

}
+ * This form uses {@link UUIDs#startOf(long)} + */ +@Categories({Category.datetime}) +@ThreadSafeMapper +public class ToTimeUUIDMax implements LongFunction { + @Override + public UUID apply(long value) { + return UUIDs.endOf(value); + } +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/datamappers/functions/long_uuid/ToTimeUUIDMin.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/datamappers/functions/long_uuid/ToTimeUUIDMin.java new file mode 100644 index 000000000..30211884f --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/datamappers/functions/long_uuid/ToTimeUUIDMin.java @@ -0,0 +1,23 @@ +package io.nosqlbench.activitytype.cql.datamappers.functions.long_uuid; + +import com.datastax.driver.core.utils.UUIDs; +import io.nosqlbench.virtdata.annotations.Categories; +import io.nosqlbench.virtdata.annotations.Category; +import io.nosqlbench.virtdata.annotations.ThreadSafeMapper; + +import java.util.UUID; +import java.util.function.LongFunction; + +/** + * Converts a long timestamp in epoch millis form into a Version 1 TimeUUID + * according to RFC 4122. 
+ * This form uses {@link UUIDs#startOf(long)} + */ +@Categories({Category.datetime}) +@ThreadSafeMapper +public class ToTimeUUIDMin implements LongFunction { + @Override + public UUID apply(long value) { + return UUIDs.startOf(value); + } +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/datamappers/functions/rainbow/TokenMapFileAPIService.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/datamappers/functions/rainbow/TokenMapFileAPIService.java new file mode 100644 index 000000000..546394f92 --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/datamappers/functions/rainbow/TokenMapFileAPIService.java @@ -0,0 +1,98 @@ +package io.nosqlbench.activitytype.cql.datamappers.functions.rainbow; + +import java.nio.BufferUnderflowException; +import java.nio.ByteBuffer; + +/** + *

This class provides cursor-like access to a set of data from + * a binary file using Java nio buffers. Calling {@link #next()} causes + * the next record to be loaded, after which the getter methods return + * the loaded values. You must call next before access each record's fields.

+ * + *

The {@link #next(int)} method may be used for absolute offset access. + * In this mode, no thread safety is imposed, as there is no chance of the + * internal buffer's position to affect the result.

+ * + *

Buffers may be accessed as shared or not. If

+ * + */ +public class TokenMapFileAPIService { + +// public static ThreadLocal> tl_cll = +// ThreadLocal.withInitial(HashMap::new); +// + private final int recordCount; + private final ByteBuffer buffer; + private final int RECORD_LEN = Long.BYTES * 2; + + private int recordPosition; + + private long token; + private int TOKEN_OFFSET = 0; + private long cycle; + private int CYCLE_OFFSET = Long.BYTES; + + private boolean loopdata; + + /** + * Create a new binary cursor for data in a binary file which consists of a (long,long) tuple of + * token values (murmur3 partitioner tokens) and cycle values that correspond to them. The cycles + * are the ones responsible for producing the associated token values. + * @param datafile The data file to read from + * @param loopdata Whether or not to loop around to the beginning of the data. For positional reads this is also + * modulo-based, such that relatively prime sizes and increments will loop not simply repeat + * values at the start of the buffer + * @param instanced Whether or not to provide an instanced view into the byte buffer, where each thread can have + * its own read tracking state + * @param ascending Whether to reverse the order othe long,long tuples when the file is read. + */ + public TokenMapFileAPIService(String datafile, boolean loopdata, boolean instanced, boolean ascending) { + this.loopdata = loopdata; + buffer = TokenMapFileSharedBuffers.getByteBuffer(datafile,instanced,ascending).asReadOnlyBuffer(); + this.recordCount = (int) (buffer.capacity() / RECORD_LEN); + this.recordPosition = 0; + } + + + public synchronized void next() { + try { + token = buffer.getLong(); + cycle = buffer.getLong(); + } catch (BufferUnderflowException bue) { + if (loopdata) { + buffer.position(0); + next(); + } + else { + throw bue; + } + } + } + + /** + * Do a read of [token,cycle] record without incremental read state. 
+ * @param position The logical record within the buffer to read + */ + public void next(int position) { + + if (loopdata) { + position = (position % recordCount) * RECORD_LEN; + } + token = buffer.getLong(position+TOKEN_OFFSET); + cycle = buffer.getLong(position+CYCLE_OFFSET); + } + + public long getToken() { + return token; + } + + public long getCycle() { + return cycle; + } + +// public static BinaryCursorForTokenCycle get(String mapname) { +// BinaryCursorForTokenCycle cursorLongLong = tl_cll.get().get(mapname); +// return cursorLongLong; +// } + +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/datamappers/functions/rainbow/TokenMapFileBaseFunction.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/datamappers/functions/rainbow/TokenMapFileBaseFunction.java new file mode 100644 index 000000000..bcb83edde --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/datamappers/functions/rainbow/TokenMapFileBaseFunction.java @@ -0,0 +1,22 @@ +package io.nosqlbench.activitytype.cql.datamappers.functions.rainbow; + +import java.util.function.IntToLongFunction; + +public abstract class TokenMapFileBaseFunction implements IntToLongFunction { + protected static ThreadLocal tl_DataSvc; + + public TokenMapFileBaseFunction(String filename, boolean loopdata, boolean instanced, boolean ascending) { + tl_DataSvc = ThreadLocal.withInitial(() -> new TokenMapFileAPIService(filename, loopdata, instanced, ascending)); + } + + public TokenMapFileBaseFunction(String filename) { + this(filename, false, true, true); + } + +// @Override +// public long applyAsLong(long operand) { +// BinaryCursorForTokenCycle bc; +// bc.next(operand); +// return 0; +// } +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/datamappers/functions/rainbow/TokenMapFileCycle.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/datamappers/functions/rainbow/TokenMapFileCycle.java new file mode 
100644 index 000000000..25464df2d --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/datamappers/functions/rainbow/TokenMapFileCycle.java @@ -0,0 +1,17 @@ +package io.nosqlbench.activitytype.cql.datamappers.functions.rainbow; + +import io.nosqlbench.virtdata.annotations.ThreadSafeMapper; + +@ThreadSafeMapper +public class TokenMapFileCycle extends TokenMapFileBaseFunction { + + public TokenMapFileCycle(String filename, boolean loopdata, boolean ascending) { + super(filename, loopdata, false, ascending); + } + + @Override + public long applyAsLong(int value) { + TokenMapFileAPIService datasvc = tl_DataSvc.get(); + return datasvc.getCycle(); + } +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/datamappers/functions/rainbow/TokenMapFileNextCycle.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/datamappers/functions/rainbow/TokenMapFileNextCycle.java new file mode 100644 index 000000000..7ae205ca6 --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/datamappers/functions/rainbow/TokenMapFileNextCycle.java @@ -0,0 +1,18 @@ +package io.nosqlbench.activitytype.cql.datamappers.functions.rainbow; + +import io.nosqlbench.virtdata.annotations.ThreadSafeMapper; + +@ThreadSafeMapper +public class TokenMapFileNextCycle extends TokenMapFileBaseFunction { + + public TokenMapFileNextCycle(String filename, boolean loopdata, boolean ascending) { + super(filename, loopdata, false, ascending); + } + + @Override + public long applyAsLong(int value) { + TokenMapFileAPIService datasvc = tl_DataSvc.get(); + datasvc.next(value); + return datasvc.getCycle(); + } +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/datamappers/functions/rainbow/TokenMapFileNextToken.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/datamappers/functions/rainbow/TokenMapFileNextToken.java new file mode 100644 index 000000000..503ccb95e --- /dev/null +++ 
b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/datamappers/functions/rainbow/TokenMapFileNextToken.java @@ -0,0 +1,18 @@ +package io.nosqlbench.activitytype.cql.datamappers.functions.rainbow; + +import io.nosqlbench.virtdata.annotations.ThreadSafeMapper; + +@ThreadSafeMapper +public class TokenMapFileNextToken extends TokenMapFileBaseFunction { + + public TokenMapFileNextToken(String filename, boolean loopdata, boolean ascending) { + super(filename, loopdata, false, ascending); + } + + @Override + public long applyAsLong(int value) { + TokenMapFileAPIService datasvc = tl_DataSvc.get(); + datasvc.next(value); + return datasvc.getToken(); + } +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/datamappers/functions/rainbow/TokenMapFileSharedBuffers.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/datamappers/functions/rainbow/TokenMapFileSharedBuffers.java new file mode 100644 index 000000000..a4629a62e --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/datamappers/functions/rainbow/TokenMapFileSharedBuffers.java @@ -0,0 +1,60 @@ +package io.nosqlbench.activitytype.cql.datamappers.functions.rainbow; + +import java.io.RandomAccessFile; +import java.nio.ByteBuffer; +import java.nio.channels.FileChannel; +import java.util.HashMap; +import java.util.Map; + +public class TokenMapFileSharedBuffers { + public final static TokenMapFileSharedBuffers INSTANCE = new TokenMapFileSharedBuffers(); + private final static Map BUFFERS = new HashMap<>(); + private TokenMapFileSharedBuffers() {} + + /** + * Find and load the {@link ByteBuffer} which can be read at the specified + * location. This will only be loaded into memory once. All callers will + * get access to the same logical source data. Whether or not the caller + * gets its own buffer tracking state (see {@link java.nio.Buffer}). 
// file: io/nosqlbench/activitytype/cql/datamappers/functions/rainbow/TokenMapFileSharedBuffers.java

/**
 * Loads token-map files into shared {@link ByteBuffer}s, one load per
 * (file, order) combination, so that all callers see the same source data.
 */
public class TokenMapFileSharedBuffers {
    public final static TokenMapFileSharedBuffers INSTANCE = new TokenMapFileSharedBuffers();

    // Cache of loaded buffers. The key combines the filename with the
    // requested record order: the original keyed on filename alone, which
    // would hand an ascending caller a previously-cached descending buffer
    // (or vice versa) for the same file.
    private final static Map<String, ByteBuffer> BUFFERS = new HashMap<>();

    private TokenMapFileSharedBuffers() {}

    /**
     * Find and load the {@link ByteBuffer} which can be read at the specified
     * location. The backing data is only loaded into memory once; all callers
     * get access to the same logical source data.
     *
     * <p>If each caller will use the Buffer API for incremental reads, where
     * callers could possibly read the same records, then separate instanced
     * buffers are advised (instanced=true returns a read-only view with its
     * own position/limit tracking state — see {@link java.nio.Buffer}).
     * If only position-oriented (absolute) access is used, or a single shared
     * read position across threads is desired, instanced=false suffices.
     *
     * @param filename  the location of the source data for the buffer
     * @param instanced if true, each caller gets a wrapped buffer object with
     *                  its own tracking state
     * @param ascending if false, the records (pairs of longs) are returned in
     *                  reverse record order
     * @return an instance of a ByteBuffer
     */
    public synchronized static ByteBuffer getByteBuffer(String filename, boolean instanced, boolean ascending) {
        String key = filename + (ascending ? "#asc" : "#desc");
        ByteBuffer foundBuffer = BUFFERS.computeIfAbsent(key, k -> load(filename, ascending));
        return instanced ? foundBuffer.asReadOnlyBuffer() : foundBuffer;
    }

    /**
     * Map the file into memory, optionally reversing its record order.
     * The file handle is closed after mapping; a mapped buffer remains valid
     * after its channel is closed (per {@code FileChannel.map}). The original
     * implementation leaked the {@link RandomAccessFile}.
     */
    private static ByteBuffer load(String filename, boolean ascending) {
        try (RandomAccessFile image = new RandomAccessFile(filename, "rw")) {
            ByteBuffer mbb = image.getChannel().map(FileChannel.MapMode.READ_WRITE, 0, image.length());
            if (!ascending) {
                final int RECORD_LEN = Long.BYTES * 2; // each record is (cycle, token)
                ByteBuffer descendingByteBuffer = ByteBuffer.allocate(mbb.capacity());
                for (int i = mbb.capacity() - RECORD_LEN; i >= 0; i -= RECORD_LEN) {
                    descendingByteBuffer.putLong(mbb.getLong(i));
                    descendingByteBuffer.putLong(mbb.getLong(i + Long.BYTES));
                }
                // The original returned this buffer with position==capacity,
                // which breaks any relative read; flip to match the freshly
                // mapped (position 0) ascending case.
                descendingByteBuffer.flip();
                mbb = descendingByteBuffer;
            }
            return mbb;
        } catch (Exception e) {
            throw new RuntimeException("Unable to load token map file '" + filename + "'", e);
        }
    }
}
ascending); + } + + @Override + public long applyAsLong(int value) { + TokenMapFileAPIService datasvc = tl_DataSvc.get(); + return datasvc.getToken(); + } +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/datamappers/functions/string_string/SnappyComp.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/datamappers/functions/string_string/SnappyComp.java new file mode 100644 index 000000000..f32799299 --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/datamappers/functions/string_string/SnappyComp.java @@ -0,0 +1,23 @@ +package io.nosqlbench.activitytype.cql.datamappers.functions.string_string; + +import io.nosqlbench.virtdata.annotations.ThreadSafeMapper; +import org.xerial.snappy.Snappy; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.function.Function; + +@ThreadSafeMapper +public class SnappyComp implements Function { + + private Snappy snappy = new Snappy(); + + @Override + public ByteBuffer apply(String s) { + try { + return ByteBuffer.wrap(Snappy.compress(s)); + } catch (IOException e) { + throw new RuntimeException(e); + } + } +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/api/ErrorResponse.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/api/ErrorResponse.java new file mode 100644 index 000000000..f5ad95617 --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/api/ErrorResponse.java @@ -0,0 +1,23 @@ +package io.nosqlbench.activitytype.cql.ebdrivers.cql.api; + +/** + * When an error filter allows us to see and handle an error in a specific way, + * the ErrorResponse determines exactly how we handle it. Each level represents + * a starting point in handling, including everything after the starting point. 
// file: io/nosqlbench/activitytype/cql/ebdrivers/cql/api/ErrorResponse.java

/**
 * When an error filter allows us to see and handle an error in a specific way,
 * the ErrorResponse determines exactly how we handle it. Each level represents
 * a starting point in handling, including everything after the starting point.
 * The first enum is the most severe response.
 */
public enum ErrorResponse {

    stop("S"),      // Rethrow this error to the runtime, forcing it to handle the error or stop
    warn("W"),      // log a warning with some details about this error
    retry("R"),     // resubmit this operation up to the available tries
    histogram("H"), // record this metric in a histogram
    count("C"),     // count this metric separately
    ignore("I");    // do nothing

    // Short single-letter code for compact spec strings; immutable.
    private final String symbol;

    ErrorResponse(String symbol) {
        this.symbol = symbol;
    }

    /** @return the single-letter shorthand for this response */
    public String getSymbol() {
        return symbol;
    }
}
+ * @param resultSet The ResultSet for the given cycle + * @param statement The statement for the given cycle + * @param cycle The cycle for which the statement was submitted + * @return A value, only meaningful when used with aggregated operators + */ + int apply(ResultSet resultSet, Statement statement, long cycle); +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/api/RowCycleOperator.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/api/RowCycleOperator.java new file mode 100644 index 000000000..1e33b0670 --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/api/RowCycleOperator.java @@ -0,0 +1,11 @@ +package io.nosqlbench.activitytype.cql.ebdrivers.cql.api; + +import com.datastax.driver.core.Row; + +/** + * An operator interface for consuming ResultSets and producing some + * int that can be used as a status code in activities. + */ +public interface RowCycleOperator { + int apply(Row row, long cycle); +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/api/StatementFilter.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/api/StatementFilter.java new file mode 100644 index 000000000..9ece49d14 --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/api/StatementFilter.java @@ -0,0 +1,7 @@ +package io.nosqlbench.activitytype.cql.ebdrivers.cql.api; + +import com.datastax.driver.core.Statement; + +public interface StatementFilter { + boolean matches(Statement statement); +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/api/VerifyApplied.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/api/VerifyApplied.java new file mode 100644 index 000000000..e7c149209 --- /dev/null +++ 
b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/api/VerifyApplied.java @@ -0,0 +1,7 @@ +package io.nosqlbench.activitytype.cql.ebdrivers.cql.api; + +public enum VerifyApplied { + ignore, + error, + retry +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/core/CQLBindHelper.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/core/CQLBindHelper.java new file mode 100644 index 000000000..27f50c8e5 --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/core/CQLBindHelper.java @@ -0,0 +1,164 @@ +package io.nosqlbench.activitytype.cql.ebdrivers.cql.core; + +import com.datastax.driver.core.*; +import io.nosqlbench.engine.api.activityconfig.ParsedStmt; +import io.nosqlbench.engine.api.activityconfig.yaml.StmtDef; + +import java.math.BigDecimal; +import java.net.InetAddress; +import java.nio.ByteBuffer; +import java.util.*; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +public class CQLBindHelper { + + private final static Pattern stmtToken = Pattern.compile("\\?(\\w+[-_\\d\\w]*)|\\{(\\w+[-_\\d\\w.]*)}"); + public static Statement rebindUnappliedStatement(Statement statement, ColumnDefinitions defs, Row row) { + + for (ColumnDefinitions.Definition def : defs) { + String name = def.getName(); + def.getType(); + if (!name.equals("[applied]")) { + DataType.Name typeName = def.getType().getName(); + switch (typeName) { + case ASCII: // ASCII(1, String.class) + ((BoundStatement) statement).bind().setString(name, row.getString(name)); + case VARCHAR: // VARCHAR(13, String.class) + ((BoundStatement) statement).bind().setString(name, row.getString(name)); + case TEXT: // TEXT(10, String.class) + ((BoundStatement) statement).bind().setString(name, row.getString(name)); + case BIGINT: // BIGINT(2, Long.class) + ((BoundStatement) statement).bind().setLong(name, row.getLong(name)); + case COUNTER: // COUNTER(5, Long.class) 
+ ((BoundStatement) statement).bind().setLong(name, row.getLong(name)); + case BLOB: // BLOB(3, ByteBuffer.class) + ((BoundStatement) statement).bind().setBytes(name, row.getBytes(name)); + case CUSTOM: // CUSTOM(0, ByteBuffer.class) + throw new RuntimeException("The diagnostic binder does not understand custom types yet."); + case BOOLEAN: // BOOLEAN(4, Boolean.class) + ((BoundStatement) statement).bind().setBool(name, row.getBool(name)); + case DECIMAL: // DECIMAL(6, BigDecimal.class) + ((BoundStatement) statement).bind().setDecimal(name, row.getDecimal(name)); + case DOUBLE: // DOUBLE(7, Double.class) + ((BoundStatement) statement).bind().setDouble(name, row.getDouble(name)); + case FLOAT: // FLOAT(8, Float.class) + ((BoundStatement) statement).bind().setFloat(name, row.getFloat(name)); + case INET: // INET(16, InetAddress.class) + ((BoundStatement) statement).bind().setInet(name, row.getInet(name)); + case INT: // INT(9, Integer.class) + ((BoundStatement) statement).bind().setInt(name, row.getInt(name)); + case TIMESTAMP: // TIMESTAMP(11, Date.class) + ((BoundStatement) statement).bind().setTimestamp(name, row.getTimestamp(name)); + case UUID: // UUID(12, UUID.class) + ((BoundStatement) statement).bind().setUUID(name, row.getUUID(name)); + case TIMEUUID: // TIMEUUID(15, UUID.class) + ((BoundStatement) statement).bind().setUUID(name, row.getUUID(name)); + case VARINT: // VARINT(14, BigInteger.class) + ((BoundStatement) statement).bind().setInt(name, row.getInt(name)); + case UDT: // UDT(48, UDTValue.class) + ((BoundStatement) statement).bind().setUDTValue(name, row.getUDTValue(name)); + case TUPLE: // TUPLE(49, TupleValue.class) + ((BoundStatement) statement).bind().setTupleValue(name, row.getTupleValue(name)); + case SMALLINT: + ((BoundStatement) statement).bind().setInt(name, row.getInt(name)); + case TINYINT: + ((BoundStatement) statement).bind().setInt(name, row.getInt(name)); + case DATE: + ((BoundStatement) statement).bind().setDate(name, 
row.getDate(name)); + case TIME: + ((BoundStatement) statement).bind().setTime(name, row.getTime(name)); + default: + throw new RuntimeException("Unrecognized type:" + typeName); + } + } + } + return statement; + } + + public static BoundStatement bindStatement(Statement statement, String name, Object value, DataType.Name typeName) { + switch (typeName) { + case ASCII: // ASCII(1, String.class) + return ((BoundStatement) statement).bind().setString(name, (String) value); + case VARCHAR: // VARCHAR(13, String.class) + return ((BoundStatement) statement).bind().setString(name, (String) value); + case TEXT: // TEXT(10, String.class) + return ((BoundStatement) statement).bind().setString(name, (String) value); + case BIGINT: // BIGINT(2, Long.class) + return ((BoundStatement) statement).bind().setLong(name, (long) value); + case COUNTER: // COUNTER(5, Long.class) + return ((BoundStatement) statement).bind().setLong(name, (long) value); + case BLOB: // BLOB(3, ByteBuffer.class) + return ((BoundStatement) statement).bind().setBytes(name, (ByteBuffer) value); + case CUSTOM: // CUSTOM(0, ByteBuffer.class) + throw new RuntimeException("The diagnostic binder does not understand custom types yet."); + case BOOLEAN: // BOOLEAN(4, Boolean.class) + return ((BoundStatement) statement).bind().setBool(name, (boolean) value); + case DECIMAL: // DECIMAL(6, BigDecimal.class) + return ((BoundStatement) statement).bind().setDecimal(name, (BigDecimal) value); + case DOUBLE: // DOUBLE(7, Double.class) + return ((BoundStatement) statement).bind().setDouble(name, (double) value); + case FLOAT: // FLOAT(8, Float.class) + return ((BoundStatement) statement).bind().setFloat(name, (float) value); + case INET: // INET(16, InetAddress.class) + return ((BoundStatement) statement).bind().setInet(name, (InetAddress) value); + case INT: // INT(9, Integer.class) + return ((BoundStatement) statement).bind().setInt(name, (int) value); + case TIMESTAMP: // TIMESTAMP(11, Date.class) + return 
((BoundStatement) statement).bind().setTimestamp(name, (Date) value); + case UUID: // UUID(12, UUID.class) + return ((BoundStatement) statement).bind().setUUID(name, (UUID) value); + case TIMEUUID: // TIMEUUID(15, UUID.class) + return ((BoundStatement) statement).bind().setUUID(name, (UUID) value); + case VARINT: // VARINT(14, BigInteger.class) + return ((BoundStatement) statement).bind().setInt(name, (int) value); + case UDT: // UDT(48, UDTValue.class) + return ((BoundStatement) statement).bind().setUDTValue(name, (UDTValue) value); + case TUPLE: // TUPLE(49, TupleValue.class + return ((BoundStatement) statement).bind().setTupleValue(name, (TupleValue) value); + case SMALLINT: + return ((BoundStatement) statement).bind().setInt(name, (int) value); + case TINYINT: + return ((BoundStatement) statement).bind().setInt(name, (int) value); + case DATE: + return ((BoundStatement) statement).bind().setDate(name, (LocalDate) value); + case TIME: + return ((BoundStatement) statement).bind().setTime(name, (long) value); + default: + throw new RuntimeException("Unrecognized type:" + typeName); + } + } + + public static Map parseAndGetSpecificBindings(StmtDef stmtDef, ParsedStmt parsed) { + List spans = new ArrayList<>(); + + String statement = stmtDef.getStmt(); + + Set extraBindings = new HashSet<>(); + extraBindings.addAll(stmtDef.getBindings().keySet()); + Map specificBindings = new LinkedHashMap<>(); + + Matcher m = stmtToken.matcher(statement); + int lastMatch = 0; + String remainder = ""; + while (m.find(lastMatch)) { + String pre = statement.substring(lastMatch, m.start()); + + String form1 = m.group(1); + String form2 = m.group(2); + String tokenName = (form1 != null && !form1.isEmpty()) ? 
form1 : form2; + lastMatch = m.end(); + spans.add(pre); + + if (extraBindings.contains(tokenName)) { + if (specificBindings.get(tokenName) != null){ + String postfix = UUID.randomUUID().toString(); + specificBindings.put(tokenName+postfix, stmtDef.getBindings().get(tokenName)); + }else { + specificBindings.put(tokenName, stmtDef.getBindings().get(tokenName)); + } + } + } + return specificBindings; + } +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/core/CQLOptions.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/core/CQLOptions.java new file mode 100644 index 000000000..99c72350e --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/core/CQLOptions.java @@ -0,0 +1,198 @@ +package io.nosqlbench.activitytype.cql.ebdrivers.cql.core; + +import com.datastax.driver.core.*; +import com.datastax.driver.core.policies.*; +import io.netty.util.HashedWheelTimer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.net.InetSocketAddress; +import java.util.*; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.TimeUnit; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import java.util.stream.Collectors; + +public class CQLOptions { + private final static Logger logger = LoggerFactory.getLogger(CQLOptions.class); + + private final static Pattern CORE_AND_MAX_RQ_PATTERN = Pattern.compile("(?\\d+)(:(?\\d+)(:(?\\d+))?)?(,(?\\d+)(:(?\\d+)(:(?\\d+))?)?)?(,?heartbeat_interval_s:(?\\d+))?(,?idle_timeout_s:(?\\d+))?(,?pool_timeout_ms:(?\\d+))?"); + private final static Pattern PERCENTILE_EAGER_PATTERN = Pattern.compile("^p(?[^:]+)(:(?\\d+))?(:(?\\d+)ms)?$"); + private final static Pattern CONSTANT_EAGER_PATTERN = Pattern.compile("^((?\\d++)ms)(:(?\\d+))?$"); + + private static ConstantSpeculativeExecutionPolicy constantPolicy(int threshold, int executions) { + return new 
ConstantSpeculativeExecutionPolicy(threshold, executions); + } + + private static SpeculativeExecutionPolicy percentilePolicy(long tracked, double threshold, int executions) { + PerHostPercentileTracker tracker = newTracker(tracked); + return new PercentileSpeculativeExecutionPolicy(tracker, threshold, executions); + } + + private static PerHostPercentileTracker newTracker(long millis) { + return PerHostPercentileTracker.builder(millis).build(); + } + + public static PoolingOptions poolingOptionsFor(String spec) { + Matcher matcher = CORE_AND_MAX_RQ_PATTERN.matcher(spec); + if (matcher.matches()) { + PoolingOptions poolingOptions = new PoolingOptions(); + + Optional.ofNullable(matcher.group("core")).map(Integer::valueOf) + .ifPresent(core -> poolingOptions.setCoreConnectionsPerHost(HostDistance.LOCAL, core)); + Optional.ofNullable(matcher.group("max")).map(Integer::valueOf) + .ifPresent(max -> poolingOptions.setMaxConnectionsPerHost(HostDistance.LOCAL, max)); + Optional.ofNullable(matcher.group("rq")).map(Integer::valueOf) + .ifPresent(rq -> poolingOptions.setMaxRequestsPerConnection(HostDistance.LOCAL, rq)); + + Optional.ofNullable(matcher.group("rcore")).map(Integer::valueOf) + .ifPresent(rcore -> poolingOptions.setCoreConnectionsPerHost(HostDistance.REMOTE, rcore)); + Optional.ofNullable(matcher.group("rmax")).map(Integer::valueOf) + .ifPresent(rmax -> poolingOptions.setMaxConnectionsPerHost(HostDistance.REMOTE, rmax)); + Optional.ofNullable(matcher.group("rrq")).map(Integer::valueOf) + .ifPresent(rrq -> poolingOptions.setMaxRequestsPerConnection(HostDistance.REMOTE, rrq)); + + Optional.ofNullable(matcher.group("heartbeatinterval")).map(Integer::valueOf) + .ifPresent(poolingOptions::setHeartbeatIntervalSeconds); + + Optional.ofNullable(matcher.group("idletimeout")).map(Integer::valueOf) + .ifPresent(poolingOptions::setIdleTimeoutSeconds); + + Optional.ofNullable(matcher.group("pooltimeout")).map(Integer::valueOf) + 
.ifPresent(poolingOptions::setPoolTimeoutMillis); + + return poolingOptions; + } + throw new RuntimeException("No pooling options could be parsed from spec: " + spec); + + } + + public static RetryPolicy retryPolicyFor(String spec) { + Set retryBehaviors = Arrays.stream(spec.split(",")).map(String::toLowerCase).collect(Collectors.toSet()); + RetryPolicy retryPolicy = DefaultRetryPolicy.INSTANCE; + + if (retryBehaviors.contains("default")) { + return retryPolicy; + } // add other mutually-exclusive behaviors here with checks, if we want to extend beyond "default" + + if (retryBehaviors.contains("logging")) { + retryPolicy = new LoggingRetryPolicy(retryPolicy); + } + + return retryPolicy; + } + + public static SocketOptions socketOptionsFor(String spec) { + String[] assignments = spec.split("[,;]"); + Map values = new HashMap<>(); + for (String assignment : assignments) { + String[] namevalue = assignment.split("[:=]", 2); + String name = namevalue[0]; + String value = namevalue[1]; + values.put(name, value); + } + + SocketOptions options = new SocketOptions(); + Optional.ofNullable(values.get("read_timeout_ms")).map(Integer::parseInt).ifPresent( + options::setReadTimeoutMillis + ); + Optional.ofNullable(values.get("connect_timeout_ms")).map(Integer::parseInt).ifPresent( + options::setConnectTimeoutMillis + ); + Optional.ofNullable(values.get("keep_alive")).map(Boolean::parseBoolean).ifPresent( + options::setKeepAlive + ); + Optional.ofNullable(values.get("reuse_address")).map(Boolean::parseBoolean).ifPresent( + options::setReuseAddress + ); + Optional.ofNullable(values.get("so_linger")).map(Integer::parseInt).ifPresent( + options::setSoLinger + ); + Optional.ofNullable(values.get("tcp_no_delay")).map(Boolean::parseBoolean).ifPresent( + options::setTcpNoDelay + ); + Optional.ofNullable(values.get("receive_buffer_size")).map(Integer::parseInt).ifPresent( + options::setReceiveBufferSize + ); + 
Optional.ofNullable(values.get("send_buffer_size")).map(Integer::parseInt).ifPresent( + options::setSendBufferSize + ); + + return options; + } + + public static SpeculativeExecutionPolicy defaultSpeculativePolicy() { + PerHostPercentileTracker tracker = PerHostPercentileTracker + .builder(15000) + .build(); + PercentileSpeculativeExecutionPolicy defaultSpecPolicy = + new PercentileSpeculativeExecutionPolicy(tracker, 99.0, 5); + return defaultSpecPolicy; + } + + public static SpeculativeExecutionPolicy speculativeFor(String spec) { + Matcher pctileMatcher = PERCENTILE_EAGER_PATTERN.matcher(spec); + Matcher constantMatcher = CONSTANT_EAGER_PATTERN.matcher(spec); + if (pctileMatcher.matches()) { + double pctile = Double.valueOf(pctileMatcher.group("pctile")); + if (pctile > 100.0 || pctile < 0.0) { + throw new RuntimeException("pctile must be between 0.0 and 100.0"); + } + String executionsSpec = pctileMatcher.group("executions"); + String trackedSpec = pctileMatcher.group("tracked"); + int executions = (executionsSpec != null && !executionsSpec.isEmpty()) ? Integer.valueOf(executionsSpec) : 5; + int tracked = (trackedSpec != null && !trackedSpec.isEmpty()) ? Integer.valueOf(trackedSpec) : 15000; + logger.debug("speculative: Creating new percentile tracker policy from spec '" + spec + "'"); + return percentilePolicy(tracked, pctile, executions); + } else if (constantMatcher.matches()) { + int threshold = Integer.valueOf(constantMatcher.group("msThreshold")); + String executionsSpec = constantMatcher.group("executions"); + int executions = (executionsSpec != null && !executionsSpec.isEmpty()) ? 
Integer.valueOf(executionsSpec) : 5; + logger.debug("speculative: Creating new constant policy from spec '" + spec + "'"); + return constantPolicy(threshold, executions); + } else { + throw new RuntimeException("Unable to parse pattern for speculative option: " + spec + ", it must be in " + + "an accepted form, like p99.0:5:15000, or p99.0:5, or 5000ms:5"); + } + + } + + public static LoadBalancingPolicy whitelistFor(String s, LoadBalancingPolicy innerPolicy) { + String[] addrSpecs = s.split(","); + List sockAddrs = Arrays.stream(addrSpecs) + .map(CQLOptions::toSocketAddr) + .collect(Collectors.toList()); + if (innerPolicy == null) { + innerPolicy = new RoundRobinPolicy(); + } + return new WhiteListPolicy(innerPolicy, sockAddrs); + } + + public static NettyOptions withTickDuration(String tick) { + logger.info("Cluster builder using custom tick duration value for HashedWheelTimer: " + tick + " milliseconds"); + int tickDuration = Integer.valueOf(tick); + return new NettyOptions() { + public io.netty.util.Timer timer(ThreadFactory threadFactory) { + return new HashedWheelTimer( + threadFactory, tickDuration, TimeUnit.MILLISECONDS); + } + }; + } + + private static InetSocketAddress toSocketAddr(String addr) { + String[] addrs = addr.split(":", 2); + String inetHost = addrs[0]; + String inetPort = (addrs.length == 2) ? 
addrs[1] : "9042"; + return new InetSocketAddress(inetHost, Integer.valueOf(inetPort)); + } + + public static ProtocolOptions.Compression withCompression(String compspec) { + try { + return ProtocolOptions.Compression.valueOf(compspec); + } catch (IllegalArgumentException iae) { + throw new RuntimeException("Compression option '" + compspec + "' was specified, but only " + + Arrays.toString(ProtocolOptions.Compression.values()) + " are available."); + } + } +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/core/CqlAction.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/core/CqlAction.java new file mode 100644 index 000000000..7879744b4 --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/core/CqlAction.java @@ -0,0 +1,359 @@ +package io.nosqlbench.activitytype.cql.ebdrivers.cql.core; + +import com.codahale.metrics.Timer; +import com.datastax.driver.core.*; +import io.nosqlbench.activitytype.cql.ebdrivers.cql.api.ResultSetCycleOperator; +import io.nosqlbench.activitytype.cql.ebdrivers.cql.api.RowCycleOperator; +import io.nosqlbench.activitytype.cql.ebdrivers.cql.api.StatementFilter; +import io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.ErrorStatus; +import io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.HashedCQLErrorHandler; +import io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.exceptions.CQLCycleException; +import io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.exceptions.ChangeUnappliedCycleException; +import io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.exceptions.MaxTriesExhaustedException; +import io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.exceptions.UnexpectedPagingException; +import io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.core.ReadyCQLStatement; +import com.google.common.util.concurrent.ListenableFuture; +import 
io.nosqlbench.engine.api.activityapi.core.ActivityDefObserver; +import io.nosqlbench.engine.api.activityapi.core.MultiPhaseAction; +import io.nosqlbench.engine.api.activityapi.core.SyncAction; +import io.nosqlbench.engine.api.activityapi.planning.OpSequence; +import io.nosqlbench.engine.api.activityimpl.ActivityDef; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.List; +import java.util.concurrent.TimeUnit; + +@SuppressWarnings("Duplicates") +public class CqlAction implements SyncAction, MultiPhaseAction, ActivityDefObserver { + + private final static Logger logger = LoggerFactory.getLogger(CqlAction.class); + private final int slot; + private final CqlActivity cqlActivity; + private final ActivityDef activityDef; + private List rowOps; + private List cycleOps; + private List modifiers; + private StatementFilter statementFilter; + private OpSequence sequencer; + private int maxTries = 10; // how many cycles a statement will be attempted for before giving up + + private HashedCQLErrorHandler ebdseErrorHandler; + + private int pagesFetched = 0; + private long totalRowsFetchedForQuery = 0L; + private ResultSet pagingResultSet; + private Statement pagingStatement; + private ReadyCQLStatement pagingReadyStatement; + private boolean showcql; + private long nanoStartTime; + private long retryDelay; + private long maxRetryDelay; + private boolean retryReplace; + + public CqlAction(ActivityDef activityDef, int slot, CqlActivity cqlActivity) { + this.activityDef = activityDef; + this.cqlActivity = cqlActivity; + this.slot = slot; + onActivityDefUpdate(activityDef); + } + + @Override + public void init() { + onActivityDefUpdate(activityDef); + this.sequencer = cqlActivity.getOpSequencer(); + } + + @Override + public int runCycle(long value) { + // In this activity type, we use the same phase + // logic for the initial phase (runCycle(...)) + // as well as subsequent phases. 
+ return runPhase(value); + } + + public int runPhase(long cycleValue) { + + HashedCQLErrorHandler.resetThreadStatusCode(); + + if (pagingResultSet == null) { + + totalRowsFetchedForQuery = 0L; + + Statement statement; + ResultSetFuture resultSetFuture; + ReadyCQLStatement readyCQLStatement; + + int tries = 0; + + try (Timer.Context bindTime = cqlActivity.bindTimer.time()) { + readyCQLStatement = sequencer.get(cycleValue); + statement = readyCQLStatement.bind(cycleValue); + + if (statementFilter != null) { + if (!statementFilter.matches(statement)) { + cqlActivity.skippedTokensHisto.update(cycleValue); + return 0; + } + } + + if (modifiers != null) { + for (StatementModifier modifier : modifiers) { + statement = modifier.modify(statement, cycleValue); + } + } + + if (showcql) { + logger.info("CQL(cycle=" + cycleValue + "):\n" + readyCQLStatement.getQueryString(cycleValue)); + } + } + nanoStartTime = System.nanoTime(); + + while (tries < maxTries) { + tries++; + + if (tries > maxTries) { + throw new MaxTriesExhaustedException(cycleValue, maxTries); + } + + if (tries > 1) { + try (Timer.Context retryTime = cqlActivity.retryDelayTimer.time()) { + Thread.sleep(Math.min((retryDelay << tries) / 1000, maxRetryDelay / 1000)); + } catch (InterruptedException ignored) { + } + } + + try (Timer.Context executeTime = cqlActivity.executeTimer.time()) { + resultSetFuture = cqlActivity.getSession().executeAsync(statement); + } + + Timer.Context resultTime = cqlActivity.resultTimer.time(); + try { + ResultSet resultSet = resultSetFuture.getUninterruptibly(); + + if (cycleOps != null) { + for (ResultSetCycleOperator cycleOp : cycleOps) { + cycleOp.apply(resultSet, statement, cycleValue); + } + } + + ResultSetCycleOperator[] perStmtRSOperators = readyCQLStatement.getResultSetOperators(); + if (perStmtRSOperators != null) { + for (ResultSetCycleOperator perStmtRSOperator : perStmtRSOperators) { + perStmtRSOperator.apply(resultSet, statement, cycleValue); + } + } + + if 
(!resultSet.wasApplied()) { + //resultSet.b + Row row = resultSet.one(); + ColumnDefinitions defs = row.getColumnDefinitions(); + if (retryReplace) { + statement = CQLBindHelper.rebindUnappliedStatement(statement, defs, row); + } + + logger.trace(readyCQLStatement.getQueryString(cycleValue)); + // To make exception handling logic flow more uniformly + throw new ChangeUnappliedCycleException( + cycleValue, resultSet, readyCQLStatement.getQueryString(cycleValue) + ); + } + + int pageRows = resultSet.getAvailableWithoutFetching(); + int remaining = pageRows; + RowCycleOperator[] perStmtRowOperators = readyCQLStatement.getRowCycleOperators(); + if (rowOps == null && perStmtRowOperators==null) { + while (remaining-- > 0) { + Row row = resultSet.one(); + +// NOTE: This has been replaced by: +// params: +// rowops: savevars +// You must add this to the YAML for statements that are meant to capture vars +// HashMap bindings = SharedState.tl_ObjectMap.get(); +// for (ColumnDefinitions.Definition cdef : row.getColumnDefinitions()) { +// bindings.put(cdef.getName(), row.getObject(cdef.getName())); +// } +// + } + } else { + while (remaining-- > 0) { + Row onerow = resultSet.one(); + if (rowOps!=null) { + for (RowCycleOperator rowOp : rowOps) { + rowOp.apply(onerow, cycleValue); + } + } + if (perStmtRowOperators!=null) { + for (RowCycleOperator rowOp : perStmtRowOperators) { + rowOp.apply(onerow, cycleValue); + } + } + } + } + cqlActivity.rowsCounter.mark(pageRows); + totalRowsFetchedForQuery += pageRows; + + if (resultSet.isFullyFetched()) { + long resultNanos = System.nanoTime() - nanoStartTime; + cqlActivity.resultSuccessTimer.update(resultNanos, TimeUnit.NANOSECONDS); + cqlActivity.resultSetSizeHisto.update(totalRowsFetchedForQuery); + readyCQLStatement.onSuccess(cycleValue, resultNanos, totalRowsFetchedForQuery); + } else { + if (cqlActivity.maxpages > 1) { + pagingResultSet = resultSet; + pagingStatement = statement; + pagingReadyStatement = readyCQLStatement; + 
pagesFetched = 1; + } else { + throw new UnexpectedPagingException( + cycleValue, + resultSet, + readyCQLStatement.getQueryString(cycleValue), + 1, + cqlActivity.maxpages, + cqlActivity.getSession().getCluster().getConfiguration().getQueryOptions().getFetchSize() + ); + } + } + break; // This is normal termination of this loop, when retries aren't needed + } catch (Exception e) { + long resultNanos = resultTime.stop(); + resultTime = null; + readyCQLStatement.onError(cycleValue, resultNanos, e); + CQLCycleException cqlCycleException = new CQLCycleException(cycleValue, resultNanos, e, readyCQLStatement); + ErrorStatus errorStatus = ebdseErrorHandler.handleError(cycleValue, cqlCycleException); + if (!errorStatus.isRetryable()) { + cqlActivity.triesHisto.update(tries); + return errorStatus.getResultCode(); + } + } finally { + if (resultTime != null) { + resultTime.stop(); + } + } + } + cqlActivity.triesHisto.update(tries); + + } else { + + int tries = 0; + + while (tries < maxTries) { + tries++; + if (tries > maxTries) { + throw new MaxTriesExhaustedException(cycleValue, maxTries); + } + + ListenableFuture pagingFuture; + + try (Timer.Context pagingTime = cqlActivity.pagesTimer.time()) { + try (Timer.Context executeTime = cqlActivity.executeTimer.time()) { + pagingFuture = pagingResultSet.fetchMoreResults(); + } + + Timer.Context resultTime = cqlActivity.resultTimer.time(); + try { + ResultSet resultSet = pagingFuture.get(); + + if (cycleOps != null) { + for (ResultSetCycleOperator cycleOp : cycleOps) { + cycleOp.apply(resultSet, pagingStatement, cycleValue); + } + } + ResultSetCycleOperator[] perStmtRSOperators = pagingReadyStatement.getResultSetOperators(); + if (perStmtRSOperators != null) { + for (ResultSetCycleOperator perStmtRSOperator : perStmtRSOperators) { + perStmtRSOperator.apply(resultSet, pagingStatement, cycleValue); + } + } + + pagesFetched++; + + int pageRows = resultSet.getAvailableWithoutFetching(); + int remaining = pageRows; + if (rowOps == null) { 
+ while (remaining-- > 0) { + resultSet.one(); + } + } else { + while (remaining-- > 0) { + for (RowCycleOperator rowOp : rowOps) { + rowOp.apply(resultSet.one(), cycleValue); + + } + } + } + cqlActivity.rowsCounter.mark(pageRows); + totalRowsFetchedForQuery += pageRows; + + if (resultSet.isFullyFetched()) { + long nanoTime = System.nanoTime() - nanoStartTime; + cqlActivity.resultSuccessTimer.update(nanoTime, TimeUnit.NANOSECONDS); + cqlActivity.resultSetSizeHisto.update(totalRowsFetchedForQuery); + pagingReadyStatement.onSuccess(cycleValue, nanoTime, totalRowsFetchedForQuery); + pagingResultSet = null; + + } else { + if (pagesFetched > cqlActivity.maxpages) { + throw new UnexpectedPagingException( + cycleValue, + pagingResultSet, + pagingReadyStatement.getQueryString(cycleValue), + pagesFetched, + cqlActivity.maxpages, + cqlActivity.getSession().getCluster().getConfiguration().getQueryOptions().getFetchSize() + ); + } + pagingResultSet = resultSet; + } + break; // This is normal termination of this loop, when retries aren't needed + } catch (Exception e) { + long resultNanos = resultTime.stop(); + resultTime = null; + + pagingReadyStatement.onError(cycleValue, resultNanos, e); + CQLCycleException cqlCycleException = new CQLCycleException(cycleValue, resultNanos, e, pagingReadyStatement); + ErrorStatus errorStatus = ebdseErrorHandler.handleError(cycleValue, cqlCycleException); + if (!errorStatus.isRetryable()) { + cqlActivity.triesHisto.update(tries); + return errorStatus.getResultCode(); + } + } finally { + if (resultTime != null) { + resultTime.stop(); + } + } + } + } + cqlActivity.triesHisto.update(tries); + } + return 0; + } + + + @Override + public boolean incomplete() { + return pagingResultSet != null; + } + + @Override + public void onActivityDefUpdate(ActivityDef activityDef) { + this.maxTries = cqlActivity.getMaxTries(); + this.retryDelay = cqlActivity.getRetryDelay(); + this.maxRetryDelay = cqlActivity.getMaxRetryDelay(); + this.retryReplace = 
cqlActivity.isRetryReplace(); + this.showcql = cqlActivity.isShowCql(); + this.ebdseErrorHandler = cqlActivity.getCqlErrorHandler(); + this.statementFilter = cqlActivity.getStatementFilter(); + this.rowOps = cqlActivity.getRowCycleOperators(); + this.cycleOps = cqlActivity.getResultSetCycleOperators(); + this.modifiers = cqlActivity.getStatementModifiers(); // re-reads every cached setting from the owning activity so a def update takes effect here + } + + protected CqlActivity getCqlActivity() { + return cqlActivity; // accessor for subclasses; returns the owning activity, not a copy + } + + +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/core/CqlActionDispenser.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/core/CqlActionDispenser.java new file mode 100644 index 000000000..9311ba666 --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/core/CqlActionDispenser.java @@ -0,0 +1,27 @@ +package io.nosqlbench.activitytype.cql.ebdrivers.cql.core; + + +import io.nosqlbench.engine.api.activityapi.core.Action; +import io.nosqlbench.engine.api.activityapi.core.ActionDispenser; + +public class CqlActionDispenser implements ActionDispenser { // dispenses one Action per thread slot for a single CqlActivity + + public CqlActivity getCqlActivity() { + return cqlActivity; + } + + private CqlActivity cqlActivity; // the activity whose params/def parameterize the dispensed actions + + public CqlActionDispenser(CqlActivity activityContext) { + this.cqlActivity = activityContext; // retained by reference, not copied + } + + public Action getAction(int slot) { // slot is the thread index within the activity + long async= cqlActivity.getActivityDef().getParams().getOptionalLong("async").orElse(0L); // 'async' param > 0 selects the async action path; default 0 = sync + if (async>0) { + return new CqlAsyncAction(cqlActivity, slot); + } else { + return new CqlAction(cqlActivity.getActivityDef(), slot, cqlActivity); + } + } +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/core/CqlActivity.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/core/CqlActivity.java new file mode 100644 index 000000000..873a4a0e5 --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/core/CqlActivity.java @@ -0,0 
+1,655 @@ +package io.nosqlbench.activitytype.cql.ebdrivers.cql.core; + +import com.codahale.metrics.Histogram; +import com.codahale.metrics.Meter; +import com.codahale.metrics.Timer; +import com.datastax.driver.core.*; +import io.nosqlbench.activitytype.cql.codecsupport.UDTCodecInjector; +import com.datastax.driver.core.TokenRangeStmtFilter; +import io.nosqlbench.activitytype.cql.ebdrivers.cql.api.ErrorResponse; +import io.nosqlbench.activitytype.cql.ebdrivers.cql.api.ResultSetCycleOperator; +import io.nosqlbench.activitytype.cql.ebdrivers.cql.api.RowCycleOperator; +import io.nosqlbench.activitytype.cql.ebdrivers.cql.api.StatementFilter; +import io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.EbdseCycleErrorHandler; +import io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.HashedCQLErrorHandler; +import io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.binders.CqlBinderTypes; +import io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.core.*; +import io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.rowoperators.RowCycleOperators; +import io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.rowoperators.Save; +import io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.rsoperators.ResultSetCycleOperators; +import io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.rsoperators.TraceLogger; +import io.nosqlbench.engine.api.activityapi.core.Activity; +import io.nosqlbench.engine.api.activityapi.core.ActivityDefObserver; +import io.nosqlbench.engine.api.activityapi.planning.OpSequence; +import io.nosqlbench.engine.api.activityapi.planning.SequencePlanner; +import io.nosqlbench.engine.api.activityapi.planning.SequencerType; +import io.nosqlbench.engine.api.activityconfig.ParsedStmt; +import io.nosqlbench.engine.api.activityconfig.StatementsLoader; +import io.nosqlbench.engine.api.activityconfig.rawyaml.RawStmtDef; +import io.nosqlbench.engine.api.activityconfig.rawyaml.RawStmtsBlock; +import 
io.nosqlbench.engine.api.activityconfig.rawyaml.RawStmtsDoc; +import io.nosqlbench.engine.api.activityconfig.rawyaml.RawStmtsDocList; +import io.nosqlbench.engine.api.activityconfig.yaml.StmtDef; +import io.nosqlbench.engine.api.activityconfig.yaml.StmtsDocList; +import io.nosqlbench.engine.api.activityimpl.ActivityDef; +import io.nosqlbench.engine.api.activityimpl.ParameterMap; +import io.nosqlbench.engine.api.activityimpl.SimpleActivity; +import io.nosqlbench.engine.api.metrics.ActivityMetrics; +import io.nosqlbench.engine.api.metrics.ExceptionCountMetrics; +import io.nosqlbench.engine.api.metrics.ExceptionHistoMetrics; +import io.nosqlbench.engine.api.util.SimpleConfig; +import io.nosqlbench.engine.api.util.StrInterpolater; +import io.nosqlbench.engine.api.util.TagFilter; +import io.nosqlbench.engine.api.util.Unit; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.FileWriter; +import java.io.IOException; +import java.io.Writer; +import java.nio.charset.StandardCharsets; +import java.util.*; + +@SuppressWarnings("Duplicates") +public class CqlActivity extends SimpleActivity implements Activity, ActivityDefObserver { + + private final static Logger logger = LoggerFactory.getLogger(CqlActivity.class); + private final ExceptionCountMetrics exceptionCountMetrics; + private final ExceptionHistoMetrics exceptionHistoMetrics; + private final ActivityDef activityDef; + private final Map namedWriters = new HashMap<>(); + protected List stmts; + Timer retryDelayTimer; + Timer bindTimer; + Timer executeTimer; + Timer resultTimer; + Timer resultSuccessTimer; + Timer pagesTimer; + Histogram triesHisto; + Histogram skippedTokensHisto; + Histogram resultSetSizeHisto; + int maxpages; + Meter rowsCounter; + private HashedCQLErrorHandler errorHandler; + private OpSequence opsequence; + private Session session; + private int maxTries; + private StatementFilter statementFilter; + private Boolean showcql; + private List rowCycleOperators; + private List 
resultSetCycleOperators; + private List statementModifiers; + private Long maxTotalOpsInFlight; + private long retryDelay; + private long maxRetryDelay; + private boolean retryReplace; + private String pooling; + + + public CqlActivity(ActivityDef activityDef) { + super(activityDef); + this.activityDef = activityDef; + exceptionCountMetrics = new ExceptionCountMetrics(activityDef); + exceptionHistoMetrics = new ExceptionHistoMetrics(activityDef); + } + + private void registerCodecs(Session session) { + UDTCodecInjector injector = new UDTCodecInjector(); + injector.injectUserProvidedCodecs(session, true); + } + + + @Override + public synchronized void initActivity() { + logger.debug("initializing activity: " + this.activityDef.getAlias()); + session = getSession(); + + if (getParams().getOptionalBoolean("usercodecs").orElse(false)) { + registerCodecs(session); + } + initSequencer(); + setDefaultsFromOpSequence(this.opsequence); + + retryDelayTimer = ActivityMetrics.timer(activityDef, "retry-delay"); + bindTimer = ActivityMetrics.timer(activityDef, "bind"); + executeTimer = ActivityMetrics.timer(activityDef, "execute"); + resultTimer = ActivityMetrics.timer(activityDef, "result"); + triesHisto = ActivityMetrics.histogram(activityDef, "tries"); + pagesTimer = ActivityMetrics.timer(activityDef, "pages"); + rowsCounter = ActivityMetrics.meter(activityDef, "rows"); + skippedTokensHisto = ActivityMetrics.histogram(activityDef, "skipped-tokens"); + resultSuccessTimer = ActivityMetrics.timer(activityDef, "result-success"); + resultSetSizeHisto = ActivityMetrics.histogram(activityDef, "resultset-size"); + onActivityDefUpdate(activityDef); + logger.debug("activity fully initialized: " + this.activityDef.getAlias()); + } + + public synchronized Session getSession() { + if (session == null) { + session = CQLSessionCache.get().getSession(this.getActivityDef()); + } + return session; + } + + private void initSequencer() { + + SequencerType sequencerType = SequencerType.valueOf( + 
getParams().getOptionalString("seq").orElse("bucket") + ); + SequencePlanner planner = new SequencePlanner<>(sequencerType); + + StmtsDocList unfiltered = loadStmtsYaml(); + + // log tag filtering results + String tagfilter = activityDef.getParams().getOptionalString("tags").orElse(""); + TagFilter tagFilter = new TagFilter(tagfilter); + unfiltered.getStmts().stream().map(tagFilter::matchesTaggedResult).forEach(r -> logger.info(r.getLog())); + + stmts = unfiltered.getStmts(tagfilter); + + if (stmts.size() == 0) { + throw new RuntimeException("There were no unfiltered statements found for this activity."); + } + + for (StmtDef stmtDef : stmts) { + + ParsedStmt parsed = stmtDef.getParsed().orError(); + boolean prepared = Boolean.valueOf(stmtDef.getParams().getOrDefault("prepared", "true")); + long ratio = Long.valueOf(stmtDef.getParams().getOrDefault("ratio", "1")); + + Optional cl = Optional.ofNullable( + stmtDef.getParams().getOrDefault("cl", null)).map(ConsistencyLevel::valueOf); + + Optional serial_cl = Optional.ofNullable( + stmtDef.getParams().getOrDefault("serial_cl", null)).map(ConsistencyLevel::valueOf); + + Optional idempotent = Optional.ofNullable(stmtDef.getParams().getOrDefault("idempotent", null)) + .map(Boolean::valueOf); + + StringBuilder psummary = new StringBuilder(); + + boolean instrument = Optional.ofNullable(stmtDef.getParams() + .get("instrument")).map(Boolean::valueOf) + .orElse(getParams().getOptionalBoolean("instrument").orElse(false)); + + String logresultcsv = stmtDef.getParams().getOrDefault("logresultcsv",""); + String logresultcsv_act = getParams().getOptionalString("logresultcsv").orElse(""); + + if (!logresultcsv_act.isEmpty() && !logresultcsv_act.toLowerCase().equals("true")) { + throw new RuntimeException("At the activity level, only logresultcsv=true is allowed, no other values."); + } + logresultcsv = !logresultcsv.isEmpty() ? logresultcsv : logresultcsv_act; + logresultcsv = !logresultcsv.toLowerCase().equals("true") ? 
logresultcsv : stmtDef.getName()+"--results.csv"; + + logger.debug("readying statement[" + (prepared ? "" : "un") + "prepared]:" + parsed.getStmt()); + + ReadyCQLStatementTemplate template; + String stmtForDriver = parsed.getPositionalStatement(s -> "?"); + if (prepared) { + psummary.append(" prepared=>").append(prepared); + PreparedStatement prepare = getSession().prepare(stmtForDriver); + cl.ifPresent((conlvl) -> { + psummary.append(" consistency_level=>").append(conlvl); + prepare.setConsistencyLevel(conlvl); + }); + serial_cl.ifPresent((scl) -> { + psummary.append(" serial_consistency_level=>").append(serial_cl); + prepare.setSerialConsistencyLevel(scl); + }); + idempotent.ifPresent((i) -> { + psummary.append(" idempotent=").append(idempotent); + prepare.setIdempotent(i); + }); + CqlBinderTypes binderType = CqlBinderTypes.valueOf(stmtDef.getParams() + .getOrDefault("binder", CqlBinderTypes.DEFAULT.toString())); + + template = new ReadyCQLStatementTemplate(binderType, getSession(), prepare, ratio, parsed.getName()); + } else { + SimpleStatement simpleStatement = new SimpleStatement(stmtForDriver); + cl.ifPresent((conlvl) -> { + psummary.append(" consistency_level=>").append(conlvl); + simpleStatement.setConsistencyLevel(conlvl); + }); + serial_cl.ifPresent((scl) -> { + psummary.append(" serial_consistency_level=>").append(scl); + simpleStatement.setSerialConsistencyLevel(scl); + }); + idempotent.ifPresent((i) -> { + psummary.append(" idempotent=>").append(i); + simpleStatement.setIdempotent(i); + }); + template = new ReadyCQLStatementTemplate(getSession(), simpleStatement, ratio, parsed.getName()); + } + + Optional.ofNullable(stmtDef.getParams().getOrDefault("save", null)) + .map(s -> s.split("[,; ]")) + .map(Save::new) + .ifPresent(save_op -> { + psummary.append(" save=>").append(save_op.toString()); + template.addRowCycleOperators(save_op); + }); + + Optional.ofNullable(stmtDef.getParams().getOrDefault("rsoperators", null)) + .map(s -> s.split(",")) + 
.stream().flatMap(Arrays::stream) + .map(ResultSetCycleOperators::newOperator) + .forEach(rso -> { + psummary.append(" rsop=>").append(rso.toString()); + template.addResultSetOperators(rso); + }); + + Optional.ofNullable(stmtDef.getParams().getOrDefault("rowoperators", null)) + .map(s -> s.split(",")) + .stream().flatMap(Arrays::stream) + .map(RowCycleOperators::newOperator) + .forEach(ro -> { + psummary.append(" rowop=>").append(ro.toString()); + template.addRowCycleOperators(ro); + }); + + if (instrument) { + logger.info("Adding per-statement success and error and resultset-size timers to statement '" + parsed.getName() + "'"); + template.instrument(this); + psummary.append(" instrument=>").append(instrument); + } + + if (!logresultcsv.isEmpty()) { + logger.info("Adding per-statement result CSV logging to statement '" + parsed.getName() + "'"); + template.logResultCsv(this,logresultcsv); + psummary.append(" logresultcsv=>").append(logresultcsv); + } + + template.getContextualBindings().getBindingsTemplate().addFieldBindings(stmtDef.getParsed().getBindPoints()); + + if (psummary.length() > 0) { + logger.info("statement named '" + stmtDef.getName() + "' has custom settings:" + psummary.toString()); + } + + planner.addOp(template.resolve(), ratio); + } + + opsequence = planner.resolve(); + + } + + private StmtsDocList loadStmtsYaml() { + StmtsDocList doclist = null; + + String yaml_loc = activityDef.getParams().getOptionalString("yaml").orElse("default"); + StrInterpolater interp = new StrInterpolater(activityDef); + + String yamlVersion = "unset"; + if (yaml_loc.endsWith(":1") || yaml_loc.endsWith(":2")) { + yamlVersion = yaml_loc.substring(yaml_loc.length() - 1); + yaml_loc = yaml_loc.substring(0, yaml_loc.length() - 2); + } + + switch (yamlVersion) { + case "1": + doclist = getVersion1StmtsDoc(interp, yaml_loc); + logger.warn("DEPRECATED-FORMAT: Loaded yaml " + yaml_loc + " with compatibility mode. 
" + + "This will be deprecated in a future release."); + logger.warn("DEPRECATED-FORMAT: Please refer to " + + "http://docs.engineblock.io/user-guide/standard_yaml/ for more details."); + break; + case "2": + doclist = StatementsLoader.load(logger, yaml_loc, interp, "activities"); + break; + case "unset": + try { + logger.debug("You can suffix your yaml filename or url with the " + + "format version, such as :1 or :2. Assuming version 2."); + doclist = StatementsLoader.load(null, yaml_loc, interp, "activities"); + } catch (Exception ignored) { + try { + doclist = getVersion1StmtsDoc(interp, yaml_loc); + logger.warn("DEPRECATED-FORMAT: Loaded yaml " + yaml_loc + + " with compatibility mode. This will be deprecated in a future release."); + logger.warn("DEPRECATED-FORMAT: Please refer to " + + "http://docs.engineblock.io/user-guide/standard_yaml/ for more details."); + } catch (Exception compatError) { + logger.warn("Tried to load yaml in compatibility mode, " + + "since it failed to load with the standard format, " + + "but found an error:" + compatError); + logger.warn("The following detailed errors are provided only " + + "for the standard format. To force loading version 1 with detailed logging, add" + + " a version qualifier to your yaml filename or url like ':1'"); + // retrigger the error again, this time with logging enabled. 
+ doclist = StatementsLoader.load(logger, yaml_loc, interp, "activities"); + } + } + break; + default: + throw new RuntimeException("Unrecognized yaml format version, expected :1 or :2 " + + "at end of yaml file, but got " + yamlVersion + " instead."); + } + + return doclist; + + } + + @Deprecated + private StmtsDocList getVersion1StmtsDoc(StrInterpolater interp, String yaml_loc) { + StmtsDocList unfiltered; + List blocks = new ArrayList<>(); + + YamlCQLStatementLoader deprecatedLoader = new YamlCQLStatementLoader(interp); + AvailableCQLStatements rawDocs = deprecatedLoader.load(yaml_loc, "activities"); + + List rawTagged = rawDocs.getRawTagged(); + + for (TaggedCQLStatementDefs rawdef : rawTagged) { + for (CQLStatementDef rawstmt : rawdef.getStatements()) { + RawStmtsBlock rawblock = new RawStmtsBlock(); + + // tags + rawblock.setTags(rawdef.getTags()); + + // params + Map params = new HashMap<>(rawdef.getParams()); + if (rawstmt.getConsistencyLevel() != null && !rawstmt.getConsistencyLevel().isEmpty()) + params.put("cl", rawstmt.getConsistencyLevel()); + if (!rawstmt.isPrepared()) params.put("prepared", "false"); + if (rawstmt.getRatio() != 1L) + params.put("ratio", String.valueOf(rawstmt.getRatio())); + rawblock.setParams(params); + + + // stmts + List stmtslist = new ArrayList<>(); + stmtslist.add(new RawStmtDef(rawstmt.getName(), rawstmt.getStatement())); + rawblock.setRawStmtDefs(stmtslist); + + // bindings + rawblock.setBindings(rawstmt.getBindings()); + + blocks.add(rawblock); + } + } + + RawStmtsDoc rawStmtsDoc = new RawStmtsDoc(); + rawStmtsDoc.setBlocks(blocks); + List rawStmtsDocs = new ArrayList<>(); + rawStmtsDocs.add(rawStmtsDoc); + RawStmtsDocList rawStmtsDocList = new RawStmtsDocList(rawStmtsDocs); + unfiltered = new StmtsDocList(rawStmtsDocList); + + return unfiltered; + } + + public ExceptionCountMetrics getExceptionCountMetrics() { + return exceptionCountMetrics; + } + + @Override + public String toString() { + return "CQLActivity {" + + 
"activityDef=" + activityDef + + ", session=" + session + + ", opSequence=" + this.opsequence + + '}'; + } + + @Override + public void onActivityDefUpdate(ActivityDef activityDef) { + super.onActivityDefUpdate(activityDef); + + clearResultSetCycleOperators(); + clearRowCycleOperators(); + clearStatementModifiers(); + + ParameterMap params = activityDef.getParams(); + Optional fetchSizeOption = params.getOptionalString("fetchsize"); + Cluster cluster = getSession().getCluster(); + if (fetchSizeOption.isPresent()) { + int fetchSize = fetchSizeOption.flatMap(Unit::bytesFor).map(Double::intValue).orElseThrow(() -> new RuntimeException( + "Unable to parse fetch size from " + fetchSizeOption.get() + )); + if (fetchSize > 10000000 && fetchSize < 1000000000) { + logger.warn("Setting the fetchsize to " + fetchSize + " is unlikely to give good performance."); + } else if (fetchSize > 1000000000) { + throw new RuntimeException("Setting the fetch size to " + fetchSize + " is likely to cause instability."); + } + logger.trace("setting fetchSize to " + fetchSize); + cluster.getConfiguration().getQueryOptions().setFetchSize(fetchSize); + } + + this.retryDelay = params.getOptionalLong("retrydelay").orElse(0L); + this.maxRetryDelay = params.getOptionalLong("maxretrydelay").orElse(500L); + this.retryReplace = params.getOptionalBoolean("retryreplace").orElse(false); + this.maxTries = params.getOptionalInteger("maxtries").orElse(10); + this.showcql = params.getOptionalBoolean("showcql").orElse(false); + this.maxpages = params.getOptionalInteger("maxpages").orElse(1); + + this.statementFilter = params.getOptionalString("tokens") + .map(s -> new TokenRangeStmtFilter(cluster, s)) + .orElse(null); + + if (statementFilter != null) { + logger.info("filtering statements" + statementFilter); + } + + errorHandler = configureErrorHandler(); + + params.getOptionalString("trace") + .map(SimpleConfig::new) + .map(TraceLogger::new) + .ifPresent( + tl -> { + addResultSetCycleOperator(tl); + 
addStatementModifier(tl); + }); + + this.maxTotalOpsInFlight = params.getOptionalLong("async").orElse(1L); + + Optional dynpooling = params.getOptionalString("pooling"); + if (dynpooling.isPresent()) { + logger.info("dynamically updating pooling"); + if (!dynpooling.get().equals(this.pooling)) { + PoolingOptions opts = CQLOptions.poolingOptionsFor(dynpooling.get()); + logger.info("pooling=>" + dynpooling.get()); + + PoolingOptions cfg = getSession().getCluster().getConfiguration().getPoolingOptions(); + + // This looks funny, because we have to set max conns per host + // in an order that will appease the driver, as there is no "apply settings" + // to do that for us, so we raise max first if it goes higher, and we lower + // it last, if it goes lower + int prior_mcph_l = cfg.getMaxConnectionsPerHost(HostDistance.LOCAL); + int mcph_l = opts.getMaxConnectionsPerHost(HostDistance.LOCAL); + int ccph_l = opts.getCoreConnectionsPerHost(HostDistance.LOCAL); + if (prior_mcph_l < mcph_l) { + logger.info("setting mcph_l to " + mcph_l); + cfg.setMaxConnectionsPerHost(HostDistance.LOCAL, mcph_l); + } + logger.info("setting ccph_l to " + ccph_l); + cfg.setCoreConnectionsPerHost(HostDistance.LOCAL, ccph_l); + if (mcph_l < prior_mcph_l) { + logger.info("setting mcph_l to " + mcph_l); + // FIX: was cfg.setMaxRequestsPerConnection(...) — the "lower it last" step must adjust + // max connections per host, symmetric with the raise branch above. + cfg.setMaxConnectionsPerHost(HostDistance.LOCAL, mcph_l); + } + cfg.setMaxRequestsPerConnection(HostDistance.LOCAL, opts.getMaxRequestsPerConnection(HostDistance.LOCAL)); + + int prior_mcph_r = cfg.getMaxConnectionsPerHost(HostDistance.REMOTE); + int mcph_r = opts.getMaxConnectionsPerHost(HostDistance.REMOTE); + int ccph_r = opts.getCoreConnectionsPerHost(HostDistance.REMOTE); + + if (mcph_r > 0) { + // FIX: apply REMOTE settings to the live configuration (cfg), not to the freshly + // parsed options object (opts) — setting them on opts was a no-op re-apply, unlike + // the LOCAL section above which correctly targets cfg. + if (mcph_r > prior_mcph_r) cfg.setMaxConnectionsPerHost(HostDistance.REMOTE, mcph_r); + cfg.setCoreConnectionsPerHost(HostDistance.REMOTE, ccph_r); + if (prior_mcph_r > mcph_r) cfg.setMaxConnectionsPerHost(HostDistance.REMOTE, mcph_r); + if (opts.getMaxConnectionsPerHost(HostDistance.REMOTE) >
0) { + cfg.setMaxRequestsPerConnection(HostDistance.REMOTE, opts.getMaxRequestsPerConnection(HostDistance.REMOTE)); + } + } + this.pooling = dynpooling.get(); + } + } + + } + + // TODO: make error handler updates consistent under concurrent updates + + private HashedCQLErrorHandler configureErrorHandler() { + + HashedCQLErrorHandler newerrorHandler = new HashedCQLErrorHandler(exceptionCountMetrics); + + String errors = activityDef.getParams() + .getOptionalString("errors") + .orElse("stop,retryable->retry,unverified->stop"); + + + String[] handlerSpecs = errors.split(","); + for (String spec : handlerSpecs) { + String[] keyval = spec.split("=|->|:", 2); + if (keyval.length == 1) { + String verb = keyval[0]; + newerrorHandler.setDefaultHandler( + new EbdseCycleErrorHandler( + ErrorResponse.valueOf(verb), + exceptionCountMetrics, + exceptionHistoMetrics, + !getParams().getOptionalLong("async").isPresent() + ) + ); + } else { + String pattern = keyval[0]; + String verb = keyval[1]; + if (newerrorHandler.getGroupNames().contains(pattern)) { + EbdseCycleErrorHandler handler = + new EbdseCycleErrorHandler( + ErrorResponse.valueOf(verb), + exceptionCountMetrics, + exceptionHistoMetrics, + !getParams().getOptionalLong("async").isPresent() + ); + logger.info("Handling error group '" + pattern + "' with handler:" + handler); + newerrorHandler.setHandlerForGroup(pattern, handler); + } else { + EbdseCycleErrorHandler handler = new EbdseCycleErrorHandler( + ErrorResponse.valueOf(keyval[1]), + exceptionCountMetrics, + exceptionHistoMetrics, + !getParams().getOptionalLong("async").isPresent() + ); + logger.info("Handling error pattern '" + pattern + "' with handler:" + handler); + newerrorHandler.setHandlerForPattern(keyval[0], handler); + } + } + } + + return newerrorHandler; + } + + public int getMaxTries() { + return maxTries; + } + + public HashedCQLErrorHandler getCqlErrorHandler() { + return this.errorHandler; + } + + public StatementFilter getStatementFilter() { + return 
statementFilter; + } + + public void setStatementFilter(StatementFilter statementFilter) { + this.statementFilter = statementFilter; + } + + public Boolean isShowCql() { + return showcql; + } + + public OpSequence getOpSequencer() { + return opsequence; + } + + public List getRowCycleOperators() { + return rowCycleOperators; + } + + protected synchronized void addRowCycleOperator(RowCycleOperator rsOperator) { + if (rowCycleOperators == null) { + rowCycleOperators = new ArrayList<>(); + } + rowCycleOperators.add(rsOperator); + } + + private void clearRowCycleOperators() { + this.rowCycleOperators = null; + } + + public List getResultSetCycleOperators() { + return resultSetCycleOperators; + } + + protected synchronized void addResultSetCycleOperator(ResultSetCycleOperator resultSetCycleOperator) { + if (this.resultSetCycleOperators == null) { + this.resultSetCycleOperators = new ArrayList<>(); + } + this.resultSetCycleOperators.add(resultSetCycleOperator); + } + + private void clearResultSetCycleOperators() { + this.resultSetCycleOperators = null; + } + + public List getStatementModifiers() { + return this.statementModifiers; + } + + protected synchronized void addStatementModifier(StatementModifier modifier) { + if (this.statementModifiers == null) { + this.statementModifiers = new ArrayList<>(); + } + this.statementModifiers.add(modifier); + } + + private void clearStatementModifiers() { + statementModifiers = null; + } + + public long getMaxOpsInFlight(int slot) { + int threads = this.getActivityDef().getThreads(); + return maxTotalOpsInFlight / threads + (slot < (maxTotalOpsInFlight % threads) ? 
1 : 0); + } + + public long getRetryDelay() { + return retryDelay; + } + + public void setRetryDelay(long retryDelay) { + this.retryDelay = retryDelay; + } + + public long getMaxRetryDelay() { + return maxRetryDelay; + } + + public void setMaxRetryDelay(long maxRetryDelay) { + this.maxRetryDelay = maxRetryDelay; + } + + public boolean isRetryReplace() { + return retryReplace; + } + + public void setRetryReplace(boolean retryReplace) { + this.retryReplace = retryReplace; + } + + public synchronized Writer getNamedWriter(String name) { + Writer writer = namedWriters.computeIfAbsent(name, s -> { + try { + return new FileWriter(name, StandardCharsets.UTF_8); + } catch (IOException e) { + throw new RuntimeException(e); + } + }); + this.registerAutoCloseable(writer); + return writer; + } + + +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/core/CqlActivityType.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/core/CqlActivityType.java new file mode 100644 index 000000000..0a2fc6af9 --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/core/CqlActivityType.java @@ -0,0 +1,47 @@ +package io.nosqlbench.activitytype.cql.ebdrivers.cql.core; + + +import io.nosqlbench.engine.api.activityapi.core.ActionDispenser; +import io.nosqlbench.engine.api.activityapi.core.ActivityType; +import io.nosqlbench.engine.api.activityimpl.ActivityDef; +import io.nosqlbench.virtdata.annotations.Service; + +import java.util.Optional; + +@Service(ActivityType.class) +public class CqlActivityType implements ActivityType { + + public String getName() { + return "cql"; + } + + @Override + public CqlActivity getActivity(ActivityDef activityDef) { + Optional yaml = activityDef.getParams().getOptionalString("yaml"); + + // sanity check that we have a yaml parameter, which contains our statements and bindings + if (yaml.isEmpty()) { + throw new RuntimeException("Currently, the cql activity 
type requires yaml activity parameter."); + } + + // allow shortcut: yaml parameter provide the default alias name + if (activityDef.getAlias().equals(ActivityDef.DEFAULT_ALIAS)) { + activityDef.getParams().set("alias",yaml.get()); + } + + return new CqlActivity(activityDef); + } + + /** + * Returns the per-activity level dispenser. The ActionDispenser can then dispense + * per-thread actions within the activity instance. + * @param activity The activity instance which will parameterize this action + */ + @Override + public ActionDispenser getActionDispenser(CqlActivity activity) { + return new CqlActionDispenser(activity); + } + + + +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/core/CqlAsyncAction.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/core/CqlAsyncAction.java new file mode 100644 index 000000000..f4a4866c4 --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/core/CqlAsyncAction.java @@ -0,0 +1,265 @@ +package io.nosqlbench.activitytype.cql.ebdrivers.cql.core; + +import com.codahale.metrics.Timer; +import com.datastax.driver.core.ResultSet; +import com.datastax.driver.core.ResultSetFuture; +import io.nosqlbench.activitytype.cql.ebdrivers.cql.api.ErrorResponse; +import io.nosqlbench.activitytype.cql.ebdrivers.cql.api.ResultSetCycleOperator; +import io.nosqlbench.activitytype.cql.ebdrivers.cql.api.RowCycleOperator; +import io.nosqlbench.activitytype.cql.ebdrivers.cql.api.StatementFilter; +import io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.ErrorStatus; +import io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.HashedCQLErrorHandler; +import io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.exceptions.CQLCycleException; +import io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.exceptions.ChangeUnappliedCycleException; +import 
io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.exceptions.UnexpectedPagingException; +import io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.core.ReadyCQLStatement; +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.ListenableFuture; +import io.nosqlbench.engine.api.activityapi.core.BaseAsyncAction; +import io.nosqlbench.engine.api.activityapi.core.ops.fluent.opfacets.FailedOp; +import io.nosqlbench.engine.api.activityapi.core.ops.fluent.opfacets.StartedOp; +import io.nosqlbench.engine.api.activityapi.core.ops.fluent.opfacets.SucceededOp; +import io.nosqlbench.engine.api.activityapi.core.ops.fluent.opfacets.TrackedOp; +import io.nosqlbench.engine.api.activityapi.planning.OpSequence; +import io.nosqlbench.engine.api.activityimpl.ActivityDef; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.List; +import java.util.concurrent.TimeUnit; +import java.util.function.LongFunction; + +@SuppressWarnings("Duplicates") +public class CqlAsyncAction extends BaseAsyncAction { + + private final static Logger logger = LoggerFactory.getLogger(CqlAsyncAction.class); + private final ActivityDef activityDef; + + private List rowOps; + private List cycleOps; + private List modifiers; + private StatementFilter statementFilter; + private OpSequence sequencer; + + // how many cycles a statement will be attempted for before giving up + private int maxTries = 10; + + private HashedCQLErrorHandler cqlActivityErrorHandler; + + // private int pagesFetched = 0; +// private long totalRowsFetchedForQuery = 0L; +// private ResultSet pagingResultSet; +// private Statement pagingStatement; +// private ReadyCQLStatement pagingReadyStatement; + private boolean showcql; +// private long opsInFlight = 0L; +// private long maxOpsInFlight = 1L; +// private long pendingResults = 0; +// private LinkedBlockingQueue resultQueue = new LinkedBlockingQueue<>(); + + public CqlAsyncAction(CqlActivity activity, int slot) { + 
super(activity, slot); + onActivityDefUpdate(activity.getActivityDef()); + this.activityDef = activity.getActivityDef(); + } + + @Override + public void init() { + onActivityDefUpdate(activityDef); + this.sequencer = activity.getOpSequencer(); + } + + @Override + public LongFunction getOpInitFunction() { + return (l) -> { + return new CqlOpData(l, this); + }; + } + + @Override + public void startOpCycle(TrackedOp opc) { + CqlOpData cqlop = opc.getData(); + long cycle = opc.getCycle(); + + // bind timer covers all statement selection and binding, skipping, transforming logic + try (Timer.Context bindTime = activity.bindTimer.time()) { + cqlop.readyCQLStatement = sequencer.get(cycle); + cqlop.statement = cqlop.readyCQLStatement.bind(cycle); + + // If a filter is defined, skip and count any statements that do not match it + if (statementFilter != null) { + if (!statementFilter.matches(cqlop.statement)) { + activity.skippedTokensHisto.update(cycle); + //opc.start().stop(-2); + cqlop.skipped = true; + opc.skip(0); + return; + } + } + + // Transform the statement if there are any statement transformers defined for this CQL activity + if (modifiers != null) { + for (StatementModifier modifier : modifiers) { + cqlop.statement = modifier.modify(cqlop.statement, cycle); + } + } + + // Maybe show the CQl in log/console - only for diagnostic use + if (showcql) { + logger.info("CQL(cycle=" + cycle + "):\n" + cqlop.readyCQLStatement.getQueryString(cycle)); + } + } + + StartedOp startedOp = opc.start(); + cqlop.startedOp = startedOp; + + // The execute timer covers only the point at which EB hands the op to the driver to be executed + try (Timer.Context executeTime = activity.executeTimer.time()) { + cqlop.future = activity.getSession().executeAsync(cqlop.statement); + Futures.addCallback(cqlop.future, cqlop); + } + } + + + public void onSuccess(StartedOp sop) { + CqlOpData cqlop = sop.getData(); + + HashedCQLErrorHandler.resetThreadStatusCode(); + if (cqlop.skipped) { + return; 
+ } + + try { + + ResultSet resultSet = cqlop.resultSet; + cqlop.totalPagesFetchedForQuery++; + + // Apply any defined ResultSetCycleOperators + if (cycleOps != null) { + for (ResultSetCycleOperator cycleOp : cycleOps) { + cycleOp.apply(resultSet, cqlop.statement, cqlop.cycle); + } + } + + int pageRows = resultSet.getAvailableWithoutFetching(); + int remaining = pageRows; + if (rowOps == null) { + while (remaining-- > 0) { + resultSet.one(); + } + } else { + while (remaining-- > 0) { + for (RowCycleOperator rowOp : rowOps) { + rowOp.apply(resultSet.one(), cqlop.cycle); + } + } + } + cqlop.totalRowsFetchedForQuery += pageRows; + + if (cqlop.totalPagesFetchedForQuery++ > activity.maxpages) { + throw new UnexpectedPagingException( + cqlop.cycle, + resultSet, + cqlop.readyCQLStatement.getQueryString(cqlop.cycle), + 1, + activity.maxpages, + activity.getSession().getCluster().getConfiguration().getQueryOptions().getFetchSize() + ); + } + + if (!resultSet.wasApplied()) { + // To make exception handling logic flow more uniformly + throw new ChangeUnappliedCycleException( + cqlop.cycle, resultSet, cqlop.readyCQLStatement.getQueryString(cqlop.cycle) + ); + } + + if (!resultSet.isFullyFetched()) { + logger.trace("async paging request " + cqlop.totalPagesFetchedForQuery + " for cycle " + cqlop.cycle); + ListenableFuture resultSetListenableFuture = resultSet.fetchMoreResults(); + Futures.addCallback(resultSetListenableFuture, cqlop); + return; + } + + SucceededOp success = sop.succeed(0); + cqlop.readyCQLStatement.onSuccess(cqlop.cycle, success.getServiceTimeNanos(), cqlop.totalRowsFetchedForQuery); + + activity.triesHisto.update(cqlop.triesAttempted); + activity.rowsCounter.mark(cqlop.totalRowsFetchedForQuery); + activity.resultSuccessTimer.update(success.getServiceTimeNanos(), TimeUnit.NANOSECONDS); + activity.resultSetSizeHisto.update(cqlop.totalRowsFetchedForQuery); + activity.resultTimer.update(success.getServiceTimeNanos(), TimeUnit.NANOSECONDS); + + } catch (Exception 
e) { + long currentServiceTime = sop.getCurrentServiceTimeNanos(); + + cqlop.readyCQLStatement.onError(cqlop.cycle, currentServiceTime, e); + + CQLCycleException cqlCycleException = new CQLCycleException(cqlop.cycle, currentServiceTime, e, cqlop.readyCQLStatement); + ErrorStatus errorStatus = cqlActivityErrorHandler.handleError(cqlop.cycle, cqlCycleException); + + if (errorStatus.isRetryable() && ++cqlop.triesAttempted < maxTries) { + ResultSetFuture resultSetFuture = activity.getSession().executeAsync(cqlop.statement); + sop.retry(); + Futures.addCallback(resultSetFuture, cqlop); + return; + } else { + sop.fail(errorStatus.getResultCode()); + if (errorStatus.getResponse() == ErrorResponse.stop) { + cqlop.throwable = cqlCycleException; + activity.getActivityController().stopActivityWithErrorAsync(cqlCycleException); + } + } + } + + } + + public void onFailure(StartedOp startedOp) { + + CqlOpData cqlop = startedOp.getData(); + long serviceTime = startedOp.getCurrentServiceTimeNanos(); + + // Even if this is retryable, we expose error events + cqlop.readyCQLStatement.onError(startedOp.getCycle(),serviceTime,cqlop.throwable); + + long cycle = startedOp.getCycle(); + CQLCycleException cqlCycleException1 = new CQLCycleException(cqlop.cycle, serviceTime, cqlop.throwable, cqlop.readyCQLStatement); + ErrorStatus errorStatus = cqlActivityErrorHandler.handleError(startedOp.getCycle(), cqlCycleException1); + + if (errorStatus.getResponse() == ErrorResponse.stop) { + activity.getActivityController().stopActivityWithErrorAsync(cqlop.throwable); + return; + } + + if (errorStatus.isRetryable() && cqlop.triesAttempted < maxTries) { + startedOp.retry(); + try (Timer.Context executeTime = activity.executeTimer.time()) { + cqlop.future = activity.getSession().executeAsync(cqlop.statement); + Futures.addCallback(cqlop.future, cqlop); + return; + } + } + + FailedOp failed = startedOp.fail(errorStatus.getResultCode()); + activity.resultTimer.update(failed.getServiceTimeNanos(), 
TimeUnit.NANOSECONDS); + activity.triesHisto.update(cqlop.triesAttempted); + + + } + + + @Override + public void onActivityDefUpdate(ActivityDef activityDef) { + this.maxTries = activity.getMaxTries(); + this.showcql = activity.isShowCql(); + this.cqlActivityErrorHandler = activity.getCqlErrorHandler(); + this.statementFilter = activity.getStatementFilter(); + this.rowOps = activity.getRowCycleOperators(); + this.cycleOps = activity.getResultSetCycleOperators(); + this.modifiers = activity.getStatementModifiers(); + } + + public String toString() { + return "CqlAsyncAction["+this.slot+"]"; + } +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/core/CqlOpData.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/core/CqlOpData.java new file mode 100644 index 000000000..758923336 --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/core/CqlOpData.java @@ -0,0 +1,52 @@ +package io.nosqlbench.activitytype.cql.ebdrivers.cql.core; + +import com.datastax.driver.core.ResultSet; +import com.datastax.driver.core.ResultSetFuture; +import com.datastax.driver.core.Statement; +import io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.core.ReadyCQLStatement; +import com.google.common.util.concurrent.FutureCallback; +import io.nosqlbench.engine.api.activityapi.core.ops.fluent.opfacets.StartedOp; + +public class CqlOpData implements FutureCallback { + final long cycle; + + // op state is managed via callbacks, we keep a ref here + StartedOp startedOp; + + boolean skipped=false; + private CqlAsyncAction action; + int triesAttempted =0; + + ReadyCQLStatement readyCQLStatement; + Statement statement; + ResultSetFuture future; + ResultSet resultSet; + + long totalRowsFetchedForQuery; + long totalPagesFetchedForQuery; + + public Throwable throwable; + public long resultAt; + private long errorAt; + + public CqlOpData(long cycle, CqlAsyncAction action) { + this.cycle = 
cycle; + this.action = action; + } + + @Override + public void onSuccess(ResultSet result) { + this.resultSet = result; + this.resultAt = System.nanoTime(); + action.onSuccess(startedOp); + + } + + @Override + public void onFailure(Throwable throwable) { + this.throwable=throwable; + this.errorAt = System.nanoTime(); + action.onFailure(startedOp); + } + +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/core/ProxyTranslator.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/core/ProxyTranslator.java new file mode 100644 index 000000000..f7c022019 --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/core/ProxyTranslator.java @@ -0,0 +1,32 @@ +package io.nosqlbench.activitytype.cql.ebdrivers.cql.core; + +import com.datastax.driver.core.policies.AddressTranslator; +import com.datastax.driver.core.Cluster; + +import java.net.InetSocketAddress; + + +public class ProxyTranslator implements AddressTranslator { + + private int hostsIndex = 0; + + private InetSocketAddress address; + + public ProxyTranslator(InetSocketAddress host){ + this.address= host; + } + + @Override + public void init(Cluster cluster) { + // Nothing to do + } + + @Override + public InetSocketAddress translate(InetSocketAddress address) { + return address; + } + + @Override + public void close() { + } +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/core/StatementModifier.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/core/StatementModifier.java new file mode 100644 index 000000000..522c49eb6 --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/core/StatementModifier.java @@ -0,0 +1,11 @@ +package io.nosqlbench.activitytype.cql.ebdrivers.cql.core; + +import com.datastax.driver.core.Statement; + +/** + * Provides a modular way for any CQL activities to modify statements 
before execution. + * Each active modifier returns a statement in turn. + */ +public interface StatementModifier { + Statement modify(Statement unmodified, long cycleNum); +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/errorhandling/CQLExceptionEnum.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/errorhandling/CQLExceptionEnum.java new file mode 100644 index 000000000..0c2a04a9a --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/errorhandling/CQLExceptionEnum.java @@ -0,0 +1,113 @@ +package io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling; + +import com.datastax.driver.core.exceptions.*; +import io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.exceptions.*; +import io.nosqlbench.engine.api.activityapi.cyclelog.buffers.results.ResultReadable; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +/** + * This enumerates all known exception classes, including supertypes, + * for the purposes of stable naming in error handling. 
+ * This is current as of com.datastax.cassandra:cassandra-driver-core:3.2.0 + */ +public enum CQLExceptionEnum implements ResultReadable { + + FrameTooLongException(FrameTooLongException.class, 1), + CodecNotFoundException(CodecNotFoundException.class, 2), + DriverException(DriverException.class, 3), + + AuthenticationException(AuthenticationException.class, 4), + TraceRetrievalException(TraceRetrievalException.class, 5), + UnsupportedProtocolVersionException(UnsupportedProtocolVersionException.class, 6), + NoHostAvailableException(NoHostAvailableException.class, 7), + QueryValidationException(QueryValidationException.class, 8), + InvalidQueryException(InvalidQueryException.class, 9), + InvalidConfigurationInQueryException(InvalidConfigurationInQueryException.class, 10), + UnauthorizedException(UnauthorizedException.class, 11), + SyntaxError(SyntaxError.class, 12), + AlreadyExistsException(AlreadyExistsException.class, 13), + UnpreparedException(UnpreparedException.class, 14), + InvalidTypeException(InvalidTypeException.class, 15), + QueryExecutionException(QueryExecutionException.class, 16), + UnavailableException(UnavailableException.class, 17), + BootstrappingException(BootstrappingException.class, 18), + OverloadedException(OverloadedException.class, 19), + TruncateException(TruncateException.class, 20), + QueryConsistencyException(QueryConsistencyException.class, 21), + WriteTimeoutException(WriteTimeoutException.class, 22), + WriteFailureException(WriteFailureException.class, 23), + ReadFailureException(ReadFailureException.class, 24), + ReadTimeoutException(ReadTimeoutException.class, 25), + FunctionExecutionException(FunctionExecutionException.class, 26), + DriverInternalError(DriverInternalError.class, 27), + ProtocolError(ProtocolError.class, 28), + ServerError(ServerError.class, 29), + BusyPoolException(BusyPoolException.class, 30), + ConnectionException(ConnectionException.class, 31), + TransportException(TransportException.class, 32), + 
OperationTimedOutException(OperationTimedOutException.class, 33), + PagingStateException(PagingStateException.class, 34), + UnresolvedUserTypeException(UnresolvedUserTypeException.class, 35), + UnsupportedFeatureException(UnsupportedFeatureException.class, 36), + BusyConnectionException(BusyConnectionException.class, 37), + + ChangeUnappliedCycleException(ChangeUnappliedCycleException.class, 38), + ResultSetVerificationException(io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.exceptions.ResultSetVerificationException.class, 39), + RowVerificationException(io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.exceptions.RowVerificationException.class, 40), + UnexpectedPagingException(io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.exceptions.UnexpectedPagingException.class, 41), + EbdseCycleException(CqlCycleException.class, 42), + MaxTriesExhaustedException(io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.exceptions.MaxTriesExhaustedException.class,43); + + private final static Logger logger = LoggerFactory.getLogger(CQLExceptionEnum.class); + + private static Map codesByName = getCodesByName(); + private static String[] namesByCode = getNamesByCode(); + + private final Class exceptionClass; + private final int resultCode; + + CQLExceptionEnum(Class clazz, int resultCode) { + this.exceptionClass = clazz; + this.resultCode = resultCode; + } + + public Class getExceptionClass() { + return exceptionClass; + } + + public int getResultCode() { + return resultCode; + } + + public int getResult() { + return this.resultCode; + } + + private static Map getCodesByName() { + codesByName = new HashMap<>(); + for (CQLExceptionEnum cqlExceptionEnum : CQLExceptionEnum.values()) { + codesByName.put(cqlExceptionEnum.toString(), cqlExceptionEnum.resultCode); + } + codesByName.put("NONE",0); + return codesByName; + } + + private static String[] getNamesByCode() { + List namesByCode = new ArrayList<>(); + namesByCode.add("NONE"); + for 
(CQLExceptionEnum cqlExceptionEnum : CQLExceptionEnum.values()) { + int code = cqlExceptionEnum.resultCode; + for (int i = namesByCode.size(); i <= code ; i++) { + namesByCode.add("UNKNOWN"); + } + namesByCode.set(code, cqlExceptionEnum.toString()); + } + return namesByCode.toArray(new String[0]); + } +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/errorhandling/EbdseCycleErrorHandler.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/errorhandling/EbdseCycleErrorHandler.java new file mode 100644 index 000000000..f7a67d881 --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/errorhandling/EbdseCycleErrorHandler.java @@ -0,0 +1,101 @@ +package io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling; + +import io.nosqlbench.activitytype.cql.ebdrivers.cql.api.ErrorResponse; +import io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.exceptions.CQLCycleException; +import io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.exceptions.CQLExceptionDetailer; +import io.nosqlbench.engine.api.activityapi.errorhandling.CycleErrorHandler; +import io.nosqlbench.engine.api.metrics.ExceptionCountMetrics; +import io.nosqlbench.engine.api.metrics.ExceptionHistoMetrics; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * A contextualized error handler that can catch a cycle-specific error. + * In this class, the error handlers return a boolean, which indicates + * to the call whether or not to retry the operation. This handler implements + * the error handling stack approach, which allows the user to select an + * entry point in the stack, with all lesser impacting handler rules + * applied from most impacting to least impacting order. + * + * For simplicity, the handler stack is fixed as described below. It is not + * possible to rearrange the verbs. 
Some care has been given to making sure + * that the selected handlers are complete and intuitive. + * + * The standard handler stack looks like this: + * + *
+ * <ol>
+ * <li>stop - log and throw an exception, which should escape to the
+ * next level of exception handling, the level which causes ebdse
+ * to stop running. In this case, and only in this case, the remaining
+ * handlers in the stack are not used.</li>
+ * <li>warn - log an exception without stopping execution.</li>
+ * <li>retry - retry an operation up to a limit, IFF it is retryable</li>
+ * <li>count - count, in metrics, the number of this particular error type</li>
+ * <li>ignore - do nothing</li>
+ * </ol>
+ * + * As indicated above, if you specify "warn" for a particular error type, this means + * that also retry, count, will apply, as well as ignore, in that order. "ignore" is + * simply a no-op that allows you to specify it as the minimum case. + */ +@SuppressWarnings("Duplicates") +public class EbdseCycleErrorHandler implements CycleErrorHandler { + + private static final Logger logger = LoggerFactory.getLogger(EbdseCycleErrorHandler.class); + + private ErrorResponse errorResponse; + private ExceptionCountMetrics exceptionCountMetrics; + private final ExceptionHistoMetrics exceptionHistoMetrics; + private boolean throwExceptionOnStop=false; + + public EbdseCycleErrorHandler( + ErrorResponse errorResponse, + ExceptionCountMetrics exceptionCountMetrics, + ExceptionHistoMetrics exceptionHistoMetrics, + boolean throwExceptionOnStop) { + this.errorResponse = errorResponse; + this.exceptionCountMetrics = exceptionCountMetrics; + this.exceptionHistoMetrics = exceptionHistoMetrics; + this.throwExceptionOnStop = throwExceptionOnStop; + } + + @Override + public ErrorStatus handleError(long cycle, Throwable contextError) { + CQLCycleException cce = (CQLCycleException) contextError; + Throwable error = cce.getCause(); + + boolean retry = false; + switch (errorResponse) { + case stop: + logger.error("error with cycle " + cycle + ": statement: " + cce.getStatement() + " errmsg: " + + CQLExceptionDetailer.messageFor(cycle, error)); + if (throwExceptionOnStop) { + throw new RuntimeException(error); + } + + case warn: + logger.warn("error with cycle " + cycle + ": statement: " + cce.getStatement() + " errmsg: " + + CQLExceptionDetailer.messageFor(cycle, error)); + case retry: + retry = true; + case histogram: + exceptionHistoMetrics.update(error,cce.getDurationNanos()); + case count: + exceptionCountMetrics.count(error); + case ignore: + default: + break; + } + return new ErrorStatus(errorResponse, retry,-1); + } + + @Override + public ErrorStatus handleError(long cycle, 
Throwable contextError, String errMsg) { + return handleError(cycle,contextError); + } + + public String toString() { + return this.errorResponse.toString(); + } +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/errorhandling/ErrorStatus.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/errorhandling/ErrorStatus.java new file mode 100644 index 000000000..c7b7ecd21 --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/errorhandling/ErrorStatus.java @@ -0,0 +1,31 @@ +package io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling; + +import io.nosqlbench.activitytype.cql.ebdrivers.cql.api.ErrorResponse; + +public class ErrorStatus { + private boolean retryable; + private int resultCode; + private ErrorResponse response; + + public ErrorStatus(ErrorResponse response, boolean retryable, int resultCode) { + this.response = response; + this.retryable = retryable; + this.resultCode = resultCode; + } + + public boolean isRetryable() { + return retryable; + } + + public int getResultCode() { + return resultCode; + } + + public void setResultCode(int resultCode) { + this.resultCode = resultCode; + } + + public ErrorResponse getResponse() { + return response; + } +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/errorhandling/ExceptionMap.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/errorhandling/ExceptionMap.java new file mode 100644 index 000000000..fe12ddf51 --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/errorhandling/ExceptionMap.java @@ -0,0 +1,80 @@ +package io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling; + +import com.datastax.driver.core.exceptions.*; +import io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.exceptions.*; + +import java.util.LinkedHashMap; +import java.util.Map; + +/** + * This 
enumerates all known exception classes, including supertypes, + * for the purposes of stable naming in error handling. + * This is current as of com.datastax.cassandra:cassandra-driver-core:3.2.0 + */ +public class ExceptionMap { + + private final static Map, Class> map + = new LinkedHashMap, Class>() { + { + put(FrameTooLongException.class, DriverException.class); + put(CodecNotFoundException.class, DriverException.class); + put(AuthenticationException.class, DriverException.class); + put(TraceRetrievalException.class, DriverException.class); + put(UnsupportedProtocolVersionException.class, DriverException.class); + put(NoHostAvailableException.class, DriverException.class); + put(QueryValidationException.class, DriverException.class); + put(InvalidQueryException.class, QueryValidationException.class); + put(InvalidConfigurationInQueryException.class, InvalidQueryException.class); + put(UnauthorizedException.class, QueryValidationException.class); + put(SyntaxError.class, QueryValidationException.class); + put(AlreadyExistsException.class, QueryValidationException.class); + put(UnpreparedException.class, QueryValidationException.class); + put(InvalidTypeException.class, DriverException.class); + put(QueryExecutionException.class, DriverException.class); + put(UnavailableException.class, QueryValidationException.class); + put(BootstrappingException.class, QueryValidationException.class); + put(OverloadedException.class, QueryValidationException.class); + put(TruncateException.class, QueryValidationException.class); + put(QueryConsistencyException.class, QueryValidationException.class); + put(WriteTimeoutException.class, QueryConsistencyException.class); + put(WriteFailureException.class, QueryConsistencyException.class); + put(ReadFailureException.class, QueryConsistencyException.class); + put(ReadTimeoutException.class, QueryConsistencyException.class); + put(FunctionExecutionException.class, QueryValidationException.class); + put(DriverInternalError.class, 
DriverException.class); + put(ProtocolError.class, DriverInternalError.class); + put(ServerError.class, DriverInternalError.class); + put(BusyPoolException.class, DriverException.class); + put(ConnectionException.class, DriverException.class); + put(TransportException.class, ConnectionException.class); + put(OperationTimedOutException.class, ConnectionException.class); + put(PagingStateException.class, DriverException.class); + put(UnresolvedUserTypeException.class, DriverException.class); + put(UnsupportedFeatureException.class, DriverException.class); + put(BusyConnectionException.class, DriverException.class); + + put(ChangeUnappliedCycleException.class, CqlCycleException.class); + put(ResultSetVerificationException.class, CqlCycleException.class); + put(RowVerificationException.class, CqlCycleException.class); + put(UnexpectedPagingException.class, CqlCycleException.class); + put(CqlCycleException.class, RuntimeException.class); + } + }; + + public Class put( + Class exceptionClass, + Class parentClass) { + if (exceptionClass.getSuperclass() != parentClass) { + throw new RuntimeException("Sanity check failed: " + exceptionClass + + " is not a parent class of " + parentClass); + } + return map.put(exceptionClass, parentClass); + } + + public static Map, Class> getMap() { + return map; + } + + + +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/errorhandling/HashedCQLErrorHandler.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/errorhandling/HashedCQLErrorHandler.java new file mode 100644 index 000000000..0ed092569 --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/errorhandling/HashedCQLErrorHandler.java @@ -0,0 +1,82 @@ +package io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling; + +import com.datastax.driver.core.exceptions.*; +import io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.exceptions.CQLCycleException; +import 
io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.exceptions.ChangeUnappliedCycleException; +import io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.exceptions.ResultSetVerificationException; +import io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.exceptions.RowVerificationException; +import io.nosqlbench.engine.api.activityapi.errorhandling.CycleErrorHandler; +import io.nosqlbench.engine.api.activityapi.errorhandling.HashedErrorHandler; +import io.nosqlbench.engine.api.metrics.ExceptionCountMetrics; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class HashedCQLErrorHandler extends HashedErrorHandler { + private static final Logger logger = LoggerFactory.getLogger(HashedCQLErrorHandler.class); + +// private static Set> UNVERIFIED_ERRORS = new HashSet>() {{ +// add(RowVerificationException.class); +// add(ResultSetVerificationException.class); +// }}; + private ExceptionCountMetrics exceptionCountMetrics; + private static ThreadLocal tlResultCode = ThreadLocal.withInitial(() -> (0)); + + public HashedCQLErrorHandler(ExceptionCountMetrics exceptionCountMetrics) { + this.exceptionCountMetrics = exceptionCountMetrics; + this.setGroup("retryable", + NoHostAvailableException.class, + UnavailableException.class, + OperationTimedOutException.class, + OverloadedException.class, + WriteTimeoutException.class, + ReadTimeoutException.class + ); + this.setGroup( + "unapplied", + ChangeUnappliedCycleException.class + ); + this.setGroup("unverified", + RowVerificationException.class, + ResultSetVerificationException.class + ); + // realerrors is everything else but the above + } + + private static class UncaughtErrorHandler implements CycleErrorHandler { + @Override + public ErrorStatus handleError(long cycle, Throwable error, String errMsg) { + throw new RuntimeException( + "An exception was thrown in cycle " + cycle + " that has no error: " + errMsg + ", error:" + error + ); + } + } + + @Override + public ErrorStatus 
handleError(long cycle, Throwable throwable, String errMsg) { + int resultCode = 127; + if (throwable instanceof CQLCycleException) { + CQLCycleException cce = (CQLCycleException) throwable; + Throwable cause = cce.getCause(); + try { + String simpleName = cause.getClass().getSimpleName(); + CQLExceptionEnum cqlExceptionEnum = CQLExceptionEnum.valueOf(simpleName); + resultCode = cqlExceptionEnum.getResult(); + } catch (Throwable t) { + logger.warn("unrecognized exception while mapping status code via Enum: " + throwable.getClass()); + } + } else { + logger.warn("un-marshaled exception while mapping status code: " + throwable.getClass()); + } + ErrorStatus errorStatus = super.handleError(cycle, throwable, errMsg); + errorStatus.setResultCode(resultCode); + return errorStatus; + } + + public static int getThreadStatusCode() { + return tlResultCode.get(); + } + + public static void resetThreadStatusCode() { + tlResultCode.set(0); + } +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/errorhandling/exceptions/CQLCycleException.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/errorhandling/exceptions/CQLCycleException.java new file mode 100644 index 000000000..79307f37b --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/errorhandling/exceptions/CQLCycleException.java @@ -0,0 +1,38 @@ +package io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.exceptions; + +import io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.core.ReadyCQLStatement; + +/** + * In internal exception type that is used to saverow exception + * context from within a CQL activity cycle. 
+ */ +public class CQLCycleException extends Exception { + + private final long cycleValue; + private final long durationNanos; + private final ReadyCQLStatement readyCQLStatement; + + public CQLCycleException(long cycleValue, long durationNanos, Throwable e, ReadyCQLStatement readyCQLStatement) { + super(e); + this.cycleValue = cycleValue; + this.durationNanos = durationNanos; + this.readyCQLStatement = readyCQLStatement; + } + + public long getCycleValue() { + return cycleValue; + } + + public long getDurationNanos() { + return durationNanos; + } + + public ReadyCQLStatement getReadyCQLStatement() { + return readyCQLStatement; + } + + public String getStatement() { + return readyCQLStatement.getQueryString(cycleValue); + } + +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/errorhandling/exceptions/CQLExceptionDetailer.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/errorhandling/exceptions/CQLExceptionDetailer.java new file mode 100644 index 000000000..e3c054fa4 --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/errorhandling/exceptions/CQLExceptionDetailer.java @@ -0,0 +1,25 @@ +package io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.exceptions; + +import com.datastax.driver.core.exceptions.ReadTimeoutException; +import com.datastax.driver.core.exceptions.WriteTimeoutException; + +public class CQLExceptionDetailer { + + public static String messageFor(long cycle, Throwable e) { + + if (e instanceof ReadTimeoutException) { + ReadTimeoutException rte = (ReadTimeoutException) e; + return rte.getMessage() + + ", coordinator: " + rte.getHost() + + ", wasDataRetrieved: " + rte.wasDataRetrieved(); + } + + if (e instanceof WriteTimeoutException) { + WriteTimeoutException wte = (WriteTimeoutException) e; + return wte.getMessage() + + ", coordinator: " + wte.getHost(); + } + + return e.getMessage(); + } +} diff --git 
a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/errorhandling/exceptions/CQLResultSetException.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/errorhandling/exceptions/CQLResultSetException.java new file mode 100644 index 000000000..6c807af77 --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/errorhandling/exceptions/CQLResultSetException.java @@ -0,0 +1,56 @@ +package io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.exceptions; + +import com.datastax.driver.core.BoundStatement; +import com.datastax.driver.core.ResultSet; +import com.datastax.driver.core.SimpleStatement; +import com.datastax.driver.core.Statement; + +public abstract class CQLResultSetException extends CqlCycleException { + + private final Statement statement; + private final ResultSet resultSet; + + public CQLResultSetException(long cycle, ResultSet resultSet, Statement statement, String message, Throwable cause) { + super(cycle,message,cause); + this.resultSet = resultSet; + this.statement = statement; + } + + public CQLResultSetException(long cycle, ResultSet resultSet, Statement statement) { + super(cycle); + this.resultSet = resultSet; + this.statement = statement; + } + + public CQLResultSetException(long cycle, ResultSet resultSet, Statement statement, String message) { + super(cycle,message); + this.resultSet = resultSet; + this.statement=statement; + } + + public CQLResultSetException(long cycle, ResultSet resultSet, Statement statement, Throwable cause) { + super(cycle,cause); + this.resultSet = resultSet; + this.statement = statement; + } + + public Statement getStatement() { + return statement; + } + + public ResultSet getResultSet() { + return resultSet; + } + + protected static String getQueryString(Statement stmt) { + if (stmt instanceof BoundStatement) { + return ((BoundStatement)stmt).preparedStatement().getQueryString(); + } else if (stmt instanceof 
SimpleStatement) { + return ((SimpleStatement) stmt).getQueryString(); + } else { + return "UNKNOWN Statement type:" + stmt.getClass().getSimpleName(); + } + } + + +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/errorhandling/exceptions/ChangeUnappliedCycleException.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/errorhandling/exceptions/ChangeUnappliedCycleException.java new file mode 100644 index 000000000..e2526a8b3 --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/errorhandling/exceptions/ChangeUnappliedCycleException.java @@ -0,0 +1,26 @@ +package io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.exceptions; + +import com.datastax.driver.core.ResultSet; + +/** + * This was added to nosqlbench because the error handling logic was + * starting to look a bit contrived. Because we need to be able + * to respond to different result outcomes, it + * is just simpler to have a single type of error-handling logic for all outcomes. 
+ */ +public class ChangeUnappliedCycleException extends CqlCycleException { + + private final ResultSet resultSet; + private final String queryString; + + public ChangeUnappliedCycleException(long cycle, ResultSet resultSet, String queryString) { + super(cycle, "Operation was not applied:" + queryString); + this.resultSet = resultSet; + this.queryString = queryString; + } + + public ResultSet getResultSet() { + return resultSet; + } + public String getQueryString() { return queryString; } +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/errorhandling/exceptions/CqlCycleException.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/errorhandling/exceptions/CqlCycleException.java new file mode 100644 index 000000000..cb3a8754e --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/errorhandling/exceptions/CqlCycleException.java @@ -0,0 +1,38 @@ +package io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.exceptions; + +public abstract class CqlCycleException extends RuntimeException { + + private long cycle; + + public CqlCycleException(long cycle, Throwable cause) { + super(cause); + this.cycle = cycle; + } + + public CqlCycleException(long cycle, String message) { + super(message); + this.cycle = cycle; + } + + public CqlCycleException(long cycle, String message, Throwable cause) { + super(message, cause); + this.cycle = cycle; + } + + public CqlCycleException(long cycle) { + super(); + this.cycle = cycle; + } + + @Override + public String getMessage() { + return "cycle:" + cycle + " caused by:" + super.getMessage(); + } + + public long getCycle() { + return cycle; + } + + + +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/errorhandling/exceptions/MaxTriesExhaustedException.java 
b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/errorhandling/exceptions/MaxTriesExhaustedException.java new file mode 100644 index 000000000..1063e2c0b --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/errorhandling/exceptions/MaxTriesExhaustedException.java @@ -0,0 +1,20 @@ +package io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.exceptions; + +public class MaxTriesExhaustedException extends CqlCycleException { + + private int maxtries; + + public MaxTriesExhaustedException(long cycle, int maxtries) { + super(cycle); + this.maxtries = maxtries; + } + + public int getMaxTries() { + return maxtries; + } + + @Override + public String getMessage() { + return "Exhausted max tries (" + getMaxTries() + ") on cycle " + getCycle() + "."; + } +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/errorhandling/exceptions/ResultSetVerificationException.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/errorhandling/exceptions/ResultSetVerificationException.java new file mode 100644 index 000000000..f65613b7f --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/errorhandling/exceptions/ResultSetVerificationException.java @@ -0,0 +1,17 @@ +package io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.exceptions; + +import com.datastax.driver.core.ResultSet; +import com.datastax.driver.core.Statement; + +public class ResultSetVerificationException extends CQLResultSetException { + + public ResultSetVerificationException( + long cycle, ResultSet resultSet, Statement statement, Throwable cause) { + super(cycle, resultSet, statement, cause); + } + + public ResultSetVerificationException( + long cycle, ResultSet resultSet, Statement statement, String s) { + super(cycle, resultSet, statement, s + ", \nquery string:\n" + getQueryString(statement)); + } +} diff --git 
a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/errorhandling/exceptions/RowVerificationException.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/errorhandling/exceptions/RowVerificationException.java new file mode 100644 index 000000000..a24b3caba --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/errorhandling/exceptions/RowVerificationException.java @@ -0,0 +1,33 @@ +package io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.exceptions; + +import com.datastax.driver.core.Row; + +import java.util.Map; + +/** + * This exception is thrown when read verification fails. + */ +public class RowVerificationException extends CqlCycleException { + + private Map expected; + private Row row; + + public RowVerificationException(long cycle, Row row, Map expected, String detail) { + super(cycle, detail); + this.expected = expected; + this.row = row; + } + + @Override + public String getMessage() { + return "cycle:" + getCycle() + ": " + super.getMessage(); + } + + public Map getExpectedValues() { + return expected; + } + + public Row getRow() { + return row; + } +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/errorhandling/exceptions/UnexpectedPagingException.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/errorhandling/exceptions/UnexpectedPagingException.java new file mode 100644 index 000000000..73156c8f0 --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/errorhandling/exceptions/UnexpectedPagingException.java @@ -0,0 +1,55 @@ +package io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.exceptions; + +import com.datastax.driver.core.ResultSet; + +/** + *

This is not a core exception. It was added to the CQL activity type + * driver for nosqlbench specifically to catch the following unexpected + * condition: + * Paging would be needed to read all the results from a read query, but the user + * is not expecting to intentionally check and iterate the result sets for paging. + *

+ * This should only be thrown if a result set would need paging, but configuration + * options specify that it should not expect to. Rather than assume paging is completely + * expected or unexpected, we simply assume that only 1 page is allowed, being the + * first page, or what is thought of as "not paging". + *

If this error is thrown, and paging is expected, then the user can adjust + * fetchsize or maxpages in order to open up paging to the degree that is allowable or + * expected. + */ +public class UnexpectedPagingException extends CqlCycleException { + + private final ResultSet resultSet; + private final String queryString; + private final int fetchSize; + private int fetchedPages; + private int maxpages; + + public UnexpectedPagingException( + long cycle, + ResultSet resultSet, + String queryString, + int fetchedPages, + int maxpages, + int fetchSize) { + super(cycle); + this.resultSet = resultSet; + this.queryString = queryString; + this.fetchedPages = fetchedPages; + this.maxpages = maxpages; + this.fetchSize = fetchSize; + } + + public ResultSet getResultSet() { + return resultSet; + } + + public String getMessage() { + StringBuilder sb = new StringBuilder(); + sb.append("Additional paging would be required to read the results from this query fully" + + ", but the user has not explicitly indicated that paging was expected.") + .append(" fetched/allowed: ").append(fetchedPages).append("/").append(maxpages) + .append(" fetchSize(").append(fetchSize).append("): ").append(queryString); + return sb.toString(); + } +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/filtering/CQLResultFilterType.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/filtering/CQLResultFilterType.java new file mode 100644 index 000000000..c23faf86b --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/filtering/CQLResultFilterType.java @@ -0,0 +1,65 @@ +package io.nosqlbench.activitytype.cql.ebdrivers.cql.filtering; + +import io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.CQLExceptionEnum; +import io.nosqlbench.engine.api.activityapi.cyclelog.buffers.results.ResultReadable; +import io.nosqlbench.engine.api.activityapi.cyclelog.filters.ResultFilterDispenser; +import 
io.nosqlbench.engine.api.activityapi.cyclelog.filters.ResultValueFilterType; +import io.nosqlbench.engine.api.activityapi.cyclelog.filters.tristate.EnumReadableMappingFilter; +import io.nosqlbench.engine.api.activityapi.cyclelog.filters.tristate.TristateFilter; +import io.nosqlbench.engine.api.util.ConfigTuples; +import io.nosqlbench.virtdata.annotations.Service; + +import java.util.function.Predicate; + +@Service(ResultValueFilterType.class) +public class CQLResultFilterType implements ResultValueFilterType { + + @Override + public String getName() { + return "cql"; + } + + @Override + public ResultFilterDispenser getDispenser(String config) { + return new Dispenser(config); + } + + private class Dispenser implements ResultFilterDispenser { + private final ConfigTuples conf; + private final EnumReadableMappingFilter enumFilter; + private final Predicate filter; + + public Dispenser(String config) { + this.conf = new ConfigTuples(config); + ConfigTuples inout = conf.getAllMatching("in.*", "ex.*"); + + // Default policy is opposite of leading rule + TristateFilter.Policy defaultPolicy = TristateFilter.Policy.Discard; + if (conf.get(0).get(0).startsWith("ex")) { + defaultPolicy = TristateFilter.Policy.Keep; + } + + this.enumFilter = + new EnumReadableMappingFilter<>(CQLExceptionEnum.values(), TristateFilter.Policy.Ignore); + + for (ConfigTuples.Section section : inout) { + if (section.get(0).startsWith("in")) { + this.enumFilter.addPolicy(section.get(1), TristateFilter.Policy.Keep); + } else if (section.get(0).startsWith("ex")) { + this.enumFilter.addPolicy(section.get(1), TristateFilter.Policy.Discard); + } else { + throw new RuntimeException("Section must start with in(clude) or ex(clude), but instead it is " + section); + } + + } + + this.filter = this.enumFilter.toDefaultingPredicate(defaultPolicy); + } + + @Override + public Predicate getResultFilter() { + return filter; + } + } + +} diff --git 
a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/binders/CqlBinderTypes.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/binders/CqlBinderTypes.java new file mode 100644 index 000000000..43a033c7e --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/binders/CqlBinderTypes.java @@ -0,0 +1,27 @@ +package io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.binders; + +import com.datastax.driver.core.PreparedStatement; +import com.datastax.driver.core.Session; +import com.datastax.driver.core.Statement; +import io.nosqlbench.virtdata.api.ValuesArrayBinder; + +public enum CqlBinderTypes { + direct_array, + unset_aware, + diagnostic; + + public final static CqlBinderTypes DEFAULT = unset_aware; + + public ValuesArrayBinder get(Session session) { + if (this==direct_array) { + return new DirectArrayValuesBinder(); + } else if (this== unset_aware) { + return new UnsettableValuesBinder(session); + } else if (this==diagnostic) { + return new DiagnosticPreparedBinder(); + } else { + throw new RuntimeException("Impossible-ish statement branch"); + } + } + +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/binders/DiagnosticPreparedBinder.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/binders/DiagnosticPreparedBinder.java new file mode 100644 index 000000000..80df09c15 --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/binders/DiagnosticPreparedBinder.java @@ -0,0 +1,48 @@ +package io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.binders; + +import com.datastax.driver.core.*; +import io.nosqlbench.activitytype.cql.ebdrivers.cql.core.CQLBindHelper; +import io.nosqlbench.virtdata.api.ValuesArrayBinder; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import 
java.util.List; + +/** + * This binder is not meant to be used primarily by default. It gives detailed + * diagnostics, but in order to do so by default it does lots of processing. + * Other binders will call to this one in an exception handler when needed in + * order to explain in more detail what is happening for users. + */ +public class DiagnosticPreparedBinder implements ValuesArrayBinder { + public static final Logger logger = LoggerFactory.getLogger(DiagnosticPreparedBinder.class); + @Override + public Statement bindValues(PreparedStatement prepared, Object[] values) { + ColumnDefinitions columnDefinitions = prepared.getVariables(); + BoundStatement bound = prepared.bind(); + List columnDefList; + if (columnDefinitions.asList().size() == values.length) { + columnDefList = columnDefinitions.asList(); + } else { + throw new RuntimeException("The number of named anchors in your statement does not match the number of bindings provided."); + } + + int i = 0; + for (Object value : values) { + if (columnDefList.size() <= i) { + logger.error("what gives?"); + } + ColumnDefinitions.Definition columnDef = columnDefList.get(i); + String colName = columnDef.getName(); + DataType.Name type = columnDef.getType().getName(); + try { + bound = CQLBindHelper.bindStatement(bound, colName, value, type); + } catch (ClassCastException e) { + logger.error(String.format("Unable to bind column %s to cql type %s with value %s", colName, type, value)); + throw e; + } + i++; + } + return bound; + } +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/binders/DirectArrayValuesBinder.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/binders/DirectArrayValuesBinder.java new file mode 100644 index 000000000..1f0f547c0 --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/binders/DirectArrayValuesBinder.java @@ -0,0 +1,37 @@ +package 
io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.binders; + +import com.datastax.driver.core.PreparedStatement; +import com.datastax.driver.core.Statement; +import io.nosqlbench.virtdata.api.ValuesArrayBinder; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.Arrays; + +/** + * This is now the main binder again, but if there are any exceptions, it delegates to the diagnostic + * one in order to explain what happened. This is to allow for higher performance in the general + * case, but with better user support when something goes wrong. + * + * If you want to force the client to use the array passing method of initializing a statement, + * use this one, known as 'directarray'. This does give up the benefit of allowing unset values + * to be modeled, and at no clear benefit. Thus the {@link CqlBinderTypes#unset_aware} one + * will become the default. + */ +public class DirectArrayValuesBinder implements ValuesArrayBinder { + public final static Logger logger = LoggerFactory.getLogger(DirectArrayValuesBinder.class); + + @Override + public Statement bindValues(PreparedStatement preparedStatement, Object[] objects) { + try { + return preparedStatement.bind(objects); + } catch (Exception e) { + StringBuilder sb = new StringBuilder(); + sb.append("Error binding objects to prepared statement directly, falling back to diagnostic binding layer:"); + sb.append(Arrays.toString(objects)); + logger.warn(sb.toString(),e); + DiagnosticPreparedBinder diag = new DiagnosticPreparedBinder(); + return diag.bindValues(preparedStatement, objects); + } + } +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/binders/SimpleStatementValuesBinder.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/binders/SimpleStatementValuesBinder.java new file mode 100644 index 000000000..f514672c6 --- /dev/null +++ 
b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/binders/SimpleStatementValuesBinder.java @@ -0,0 +1,19 @@ +package io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.binders; + +import com.datastax.driver.core.SimpleStatement; +import com.datastax.driver.core.Statement; +import io.nosqlbench.virtdata.api.ValuesArrayBinder; + +/** + * This binder is not meant to be used with anything but DDL or statements + * which should not be trying to parameterize values in general. If this changes, + * support will be added for parameterized values here. + */ +public class SimpleStatementValuesBinder + implements ValuesArrayBinder { + + @Override + public Statement bindValues(SimpleStatement context, Object[] values) { + return new SimpleStatement(context.getQueryString(), values); + } +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/binders/UnsettableValuesBinder.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/binders/UnsettableValuesBinder.java new file mode 100644 index 000000000..d3b3e03c0 --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/binders/UnsettableValuesBinder.java @@ -0,0 +1,73 @@ +package io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.binders; + +import com.datastax.driver.core.*; +import io.nosqlbench.virtdata.api.VALUE; +import io.nosqlbench.virtdata.api.ValuesArrayBinder; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.nio.ByteBuffer; +import java.util.List; + +public class UnsettableValuesBinder implements ValuesArrayBinder { + private final static Logger logger = LoggerFactory.getLogger(UnsettableValuesBinder.class); + + private final Session session; + private final CodecRegistry codecRegistry; + private final ProtocolVersion protocolVersion; + + public UnsettableValuesBinder(Session session) { + this.session = session; + 
this.codecRegistry = session.getCluster().getConfiguration().getCodecRegistry(); + this.protocolVersion = this.session.getCluster().getConfiguration().getProtocolOptions().getProtocolVersion(); + } + + + // TODO: Allow for warning when nulls are passed and they aren't expected + @Override + public Statement bindValues(PreparedStatement preparedStatement, Object[] objects) { + int i=-1; + try { + BoundStatement boundStmt = preparedStatement.bind(); + List defs = preparedStatement.getVariables().asList(); + for (i = 0; i < objects.length; i++) { + Object value = objects[i]; + if (VALUE.unset != value) { + if (null==value) { + boundStmt.setToNull(i); + } else { + DataType cqlType = defs.get(i).getType(); + TypeCodec codec = codecRegistry.codecFor(cqlType, value); + ByteBuffer serialized = codec.serialize(value, protocolVersion); + boundStmt.setBytesUnsafe(i,serialized); + } + } + } + return boundStmt; + } catch (Exception e) { + String typNam = (objects[i]==null ? "NULL" : objects[i].getClass().getCanonicalName()); + logger.error("Error binding column " + preparedStatement.getVariables().asList().get(i).getName() + " with class " + typNam + ": " + e.getMessage(), e); + throw e; +// StringBuilder sb = new StringBuilder(); +// sb.append("Error binding objects to prepared statement directly, falling back to diagnostic binding layer:"); +// sb.append(Arrays.toString(objects)); +// logger.warn(sb.toString(),e); +// DiagnosticPreparedBinder diag = new DiagnosticPreparedBinder(); +// return diag.bindValues(preparedStatement, objects); + } + } + +// static void setObject(Session session, BoundStatement bs, int index, Object value) { +// +// DataType cqlType = bs.preparedStatement().getVariables().getType(index); +// +// CodecRegistry codecRegistry = session.getCluster().getConfiguration().getCodecRegistry(); +// ProtocolVersion protocolVersion = +// session.getCluster().getConfiguration().getProtocolOptions().getProtocolVersion(); +// +// TypeCodec codec = 
codecRegistry.codecFor(cqlType, value); +// bs.setBytesUnsafe(index, codec.serialize(value, protocolVersion)); +// } + + +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/core/AvailableCQLStatements.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/core/AvailableCQLStatements.java new file mode 100644 index 000000000..d7928d92b --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/core/AvailableCQLStatements.java @@ -0,0 +1,50 @@ +package io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.core; + +import io.nosqlbench.engine.api.util.TagFilter; + +import java.util.*; +import java.util.stream.Collectors; + +public class AvailableCQLStatements { + + private List availableDefs = new ArrayList<>(); + + public AvailableCQLStatements(List allStatementDef) { + this.availableDefs = allStatementDef; + } + + public List getRawTagged() { + return availableDefs; + } + + public Map getFilteringDetails(String tagSpec) { + Map details = new LinkedHashMap<>(); + TagFilter ts = new TagFilter(tagSpec); + for (TaggedCQLStatementDefs availableDef : availableDefs) { + TagFilter.Result result = ts.matchesTaggedResult(availableDef); + String names = availableDef.getStatements().stream() + .map(CQLStatementDef::getName).collect(Collectors.joining(",")); + details.put(names, result.getLog()); + } + return details; + } + + public List getMatching(String tagSpec) { + + List defs = new ArrayList<>(); + TagFilter ts = new TagFilter(tagSpec); + List CQLStatementDefParsers = + availableDefs.stream() + .filter(ts::matchesTagged) + .map(TaggedCQLStatementDefs::getStatements) + .flatMap(Collection::stream) + .map(p -> new CQLStatementDefParser(p.getName(), p.getStatement())) + .collect(Collectors.toList()); + + return CQLStatementDefParsers; + } + + public List getAll() { + return getMatching(""); + } +} diff --git 
a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/core/CQLSessionCache.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/core/CQLSessionCache.java new file mode 100644 index 000000000..9711b899a --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/core/CQLSessionCache.java @@ -0,0 +1,339 @@ +package io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.core; + +import com.datastax.driver.core.*; +import com.datastax.driver.core.policies.*; +import com.datastax.driver.dse.DseCluster; +import io.nosqlbench.activitytype.cql.ebdrivers.cql.core.CQLOptions; +import io.nosqlbench.activitytype.cql.ebdrivers.cql.core.ProxyTranslator; +import io.nosqlbench.engine.api.activityapi.core.Shutdownable; +import io.nosqlbench.engine.api.activityimpl.ActivityDef; +import io.nosqlbench.engine.api.exceptions.BasicError; +import io.nosqlbench.engine.api.metrics.ActivityMetrics; +import io.nosqlbench.engine.api.scripting.NashornEvaluator; +import io.nosqlbench.engine.api.util.SSLKsFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.io.IOException; +import java.net.InetSocketAddress; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.*; + +public class CQLSessionCache implements Shutdownable { + + private final static Logger logger = LoggerFactory.getLogger(CQLSessionCache.class); + private final static String DEFAULT_SESSION_ID = "default"; + private static CQLSessionCache instance = new CQLSessionCache(); + private Map sessionCache = new HashMap<>(); + + private CQLSessionCache() { + } + + public static CQLSessionCache get() { + return instance; + } + + public void stopSession(ActivityDef activityDef) { + String key = activityDef.getParams().getOptionalString("clusterid").orElse(DEFAULT_SESSION_ID); + Session session = sessionCache.get(key); + 
session.getCluster().close(); + session.close(); + } + + public Session getSession(ActivityDef activityDef) { + String key = activityDef.getParams().getOptionalString("clusterid").orElse(DEFAULT_SESSION_ID); + return sessionCache.computeIfAbsent(key, (cid) -> createSession(activityDef, key)); + } + + // cbopts=\".withLoadBalancingPolicy(LatencyAwarePolicy.builder(new TokenAwarePolicy(new DCAwareRoundRobinPolicy(\"dc1-us-east\", 0, false))).build()).withRetryPolicy(new LoggingRetryPolicy(DefaultRetryPolicy.INSTANCE))\" + + private Session createSession(ActivityDef activityDef, String sessid) { + + String host = activityDef.getParams().getOptionalString("host").orElse("localhost"); + int port = activityDef.getParams().getOptionalInteger("port").orElse(9042); + + String driverType = activityDef.getParams().getOptionalString("driver").orElse("dse"); + + Cluster.Builder builder = + driverType.toLowerCase().equals("dse") ? DseCluster.builder() : + driverType.toLowerCase().equals("oss") ? Cluster.builder() : null; + + if (builder==null) { + throw new RuntimeException("The driver type '" + driverType + "' is not recognized"); + } + + logger.info("Using driver type '" + driverType.toUpperCase() + "'"); + + Optional scb = activityDef.getParams() + .getOptionalString("secureconnectbundle"); + scb.map(File::new) + .ifPresent(builder::withCloudSecureConnectBundle); + + activityDef.getParams() + .getOptionalString("insights").map(Boolean::parseBoolean) + .ifPresent(builder::withMonitorReporting); + + String[] contactPoints = activityDef.getParams().getOptionalString("host") + .map(h -> h.split(",")).orElse(null); + + if (contactPoints != null) { + builder.addContactPoints(contactPoints); + }else if (scb.isEmpty()){ + throw new BasicError("you must provide your contact " + + "points:\n hosts="); + } + + activityDef.getParams().getOptionalInteger("port").ifPresent(builder::withPort); + + builder.withCompression(ProtocolOptions.Compression.NONE); + + Optional usernameOpt = 
activityDef.getParams().getOptionalString("username"); + Optional passwordOpt = activityDef.getParams().getOptionalString("password"); + Optional passfileOpt = activityDef.getParams().getOptionalString("passfile"); + + if (usernameOpt.isPresent()) { + String username = usernameOpt.get(); + String password; + if (passwordOpt.isPresent()) { + password = passwordOpt.get(); + } else if (passfileOpt.isPresent()) { + Path path = Paths.get(passfileOpt.get()); + try { + password = Files.readAllLines(path).get(0); + } catch (IOException e) { + String error = "Error while reading password from file:" + passfileOpt; + logger.error(error, e); + throw new RuntimeException(e); + } + } else { + String error = "username is present, but neither password nor passfile are defined."; + logger.error(error); + throw new RuntimeException(error); + } + builder.withCredentials(username, password); + } + + Optional clusteropts = activityDef.getParams().getOptionalString("cbopts"); + if (clusteropts.isPresent()) { + try { + logger.info("applying cbopts:" + clusteropts.get()); + NashornEvaluator clusterEval = new NashornEvaluator<>(DseCluster.Builder.class); + clusterEval.put("builder", builder); + String importEnv = + "load(\"nashorn:mozilla_compat.js\");\n" + + " importPackage(com.google.common.collect.Lists);\n" + + " importPackage(com.google.common.collect.Maps);\n" + + " importPackage(com.datastax.driver);\n" + + " importPackage(com.datastax.driver.core);\n" + + " importPackage(com.datastax.driver.core.policies);\n" + + "builder" + clusteropts.get() + "\n"; + clusterEval.script(importEnv); + builder = clusterEval.eval(); + logger.info("successfully applied:" + clusteropts.get()); + } catch (Exception e) { + logger.error("Unable to evaluate: " + clusteropts.get() + " in script context:" + e.getMessage()); + throw e; + } + } + + SpeculativeExecutionPolicy speculativePolicy = activityDef.getParams() + .getOptionalString("speculative") + .map(speculative -> { + logger.info("speculative=>" + 
speculative); + return speculative; + }) + .map(CQLOptions::speculativeFor) + .orElse(CQLOptions.defaultSpeculativePolicy()); + builder.withSpeculativeExecutionPolicy(speculativePolicy); + + activityDef.getParams().getOptionalString("socketoptions") + .map(sockopts -> { + logger.info("socketoptions=>" + sockopts); + return sockopts; + }) + .map(CQLOptions::socketOptionsFor) + .ifPresent(builder::withSocketOptions); + + activityDef.getParams().getOptionalString("pooling") + .map(pooling -> { + logger.info("pooling=>" + pooling); + return pooling; + }) + .map(CQLOptions::poolingOptionsFor) + .ifPresent(builder::withPoolingOptions); + + activityDef.getParams().getOptionalString("whitelist") + .map(whitelist -> { + logger.info("whitelist=>" + whitelist); + return whitelist; + }) + .map(p -> CQLOptions.whitelistFor(p, null)) + .ifPresent(builder::withLoadBalancingPolicy); + + activityDef.getParams().getOptionalString("tickduration") + .map(tickduration -> { + logger.info("tickduration=>" + tickduration); + return tickduration; + }) + .map(CQLOptions::withTickDuration) + .ifPresent(builder::withNettyOptions); + + activityDef.getParams().getOptionalString("compression") + .map(compression -> { + logger.info("compression=>" + compression); + return compression; + }) + .map(CQLOptions::withCompression) + .ifPresent(builder::withCompression); + + if (activityDef.getParams().getOptionalString("ssl").isPresent()) { + logger.info("Cluster builder proceeding with SSL but no Client Auth"); + Object context = SSLKsFactory.get().getContext(activityDef); + SSLOptions sslOptions; + if (context instanceof javax.net.ssl.SSLContext) { + sslOptions = RemoteEndpointAwareJdkSSLOptions.builder() + .withSSLContext((javax.net.ssl.SSLContext) context).build(); + builder.withSSL(sslOptions); + } else if (context instanceof io.netty.handler.ssl.SslContext) { + sslOptions = + new RemoteEndpointAwareNettySSLOptions((io.netty.handler.ssl.SslContext) context); + } else { + throw new 
RuntimeException("Unrecognized ssl context object type: " + context.getClass().getCanonicalName()); + } + builder.withSSL(sslOptions); + } + +// JdkSSLOptions sslOptions = RemoteEndpointAwareJdkSSLOptions +// .builder() +// .withSSLContext(context) +// .build(); +// builder.withSSL(sslOptions); +// +// } +// +// boolean sslEnabled = activityDef.getParams().getOptionalBoolean("ssl").orElse(false); +// boolean jdkSslEnabled = activityDef.getParams().getOptionalBoolean("jdkssl").orElse(false); +// if (jdkSslEnabled){ +// sslEnabled = true; +// } +// +// // used for OpenSSL +// boolean openSslEnabled = activityDef.getParams().getOptionalBoolean("openssl").orElse(false); +// +// if (sslEnabled && openSslEnabled) { +// logger.error("You cannot enable both OpenSSL and JDKSSL, please pick one and try again!"); +// System.exit(2); +// } +// +// if (sslEnabled) { +// logger.info("Cluster builder proceeding with SSL but no Client Auth"); +// SSLContext context = SSLKsFactory.get().getContext(activityDef); +// JdkSSLOptions sslOptions = RemoteEndpointAwareJdkSSLOptions +// .builder() +// .withSSLContext(context) +// .build(); +// builder.withSSL(sslOptions); +// } +// else if (openSslEnabled) { +// logger.info("Cluster builder proceeding with SSL and Client Auth"); +// String keyPassword = activityDef.getParams().getOptionalString("keyPassword").orElse(null); +// String caCertFileLocation = activityDef.getParams().getOptionalString("caCertFilePath").orElse(null); +// String certFileLocation = activityDef.getParams().getOptionalString("certFilePath").orElse(null); +// String keyFileLocation = activityDef.getParams().getOptionalString("keyFilePath").orElse(null); +// +// +// try { +// +// KeyStore ks = KeyStore.getInstance("JKS", "SUN"); +// ks.load(null, keyPassword.toCharArray()); +// +// X509Certificate cert = (X509Certificate) CertificateFactory. +// getInstance("X509"). 
+// generateCertificate(new FileInputStream(caCertFileLocation)); +// +// //set alias to cert +// ks.setCertificateEntry(cert.getSubjectX500Principal().getName(), cert); +// +// TrustManagerFactory tMF = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm()); +// tMF.init(ks); +// +// +// SslContext sslContext = SslContextBuilder +// .forClient() +// /* configured with the TrustManagerFactory that has the cert from the ca.cert +// * This tells the driver to trust the server during the SSL handshake */ +// .trustManager(tMF) +// /* These are needed because the server is configured with require_client_auth +// * In this case the client's public key must be in the truststore on each DSE +// * server node and the CA configured */ +// .keyManager(new File(certFileLocation), new File(keyFileLocation)) +// .build(); +// +// RemoteEndpointAwareNettySSLOptions sslOptions = new RemoteEndpointAwareNettySSLOptions(sslContext); +// +// // Cluster builder with sslOptions +// builder.withSSL(sslOptions); +// +// } catch (Exception e) { +// throw new RuntimeException(e); +// } +// } + + RetryPolicy retryPolicy = activityDef.getParams() + .getOptionalString("retrypolicy") + .map(CQLOptions::retryPolicyFor).orElse(DefaultRetryPolicy.INSTANCE); + + if (retryPolicy instanceof LoggingRetryPolicy) { + logger.info("using LoggingRetryPolicy"); + } + + builder.withRetryPolicy(retryPolicy); + + if (!activityDef.getParams().getOptionalBoolean("jmxreporting").orElse(false)) { + builder.withoutJMXReporting(); + } + + // Proxy Translator and Whitelist for use with DS Cloud on-demand single-endpoint setup + if (activityDef.getParams().getOptionalBoolean("single-endpoint").orElse(false)) { + InetSocketAddress inetHost = new InetSocketAddress(host, port); + final List whiteList = new ArrayList<>(); + whiteList.add(inetHost); + + LoadBalancingPolicy whitelistPolicy = new WhiteListPolicy(new RoundRobinPolicy(), whiteList); + builder.withAddressTranslator(new 
ProxyTranslator(inetHost)).withLoadBalancingPolicy(whitelistPolicy); + } + + Cluster cl = builder.build(); + + // Apply default idempotence, if set + activityDef.getParams().getOptionalBoolean("defaultidempotence").map( + b -> cl.getConfiguration().getQueryOptions().setDefaultIdempotence(b) + ); + + Session session = cl.newSession(); + + // This also forces init of metadata + + logger.info("cluster-metadata-allhosts:\n" + session.getCluster().getMetadata().getAllHosts()); + + if (activityDef.getParams().getOptionalBoolean("drivermetrics").orElse(false)) { + String driverPrefix = "driver." + sessid; + driverPrefix = activityDef.getParams().getOptionalString("driverprefix").orElse(driverPrefix) + "."; + ActivityMetrics.mountSubRegistry(driverPrefix, cl.getMetrics().getRegistry()); + } + + return session; + } + + @Override + public void shutdown() { + for (Session session : sessionCache.values()) { + Cluster cluster = session.getCluster(); + session.close(); + cluster.close(); + } + } +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/core/CQLStatementDef.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/core/CQLStatementDef.java new file mode 100644 index 000000000..f07490971 --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/core/CQLStatementDef.java @@ -0,0 +1,105 @@ +package io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.core; + +import com.datastax.driver.core.ConsistencyLevel; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.*; +import java.util.stream.Collectors; + +public class CQLStatementDef { + private final static Logger logger = LoggerFactory.getLogger(CQLStatementDef.class); + + private Map params = new HashMap<>(); + private String name = ""; + private String statement = ""; + private boolean prepared = true; + private String cl = ConsistencyLevel.LOCAL_ONE.name(); 
+ private Map bindings = new HashMap<>(); + + public CQLStatementDef() { + } + + public String getGenSpec(String s) { + return bindings.get(s); + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public String getStatement() { + return statement; + } + + public void setStatement(String statement) { + this.statement = statement; + } + + public Map getBindings() { + return bindings; + } + + public void setBindings(Map bindings) { + this.bindings = bindings; + } + + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(" name:").append(this.getName()).append("\n"); + sb.append(" statement: |").append("\n"); + String formattedStmt = Arrays.asList(getStatement().split("\\r*\n")) + .stream().map(s -> " " + s) + .collect(Collectors.joining("\n")); + sb.append(formattedStmt); + if (bindings.size() > 0) { + sb.append(" bindings:\n"); + Optional maxLen = this.bindings.keySet().stream().map(String::length).reduce(Integer::max); + for (String bindName : this.bindings.keySet()) { + sb + .append(String.format(" %-" + (maxLen.orElse(20) + 2) + "s", bindName)).append(" : ") + .append(bindings.get(bindName)) + .append("\n"); + } + } + return sb.toString(); + } + + public boolean isPrepared() { + return prepared; + } + + public void setPrepared(boolean prepared) { + this.prepared = prepared; + } + + public String getConsistencyLevel() { + return this.cl; + } + + public void setConsistencyLevel(String consistencyLevel) { + this.cl = consistencyLevel; + } + + + public void setCl(String consistencyLevel) { + setConsistencyLevel(consistencyLevel); + } + + public Map getParams() { + return params; + } + + public void setParams(Map params) { + this.params = params; + } + + public long getRatio() { + return Long.parseLong(Optional.ofNullable(params.get("ratio")).orElse("1")); + } + +} diff --git 
a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/core/CQLStatementDefParser.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/core/CQLStatementDefParser.java
new file mode 100644
index 000000000..44b425608
--- /dev/null
+++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/core/CQLStatementDefParser.java
@@ -0,0 +1,162 @@
+package io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.core;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.*;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import java.util.stream.Collectors;
+
+public class CQLStatementDefParser {
+    private final static Logger logger = LoggerFactory.getLogger(CQLStatementDefParser.class);
+    // private final static Pattern templateToken = Pattern.compile("<<(\w+(:(.+?))?)>>");
+    private final static Pattern stmtToken = Pattern.compile("\?(\w+[-_\d\w]*)|\{(\w+[-_\d\w.]*)}");
+    private final static String UNSET_VALUE = "UNSET-VALUE";
+    private final String stmt;
+    private final String name;
+
+    private CQLStatementDef deprecatedDef; // deprecated, to be removed
+
+    public void setBindings(Map bindings) {
+        this.bindings = bindings;
+    }
+
+    private Map bindings;
+
+    public CQLStatementDef getDeprecatedDef() {
+        return deprecatedDef;
+    }
+
+    public void setDeprecatedDef(CQLStatementDef deprecatedDef) {
+        this.deprecatedDef = deprecatedDef;
+    }
+
+    public CQLStatementDefParser(String name, String stmt) {
+        this.stmt = stmt;
+        this.name = name;
+        this.bindings = new HashMap<>(); // BUGFIX: was "this.bindings = bindings;", a no-op self-assignment that left the field null
+    }
+
+    public Map getBindings() {
+        return bindings;
+    }
+
+    /**
+     * @return bindableNames in order as specified in the parameter placeholders
+     */
+    public List getBindableNames() {
+        Matcher m = stmtToken.matcher(stmt);
+        List bindNames = new ArrayList<>();
+        while (m.find()) {
+            String form1 = m.group(1);
+            String form2 = m.group(2);
+            bindNames.add( (form1!=null && !form1.isEmpty()) ? form1 : form2 );
+        }
+        return bindNames;
+    }
+
+    public String getName() {
+        return name;
+    }
+
+
+    public String getParsedStatementOrError(Set namedBindings) {
+        ParseResult result = getParseResult(namedBindings);
+        if (result.hasError()) {
+            throw new RuntimeException("Statement template has errors:\n" + result.toString());
+        }
+        return result.getStatement();
+    }
+
+    public ParseResult getParseResult(Set namedBindings) {
+
+        HashSet missingAnchors = new HashSet() {{ addAll(namedBindings); }};
+        HashSet missingBindings = new HashSet();
+
+        String statement = this.stmt;
+        StringBuilder cooked = new StringBuilder();
+
+        Matcher m = stmtToken.matcher(statement);
+        int lastMatch = 0;
+        String remainder = "";
+        while (m.find(lastMatch)) {
+            String pre = statement.substring(lastMatch, m.start());
+
+            String form1 = m.group(1);
+            String form2 = m.group(2);
+            String tokenName = (form1!=null && !form1.isEmpty()) ? form1 : form2;
+            lastMatch = m.end();
+            cooked.append(pre);
+            cooked.append("?");
+
+            if (!namedBindings.contains(tokenName)) {
+                missingBindings.add(tokenName);
+            } else {
+                if (missingAnchors.contains(tokenName)) {
+                    missingAnchors.remove(tokenName);
+                }
+            }
+
+        }
+
+        // add remainder of unmatched
+        if (lastMatch>=0) {
+            cooked.append(statement.substring(lastMatch));
+        }
+        else {
+            cooked.append(statement);
+        }
+
+        logger.info("Parsed statement as: " + cooked.toString().replaceAll("\n","\\n"));
+
+        return new ParseResult(cooked.toString(),name,bindings,missingBindings,missingAnchors);
+    }
+
+    public static class ParseResult {
+        private Set missingGenerators;
+        private Set missingAnchors;
+        private String statement;
+        private Map bindings;
+        private String name;
+
+        public ParseResult(String stmt, String name, Map bindings, Set missingGenerators, Set missingAnchors) {
+            this.missingGenerators = missingGenerators;
+            this.missingAnchors = missingAnchors;
+            this.statement = stmt;
+            this.name = name;
+            this.bindings = bindings; // BUGFIX: assignment was missing, so getBindings() always returned null
+        }
+
+        public String
toString() { + String generatorsSummary = (this.missingGenerators.size() > 0) ? + "\nundefined generators:" + this.missingGenerators.stream().collect(Collectors.joining(",", "[", "]")) : ""; + return "STMT:" + statement + "\n" + generatorsSummary; + } + + public String getName() { + return name; + } + + public Map getBindings() { + return bindings; + } + + public boolean hasError() { + return missingGenerators.size() > 0; + } + + public String getStatement() { + return statement; + } + + public Set getMissingAnchors() { + return missingAnchors; + } + + public Set getMissingGenerators() { + return missingGenerators; + } + } + +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/core/CQLStatementGroups.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/core/CQLStatementGroups.java new file mode 100644 index 000000000..724cdd54e --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/core/CQLStatementGroups.java @@ -0,0 +1,37 @@ +package io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.core; + +import java.util.*; + +public class CQLStatementGroups { + + private Map> statementGroups = new HashMap<>(); + + public CQLStatementGroups(Map> statementGroups) { + this.statementGroups = statementGroups; + + } + + public List getGroups(String... 
groupNames) { + List statements = new ArrayList(); + for (String groupName : groupNames) { + List adding = statementGroups.getOrDefault(groupName, Collections.emptyList()); + statements.addAll(adding); + } + return statements; + } + + public String toString() { + StringBuilder sb = new StringBuilder(); + List groups = new ArrayList(statementGroups.keySet()); + Collections.sort(groups); + sb.append("groups:\n"); + for (String group : groups) { +// sb.append("section:").append(section).append("\n"); + for (CQLStatementDefParser statementDef : statementGroups.get(group)) { + sb.append(statementDef.toString()); + } + sb.append("\n"); + } + return sb.toString(); + } +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/core/ReadyCQLStatement.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/core/ReadyCQLStatement.java new file mode 100644 index 000000000..0beacca39 --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/core/ReadyCQLStatement.java @@ -0,0 +1,182 @@ +package io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.core; + +import com.codahale.metrics.Histogram; +import com.codahale.metrics.Timer; +import com.datastax.driver.core.PreparedStatement; +import com.datastax.driver.core.SimpleStatement; +import com.datastax.driver.core.Statement; +import io.nosqlbench.activitytype.cql.ebdrivers.cql.api.ResultSetCycleOperator; +import io.nosqlbench.activitytype.cql.ebdrivers.cql.api.RowCycleOperator; +import io.nosqlbench.virtdata.api.ContextualArrayBindings; + +import java.io.IOException; +import java.io.Writer; +import java.util.concurrent.TimeUnit; + +/** + * A ReadyCQLStatement instantiates new statements to be executed at some mix ratio. + * It optionally holds metrics objects for a named statement. 
+ */ +public class ReadyCQLStatement { + + private String name; + private ContextualArrayBindings contextualBindings; + private long ratio; + private ResultSetCycleOperator[] resultSetOperators = null; + private RowCycleOperator[] rowCycleOperators = null; + + private Timer successTimer; + private Timer errorTimer; + private Histogram rowsFetchedHisto; + private Writer resultCsvWriter; + + public ReadyCQLStatement(ContextualArrayBindings contextualBindings, long ratio, String name) { + this.contextualBindings = contextualBindings; + this.ratio = ratio; + this.name = name; + } + + public ReadyCQLStatement withMetrics(Timer successTimer, Timer errorTimer, Histogram rowsFetchedHisto) { + this.successTimer = successTimer; + this.errorTimer = errorTimer; + this.rowsFetchedHisto = rowsFetchedHisto; + return this; + } + + public Statement bind(long value) { + return contextualBindings.bind(value); + } + + public ResultSetCycleOperator[] getResultSetOperators() { + return resultSetOperators; + } + + public ContextualArrayBindings getContextualBindings() { + return this.contextualBindings; + } + + public String getQueryString(long value) { + Object stmt = contextualBindings.getContext(); + if (stmt instanceof PreparedStatement) { + String queryString = ((PreparedStatement)stmt).getQueryString(); + StringBuilder sb = new StringBuilder(queryString.length()*2); + sb.append("(prepared) "); + return getQueryStringValues(value, queryString, sb); + } else if (stmt instanceof SimpleStatement) { + String queryString = ((SimpleStatement) stmt).getQueryString(); + StringBuilder sb = new StringBuilder(); + sb.append("(simple) "); + return getQueryStringValues(value, queryString, sb); + } + if (stmt instanceof String) { + return (String)stmt; + } + throw new RuntimeException("context object not recognized for query string:" + stmt.getClass().getCanonicalName()); + } + + private String getQueryStringValues(long value, String queryString, StringBuilder sb) { + if 
(!queryString.endsWith("\n")) { + sb.append("\n"); + } + sb.append(queryString).append(" VALUES["); + Object[] all = contextualBindings.getBindings().getAll(value); + String delim=""; + for (Object o : all) { + sb.append(delim); + delim=","; + sb.append(o.toString()); + } + sb.append("]"); + return sb.toString(); + } + + public long getRatio() { + return ratio; + } + + public void setRatio(long ratio) { + this.ratio = ratio; + } + + /** + * This method should be called when an associated statement is executed successfully. + * @param cycleValue The cycle associated with the execution. + * @param nanoTime The nanoTime duration of the execution. + * @param rowsFetched The number of rows fetched for this cycle + */ + public void onSuccess(long cycleValue, long nanoTime, long rowsFetched) { + if (successTimer!=null) { + successTimer.update(nanoTime, TimeUnit.NANOSECONDS); + } + if (rowsFetchedHisto!=null) { + rowsFetchedHisto.update(rowsFetched); + } + if (resultCsvWriter!=null) { + try { + synchronized(resultCsvWriter) { + // ,(SUCCESS|FAILURE),,,\n + resultCsvWriter + .append(String.valueOf(cycleValue)).append(",") + .append("SUCCESS,") + .append(String.valueOf(nanoTime)).append(",") + .append(String.valueOf(rowsFetched)) + .append(",NONE") + .append("\n"); + } + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + } + + /** + * This method should be called when an associated statement is executed unsuccessfully. + * It should be called only once per cycle in the case of execution error. + * @param cycleValue The cycle associated with the erred execution. + * @param resultNanos The nanoTime duration of the execution. 
+ * @param t The associated throwable + */ + public void onError(long cycleValue, long resultNanos, Throwable t) { + if (errorTimer!=null) { + errorTimer.update(resultNanos, TimeUnit.NANOSECONDS); + } + if (resultCsvWriter!=null) { + try { + synchronized(resultCsvWriter) { + // ,(SUCCESS|FAILURE),,,\n + resultCsvWriter + .append(String.valueOf(cycleValue)).append(",") + .append("FAILURE,") + .append(String.valueOf(resultNanos)).append(",") + .append("0,") + .append(t.getClass().getSimpleName()).append(",") + .append("\n"); + } + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + + } + + public ReadyCQLStatement withResultSetCycleOperators(ResultSetCycleOperator[] resultSetCycleOperators) { + this.resultSetOperators = resultSetCycleOperators; + return this; + } + + public ReadyCQLStatement withRowCycleOperators(RowCycleOperator[] rowCycleOperators) { + this.rowCycleOperators = rowCycleOperators; + return this; + } + + public RowCycleOperator[] getRowCycleOperators() { + return this.rowCycleOperators; + } + + public ReadyCQLStatement withResultCsvWriter(Writer resultCsvWriter) { + this.resultCsvWriter = resultCsvWriter; + return this; + } +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/core/ReadyCQLStatementTemplate.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/core/ReadyCQLStatementTemplate.java new file mode 100644 index 000000000..b1ce136fa --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/core/ReadyCQLStatementTemplate.java @@ -0,0 +1,109 @@ +package io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.core; + +import com.codahale.metrics.Histogram; +import com.codahale.metrics.Timer; +import com.datastax.driver.core.PreparedStatement; +import com.datastax.driver.core.Session; +import com.datastax.driver.core.SimpleStatement; +import com.datastax.driver.core.Statement; +import 
io.nosqlbench.activitytype.cql.ebdrivers.cql.api.ResultSetCycleOperator; +import io.nosqlbench.activitytype.cql.ebdrivers.cql.api.RowCycleOperator; +import io.nosqlbench.activitytype.cql.ebdrivers.cql.core.CqlActivity; +import io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.binders.CqlBinderTypes; +import io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.binders.SimpleStatementValuesBinder; +import io.nosqlbench.engine.api.metrics.ActivityMetrics; +import io.nosqlbench.virtdata.api.BindingsTemplate; +import io.nosqlbench.virtdata.api.ContextualBindingsArrayTemplate; +import io.nosqlbench.virtdata.api.ValuesArrayBinder; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.Writer; + +public class ReadyCQLStatementTemplate { + + private final static Logger logger = LoggerFactory.getLogger(ReadyCQLStatementTemplate.class); + private final Session session; + private ContextualBindingsArrayTemplate template; + private long ratio; + private String name; + + private ResultSetCycleOperator[] resultSetCycleOperators; + private RowCycleOperator[] rowCycleOperators; + + private Timer successTimer; + private Timer errorTimer; + private Histogram rowsFetchedHisto; + private Writer resultCsvWriter; + + public ReadyCQLStatementTemplate(CqlBinderTypes binderType, Session session, PreparedStatement preparedStmt, long ratio, String name) { + this.session = session; + this.name = name; + ValuesArrayBinder binder = binderType.get(session); + logger.trace("Using binder_type=>" + binder.toString()); + + template = new ContextualBindingsArrayTemplate<>( + preparedStmt, + new BindingsTemplate(), + binder + ); + this.ratio = ratio; + } + + public ReadyCQLStatementTemplate(Session session, SimpleStatement simpleStatement, long ratio, String name) { + this.session = session; + this.name = name; + template = new ContextualBindingsArrayTemplate<>( + simpleStatement, + new BindingsTemplate(), + new SimpleStatementValuesBinder() + ); + this.ratio = ratio; + } 
+ + public ReadyCQLStatement resolve() { + return new ReadyCQLStatement(template.resolveBindings(), ratio, name) + .withMetrics(this.successTimer, this.errorTimer, this.rowsFetchedHisto) + .withResultSetCycleOperators(resultSetCycleOperators) + .withRowCycleOperators(rowCycleOperators) + .withResultCsvWriter(resultCsvWriter); + } + + public ContextualBindingsArrayTemplate getContextualBindings() { + return template; + } + + + public String getName() { + return name; + } + + public void instrument(CqlActivity activity) { + this.successTimer = ActivityMetrics.timer(activity.getActivityDef(), name + "--success"); + this.errorTimer = ActivityMetrics.timer(activity.getActivityDef(), name + "--error"); + this.rowsFetchedHisto = ActivityMetrics.histogram(activity.getActivityDef(), name + "--resultset-size"); + } + + public void logResultCsv(CqlActivity activity, String name) { + this.resultCsvWriter = activity.getNamedWriter(name); + } + + public void addResultSetOperators(ResultSetCycleOperator... addingOperators) { + resultSetCycleOperators = (resultSetCycleOperators==null) ? new ResultSetCycleOperator[0]: resultSetCycleOperators; + + ResultSetCycleOperator[] newOperators = new ResultSetCycleOperator[resultSetCycleOperators.length + addingOperators.length]; + System.arraycopy(resultSetCycleOperators,0,newOperators,0,resultSetCycleOperators.length); + System.arraycopy(addingOperators,0,newOperators,resultSetCycleOperators.length,addingOperators.length); + this.resultSetCycleOperators=newOperators; + } + + public void addRowCycleOperators(RowCycleOperator... addingOperators) { + rowCycleOperators = (rowCycleOperators==null) ? 
new RowCycleOperator[0]: rowCycleOperators; + RowCycleOperator[] newOperators = new RowCycleOperator[rowCycleOperators.length + addingOperators.length]; + System.arraycopy(rowCycleOperators,0,newOperators,0,rowCycleOperators.length); + System.arraycopy(addingOperators, 0, newOperators,rowCycleOperators.length,addingOperators.length); + this.rowCycleOperators = newOperators; + } + + +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/core/ReadyCQLStatementsTemplate.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/core/ReadyCQLStatementsTemplate.java new file mode 100644 index 000000000..86333a024 --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/core/ReadyCQLStatementsTemplate.java @@ -0,0 +1,25 @@ +package io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.core; + +import java.util.ArrayList; +import java.util.List; +import java.util.stream.Collectors; + +public class ReadyCQLStatementsTemplate { + + private List readyStatementList = new ArrayList<>(); + + public void addTemplate(ReadyCQLStatementTemplate t) { + this.readyStatementList.add(t); + } + + public List resolve() { + return readyStatementList.stream() + .map(ReadyCQLStatementTemplate::resolve) + .collect(Collectors.toList()); + } + + public int size() { + return readyStatementList.size(); + } + +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/core/TaggedCQLStatementDefs.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/core/TaggedCQLStatementDefs.java new file mode 100644 index 000000000..226ca6b11 --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/core/TaggedCQLStatementDefs.java @@ -0,0 +1,57 @@ +package io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.core; + +import 
io.nosqlbench.engine.api.util.Tagged; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +public class TaggedCQLStatementDefs implements Tagged { + + private List statements = new ArrayList<>(); + private Map tags = new HashMap<>(); + private Map params = new HashMap<>(); + + public TaggedCQLStatementDefs(Map tags, Map params, List statements) { + this.tags = tags; + this.params = params; + this.statements = statements; + } + public TaggedCQLStatementDefs(Map tags, List statements) { + this.tags = tags; + this.statements = statements; + } + + public TaggedCQLStatementDefs(List statements) { + this.statements = statements; + } + + + public TaggedCQLStatementDefs() { + } + + public List getStatements() { + return statements; + } + + public void setStatements(List statements) { + this.statements = statements; + } + + public Map getTags() { + return tags; + } + + public void setTags(Map tags) { + this.tags = tags; + } + + public Map getParams() { + return params; + } + + public void setParams(Map params) { + this.params = params; + } +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/core/YamlCQLStatementLoader.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/core/YamlCQLStatementLoader.java new file mode 100644 index 000000000..cc8868bd7 --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/core/YamlCQLStatementLoader.java @@ -0,0 +1,81 @@ +package io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.core; + +import io.nosqlbench.engine.api.activityimpl.ActivityInitializationError; +import io.nosqlbench.engine.api.util.NosqlBenchFiles; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.yaml.snakeyaml.TypeDescription; +import org.yaml.snakeyaml.Yaml; +import org.yaml.snakeyaml.constructor.Constructor; + +import java.io.BufferedReader; +import 
java.io.InputStream; +import java.io.InputStreamReader; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.function.Function; +import java.util.stream.Collectors; + +@SuppressWarnings("ALL") +public class YamlCQLStatementLoader { + + private final static Logger logger = LoggerFactory.getLogger(YamlCQLStatementLoader.class); + List> transformers = new ArrayList<>(); + + public YamlCQLStatementLoader() { + } + + public YamlCQLStatementLoader(Function... transformers) { + this.transformers.addAll(Arrays.asList(transformers)); + } + + public AvailableCQLStatements load(String fromPath, String... searchPaths) { + + InputStream stream = NosqlBenchFiles.findRequiredStreamOrFile(fromPath, + "yaml", searchPaths); + String data = ""; + try (BufferedReader buffer = new BufferedReader(new InputStreamReader(stream))) { + data = buffer.lines().collect(Collectors.joining("\n")); + } catch (Exception e) { + throw new RuntimeException("Error while reading yaml stream data:" + e); + } + + for (Function xform : transformers) { + try { + logger.debug("Applying string transformer to yaml data:" + xform); + data = xform.apply(data); + } catch (Exception e) { + RuntimeException t = new ActivityInitializationError("Error applying string transform to input", e); + logger.error(t.getMessage(), t); + throw t; + } + } + + Yaml yaml = getCustomYaml(); + + try { + Iterable objects = yaml.loadAll(data); + List stmtListList = new ArrayList<>(); + for (Object object : objects) { + TaggedCQLStatementDefs tsd = (TaggedCQLStatementDefs) object; + stmtListList.add(tsd); + } + return new AvailableCQLStatements(stmtListList); + + } catch (Exception e) { + logger.error("Error loading yaml from " + fromPath, e); + throw e; + } + + } + + private Yaml getCustomYaml() { + Constructor constructor = new Constructor(TaggedCQLStatementDefs.class); + TypeDescription tds = new TypeDescription(TaggedCQLStatementDefs.class); + tds.putListPropertyType("statements", 
CQLStatementDef.class); + constructor.addTypeDescription(tds); + return new Yaml(constructor); + } + +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/rowoperators/Print.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/rowoperators/Print.java new file mode 100644 index 000000000..c944fb7ee --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/rowoperators/Print.java @@ -0,0 +1,17 @@ +package io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.rowoperators; + +import com.datastax.driver.core.Row; +import io.nosqlbench.activitytype.cql.ebdrivers.cql.api.RowCycleOperator; + +/** + * Save specific variables to the thread local object map + */ +public class Print implements RowCycleOperator { + + @Override + public int apply(Row row, long cycle) { + System.out.println("ROW:" + row); + return 0; + } + +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/rowoperators/RowCycleOperators.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/rowoperators/RowCycleOperators.java new file mode 100644 index 000000000..ee93f4c94 --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/rowoperators/RowCycleOperators.java @@ -0,0 +1,34 @@ +package io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.rowoperators; + +import io.nosqlbench.activitytype.cql.ebdrivers.cql.api.RowCycleOperator; + +public enum RowCycleOperators { + + saverows(SaveRows.class), + savevars(SaveVars.class), + print(Print.class); + + private final Class implClass; + + RowCycleOperators(Class traceLoggerClass) { + this.implClass = traceLoggerClass; + } + + + public Class getImplementation() { + return implClass; + } + + public RowCycleOperator getInstance() { + try { + return 
getImplementation().getConstructor().newInstance(); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + public static RowCycleOperator newOperator(String name) { + return RowCycleOperators.valueOf(name).getInstance(); + } + +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/rowoperators/Save.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/rowoperators/Save.java new file mode 100644 index 000000000..47d47380f --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/rowoperators/Save.java @@ -0,0 +1,47 @@ +package io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.rowoperators; + +import com.datastax.driver.core.ColumnDefinitions; +import com.datastax.driver.core.Row; +import io.nosqlbench.activitytype.cql.ebdrivers.cql.api.RowCycleOperator; +import io.nosqlbench.virtdata.library.basics.core.threadstate.SharedState; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.stream.Collectors; + +/** + * Save specific variables to the thread local object map + */ +public class Save implements RowCycleOperator { + private final static Logger logger = LoggerFactory.getLogger(Save.class); + + ThreadLocal> tl_objectMap = SharedState.tl_ObjectMap; + + private String[] varnames; + + public Save(String... 
varnames) { + this.varnames = varnames; + } + + @Override + public int apply(Row row, long cycle) { + try { + HashMap tlvars= tl_objectMap.get(); + for (String varname : varnames) { + Object object = row.getObject(varname); + tlvars.put(varname,object); + } + } catch (Exception e) { + List definitions = row.getColumnDefinitions().asList(); + logger.error("Unable to save '" + Arrays.toString(varnames) + "' from " + + definitions.stream().map(ColumnDefinitions.Definition::getName) + .collect(Collectors.joining(",","[","]")) + ": " + e.getMessage(),e); + throw e; + } + return 0; + } + +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/rowoperators/SaveRows.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/rowoperators/SaveRows.java new file mode 100644 index 000000000..2a12a6d04 --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/rowoperators/SaveRows.java @@ -0,0 +1,18 @@ +package io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.rowoperators; + +import com.datastax.driver.core.Row; +import io.nosqlbench.activitytype.cql.ebdrivers.cql.api.RowCycleOperator; +import io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.rsoperators.PerThreadCQLData; + +import java.util.LinkedList; + +public class SaveRows implements RowCycleOperator { + + @Override + public int apply(Row row, long cycle) { + LinkedListrows = PerThreadCQLData.rows.get(); + rows.add(row); + return 0; + } + +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/rowoperators/SaveVars.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/rowoperators/SaveVars.java new file mode 100644 index 000000000..4b8b222ba --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/rowoperators/SaveVars.java @@ -0,0 +1,27 @@ 
+package io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.rowoperators; + +import com.datastax.driver.core.ColumnDefinitions; +import com.datastax.driver.core.Row; +import io.nosqlbench.activitytype.cql.ebdrivers.cql.api.RowCycleOperator; +import io.nosqlbench.virtdata.library.basics.core.threadstate.SharedState; + +import java.util.HashMap; +import java.util.List; + +public class SaveVars implements RowCycleOperator { + + ThreadLocal> tl_objectMap = SharedState.tl_ObjectMap; + + @Override + public int apply(Row row, long cycle) { + HashMap tlvars= tl_objectMap.get(); + List cdlist = row.getColumnDefinitions().asList(); + for (ColumnDefinitions.Definition definition : cdlist) { + String name = definition.getName(); + Object object = row.getObject(name); + tlvars.put(name,object); + } + return 0; + } + +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/rsoperators/AssertSingleRowResultSet.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/rsoperators/AssertSingleRowResultSet.java new file mode 100644 index 000000000..42105f647 --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/rsoperators/AssertSingleRowResultSet.java @@ -0,0 +1,22 @@ +package io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.rsoperators; + +import com.datastax.driver.core.ResultSet; +import com.datastax.driver.core.Statement; +import io.nosqlbench.activitytype.cql.ebdrivers.cql.api.ResultSetCycleOperator; +import io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.exceptions.ResultSetVerificationException; + +public class AssertSingleRowResultSet implements ResultSetCycleOperator { + + @Override + public int apply(ResultSet resultSet, Statement statement, long cycle) { + int rowsIncoming = resultSet.getAvailableWithoutFetching(); + if (rowsIncoming<1) { + throw new ResultSetVerificationException(cycle, resultSet, statement, "no 
row in result set, expected exactly 1"); + } + if (rowsIncoming>1) { + throw new ResultSetVerificationException(cycle, resultSet, statement, "more than one row in result set, expected exactly 1"); + } + return rowsIncoming; + } + +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/rsoperators/ClearVars.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/rsoperators/ClearVars.java new file mode 100644 index 000000000..8979b7a61 --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/rsoperators/ClearVars.java @@ -0,0 +1,15 @@ +package io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.rsoperators; + +import com.datastax.driver.core.ResultSet; +import com.datastax.driver.core.Statement; +import io.nosqlbench.activitytype.cql.ebdrivers.cql.api.ResultSetCycleOperator; +import io.nosqlbench.virtdata.library.basics.core.threadstate.SharedState; + +public class ClearVars implements ResultSetCycleOperator { + + @Override + public int apply(ResultSet resultSet, Statement statement, long cycle) { + SharedState.tl_ObjectMap.get().clear(); + return 0; + } +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/rsoperators/CqlResultSetLogger.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/rsoperators/CqlResultSetLogger.java new file mode 100644 index 000000000..7569f7f64 --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/rsoperators/CqlResultSetLogger.java @@ -0,0 +1,36 @@ +package io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.rsoperators; + +import com.datastax.driver.core.*; +import io.nosqlbench.activitytype.cql.ebdrivers.cql.api.ResultSetCycleOperator; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class CqlResultSetLogger implements 
ResultSetCycleOperator { + private final static Logger logger = LoggerFactory.getLogger(CqlResultSetLogger.class); + + private static String getQueryString(Statement stmt) { + if (stmt instanceof PreparedStatement) { + return "(prepared) " + ((PreparedStatement) stmt).getQueryString(); + } else if (stmt instanceof SimpleStatement) { + return "(simple) " + ((SimpleStatement) stmt).getQueryString(); + } else if (stmt instanceof BoundStatement) { + return "(bound) " + ((BoundStatement) stmt).preparedStatement().getQueryString(); + } else { + return "(unknown) " + stmt.toString(); + } + } + + @Override + public int apply(ResultSet resultSet, Statement statement, long cycle) { + logger.debug("result-set-logger: " + + " cycle=" + cycle + + " rows=" + resultSet.getAvailableWithoutFetching() + + " fetched=" + resultSet.isFullyFetched() + + " statement=" + getQueryString(statement).stripTrailing() + ); + for (Row row : resultSet) { + logger.trace(row.toString()); + } + return 0; + } +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/rsoperators/PerThreadCQLData.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/rsoperators/PerThreadCQLData.java new file mode 100644 index 000000000..f101b5f32 --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/rsoperators/PerThreadCQLData.java @@ -0,0 +1,9 @@ +package io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.rsoperators; + +import com.datastax.driver.core.Row; + +import java.util.LinkedList; + +public class PerThreadCQLData { + public final static ThreadLocal> rows = ThreadLocal.withInitial(LinkedList::new); +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/rsoperators/PopVars.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/rsoperators/PopVars.java new file mode 100644 index 
000000000..2c18de296 --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/rsoperators/PopVars.java @@ -0,0 +1,23 @@ +package io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.rsoperators; + +import com.datastax.driver.core.ResultSet; +import com.datastax.driver.core.Statement; +import io.nosqlbench.activitytype.cql.ebdrivers.cql.api.ResultSetCycleOperator; +import io.nosqlbench.virtdata.library.basics.core.threadstate.SharedState; + +import java.util.HashMap; + +/* Restores thread-local variables: pops the map most recently pushed (by PushVars) off the shared per-thread stack and installs it as the current thread-local var map; throws if the stack is empty. NOTE(review): generic type parameters (e.g. HashMap<String, Object>) appear to have been stripped from this patch by text extraction — confirm against the original source. */ public class PopVars implements ResultSetCycleOperator { + + @Override + public int apply(ResultSet resultSet, Statement statement, long cycle) { + /* NOTE(review): stringObjectHashMap is fetched but never used in this method */ + HashMap stringObjectHashMap = SharedState.tl_ObjectMap.get(); + /* pollLast() takes the most recently pushed snapshot (LIFO); returns null when the stack is empty */ Object o = SharedState.tl_ObjectStack.get().pollLast(); + if (o != null && o instanceof HashMap) { + SharedState.tl_ObjectMap.set((HashMap) o); + return 0; + } else { + /* empty stack is treated as a usage error: popvars without a matching pushvars */ throw new RuntimeException("Tried to pop thread local data from stack, but there was none."); + } + } +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/rsoperators/Print.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/rsoperators/Print.java new file mode 100644 index 000000000..5255ddaba --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/rsoperators/Print.java @@ -0,0 +1,14 @@ +package io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.rsoperators; + +import com.datastax.driver.core.ResultSet; +import com.datastax.driver.core.Statement; +import io.nosqlbench.activitytype.cql.ebdrivers.cql.api.ResultSetCycleOperator; + +/* Debugging operator: prints the ResultSet's toString() to stdout for every cycle it is attached to; always returns 0 (no rows consumed here). */ public class Print implements ResultSetCycleOperator { + + @Override + public int apply(ResultSet resultSet, Statement statement, long cycle) { + System.out.println("RS:"+ resultSet.toString()); + return 0; + } +} diff --git
a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/rsoperators/PushVars.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/rsoperators/PushVars.java new file mode 100644 index 000000000..3d2d50716 --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/rsoperators/PushVars.java @@ -0,0 +1,20 @@ +package io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.rsoperators; + +import com.datastax.driver.core.ResultSet; +import com.datastax.driver.core.Statement; +import io.nosqlbench.activitytype.cql.ebdrivers.cql.api.ResultSetCycleOperator; +import io.nosqlbench.virtdata.library.basics.core.threadstate.SharedState; + +import java.util.HashMap; + +public class PushVars implements ResultSetCycleOperator { + + @Override + public int apply(ResultSet resultSet, Statement statement, long cycle) { + HashMap existingVars = SharedState.tl_ObjectMap.get(); + HashMap topush = new HashMap<>(existingVars); + + SharedState.tl_ObjectStack.get().addLast(topush); + return 0; + } +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/rsoperators/ResultSetCycleOperators.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/rsoperators/ResultSetCycleOperators.java new file mode 100644 index 000000000..c2b2045ce --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/rsoperators/ResultSetCycleOperators.java @@ -0,0 +1,40 @@ +package io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.rsoperators; + +import io.nosqlbench.activitytype.cql.ebdrivers.cql.api.ResultSetCycleOperator; + +public enum ResultSetCycleOperators { + + pushvars(PushVars.class), + popvars(PopVars.class), + clearvars(ClearVars.class), + + trace(TraceLogger.class), + log(CqlResultSetLogger.class), + assert_singlerow(AssertSingleRowResultSet.class), + 
+ print(Print.class); + + private final Class implClass; + + ResultSetCycleOperators(Class traceLoggerClass) { + this.implClass = traceLoggerClass; + } + + + public Class getImplementation() { + return implClass; + } + + public ResultSetCycleOperator getInstance() { + try { + return getImplementation().getConstructor().newInstance(); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + public static ResultSetCycleOperator newOperator(String name) { + return ResultSetCycleOperators.valueOf(name).getInstance(); + } + +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/rsoperators/RowCapture.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/rsoperators/RowCapture.java new file mode 100644 index 000000000..875581935 --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/rsoperators/RowCapture.java @@ -0,0 +1,16 @@ +package io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.rsoperators; + +import com.datastax.driver.core.ResultSet; +import com.datastax.driver.core.Row; +import com.datastax.driver.core.Statement; +import io.nosqlbench.activitytype.cql.ebdrivers.cql.api.ResultSetCycleOperator; + +import java.util.LinkedList; + +public class RowCapture implements ResultSetCycleOperator { + @Override + public int apply(ResultSet resultSet, Statement statement, long cycle) { + ThreadLocal> rows = PerThreadCQLData.rows; + return 0; + } +} diff --git a/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/rsoperators/TraceLogger.java b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/rsoperators/TraceLogger.java new file mode 100644 index 000000000..54906ab69 --- /dev/null +++ b/activitytype-cql/src/main/java/io/nosqlbench/activitytype/cql/ebdrivers/cql/statements/rsoperators/TraceLogger.java @@ -0,0 +1,97 @@ +package 
io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.rsoperators; + +import com.datastax.driver.core.ExecutionInfo; +import com.datastax.driver.core.QueryTrace; +import com.datastax.driver.core.ResultSet; +import com.datastax.driver.core.Statement; +import io.nosqlbench.activitytype.cql.ebdrivers.cql.api.ResultSetCycleOperator; +import io.nosqlbench.activitytype.cql.ebdrivers.cql.core.StatementModifier; +import io.nosqlbench.engine.api.util.SimpleConfig; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.FileDescriptor; +import java.io.FileWriter; +import java.io.IOException; +import java.text.SimpleDateFormat; +import java.util.Date; + +public class TraceLogger implements ResultSetCycleOperator, StatementModifier { + + private final static Logger logger = LoggerFactory.getLogger(TraceLogger.class); + + private static SimpleDateFormat sdf = new SimpleDateFormat("HH:mm:ss.SSS"); + private final long modulo; + private final String filename; + private final FileWriter writer; + private final ThreadLocal tlsb = ThreadLocal.withInitial(StringBuilder::new); + + public TraceLogger(SimpleConfig conf) { + this( + conf.getLong("modulo").orElse(1L), + conf.getString("filename").orElse("tracelog") + ); + } + + public TraceLogger(long modulo, String filename) { + this.modulo = modulo; + this.filename = filename; + try { + if (filename.equals("stdout")) { + writer = new FileWriter(FileDescriptor.out); + } else { + writer = new FileWriter(filename); + } + + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + @Override + public int apply(ResultSet rs, Statement statement, long cycle) { + if ((cycle%modulo)!=0) { + return 0; + } + + ExecutionInfo ei = rs.getExecutionInfo(); + QueryTrace qt = ei.getQueryTrace(); + StringBuilder sb = tlsb.get(); + sb.setLength(0); + sb.append("\n---------------------------- QueryTrace Summary ---------------------------\n"); + sb.append("\n Coordinator: ").append(qt.getCoordinator()); + sb.append("\n 
Cycle: ").append(cycle); + sb.append("\nServer-side query duration (us): ").append(qt.getDurationMicros()); + sb.append("\n Request type: ").append(qt.getRequestType()); + sb.append("\n Start time: ").append(qt.getStartedAt()); + sb.append("\n Trace UUID: ").append(qt.getTraceId()); + sb.append("\n Params: ").append(qt.getParameters()); + sb.append("\n--------------------------------------------------------------------------\n"); + sb.append("\n---------------------------- QueryTrace Events ---------------------------\n"); + for (QueryTrace.Event event : qt.getEvents()) { + sb.append("\n Date: ").append(sdf.format(new Date(event.getTimestamp()))); + sb.append("\n Source: ").append(event.getSource()); + sb.append("\nSourceElapsedMicros: ").append(event.getSourceElapsedMicros()); + sb.append("\n Thread: ").append(event.getThreadName()); + sb.append("\n Description: ").append(event.getDescription()).append("\n"); + } + sb.append("\n--------------------------------------------------------------------------\n"); + + try { + writer.append(sb.toString()); + writer.flush(); + } catch (IOException e) { + throw new RuntimeException(e); + } + + return 0; + } + + @Override + public Statement modify(Statement statement, long cycle) { + if ((cycle%modulo)==0) { + statement.enableTracing(); + } + return statement; + } +} diff --git a/activitytype-cql/src/main/resources/META-INF/services/javax.annotation.processing.Processor b/activitytype-cql/src/main/resources/META-INF/services/javax.annotation.processing.Processor new file mode 100644 index 000000000..f6dcb0e2f --- /dev/null +++ b/activitytype-cql/src/main/resources/META-INF/services/javax.annotation.processing.Processor @@ -0,0 +1 @@ +io.nosqlbench.virtdata.processors.ServiceProcessor diff --git a/activitytype-cql/src/main/resources/advanced-cql.md b/activitytype-cql/src/main/resources/advanced-cql.md new file mode 100644 index 000000000..ebc5dbd8c --- /dev/null +++ b/activitytype-cql/src/main/resources/advanced-cql.md @@ -0,0 
+1,98 @@ +# cql activity type - advanced features + +This is an addendum to the standard CQL Activity Type docs. For that, see "cql". +Use the features in this guide carefully. They do not come with as much documentation +as they are less used than the main CQL features. + +### ResultSet and Row operators + +Within the CQL Activity type, synchronous mode (activities without +the async= parameter), you have the ability to attach operators to a +given statement such that it will get per-statement handling. These +operators are ways of interrogating the result of an operation, saving +values, or managing other side-effects for specific types of testing. + +When enabled for a statement, operators are applied in this order: + +1. Activity-level ResultSet operators are applied in specified order. +2. Statement-level ResultSet operators are applied in specified order. +3. Activity-level Row operators are applied in specified order. +4. Statement-level Row operators are applied in specified order. + +The result set handling does not go to any extra steps of making +a copy of the data. When a row is read from the result set, +it is consumed from it. Thus, if you want to do anything with +row data, you must apply a row operator as explained below. + + +### CQL Statement Parameters + +- **rsoperators** - If provided as a CQL statement param, then the + list of operator names that follow, separated by a comma, will + be used to attach ResultSet operators to the given statement. + Such operators act on the whole result set of a statement. + +- **rowoperators** - If provided as a CQL statement param, then the + list of operator names that follow, separated by a comma, will + be used to attach Row operators to the given statement. + +## Available ResultSet Operators + +- pushvars - Push a copy of the current thread local variables onto + the thread-local stack.
This does nothing with the ResultSet data, + but is meant to be used for stateful management of these in + conjunction with the row operators below. +- popvars - Pop the last thread local variable set from the thread-local + stack into vars, replacing the previous content. This does nothing + with the ResultSet data. +- clearvars - Clears the contents of the thread local variables. This + does nothing with the ResultSet data. +- trace - Flags a statement to be traced on the server-side and then logs + the details of the trace to the trace log file. +- log - Logs basic data to the main log. This is useful to verify that + operators are loading and triggering as expected. +- assert_singlerow - Throws an exception (ResultSetVerificationException) + if the ResultSet has more or less than one row. + +Examples: + +``` + statements: + - s1: | + a statement + rsoperators: pushvars, clearvars +``` +## Available Row Operators: + +- savevars - Copies the values of the row into the thread-local variables. +- saverows - Copies the rows into a special CQL-only thread local row state. + +Examples: + +``` + statements: + - s2: | + a statement + rowoperators: saverows +``` + +## Injecting additional Queries + +It is possible to inject new operations into an activity. However, such +operations are _indirect_ to cycles, since they must be based on the results +of other operations. As such, they will not be represented in cycle output or +other advanced features. This is a specific feature for the CQL activity -- +implemented internal to the way a CQL cycle is processed. A future version +of EngineBlock will provide a more uniform way to achieve this result across +activity types. For now, remember that this is a CQL-only capability. + +- subquery-statement - Adds additional operations to the current cycle, based + on the contents of the thread-local row state. The value to this parameter + is a name of a statement in the current YAML.
+ + New operations are added to the local thread based on contents + of the CQL-only thread local row state. Each row is consumed from this list, + and a new operation is added to the current cycle. +- subquery-concurrency - Allow subqueries to execute with concurrency, up to + the level specified. + default: 1 diff --git a/activitytype-cql/src/main/resources/cql.md b/activitytype-cql/src/main/resources/cql.md new file mode 100644 index 000000000..4bad3458a --- /dev/null +++ b/activitytype-cql/src/main/resources/cql.md @@ -0,0 +1,397 @@ +# cql activity type + +This is an activity type which allows for the execution of CQL statements. +This particular activity type is wired synchronously within each client +thread, however the async API is used in order to expose fine-grain +metrics about op binding, op submission, and waiting for a result. + +### Example activity definitions + +Run a cql activity named 'cql1', with definitions from activities/cqldefs.yaml +~~~ +... type=cql alias=cql1 yaml=cqldefs +~~~ + +Run a cql activity defined by cqldefs.yaml, but with shortcut naming +~~~ +... type=cql yaml=cqldefs +~~~ + +Only run statement groups which match a tag regex +~~~ +... type=cql yaml=cqldefs tags=group:'ddl.*' +~~~ + +Run the matching 'dml' statements, with 100 cycles, from [1000..1100) +~~~ +... type=cql yaml=cqldefs tags=group:'dml.*' cycles=1000..1100 +~~~ +This last example shows that the cycle range is [inclusive..exclusive), +to allow for stacking test intervals. This is standard across all +activity types. + +### CQL ActivityType Parameters + +- **driver** - default: dse - The type of driver to use, either dse, or + oss. If you need DSE-specific features, use the dse driver. If you are + connecting to an OSS Apache Cassandra cluster, you must use the oss + driver. The oss driver option is only available in ebdse. +- **host** - The host or hosts to use for connection points to + the cluster. If you specify multiple values here, use commas + with no spaces.
+ Examples: + - `host=192.168.1.25` + - `host=192.168.1.25,testhost42` +- **yaml** - The file which holds the schema and statement defs. + (no default, required) +- **port** - The port to connect with +- **cl** - An override to consistency levels for the activity. If + this option is used, then all consistency levels will be replaced + by this one for the current activity, and a log line explaining + the difference with respect to the yaml will be emitted. + This is not a dynamic parameter. It will only be applied at + activity start. +- **cbopts** - default: none - this is how you customize the cluster + settings for the client, including policies, compression, etc. This + is a string of *Java*-like method calls just as you would use them + in the Cluster.Builder fluent API. They are evaluated inline + with the default Cluster.Builder options not covered below. + Example: cbopts=".withCompression(ProtocolOptions.Compression.NONE)" +- **whitelist** default: none - Applies a whitelist policy to the load balancing + policy in the driver. If used, a WhitelistPolicy(RoundRobinPolicy()) + will be created and added to the cluster builder on startup. + Examples: + - whitelist=127.0.0.1 + - whitelist=127.0.0.1:9042,127.0.0.2:1234 +- **retrypolicy** default: none - Applies a retry policy in the driver + The only option supported for this version is `retrypolicy=logging`, + which uses the default retry policy, but with logging added. + +- **pooling** default: none - Applies the connection pooling options + to the policy.
+ Examples: + - `pooling=4:10` + keep between 4 and 10 connections to LOCAL hosts + - `pooling=4:10,2:5` + keep 4-10 connections to LOCAL hosts and 2-5 to REMOTE + - `pooling=4:10:2000` + keep between 4-10 connections to LOCAL hosts with + up to 2000 requests per connection + - `pooling=5:10:2000,2:4:1000` keep between 5-10 connections to + LOCAL hosts with up to 2000 requests per connection, and 2-4 + connections to REMOTE hosts with up to 1000 requests per connection + + Additionally, you may provide the following options on pooling. Any + of these that are provided must appear in this order: + `,heartbeat_interval_s:n,idle_timeout_s:n,pool_timeout_ms:n`, so a + full example with all options set would appear as: + `pooling=5:10:2000,2:4:1000,heartbeat_interval_s:30,idle_timeout_s:120,pool_timeout_ms:5` + +- **socketoptions** default: none - Applies any of the valid socket + options to the client when the session is built. Each of the options + uses the long form of the name, with either a numeric or boolean + value. Individual sub-parameters should be separated by a comma, and + the parameter names and values can be separated by either equals or a + colon. All of these values may be changed: + - read_timeout_ms + - connect_timeout_ms + - keep_alive + - reuse_address + - so_linger + - tcp_no_delay + - receive_buffer_size + - send_buffer_size + + Examples: + - `socketoptions=read_timeout_ms=23423,connect_timeout_ms=4444` + - `socketoptions=tcp_no_delay=true` + +- **tokens** default: unset - Only executes statements that fall within + any of the specified token ranges. Others are counted in metrics + as skipped-tokens, with a histogram value of the cycle number. + Examples: + - tokens=1:10000,100000:1000000 + - tokens=1:123456 +- **maxtries** - default: 10 - how many times an operation may be + attempted before it is disregarded +- **maxpages** - default: 1 - how many pages can be read from a query which + is larger than the fetchsize.
If more than this number of pages + is required for such a query, then an UnexpectedPaging exception + is passed to the error handler as explained below. +- **fetchsize** - controls the driver parameter of the same name. + Suffixed units can be used here, such as "50K". If this parameter + is not present, then the driver option is not set. +- **cycles** - standard, however the cql activity type will default + this to however many statements are included in the current + activity, after tag filtering, etc. +- **username** - the user to authenticate as. This option requires + that one of **password** or **passfile** also be defined. +- **password** - the password to authenticate with. This will be + ignored if passfile is also present. +- **passfile** - the file to read the password from. The first + line of this file is used as the password. +- **ssl** - enable ssl if you want transport level encryption. + Examples: + - `ssl=true` + enable ssl + - `ssl=false` + disable ssl (the default) +- **keystore** - specify the keystore location for SSL. + Examples: + - `keystore=JKS` (the default) +- **kspass** - specify the password to the keystore for SSL. + Examples: + - `kspass=mypass` +- **tlsversion** - specify the TLS version to use for SSL. + Examples: + - `tlsversion=TLSv1.2` (the default) +- **jmxreporting** - enable JMX reporting if needed. + Examples: + - `jmxreporting=true` + - `jmxreporting=false` (the default) +- **alias** - this is a standard engineblock parameter, however + the cql type will use the yaml value also as the alias value + when not specified. +- **errors** - error handler configuration. + (default errors=stop,retryable->retry,unverified->stop) + Examples: + - errors=stop,WriteTimeoutException=histogram + - errors=count + - errors=warn,retryable=count + See the separate help on 'cqlerrors' for detailed + configuration options. +- **defaultidempotence** - sets default idempotence on the + driver options, but only if it has a value.
+ (default unset, valid values: true or false) +- **speculative** - sets the speculative retry policy on the cluster. + (default unset) + This can be in one of the following forms: + - pT:E:L - where :L is optional and + T is a floating point threshold between 0.0 and 100.0 and + E is an allowed number of concurrent speculative executions and + L is the maximum latency tracked in the tracker instance + (L defaults to 15000 when left out) + Examples: + - p99.8:5:15000ms - 99.8 percentile, 5 executions, 15000ms max tracked + - p98:2:10000ms - 98.0 percentile, 2 executions allowed, 10s max tracked + - Tms:E - where :E is optional and + T is a constant threshold latency and + E is the allowed number of concurrent speculative retries + (E default to 5 when left out) + Examples: + - 100ms:5 - constant threshold of 100ms and 5 allowed executions +- **seq** - selects the statement sequencer used with statement ratios. + (default: bucket) + (options: concat | bucket | interval) + The concat sequencer repeats each statement in order until the ratio + is achieved. + The bucket sequencer uses simple round-robin distribution to plan + statement ratios, a simple but unbalanced form of interleaving. + The interval sequencer apportions statements over time and then by + order of appearance for ties. This has the effect of interleaving + statements from an activity more evenly, but is less obvious in how + it works. + All of the sequencers create deterministic schedules which use an internal + lookup table for indexing into a list of possible statements. +- **trace** - enables a trace on a subset of operations. This is disabled + by default. + Examples: + `trace=modulo:100,filename:trace.log` + The above traces every 100th cycle to a file named trace.log. + `trace=modulo:1000,filename:stdout` + The above traces every 1000th cycle to stdout. + If the trace log is not specified, then 'tracelog' is assumed. + If the filename is specified as stdout, then traces are dumped to stdout. 
+- **clusterid** - names the configuration to be used for this activity. Within + a given scenario, any activities that use the same name for clusterid will + share a session and cluster. + default: 'default' +- **drivermetrics** - enable reporting of driver metrics. + default: false +- **driverprefix** - set the metrics name that will prefix all CQL driver metrics. + default: 'driver.*clusterid*.' + The clusterid specified is included so that separate cluster and session + contexts can be reported independently for advanced tests. +- **usercodecs** - enable the loading of user codec libraries + for more details see: com.datastax.codecs.framework.UDTCodecInjector in the ebdse + code base. This is for dynamic codec loading with user-provided codecs mapped + via the internal UDT APIs. + default: false +- **secureconnectbundle** - used to connect to CaaS, accepts a path to the secure connect bundle + that is downloaded from the CaaS UI. + Examples: + - `secureconnectbundle=/tmp/secure-connect-my_db.zip` + - `secureconnectbundle="/home/automaton/secure-connect-my_db.zip"` +- **insights** - Set to false to disable the driver from sending insights monitoring information + - `insights=false` +- **tickduration** - sets the tickDuration (milliseconds) of HashedWheelTimer of the + java driver. This timer is used to schedule speculative requests. + Examples: + - `tickduration=10` + - `tickduration=100` (driver default value) +- **compression** - sets the transport compression to use for this + activity. Valid values are 'LZ4' and 'SNAPPY'. Both types are bundled + with EBDSE. + +### CQL YAML Parameters + +A uniform YAML configuration format was introduced with engineblock 2.0. +As part of this format, statement parameters were added for the CQL Activity Type. 
+These parameters will be consolidated with the above parameters in time, but for +now **they are limited to a YAML params block**: + + params: + + ratio: 1 + # Sets the statement ratio within the operation sequencer + # scheme. Integers only. + # When preparing the operation order (AKA sequencing), + # frequency of the associated statements. + + cl: ONE + # Sets the consistency level, using any of the standard + # identifiers from com.datastax.driver.core.ConsistencyLevel, + # any one of: + # LOCAL_QUORUM, ANY, ONE, TWO, THREE, QUORUM, ALL, + # EACH_QUORUM, SERIAL, LOCAL_SERIAL, LOCAL_ONE + + prepared: true + # By default, all statements are prepared. If you are + # creating schema, set this to false. + + idempotent: false + # For statements that are known to be idempotent, set this + # to true + + instrument: false + # If a statement has instrument set to true, then + # individual Timer metrics will be tracked for + # that statement for both successes and errors, + # using the given statement name. + + logresultcsv: true + OR + logresultcsv: myfilename.csv + # If a statement has logresultcsv set to true, + # then individual operations will be logged to a CSV file. + # In this case the CSV file will be named as + # --results.csv. + # If the value is present and not "true", then the value will + # be used as the name of the file. + # + # The format of the file is: + # ,(SUCCESS|FAILURE),,,(\> and \<\\>. The first +form contains a default value. In any case, if one of these parameters is +encountered and a qualifying value is not found, an error will be thrown. + +### YAML Location + +The YAML file referenced in the yaml= parameter will be searched for in the following places, in this order: +1. A URL, if it starts with 'http:' or 'https:' +2. The local filesystem, if it exists there +3. The internal classpath and assets in the jar. + +The '.yaml' suffix is not required in the yaml= parameter, however it is +required on the actual file. 
As well, the logical search path "activities/" +will be used if necessary to locate the file, both on the filesystem and in +the classpath. + +There is a basic example below that can be copied as a starting template. + +## YAML Examples + +Please see the bundled activities with ebdse for examples. diff --git a/activitytype-cql/src/main/resources/cqlerrors.md b/activitytype-cql/src/main/resources/cqlerrors.md new file mode 100644 index 000000000..d8be73af4 --- /dev/null +++ b/activitytype-cql/src/main/resources/cqlerrors.md @@ -0,0 +1,198 @@ +# cql error handling + +The error handling facility utilizes a type-aware error handler +provided by nosqlbench. However, it is much more modular and configurable +than most error handlers found in other testing tools. The trade-off here +is that so many options may bewilder newer users. If you agree, then +simply use one of these basic recipes in your activity parameters: + + # error and stop on *any* exception + # incidentally, this is the same as the deprecated diagnose=true option + errors=stop + + # error and stop for (usually) unrecoverable errors + # warn and retry everything else (this is actually the default) + + errors=stop,retryable->retry + + # record histograms for WriteTimeoutException, error and stop + # for everything else. + + errors=stop,WriteTimeoutException:histogram + +As you can see, the error handling format is pretty basic. Behind this basic +format is a modular and flexible configuration scheme that should allow for either +simple or advanced testing setups. The errors value is simply a list of error to +handler verb mappings, but also allows for a simple verb to be specified to +cover all error types. Going from left to right, each mapping is applied in +order. You can use any of ':', '->', or '=' for the error to verb assignment +operator. + +Anytime you assign a value to the *errors* parameter for a cql activity, you are +replacing the default 'stop,retryable->retry,unverified->stop' configuration. 
+That is, each time this value is assigned, a new error handler is configured and +installed according to the new value. + +### errors= parameter format + +The errors parameter contains a comma-separated list of one or more +handler assignments where the error can be in any of these forms: + +- group name ( "unapplied" | "retryable" | "unverified" ) +- a single exception name like 'WriteTimeoutException', or a substring of + that which is long enough to avoid ambiguity (only one match allowed) +- A regex, like '.*WriteTimeout.*' (multiple matches allowed) + +The verb can be any of the named starting points in the error handler +stack, as explained below. + +As a special case, if the handler assignment consists of only a single word, +then it is assumed to be the default handler verb. This gets applied +as a last resort to any errors which do not match another handler by class +type or parent class type. This allows for simple hard wiring of a +handler default for all non-specific errors in the form: + + # force the test to stop with any error, even retryable ones + errors=stop + +### Error Handler Verbs + +When an error occurs, you can control how it is handled for the most part. +This is the error handler stack: + +- **stop** - logs an error, and then rethrows the causing exception, + causing ebdse to shutdown the current scenario. +- **warn** - log a warning in the log, with details about the error + and associated statement. +- **retry** - Retry the operation if the number of retries hasn't been + used up *and* the causing exception falls in the set of + *retryable* errors. +- **histogram** - keep a histogram of the exception counts, under the + name errorhistos.classname, using the simple class name. + The magnitude of these histos is how long the operation was pending + before the related error occurred. +- **count** - keep a count in metrics for the exception, under the name + errorcounts.classname, using the simple class name. 
+- **ignore** - do nothing, do not even retry or count + +Each handling verb above is ordered from the most invasive to least +invasive starting at the top. With the exception of the **stop** +handler, the rest of them will be applied to an error all the way +to the bottom. For now, the error handling stack is exactly as above. +You can't modify it, although it may be made configurable in the future. + +One way to choose the right handler is to say "How serious is this type +of error to the test results if it happens?" In general, it is best +to be more conservative and choose a more aggressive setting unless you +are specifically wanting to measure how often a given error happens, +for example. + +Each exception type will have one and only one error handler at all times. +No matter how you set an error handler for a class, only the most +recently assigned handler stack will be active for it. This might be +important to keep in mind when you make multiple assignments to potentially +overlapping sets of error types. In any case, the default 'stop' handler +will always catch an error that does not otherwise have a more specific +handler assigned to it. + +##### Error Types + +The errors that can be handled are simply all the exception types that +can be thrown by either the DataStax Java Driver for DSE, *or* the +ebdse client itself. This includes errors that indicate a potentially +intermittent failure condition. It also includes errors that are more +permanent in nature, like WriteFailure, which would continue to occur +on subsequent retries without some form of intervention. The ebdse +application will also generate some additional exceptions that capture +common error cases that the Java driver doesn't or shouldn't have a +special case for, but which may be important for ebdse testing purposes. 
+ +In ebdse, all error handlers are specific to a particular kind of +exception that you would catch in a typical application that uses DSE, +although you can tell a handler to take care of a whole category +of problems as long as you know the right name to use. + +##### Assigned by Java Exception Type + +Error handlers can be assigned to a common parent type in order to also handle +all known subtypes, hence the default on the top line applies to all of the +driver exceptions that do not have a more specific handler assigned, either +by a closer parent or directly. + +##### Assigning by Error Group Name + +Error types for which you would commonly assign the same handling behavior +are also grouped in predefined names. If a handler is assigned to one +of the group names, then the handler is assigned all of the exceptions +in the group individually. For example, 'errors=retryable=stop' + +### Recognized Exceptions + +The whole hierarchy of exceptions as of DSE Driver 3.2.0 is as follows, +with the default configuration shown. 
+ + DriverException -> stop + FrameTooLongException + CodecNotFoundException + AuthenticationException + TraceRetrievalException + UnsupportedProtocolVersionException + NoHostAvailableException -> retry (group: retryable) + QueryValidationException (abstract) + InvalidQueryException + InvalidConfigurationInQueryException + UnauthorizedException + SyntaxError + AlreadyExistsException + UnpreparedException + InvalidTypeException + QueryExecutionException (abstract) + UnavailableException + BootstrappingException -> retry (group: retryable) + OverloadedException -> retry (group: retryable) + TruncateException + QueryConsistencyException (abstract) + WriteTimeoutException -> retry (group: retryable) + WriteFailureException -> retry (group: retryable) + ReadFailureException + ReadTimeoutException + FunctionExecutionException + DriverInternalError + ProtocolError + ServerError + BusyPoolException + ConnectionException + TransportException + OperationTimedOutException -> retry (group: retryable) + PagingStateException + UnresolvedUserTypeException + UnsupportedFeatureException + BusyConnectionException + EbdseException (abstract) -> stop + CQLResultSetException (abstract) + UnexpectedPagingException + ResultSetVerificationException + RowVerificationException + ChangeUnappliedCycleException (group:unapplied) + RetriesExhaustedCycleException -> count + +##### Additional Exceptions + +The following exceptions are synthesized directly by ebdse, but get +handled alongside the normal exceptions as explained above. + +1. ChangeUnappliedException - The change unapplied condition is important to + detect when it is not expected, although some testing may intentionally send + changes that can't be applied. For this reason, it is kept as a separately + controllable error group "unapplied". +2. 
UnexpectedPaging - The UnexpectedPaging exception is meant to keep users from + being surprised when there is paging activity in the workload, as this can have + other implications for tuning and performance. See the details on the + **maxpages** parameter, and the *fetch size* parameter in the java driver for + details. +3. Unverified\* Exceptions - For data set verification; These exceptions + indicate when a cqlverify activity has found rows that differ from what + was expected. +4. RetriesExhaustedException - Indicates that all retries were exhausted before + a given operation could complete successfully. + diff --git a/activitytype-cql/src/main/resources/cqlexceptionlist.md b/activitytype-cql/src/main/resources/cqlexceptionlist.md new file mode 100644 index 000000000..9e37b0c56 --- /dev/null +++ b/activitytype-cql/src/main/resources/cqlexceptionlist.md @@ -0,0 +1,42 @@ +DriverException -> stop + 1 FrameTooLongException + 2 CodecNotFoundException + 3 AuthenticationException + 4 TraceRetrievalException + 5 UnsupportedProtocolVersionException + 6 NoHostAvailableException + 7 QueryValidationException (abstract) + 8 InvalidQueryException + 9 InvalidConfigurationInQueryException + 10 UnauthorizedException + 11 SyntaxError + 12 AlreadyExistsException + 13 UnpreparedException + 14 InvalidTypeException + 15 QueryExecutionException (abstract) -> retry + 16 UnavailableException + 17 BootstrappingException + 18 OverloadedException + 19 TruncateException + 20 QueryConsistencyException (abstract) + 21 WriteTimeoutException + 22 WriteFailureException + 23 ReadFailureException + 24 ReadTimeoutException + 25 FunctionExecutionException + 26 DriverInternalError + 27 ProtocolError + 28 ServerError + 29 BusyPoolException + 30 ConnectionException + 31 TransportException + 32 OperationTimedOutException + 33 PagingStateException + 34 UnresolvedUserTypeException + 35 UnsupportedFeatureException + 36 BusyConnectionException + 41 EbdseCycleException (abstract) -> stop + 37 
ChangeUnappliedCycleException + 38 ResultSetVerificationException + 39 RowVerificationException (abstract) + 40 UnexpectedPagingException diff --git a/activitytype-cql/src/main/resources/cqlverify.md b/activitytype-cql/src/main/resources/cqlverify.md new file mode 100644 index 000000000..8a5b81a50 --- /dev/null +++ b/activitytype-cql/src/main/resources/cqlverify.md @@ -0,0 +1,172 @@ +# cqlverify activity type + +This activity type allows you to read values from a database and compare them to +the generated values that were expected to be written, row-by-row, producing a +comparative result between the two. + +The verification options include: + +1. Each row contains the right fields, according to the reference data. +2. Each row contains only the fields specified in the reference data. +3. Each value of each row, by name, is equal to the referenced data, + according to the Java equals implementation for the object type + specified in that field's metadata. + +The data bindings are used to generate the expected values that would be used +for an upsert. Each row is verified according to these values, and any +discrepancy is treated as an error that can be counted, logged, etc. + +### Using cqlverify + +The cqlverify activity type is built on top of the cql activity type. As such, +it has all of the same capabilities and options, and then some. See the cql +activity type documentation for the usual details. This doc page only covers how +the cqlverify activity extends it. + +The differences between the cql and cqlverify activity types are mostly in how +how you configure for verifiable data and error handling. + +##### Writing verifiable data + +The cqlverify activity type does not retain logged data for verification. Still, +it is able to compare data as if it had a separate data set to compare to. 
This +is possible only because the data generation facilities used by ebcql (and +engineblock) provide realistic and voluminous synthetic data that can be +recalled from a recipe and accessed dynamically. + +That means, however, that you must avoid using the non-stable data mapping +functions when writing data. The rule of thumb is to avoid using any data +mapping functions containing the word "Random", as these are the ones that have +historically used internal RNG state. Instead, swap in their replacements that +start with "Hashed". There is a hashed equivalent to all of the original random +functions. The rng-based functions will be deprecated in a future release. + +In a typical cql activity, you are allowed to name the bindings however you +like, so long as the binding names match the anchor names in your statement +template. Because we need to match reference field data to actual row data +pair-wise by field name, there is a more strict requirement for cqlverify +activities. The binding names themselves are now required to match the field +names that they are expected to be compared to. + +The simplest way to do this is to follow this recipe: + +1. Make the binding names the same as the field names that you use + in your write statements. +2. When you configure your read statement for the cqlverify activity, + simply include the same bindings as-is, using the partition and + clustering fields in the appropriate where clauses. + +*note*: It used to be an error to have binding names in excess of what anchor +names would match. Now, it is only an error if an anchor is not qualified with +a matching binding name. This allows you to simply copy your bindings as-is +directly from your write statement with no issues. + +### Configuring the verification Reader + +A cqlverify activity is almost exactly like any other cql activity. However, you +configure a single read statement to access the row data you want to verify. 
The +bindings for the read statement should include the data mappings that you have +for the write statement. That's pretty much all you have to do. + +The names of the bindings and the values they produce are considered, depending +on the *compare* setting explained below. This means that you need to make sure +that the bindings that are provided for the statement are exactly the same as +you expect the row structure, irrespective of field order. For some statements +which use the same value in more than one place, you must name these uniquely +as well. + +If more than one statement is active for a cqlverify activity, then an error is +thrown. This may change in the future, but for now it is a requirement. + +### Handling Verification Errors + +The cqlverify activity extends on the error handling stack mechanism that is +used by the cql activity type, by introducing a new error category: +*unverified*. The default configuration for this error category is + + unverified=stop + +However, the usual options, including "stop", "warn", "retry", "histogram", +"count", and "ignore" are also allowed. + +Care should be taken to set the other error handling categories to be strict +enough to avoid false negatives in testing. The verification on a row can only +be done if the row is actually read first. If you set the error handler stack to +only count real errors, for example, then you will be preempting the read +verifier. Therefore, there is a default setting for the cqlverify activity for +the catch-all error handler parameter *errors*. + +This means that the default error handling behavior will cause an exception to +be thrown and the client will exit by default. If you wish for something less +dramatic, then set it to + + errors=...,unverified->count + +or + + errors=...,unverified->warn + +##### rows to verify + +Currently, every read operation in a cqlverify activity must have a single row +in the result set. If there is no row, then the row fails validation. 
The same +happens if there is more than one row. + +A future release may allow for paged reads for quicker verification. + +### Example activity definitions + +Write 100K cycles of telemetry data + + ... run type=cql alias=writesome yaml=telemetry tags=group:write cycles=100000 host=... + +Verify the same 100K cycles of telemetry data + + ... run type=cqlverify alias=verify yaml=telemetry tags=group:verify cycles=100000 host=... + +To see how these examples work, consult the telemetry.yaml file in the ebdse.jar. + +### CQLVerify ActivityType Parameters + +(in addition to those provided by the cql activity type) + +- **compare** - what to verify. Valid values are "reffields", + "rowfields", "fields", "values", or "all" + (default: all) + - rowfields - Verify that fields in the row, by name, are + not in excess of what is provided in the reference data. + - reffields - Verify that fields in the row, by name, are + present for all of those provided in the reference data. + - fields - A synonym for rowfields AND reffields + (full set equivalence) + - values - Verify that all the pair-wise fields have equal + values, according to the type-specific equals method for + the data type identified in the row metadata by field name. + - all - A synonym for fields AND values + +### CQLVerify Statement Parameters + +- **verify-fields** - an optional modifier of fields to verify for a statement. + If this parameter is not provided, then it is presumed to be '*' by default. + This is a string which consists of comma-separated values. If the value + is '*', then all the bindings that are visible for the statement will be + used as expected values. + If it is a word that starts with '-', like '-field2', then the name after the + dash is removed from the list of fields to verify. + If it is a word that starts with a '+', like '+field3', or a simple word, + then the field is added to the list of fields to verify. 
+ This parameter is useful if you have a set of default bindings and want + to specify which subset of them will be used just for this statement. + + If any of the added fields is in the form "f->b", then it is taken as a mapping + from the field name _f_ in the schema to a binding _b_. + +### Metrics + +The cqlverify activity type adds some verification-specific metrics: + +- alias.verifiedrows - A counter for how many rows passed verification +- alias.unverifiedrows - A counter for how many rows failed verification +- alias.verifiedvalues - A counter for how many field values were verified +- alias.unverifiedvalues - A counter for how many field values were unverified + diff --git a/activitytype-cql/src/main/resources/topics.md b/activitytype-cql/src/main/resources/topics.md new file mode 100644 index 000000000..d405dfbdb --- /dev/null +++ b/activitytype-cql/src/main/resources/topics.md @@ -0,0 +1,11 @@ +# additional help topics + +## (activity types) + +- cql +- cqlverify + +## general topics + +- cqlerrors +- cqlexceptionlist \ No newline at end of file diff --git a/activitytype-cql/src/test/java/com/datastax/ebdrivers/cql/CqlActionTest.java b/activitytype-cql/src/test/java/com/datastax/ebdrivers/cql/CqlActionTest.java new file mode 100644 index 000000000..ddd930301 --- /dev/null +++ b/activitytype-cql/src/test/java/com/datastax/ebdrivers/cql/CqlActionTest.java @@ -0,0 +1,21 @@ +package com.datastax.ebdrivers.cql; + +import io.nosqlbench.activitytype.cql.ebdrivers.cql.core.CqlAction; +import io.nosqlbench.activitytype.cql.ebdrivers.cql.core.CqlActivity; +import io.nosqlbench.engine.api.activityimpl.ActivityDef; +import org.testng.annotations.Test; + +@Test +public class CqlActionTest { + + @Test(enabled = false) + public void testCqlAction() { + ActivityDef ad = ActivityDef.parseActivityDef("type=ebdrivers;alias=foo;yaml=write-telemetry.yaml;"); + CqlActivity cac = new CqlActivity(ad); + CqlAction cq = new CqlAction(ad, 0, cac); + cq.init(); + 
cq.runCycle(5); + } + + +} \ No newline at end of file diff --git a/activitytype-cql/src/test/java/com/datastax/ebdrivers/cql/statements/CQLCQLStatementDefParserTest.java b/activitytype-cql/src/test/java/com/datastax/ebdrivers/cql/statements/CQLCQLStatementDefParserTest.java new file mode 100644 index 000000000..05fd32311 --- /dev/null +++ b/activitytype-cql/src/test/java/com/datastax/ebdrivers/cql/statements/CQLCQLStatementDefParserTest.java @@ -0,0 +1,62 @@ +package com.datastax.ebdrivers.cql.statements; + +import io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.core.CQLStatementDefParser; +import org.testng.annotations.Test; + +import java.util.HashMap; +import java.util.List; + +import static org.assertj.core.api.Assertions.assertThat; + +@Test +public class CQLCQLStatementDefParserTest { + + // TODO: Implment support for default values in yaml + + @Test + public void testBasicParsing() { + HashMap bindings = new HashMap() {{ + put("not", "even"); + }}; + CQLStatementDefParser sdp = new CQLStatementDefParser("test-name","This is ?not an error."); + CQLStatementDefParser.ParseResult r = sdp.getParseResult(bindings.keySet()); + assertThat(r.hasError()).isFalse(); + assertThat(r.getStatement()).isEqualTo("This is ? an error."); + assertThat(r.getMissingAnchors().size()).isEqualTo(0); + assertThat(r.getMissingGenerators().size()).isEqualTo(0); + } + + @Test + public void testParsingDiagnostics() { + + HashMap bindings = new HashMap() {{ + put("BINDABLE", "two"); + put("EXTRABINDING", "5"); + }}; + CQLStatementDefParser sdp = new CQLStatementDefParser("test-name","This is a test of ?BINDABLE interpolation and ?MISSINGBINDING."); + List bindableNames = sdp.getBindableNames(); + CQLStatementDefParser.ParseResult result = sdp.getParseResult(bindings.keySet()); + assertThat(result.hasError()).isTrue(); + assertThat(result.getStatement()).isEqualTo("This is a test of ? 
interpolation and ?."); + assertThat(result.getMissingAnchors().size()).isEqualTo(1); + assertThat(result.getMissingGenerators().size()).isEqualTo(1); + assertThat(result.getMissingAnchors()).contains("EXTRABINDING"); + assertThat(result.getMissingGenerators()).contains("MISSINGBINDING"); + + } + + @Test + public void testParsingPatterns() { + HashMap bindings = new HashMap() {{ + put("B-1", "one"); + put("B_-1.2", "two"); + }}; + CQLStatementDefParser sdp = new CQLStatementDefParser("test-name","This is a test of ?B-1 and {B_-1.2}"); + List bindableNames = sdp.getBindableNames(); + assertThat(bindableNames).containsExactly("B-1","B_-1.2"); + CQLStatementDefParser.ParseResult parseResult = sdp.getParseResult(bindings.keySet()); + assertThat(parseResult.hasError()).isFalse(); + assertThat(parseResult.getStatement()).isEqualTo("This is a test of ? and ?"); + } + +} \ No newline at end of file diff --git a/activitytype-cql/src/test/java/com/datastax/ebdrivers/cql/statements/CQLOptionsTest.java b/activitytype-cql/src/test/java/com/datastax/ebdrivers/cql/statements/CQLOptionsTest.java new file mode 100644 index 000000000..87a5a5857 --- /dev/null +++ b/activitytype-cql/src/test/java/com/datastax/ebdrivers/cql/statements/CQLOptionsTest.java @@ -0,0 +1,71 @@ +package com.datastax.ebdrivers.cql.statements; + +import com.datastax.driver.core.HostDistance; +import com.datastax.driver.core.PoolingOptions; +import com.datastax.driver.core.SocketOptions; +import com.datastax.driver.core.policies.LoadBalancingPolicy; +import com.datastax.driver.core.policies.SpeculativeExecutionPolicy; +import io.nosqlbench.activitytype.cql.ebdrivers.cql.core.CQLOptions; +import org.testng.annotations.Test; + +import static org.assertj.core.api.Assertions.assertThat; + +@Test +public class CQLOptionsTest { + + @Test + public void testSpeculative() { + SpeculativeExecutionPolicy p1 = CQLOptions.speculativeFor("p99:5"); + assertThat(p1).isNotNull(); + SpeculativeExecutionPolicy p2 = 
CQLOptions.speculativeFor("p99:5:5000ms"); + assertThat(p2).isNotNull(); + } + + @Test + public void testConstant() { + SpeculativeExecutionPolicy p1 = CQLOptions.speculativeFor("5000ms:5"); + assertThat(p1).isNotNull(); + } + + @Test + public void testWhitelist() { + LoadBalancingPolicy lbp = CQLOptions.whitelistFor("127.0.0.1,127.0.0.2:123", null); + assertThat(lbp).isNotNull(); + } + + @Test + public void testSocketOptionPatterns() { + SocketOptions so = CQLOptions.socketOptionsFor("read_timeout_ms=23423,connect_timeout_ms=2344;keep_alive:true,reuse_address:true;so_linger:323;tcp_no_delay=true;receive_buffer_size:100,send_buffer_size=1000"); + assertThat(so.getConnectTimeoutMillis()).isEqualTo(2344); + assertThat(so.getKeepAlive()).isEqualTo(true); + assertThat(so.getReadTimeoutMillis()).isEqualTo(23423); + assertThat(so.getReceiveBufferSize()).isEqualTo(100); + assertThat(so.getReuseAddress()).isEqualTo(true); + assertThat(so.getSendBufferSize()).isEqualTo(1000); + assertThat(so.getSoLinger()).isEqualTo(323); + assertThat(so.getTcpNoDelay()).isEqualTo(true); + + } + + @Test + public void testConnectionsPatterns() { + PoolingOptions po = CQLOptions.poolingOptionsFor("2345"); + assertThat(po.getCoreConnectionsPerHost(HostDistance.LOCAL)).isEqualTo(2345); + assertThat(po.getMaxConnectionsPerHost(HostDistance.LOCAL)).isEqualTo(Integer.MIN_VALUE); + assertThat(po.getMaxRequestsPerConnection(HostDistance.LOCAL)).isEqualTo(Integer.MIN_VALUE); + + PoolingOptions po2 = CQLOptions.poolingOptionsFor("1:2:3,4:5:6"); + assertThat(po2.getCoreConnectionsPerHost(HostDistance.LOCAL)).isEqualTo(1); + assertThat(po2.getMaxConnectionsPerHost(HostDistance.LOCAL)).isEqualTo(2); + assertThat(po2.getMaxRequestsPerConnection(HostDistance.LOCAL)).isEqualTo(3); + assertThat(po2.getCoreConnectionsPerHost(HostDistance.REMOTE)).isEqualTo(4); + assertThat(po2.getMaxConnectionsPerHost(HostDistance.REMOTE)).isEqualTo(5); + 
assertThat(po2.getMaxRequestsPerConnection(HostDistance.REMOTE)).isEqualTo(6); + + PoolingOptions po3 = CQLOptions.poolingOptionsFor("1:2:3,4:5:6,heartbeat_interval_s:100,idle_timeout_s:123,pool_timeout_ms:234"); + assertThat(po3.getIdleTimeoutSeconds()).isEqualTo(123); + assertThat(po3.getPoolTimeoutMillis()).isEqualTo(234); + assertThat(po3.getHeartbeatIntervalSeconds()).isEqualTo(100); + + } +} \ No newline at end of file diff --git a/activitytype-cql/src/test/resources/activities/testactivity.yaml b/activitytype-cql/src/test/resources/activities/testactivity.yaml new file mode 100644 index 000000000..4911a4c3f --- /dev/null +++ b/activitytype-cql/src/test/resources/activities/testactivity.yaml @@ -0,0 +1,14 @@ +tags: + group: read +statements: +- name: read-telemetry + statement: | + select * from <>.<>_telemetry + where source={source} + and epoch_hour={epoch_hour} + and param={param} + limit 10 + bindings: + source: ThreadNumGenerator + epoch_hour: DateSequenceFieldGenerator(1000,'YYYY-MM-dd-HH') + param: LineExtractGenerator('data/variable_words.txt') diff --git a/activitytype-cqlverify/pom.xml b/activitytype-cqlverify/pom.xml new file mode 100644 index 000000000..c5399b425 --- /dev/null +++ b/activitytype-cqlverify/pom.xml @@ -0,0 +1,32 @@ + + 4.0.0 + + + io.nosqlbench + mvn-defaults + 3.12.2-SNAPSHOT + ../mvn-defaults + + + at-cqlverify + jar + ${project.artifactId} + + + A CQL content verifier ActivityType, based on the CQL ActivityType + built on http://nosqlbench.io/ + + + + + + + + io.nosqlbench + at-cql + 3.12.2-SNAPSHOT + + + + + diff --git a/activitytype-cqlverify/src/main/java/io/nosqlbench/activitytype/cqlverify/CqlVerifyAction.java b/activitytype-cqlverify/src/main/java/io/nosqlbench/activitytype/cqlverify/CqlVerifyAction.java new file mode 100644 index 000000000..6c9048549 --- /dev/null +++ b/activitytype-cqlverify/src/main/java/io/nosqlbench/activitytype/cqlverify/CqlVerifyAction.java @@ -0,0 +1,20 @@ +package 
io.nosqlbench.activitytype.cqlverify; + +import io.nosqlbench.activitytype.cql.ebdrivers.cql.core.CqlAction; +import io.nosqlbench.activitytype.cql.ebdrivers.cql.core.CqlActivity; +import io.nosqlbench.engine.api.activityapi.core.ActivityDefObserver; +import io.nosqlbench.engine.api.activityimpl.ActivityDef; + +public class CqlVerifyAction extends CqlAction implements ActivityDefObserver { + + public CqlVerifyAction(ActivityDef activityDef, int slot, CqlActivity cqlActivity) { + super(activityDef, slot, cqlActivity); + } + + @Override + public void onActivityDefUpdate(ActivityDef activityDef) { + super.onActivityDefUpdate(activityDef); + } + + +} diff --git a/activitytype-cqlverify/src/main/java/io/nosqlbench/activitytype/cqlverify/CqlVerifyActionDispenser.java b/activitytype-cqlverify/src/main/java/io/nosqlbench/activitytype/cqlverify/CqlVerifyActionDispenser.java new file mode 100644 index 000000000..ace83cdd1 --- /dev/null +++ b/activitytype-cqlverify/src/main/java/io/nosqlbench/activitytype/cqlverify/CqlVerifyActionDispenser.java @@ -0,0 +1,21 @@ +package io.nosqlbench.activitytype.cqlverify; + +import io.nosqlbench.activitytype.cql.ebdrivers.cql.core.CqlActionDispenser; +import io.nosqlbench.activitytype.cql.ebdrivers.cql.core.CqlActivity; +import io.nosqlbench.engine.api.activityapi.core.Action; + +public class CqlVerifyActionDispenser extends CqlActionDispenser { + public CqlVerifyActionDispenser(CqlActivity cqlActivity) { + super(cqlActivity); + } + + public Action getAction(int slot) { + long async= getCqlActivity().getActivityDef().getParams().getOptionalLong("async").orElse(0L); + if (async>0) { + return new CqlVerifyAsyncAction(getCqlActivity().getActivityDef(), slot, getCqlActivity()); + } else { + return new CqlVerifyAction(getCqlActivity().getActivityDef(), slot, getCqlActivity()); + } + } + +} diff --git a/activitytype-cqlverify/src/main/java/io/nosqlbench/activitytype/cqlverify/CqlVerifyActivity.java 
b/activitytype-cqlverify/src/main/java/io/nosqlbench/activitytype/cqlverify/CqlVerifyActivity.java new file mode 100644 index 000000000..c74474f0e --- /dev/null +++ b/activitytype-cqlverify/src/main/java/io/nosqlbench/activitytype/cqlverify/CqlVerifyActivity.java @@ -0,0 +1,124 @@ +package io.nosqlbench.activitytype.cqlverify; + +import io.nosqlbench.activitytype.cql.ebdrivers.cql.core.CqlActivity; +import io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.rsoperators.AssertSingleRowResultSet; +import io.nosqlbench.engine.api.activityimpl.ActivityDef; +import io.nosqlbench.virtdata.api.Bindings; +import io.nosqlbench.virtdata.api.BindingsTemplate; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Optional; + +public class CqlVerifyActivity extends CqlActivity { + + private final static Logger logger = LoggerFactory.getLogger(CqlVerifyActivity.class); + private BindingsTemplate expectedValuesTemplate; + private VerificationMetrics verificationMetrics; + + public CqlVerifyActivity(ActivityDef activityDef) { + super(activityDef); + } + + @Override + public synchronized void initActivity() { + this.verificationMetrics = new VerificationMetrics(getActivityDef()); + + super.initActivity(); + + if (this.stmts.size() > 1) { + throw new RuntimeException("More than one statement was configured as active. 
" + + this.getActivityDef().getActivityType() + " requires exactly one."); + } + + Optional randomMapper = stmts.stream() + .flatMap(s -> s.getBindings().values().stream()) + .filter(t -> t.matches(".*Random.*") || t.matches(".*random.*")) + .findAny(); + + + if (randomMapper.isPresent()) { + throw new RuntimeException( + "You should not try to verify data generated with random mapping " + + "functions, like " + randomMapper.get() + " as it does not " + + "produce stable results in different invocation order."); + } + + } + + public synchronized BindingsTemplate getExpectedValuesTemplate() { + if (expectedValuesTemplate==null) { + expectedValuesTemplate = new BindingsTemplate(); + Map bindings = stmts.get(0).getBindings(); + if (stmts.get(0).getParams().containsKey("verify-fields")) { + List fields = new ArrayList<>(); + String fieldsSpec = stmts.get(0).getParams().get("verify-fields"); + String[] vfields = fieldsSpec.split("\\s*,\\s*"); + for (String vfield : vfields) { + if (vfield.equals("*")) { + bindings.forEach((k,v)->fields.add(k)); + } else if (vfield.startsWith("+")) { + fields.add(vfield.substring(1)); + } else if (vfield.startsWith("-")) { + fields.remove(vfield.substring(1)); + } else if (vfield.matches("\\w+(\\w+->[\\w-]+)?")) { + fields.add(vfield); + } else { + throw new RuntimeException("unknown verify-fields format: '" + vfield + "'"); + } + } + for (String vfield : fields) { + String[] fieldNameAndBindingName = vfield.split("\\s*->\\s*", 2); + String fieldName = fieldNameAndBindingName[0]; + String bindingName = fieldNameAndBindingName.length==1 ? 
fieldName : fieldNameAndBindingName[1]; + if (!bindings.containsKey(bindingName)) { + throw new RuntimeException("binding name '" + bindingName + + "' referenced in verify-fields, but it is not present in available bindings."); + } + expectedValuesTemplate.addFieldBinding(fieldName,bindings.get(bindingName)); + } + } else { + bindings.forEach((k,v)->expectedValuesTemplate.addFieldBinding(k,v)); + } + } + return expectedValuesTemplate; + } + + public synchronized VerificationMetrics getVerificationMetrics() { + return verificationMetrics; + } + + @Override + public void shutdownActivity() { + super.shutdownActivity(); + VerificationMetrics metrics = getVerificationMetrics(); + long unverifiedValues = metrics.unverifiedValuesCounter.getCount(); + long unverifiedRows = metrics.unverifiedRowsCounter.getCount(); + + if (unverifiedRows > 0 || unverifiedValues > 0) { + throw new RuntimeException( + "There were " + unverifiedValues + " unverified values across " + unverifiedRows + " unverified rows." 
+ ); + } + logger.info("verified " + metrics.verifiedValuesCounter.getCount() + " values across " + metrics.verifiedRowsCounter.getCount() + " verified rows"); + } + + @Override + public void onActivityDefUpdate(ActivityDef activityDef) { + super.onActivityDefUpdate(activityDef); + addResultSetCycleOperator(new AssertSingleRowResultSet()); + + String verify = activityDef.getParams() + .getOptionalString("compare").orElse("all"); + DiffType diffType = DiffType.valueOf(verify); + Bindings verifyBindings = getExpectedValuesTemplate().resolveBindings(); + var differ = new RowDifferencer.ThreadLocalWrapper( + getVerificationMetrics(), + verifyBindings, + diffType); + addRowCycleOperator(differ); + } +} diff --git a/activitytype-cqlverify/src/main/java/io/nosqlbench/activitytype/cqlverify/CqlVerifyActivityType.java b/activitytype-cqlverify/src/main/java/io/nosqlbench/activitytype/cqlverify/CqlVerifyActivityType.java new file mode 100644 index 000000000..31cc2591d --- /dev/null +++ b/activitytype-cqlverify/src/main/java/io/nosqlbench/activitytype/cqlverify/CqlVerifyActivityType.java @@ -0,0 +1,25 @@ +package io.nosqlbench.activitytype.cqlverify; + +import io.nosqlbench.engine.api.activityapi.core.ActionDispenser; +import io.nosqlbench.engine.api.activityapi.core.ActivityType; +import io.nosqlbench.engine.api.activityimpl.ActivityDef; +import io.nosqlbench.virtdata.annotations.Service; + +@Service(ActivityType.class) +public class CqlVerifyActivityType implements ActivityType { + + @Override + public String getName() { + return "cqlverify"; + } + + @Override + public ActionDispenser getActionDispenser(CqlVerifyActivity activity) { + return new CqlVerifyActionDispenser(activity); + } + + @Override + public CqlVerifyActivity getActivity(ActivityDef activityDef) { + return new CqlVerifyActivity(activityDef); + } +} diff --git a/activitytype-cqlverify/src/main/java/io/nosqlbench/activitytype/cqlverify/CqlVerifyAsyncAction.java 
b/activitytype-cqlverify/src/main/java/io/nosqlbench/activitytype/cqlverify/CqlVerifyAsyncAction.java new file mode 100644 index 000000000..3000d642b --- /dev/null +++ b/activitytype-cqlverify/src/main/java/io/nosqlbench/activitytype/cqlverify/CqlVerifyAsyncAction.java @@ -0,0 +1,20 @@ +package io.nosqlbench.activitytype.cqlverify; + +import io.nosqlbench.activitytype.cql.ebdrivers.cql.core.CqlActivity; +import io.nosqlbench.activitytype.cql.ebdrivers.cql.core.CqlAsyncAction; +import io.nosqlbench.engine.api.activityapi.core.ActivityDefObserver; +import io.nosqlbench.engine.api.activityimpl.ActivityDef; + +public class CqlVerifyAsyncAction extends CqlAsyncAction implements ActivityDefObserver { + + public CqlVerifyAsyncAction(ActivityDef activityDef, int slot, CqlActivity cqlActivity) { + super(cqlActivity, slot); + } + + @Override + public void onActivityDefUpdate(ActivityDef activityDef) { + super.onActivityDefUpdate(activityDef); + } + + +} diff --git a/activitytype-cqlverify/src/main/java/io/nosqlbench/activitytype/cqlverify/DiffType.java b/activitytype-cqlverify/src/main/java/io/nosqlbench/activitytype/cqlverify/DiffType.java new file mode 100644 index 000000000..f882bf38c --- /dev/null +++ b/activitytype-cqlverify/src/main/java/io/nosqlbench/activitytype/cqlverify/DiffType.java @@ -0,0 +1,33 @@ +package io.nosqlbench.activitytype.cqlverify; + +public enum DiffType { + + /// Verify that fields named in the row are present in the reference map. + rowfields(0x1), + + /// Verify that fields in the reference map are present in the row data. + reffields(0x1<<1), + + /// Verify that all fields present in either the row or the reference data + /// are also present in the other. + fields(0x1|0x1<<1), + + /// Verify that all values of the same named field are equal, according to + /// {@link Object#equals(Object)}}. + values(0x1<<2), + + /// Cross-verify all fields and field values between the reference data and + /// the actual data. 
+ all(0x1|0x1<<1|0x1<<2); + + public int bitmask; + + DiffType(int bit) { + this.bitmask = bit; + } + + public boolean is(DiffType option) { + return (bitmask & option.bitmask) > 0; + } + +} diff --git a/activitytype-cqlverify/src/main/java/io/nosqlbench/activitytype/cqlverify/RowDifferencer.java b/activitytype-cqlverify/src/main/java/io/nosqlbench/activitytype/cqlverify/RowDifferencer.java new file mode 100644 index 000000000..f5d939670 --- /dev/null +++ b/activitytype-cqlverify/src/main/java/io/nosqlbench/activitytype/cqlverify/RowDifferencer.java @@ -0,0 +1,312 @@ +package io.nosqlbench.activitytype.cqlverify; + +import com.datastax.driver.core.*; +import io.nosqlbench.activitytype.cql.ebdrivers.cql.api.RowCycleOperator; +import io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.exceptions.RowVerificationException; +import io.nosqlbench.virtdata.api.Bindings; + +import java.math.BigDecimal; +import java.math.BigInteger; +import java.net.InetAddress; +import java.nio.ByteBuffer; +import java.util.*; +import java.util.stream.Collectors; + +/** + *

RowDifferencer uses the metadata associated with a row to access and compare + * {@link Row} values in a type-specific way. + *

+ */ +public class RowDifferencer implements RowCycleOperator { + + private final StringBuilder logbuffer = new StringBuilder(); + private final Map refMap = new HashMap<>(); + private final DiffType difftype; + private final Bindings bindings; + private final VerificationMetrics metrics; + + + private RowDifferencer(VerificationMetrics metrics, Bindings bindings, DiffType diffType) { + this.metrics = metrics; + this.bindings = bindings; + this.difftype = diffType; + } + + /** + * see {@link DataType} + * + * @param typeName The DataType.Name of the field in question + * @param row The row to read the field value from + * @param fieldName The field name to read + * @param genValue the generated value to compare against + * @return true, if the value is equal + */ + private static boolean isEqual(DataType.Name typeName, Row row, String fieldName, Object genValue) { + switch (typeName) { + case ASCII: // ASCII(1, String.class) + case VARCHAR: // VARCHAR(13, String.class) + case TEXT: // TEXT(10, String.class) + String textValue = row.getString(fieldName); + return textValue.equals(genValue); + case BIGINT: // BIGINT(2, Long.class) + case COUNTER: // COUNTER(5, Long.class) + long longValue = row.getLong(fieldName); + return longValue == (long) genValue; + case BLOB: // BLOB(3, ByteBuffer.class) + // TODO: How do we test this one? 
+ case CUSTOM: // CUSTOM(0, ByteBuffer.class) + ByteBuffer blobValue = row.getBytes(fieldName); + return blobValue.equals(genValue); + case BOOLEAN: // BOOLEAN(4, Boolean.class) + boolean boolValue = row.getBool(fieldName); + return boolValue == (boolean) genValue; + case DECIMAL: // DECIMAL(6, BigDecimal.class) + BigDecimal bigDecimalValue = row.getDecimal(fieldName); + return bigDecimalValue.equals(genValue); + case DOUBLE: // DOUBLE(7, Double.class) + double doubleValue = row.getDouble(fieldName); + return doubleValue == (double) genValue; + case FLOAT: // FLOAT(8, Float.class) + float floatValue = row.getFloat(fieldName); + return floatValue == (float) genValue; + case INET: // INET(16, InetAddress.class) + InetAddress inetAddressValue = row.getInet(fieldName); + return inetAddressValue.equals(genValue); + case INT: // INT(9, Integer.class) + int intValue = row.getInt(fieldName); + return intValue == (int) genValue; + case TIMESTAMP: // TIMESTAMP(11, Date.class) + Date timestamp = row.getTimestamp(fieldName); + return timestamp.equals(genValue); + case UUID: // UUID(12, UUID.class) + case TIMEUUID: // TIMEUUID(15, UUID.class) + UUID uuidValue = row.getUUID(fieldName); + return uuidValue.equals(genValue); + case VARINT: // VARINT(14, BigInteger.class) + BigInteger bigIntValue = row.getVarint(fieldName); + return bigIntValue.equals(genValue); + case LIST: // LIST(32, List.class) + // TODO: How do we make getCollection methods work with non-String CQL types? 
+ List list = row.getList(fieldName, String.class); + return list.equals(genValue); + case SET: // SET(34, Set.class) + Set set = row.getSet(fieldName, String.class); + return set.equals(genValue); + case MAP: // MAP(33, Map.class) + Map map = row.getMap(fieldName, String.class, String.class); + return map.equals(genValue); + case UDT: // UDT(48, UDTValue.class) + UDTValue udtValue = row.getUDTValue(fieldName); + return udtValue.equals(genValue); + case TUPLE: // TUPLE(49, TupleValue.class) + TupleValue tupleValue = row.getTupleValue(fieldName); + return tupleValue.equals(genValue); + case SMALLINT: + short shortVal = row.getShort(fieldName); + return shortVal == (Short) genValue; + case TINYINT: + byte byteValue = row.getByte(fieldName); + return byteValue == (byte) genValue; + case DATE: + LocalDate dateValue = row.getDate(fieldName); + return dateValue.equals(genValue); + case TIME: + long timeValue = row.getTime(fieldName); + return timeValue == (long) genValue; + default: + throw new RuntimeException("Unrecognized type:" + typeName); + } + } + + private static String prettyPrint(DataType.Name typeName, Row row, String fieldName) { + switch (typeName) { + case ASCII: // ASCII(1, String.class) + case VARCHAR: // VARCHAR(13, String.class) + case TEXT: // TEXT(10, String.class) + return row.getString(fieldName); + case BIGINT: // BIGINT(2, Long.class) + case COUNTER: // COUNTER(5, Long.class) + long counterValue = row.getLong(fieldName); + return String.valueOf(counterValue); + case BLOB: // BLOB(3, ByteBuffer.class) + case CUSTOM: // CUSTOM(0, ByteBuffer.class) + ByteBuffer blobValue = row.getBytes(fieldName); + return String.valueOf(blobValue); + case BOOLEAN: // BOOLEAN(4, Boolean.class) + boolean boolValue = row.getBool(fieldName); + return String.valueOf(boolValue); + case DECIMAL: // DECIMAL(6, BigDecimal.class) + BigDecimal bigDecimalValue = row.getDecimal(fieldName); + return String.valueOf(bigDecimalValue); + case DOUBLE: // DOUBLE(7, Double.class) + 
double doubleValue = row.getDouble(fieldName); + return String.valueOf(doubleValue); + case FLOAT: // FLOAT(8, Float.class) + float floatValue = row.getFloat(fieldName); + return String.valueOf(floatValue); + case INET: // INET(16, InetAddress.class) + InetAddress inetAddressValue = row.getInet(fieldName); + return String.valueOf(inetAddressValue); + case INT: // INT(9, Integer.class) + int intValue = row.getInt(fieldName); + return String.valueOf(intValue); + case TIMESTAMP: // TIMESTAMP(11, Date.class) + Date timestamp = row.getTimestamp(fieldName); + return String.valueOf(timestamp); + case UUID: // UUID(12, UUID.class) + case TIMEUUID: // TIMEUUID(15, UUID.class) + UUID uuidValue = row.getUUID(fieldName); + return String.valueOf(uuidValue); + case VARINT: // VARINT(14, BigInteger.class) + BigInteger bigIntValue = row.getVarint(fieldName); + return String.valueOf(bigIntValue); + case LIST: // LIST(32, List.class) + List list = row.getList(fieldName, String.class); + return String.valueOf(list); + case SET: // SET(34, Set.class) + Set set = row.getSet(fieldName, String.class); + return String.valueOf(set); + case MAP: // MAP(33, Map.class) + Map map = row.getMap(fieldName, String.class, String.class); + return String.valueOf(map); + case UDT: // UDT(48, UDTValue.class) + UDTValue udtValue = row.getUDTValue(fieldName); + return String.valueOf(udtValue); + case TUPLE: // TUPLE(49, TupleValue.class) + TupleValue tupleValue = row.getTupleValue(fieldName); + return String.valueOf(tupleValue); + case SMALLINT: + short val = row.getShort(fieldName); + return String.valueOf(val); + case TINYINT: + byte byteValue = row.getByte(fieldName); + return String.valueOf(byteValue); + case DATE: + LocalDate dateValue = row.getDate(fieldName); + return String.valueOf(dateValue); + case TIME: + long timeValue = row.getTime(fieldName); + return String.valueOf(timeValue); + default: + throw new RuntimeException("Type not recognized:" + typeName); + } + } + + /** + * Compare the values 
of the row with the values generated. + *

+ * Specifically, + *

    + *
  1. Ensure the same number of fields.
  2. + *
  3. Ensure the same pair-wise field names.
  4. + *
  5. Ensure that each pair of same-named fields has the same data type.
  6. + *
  7. Ensure that the value of each pair of fields is equal according to the equals + * operator for the respective type.
  8. + *
+ * * + * + * @param row A row of data + * @param referenceMap a map of values + * @return a count of differences between the row and the reference values + */ + private int compare(Row row, Map referenceMap) { + int diff = 0; + ColumnDefinitions cdefs = row.getColumnDefinitions(); + + logbuffer.setLength(0); + + if (difftype.is(DiffType.reffields)) { + List missingRowFields = referenceMap.keySet().stream() + .filter(gk -> !cdefs.contains(gk)) + .collect(Collectors.toList()); + if (missingRowFields.size() > 0) { + diff += missingRowFields.size(); + + logbuffer.append("\nexpected fields '"); + logbuffer.append(String.join("','", missingRowFields)); + logbuffer.append("' not in row."); + } + } + + if (difftype.is(DiffType.rowfields)) { + List missingRefFields = cdefs.asList().stream() + .map(ColumnDefinitions.Definition::getName) + .filter(k -> !referenceMap.containsKey(k)) + .collect(Collectors.toList()); + if (missingRefFields.size() > 0) { + diff += missingRefFields.size(); + + logbuffer.append("\nexpected fields '"); + logbuffer.append(String.join("','", missingRefFields)); + logbuffer.append("' not in reference data: " + referenceMap); + } + } + + if (difftype.is(DiffType.values)) { + for (ColumnDefinitions.Definition definition : row.getColumnDefinitions()) { + String name = definition.getName(); + if (referenceMap.containsKey(name)) { + DataType type = definition.getType(); + if (!isEqual(type.getName(), row, name, referenceMap.get(name))) { + logbuffer.append("\nvalue differs for '").append(name).append("' "); + logbuffer.append("expected:'").append(referenceMap.get(name).toString()).append("'"); + logbuffer.append(" actual:'").append(prettyPrint(type.getName(), row, name)).append("'"); + diff++; + metrics.unverifiedValuesCounter.inc(); + } else { + metrics.verifiedValuesCounter.inc(); + } + } + } + } + if (diff == 0) { + metrics.verifiedRowsCounter.inc(); + } else { + metrics.unverifiedRowsCounter.inc(); + } + return diff; + } + + /** + * Get the most recent 
detail log recorded by this thread. + * + * @return a logbuffer string, with one entry per line + */ + public String getDetail() { + return this.logbuffer.toString(); + } + + @Override + public int apply(Row row, long cycle) { + refMap.clear(); + bindings.setMap(refMap, cycle); + int diffs = compare(row, refMap); + if (diffs > 0) { + HashMap mapcopy = new HashMap<>(); + mapcopy.putAll(refMap); + throw new RowVerificationException(cycle, row, mapcopy, getDetail()); + } else { + return 0; + } + } + + public static class ThreadLocalWrapper implements RowCycleOperator { + + private final VerificationMetrics metrics; + private final Bindings bindings; + private final DiffType diffType; + private ThreadLocal tl; + + public ThreadLocalWrapper(VerificationMetrics metrics, Bindings bindings, DiffType diffType) { + this.metrics = metrics; + this.bindings = bindings; + this.diffType = diffType; + tl = ThreadLocal.withInitial(() -> new RowDifferencer(metrics,bindings,diffType)); + } + + @Override + public int apply(Row row, long cycle) { + return tl.get().apply(row,cycle); + } + } +} diff --git a/activitytype-cqlverify/src/main/java/io/nosqlbench/activitytype/cqlverify/VerificationMetrics.java b/activitytype-cqlverify/src/main/java/io/nosqlbench/activitytype/cqlverify/VerificationMetrics.java new file mode 100644 index 000000000..9e0c7803a --- /dev/null +++ b/activitytype-cqlverify/src/main/java/io/nosqlbench/activitytype/cqlverify/VerificationMetrics.java @@ -0,0 +1,21 @@ +package io.nosqlbench.activitytype.cqlverify; + +import com.codahale.metrics.Counter; +import io.nosqlbench.engine.api.activityimpl.ActivityDef; +import io.nosqlbench.engine.api.metrics.ActivityMetrics; + +public class VerificationMetrics { + + public final Counter verifiedRowsCounter; + public final Counter unverifiedRowsCounter; + public final Counter verifiedValuesCounter; + public final Counter unverifiedValuesCounter; + + public VerificationMetrics(ActivityDef activityDef) { + verifiedRowsCounter = 
ActivityMetrics.counter(activityDef,"verifiedrows"); + unverifiedRowsCounter= ActivityMetrics.counter(activityDef,"unverifiedrows"); + verifiedValuesCounter = ActivityMetrics.counter(activityDef,"verifiedvalues"); + unverifiedValuesCounter = ActivityMetrics.counter(activityDef,"unverifiedvalues"); + } + +} diff --git a/docsys/src/main/java/io/nosqlbench/docsys/core/DocServerApp.java b/docsys/src/main/java/io/nosqlbench/docsys/core/DocServerApp.java index 4b8c2c5cf..5268d808d 100644 --- a/docsys/src/main/java/io/nosqlbench/docsys/core/DocServerApp.java +++ b/docsys/src/main/java/io/nosqlbench/docsys/core/DocServerApp.java @@ -56,7 +56,7 @@ public class DocServerApp { if (arg.matches(".*://.*")) { if (!arg.toLowerCase().contains("http://")) { String suggested = arg.toLowerCase().replaceAll("https","http"); - throw new RuntimeException("ERROR:\nIn this release, only 'http://' URLs are supported.\nTLS will be added in a future release.\nSee https://github.com/datastax/dsbench-labs/issues/29\n" + + throw new RuntimeException("ERROR:\nIn this release, only 'http://' URLs are supported.\nTLS will be added in a future release.\nSee https://github.com/nosqlbench/nosqlbench/issues/35\n" + "Consider using " + suggested); } server.withURL(arg); diff --git a/docsys/src/main/node/docsys/nuxt.config.js b/docsys/src/main/node/docsys/nuxt.config.js index aedcd5833..9396e822d 100644 --- a/docsys/src/main/node/docsys/nuxt.config.js +++ b/docsys/src/main/node/docsys/nuxt.config.js @@ -9,7 +9,7 @@ export default { ** Headers of the page */ head: { - titleTemplate: '%s' + "DSBench Guidebook", + titleTemplate: '%s' + "NoSqlBench Guidebook", title: process.env.npm_package_name || '', meta: [ {charset: 'utf-8'}, diff --git a/docsys/src/main/resources/docsys-guidebook/index.html b/docsys/src/main/resources/docsys-guidebook/index.html index 9071f2fb4..c0da1d048 100644 --- a/docsys/src/main/resources/docsys-guidebook/index.html +++ b/docsys/src/main/resources/docsys-guidebook/index.html @@ 
-1,7 +1,8 @@ - guidebookDSBench Guidebook + NoSQLBench Guidebook
Loading...
diff --git a/docsys/src/main/resources/docsys-testmarkdown/CONTRIBUTING.md b/docsys/src/main/resources/docsys-testmarkdown/CONTRIBUTING.md deleted file mode 100644 index bc9fb1efd..000000000 --- a/docsys/src/main/resources/docsys-testmarkdown/CONTRIBUTING.md +++ /dev/null @@ -1,63 +0,0 @@ ---- -title: Contributing ---- - -# Contributing - -For the long-term success of DSBench, we need as many hands on deck as possible. -We want to make it easy for anyone at DataStax to improve DSBench. - -If you are looking to take on some development, reach out at dsbench@datastax.com -or in the #ebdse-devs slack channel to collaborate. For non-trivial items in particular, -we want to be involved early on to ensure that the proposed feature and design fits into the tool's -direction as a product. - -See the sections below for details on contributing to DSBench. - - -## Collab -We tag all Github Issues that we feel would be a good fit for casual contributors -with a `collab` tag. These issues are a good place to start if you want to do some -development on DSBench. - -If you choose to pick up one of these items, please make yourself the Assignee -in Github and shoot us an email at dsbench@datastax.com or post in the #ebdse-devs slack channel to -let us know that you are starting work on a task. - -link - - -## Roadmap -We have a prioritized roadmap that holds the larger items that we want to tackle. -Some of these are net few features and others are bringing existing -functionality that is in the private EBDSE into the public DSBench. - -In general, these features are complex and the work required is likely not -suited for the casual contributor, but if you feel like you have the familiarity -with the code base to grab one of these items, we will be more than happy to have -a conversation. To start that process, send an email to dsbench@datastax.com with -the Github Issue that you intend to work on. - -The roadmap can be found in the link below. 
- -link - - -## Free Climb -We by no means want to limit the creativity of contributors and in many cases -there may be features that you want to work on that are not called out in the -collaboration bucket or in the roadmap. This is fine! Send us an email at -dsbench@datastax.com and let us know what you want to implement. We will chat -about the feature and make sure that it's use aligns with the DSBench objectives. - - -## Code Layout -Show the high level project structure and where things live - - -## Code Guidelines - -Need input from Shooky & Seb -- What code styles are required? -- What are our unit / integration testing requirements? -- What general rules do people need to follow? diff --git a/docsys/src/main/resources/docsys-testmarkdown/Home.md b/docsys/src/main/resources/docsys-testmarkdown/Home.md deleted file mode 100644 index bd152a737..000000000 --- a/docsys/src/main/resources/docsys-testmarkdown/Home.md +++ /dev/null @@ -1,21 +0,0 @@ -# DataStax Bench Documentation - -Welcome to the documentation for DataStax Bench ( DSBench ). DSBench is a command line utility that emulates real application workloads -and fast-tracks performance, sizing and data model testing by removing the need to write any code in order to -run sound and efficient client testing scenarios with DataStax products. - -The documentation for this tool is divided into 3 tracks. Select the track that fits your needs. - -1. Getting Started: For first time and beginner users. -2. Basic: For common testing customization and intermediate users. -3. Advanced: For complex testing scenarios and expert users. 
- - -### Download - -link - - -### DSBench Compatibility - -matrix \ No newline at end of file diff --git a/docsys/src/main/resources/docsys-testmarkdown/advanced/AdvancedHome.md b/docsys/src/main/resources/docsys-testmarkdown/advanced/AdvancedHome.md deleted file mode 100644 index 0ede3cb6f..000000000 --- a/docsys/src/main/resources/docsys-testmarkdown/advanced/AdvancedHome.md +++ /dev/null @@ -1,6 +0,0 @@ -# Advanced Topics - -In this Advanced Topics track, we will cover the full feature set of the tool -and show how to flex your testing harness and scenarios with DSBench. -We recommend that you go through the following steps in order, though these -sections can be used as a reference once you are comfortable with the material. \ No newline at end of file diff --git a/docsys/src/main/resources/docsys-testmarkdown/basic/BasicBuiltInWorkloads.md b/docsys/src/main/resources/docsys-testmarkdown/basic/BasicBuiltInWorkloads.md deleted file mode 100644 index 2db5c19df..000000000 --- a/docsys/src/main/resources/docsys-testmarkdown/basic/BasicBuiltInWorkloads.md +++ /dev/null @@ -1,3 +0,0 @@ -# Built-in Workloads - -Describe each pre-built workload in high level detail and then link to yaml with more details \ No newline at end of file diff --git a/docsys/src/main/resources/docsys-testmarkdown/basic/BasicControllingDataFootprint.md b/docsys/src/main/resources/docsys-testmarkdown/basic/BasicControllingDataFootprint.md deleted file mode 100644 index ba556350f..000000000 --- a/docsys/src/main/resources/docsys-testmarkdown/basic/BasicControllingDataFootprint.md +++ /dev/null @@ -1,4 +0,0 @@ -# Controlling Data Footprint - -Talk about data distribution and how that relates to the data bindings and pk / clustering key columns. 
-Cover partition size \ No newline at end of file diff --git a/docsys/src/main/resources/docsys-testmarkdown/basic/BasicCustomizingDataGeneration.md b/docsys/src/main/resources/docsys-testmarkdown/basic/BasicCustomizingDataGeneration.md deleted file mode 100644 index 0fd8ecfa1..000000000 --- a/docsys/src/main/resources/docsys-testmarkdown/basic/BasicCustomizingDataGeneration.md +++ /dev/null @@ -1,3 +0,0 @@ -# Customizing Data Generation - -Introduce the data binding portion of the .yaml and talk about pre-built functions w/examples \ No newline at end of file diff --git a/docsys/src/main/resources/docsys-testmarkdown/basic/BasicCustomizingSchemaAndQueries.md b/docsys/src/main/resources/docsys-testmarkdown/basic/BasicCustomizingSchemaAndQueries.md deleted file mode 100644 index 88c6484d6..000000000 --- a/docsys/src/main/resources/docsys-testmarkdown/basic/BasicCustomizingSchemaAndQueries.md +++ /dev/null @@ -1,4 +0,0 @@ -# Customizing Schema and Queries - -Introduce the .yaml format and show examples of CQL - diff --git a/docsys/src/main/resources/docsys-testmarkdown/basic/BasicHome.md b/docsys/src/main/resources/docsys-testmarkdown/basic/BasicHome.md deleted file mode 100644 index 31e28f6c6..000000000 --- a/docsys/src/main/resources/docsys-testmarkdown/basic/BasicHome.md +++ /dev/null @@ -1,14 +0,0 @@ -# Basic Topics - -In this Basic Topics track, we will cover topics that will show how to customize -the tool to your specific application and requirements. We recommend that you -go through the following steps in order, though these sections can be used -as a reference once you are comfortable with the material. - -1. Testing Best Practices -2. Built-in Workloads -3. Customizing Schema and Queries -4. Customizing Data Generation -5. Loading Base Data Set -6. Controlling Data footprint -7. 
Interpreting Results \ No newline at end of file diff --git a/docsys/src/main/resources/docsys-testmarkdown/basic/BasicInterpretingResults.md b/docsys/src/main/resources/docsys-testmarkdown/basic/BasicInterpretingResults.md deleted file mode 100644 index 828d92f46..000000000 --- a/docsys/src/main/resources/docsys-testmarkdown/basic/BasicInterpretingResults.md +++ /dev/null @@ -1,7 +0,0 @@ -# Interpreting Results - -What metrics to look at - -How to get meaning out of results - -How to present results to team \ No newline at end of file diff --git a/docsys/src/main/resources/docsys-testmarkdown/basic/BasicLoadingBaseDataSet.md b/docsys/src/main/resources/docsys-testmarkdown/basic/BasicLoadingBaseDataSet.md deleted file mode 100644 index 7c44cf960..000000000 --- a/docsys/src/main/resources/docsys-testmarkdown/basic/BasicLoadingBaseDataSet.md +++ /dev/null @@ -1,3 +0,0 @@ -# Loading Base Data Set - -Have to load a base data set before running workload, talk about how to parallelize load, ensure the expected # of rows reach the db, how to observe the data distribution etc. \ No newline at end of file diff --git a/docsys/src/main/resources/docsys-testmarkdown/basic/BasicTestingBestPractices.md b/docsys/src/main/resources/docsys-testmarkdown/basic/BasicTestingBestPractices.md deleted file mode 100644 index 742ad6c7a..000000000 --- a/docsys/src/main/resources/docsys-testmarkdown/basic/BasicTestingBestPractices.md +++ /dev/null @@ -1,28 +0,0 @@ -# Testing Best Practices - -It's no secret that distributed systems are difficult. This is a primary reason why sound testing and simulation of expected load -on the system is so important to do before running an application in production. Here we discuss a few of the best practices to follow -when using DSBench but also for testing a distributed database such as DataStax Distribution of Apache Cassandra and DataStax Enterprise. 
- -### Topology - -See existing doc for inspiration -https://powertools.datastax.com/ebdse/advanced-topics/topology/ - -- Run clients on separate machines from database -- Run performance monitoring on separate machines from client and database -- etc. - -### Client Saturation - -Make sure client is not bottle neck - -### Testing Phases - -Base data set first, read / write workload - -### Database saturation - -Don't crush cluster, how to find saturation point and then scale back - -... \ No newline at end of file diff --git a/docsys/src/main/resources/docsys-testmarkdown/getting-started/GettingStartedHome.md b/docsys/src/main/resources/docsys-testmarkdown/getting-started/GettingStartedHome.md deleted file mode 100644 index e0309357b..000000000 --- a/docsys/src/main/resources/docsys-testmarkdown/getting-started/GettingStartedHome.md +++ /dev/null @@ -1,10 +0,0 @@ -# Getting Started - -In this Getting Started track, we will walk you through your first test run with DSBench -and explain the minimal set of information that you will need to get off the ground. We -recommend that you go through the following steps in order, as each step builds on the last. - -1. Installation -2. Running the tool -3. Viewing Results -4. Next Steps \ No newline at end of file diff --git a/docsys/src/main/resources/docsys-testmarkdown/getting-started/GettingStartedInstallation.md b/docsys/src/main/resources/docsys-testmarkdown/getting-started/GettingStartedInstallation.md deleted file mode 100644 index d8f9b4121..000000000 --- a/docsys/src/main/resources/docsys-testmarkdown/getting-started/GettingStartedInstallation.md +++ /dev/null @@ -1,31 +0,0 @@ -# Installation - -DSBench is a command line utility that is packaged as a standalone executable. - -#### 1. Prerequisites - -- Java 8 or 11 installed on the machine that will run DSBench - -#### 2. Install DSBench - -DSBench is hosted on DataStax servers at the following location. 
- -``` - -``` - -Install from the command line - -``` -curl -``` - -It is a best practice to run DSBench on a separate set of machines -rather than directly on the DSE nodes to avoid resource contention. - - -#### 3. Confirm Installation - -``` -/dsbench --help -``` \ No newline at end of file diff --git a/docsys/src/main/resources/docsys-testmarkdown/getting-started/GettingStartedNextSteps.md b/docsys/src/main/resources/docsys-testmarkdown/getting-started/GettingStartedNextSteps.md deleted file mode 100644 index c0cfb2bc7..000000000 --- a/docsys/src/main/resources/docsys-testmarkdown/getting-started/GettingStartedNextSteps.md +++ /dev/null @@ -1 +0,0 @@ -# Next Steps \ No newline at end of file diff --git a/docsys/src/main/resources/docsys-testmarkdown/getting-started/GettingStartedRunning.md b/docsys/src/main/resources/docsys-testmarkdown/getting-started/GettingStartedRunning.md deleted file mode 100644 index 0e226a1c7..000000000 --- a/docsys/src/main/resources/docsys-testmarkdown/getting-started/GettingStartedRunning.md +++ /dev/null @@ -1,160 +0,0 @@ -# Running - -Now that we have DSBench installed, we will run a simple test against a DSE cluster to -establish some basic familiarity with the tool. - -#### 1. Create Schema - -We will start by creating a simple schema in the database. -From your command line, go ahead and execute the following command, -replacing the `host=` with that of one of your database nodes. -``` -./dsbench start type=cql yaml=baselines/cql-keyvalue tags=phase:schema host= -``` - -This command is creating the following schema in your database. -``` -CREATE KEYSPACE baselines WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'} AND durable_writes = true; -CREATE TABLE baselines.keyvalue ( - key text PRIMARY KEY, - value text -) -``` - -Let's break down each of those command line options. - -- `start` tells DSBench to start an activity. -- `type=...` is used to specify the activity type. 
In this case we are using `cql` -which tells DSBench to use the DataStax Java Driver and execute CQL statements against a database. -- The `yaml=...` is used to specify the yaml file that defines the activity. -All activities require a yaml in which you configure things such as data bindings and CQL statements, -but don't worry about those details right now. -In this example, we use `baselines/cql-keyvalue` which is a pre-built workload that is packaged with DSBench. -- The `tags=phase:schema` tells DSBench to run the yaml block that has the `phase:schema` defined as one of its tags. -In this example, that is the DDL portion of the `baselines/cql-keyvalue` workload. -- The `host=...` tells DSBench how to connect to your database, only one host is necessary. - -Before moving on, confirm that this was created in DSE using cqlsh or DataStax Studio < links >. -``` -DESCRIBE KEYSPACE baselines; -``` - -#### 2. Write Base Data Set - -Before sending our writes to the database, we will use the `stdout` activity type -so we can see what DSBench is generating for CQL statements. - -Go ahead and execute the following command. 
-``` -./dsbench start type=stdout yaml=baselines/cql-keyvalue tags=phase:rampup cycles=10 -``` - -You should see 10 of the following statements in your console -``` -insert into baselines.keyvalue (key, value) values (0,382062539); -insert into baselines.keyvalue (key, value) values (1,774912474); -insert into baselines.keyvalue (key, value) values (2,949364593); -insert into baselines.keyvalue (key, value) values (3,352527683); -insert into baselines.keyvalue (key, value) values (4,351686621); -insert into baselines.keyvalue (key, value) values (5,114304900); -insert into baselines.keyvalue (key, value) values (6,439790106); -insert into baselines.keyvalue (key, value) values (7,564330072); -insert into baselines.keyvalue (key, value) values (8,296173906); -insert into baselines.keyvalue (key, value) values (9,97405552); -``` -One thing to know is that DSBench deterministically generates data, so the generated values will be the same from run to run. - -Now we are ready to write some data to our database. Go ahead and execute the following from your command line. -``` -./dsbench start type=cql yaml=baselines/cql-keyvalue tags=phase:rampup host= cycles=100k --progress console:1s -``` - -Note the differences between this and the command that we used to generate the schema. - -- `tags=phase:rampup` is running the yaml block in `baselines/cql-keyvalue` that has only INSERT statements. -- `cycles=100k` will run a total of 100,000 operations, in this case, 100,000 writes. -- `--progress console:1s` will print the progression of the run to the console every 1 second. 
- -You should see output that looks like this -``` -baselines/cql-keyvalue: 0.00%/Running (details: min=0 cycle=1 max=100000) -baselines/cql-keyvalue: 0.00%/Running (details: min=0 cycle=1 max=100000) -baselines/cql-keyvalue: 0.32%/Running (details: min=0 cycle=325 max=100000) -baselines/cql-keyvalue: 1.17%/Running (details: min=0 cycle=1171 max=100000) -baselines/cql-keyvalue: 2.36%/Running (details: min=0 cycle=2360 max=100000) -baselines/cql-keyvalue: 3.65%/Running (details: min=0 cycle=3648 max=100000) -baselines/cql-keyvalue: 4.61%/Running (details: min=0 cycle=4613 max=100000) -baselines/cql-keyvalue: 5.59%/Running (details: min=0 cycle=5593 max=100000) -baselines/cql-keyvalue: 7.14%/Running (details: min=0 cycle=7138 max=100000) -baselines/cql-keyvalue: 8.87%/Running (details: min=0 cycle=8868 max=100000) -... -baselines/cql-keyvalue: 100.00%/Finished (details: min=0 cycle=100000 max=100000) -``` - -Before moving on, confirm that these rows were inserted using the DataStax Bulk Loader. - -Install DSBulk -``` -curl -O -L "https://downloads.datastax.com/dsbulk/dsbulk.tar.gz" -tar zxf dsbulk.tar.gz -``` -Count -``` -dsbulk count -k baselines -t keyvalue -h -``` -You should see output that reflects 100k rows were written -``` - total | failed | rows/s | p50ms | p99ms | p999ms -100,000 | 0 | 47,271 | 138.33 | 318.77 | 318.77 -``` -#### 3. Run read / write workload - -Now that we have a base dataset of 100k rows in the database, we will now run a mixed read / write workload, by default this runs a 50% read / 50% write workload. - -``` -./dsbench start type=cql yaml=baselines/cql-keyvalue tags=phase:main host= cycles=100k cyclerate=5000 threads=50 --progress console:1s -``` - -You should see output that looks like this. 
-``` -Logging to logs/scenario_20190812_154431_028.log -baselines/cql-keyvalue: 0.50%/Running (details: min=0 cycle=500 max=100000) -baselines/cql-keyvalue: 2.50%/Running (details: min=0 cycle=2500 max=100000) -baselines/cql-keyvalue: 6.70%/Running (details: min=0 cycle=6700 max=100000) -baselines/cql-keyvalue: 11.16%/Running (details: min=0 cycle=11160 max=100000) -baselines/cql-keyvalue: 14.25%/Running (details: min=0 cycle=14250 max=100000) -baselines/cql-keyvalue: 18.41%/Running (details: min=0 cycle=18440 max=100000) -baselines/cql-keyvalue: 22.76%/Running (details: min=0 cycle=22760 max=100000) -baselines/cql-keyvalue: 27.27%/Running (details: min=0 cycle=27300 max=100000) -baselines/cql-keyvalue: 31.81%/Running (details: min=0 cycle=31810 max=100000) -baselines/cql-keyvalue: 36.34%/Running (details: min=0 cycle=36340 max=100000) -baselines/cql-keyvalue: 40.90%/Running (details: min=0 cycle=40900 max=100000) -baselines/cql-keyvalue: 45.48%/Running (details: min=0 cycle=45480 max=100000) -baselines/cql-keyvalue: 50.05%/Running (details: min=0 cycle=50050 max=100000) -baselines/cql-keyvalue: 54.36%/Running (details: min=0 cycle=54360 max=100000) -baselines/cql-keyvalue: 58.91%/Running (details: min=0 cycle=58920 max=100000) -baselines/cql-keyvalue: 63.40%/Running (details: min=0 cycle=63400 max=100000) -baselines/cql-keyvalue: 66.96%/Running (details: min=0 cycle=66970 max=100000) -baselines/cql-keyvalue: 71.61%/Running (details: min=0 cycle=71610 max=100000) -baselines/cql-keyvalue: 76.11%/Running (details: min=0 cycle=76130 max=100000) -baselines/cql-keyvalue: 80.66%/Running (details: min=0 cycle=80660 max=100000) -baselines/cql-keyvalue: 85.22%/Running (details: min=0 cycle=85220 max=100000) -baselines/cql-keyvalue: 89.80%/Running (details: min=0 cycle=89800 max=100000) -baselines/cql-keyvalue: 94.46%/Running (details: min=0 cycle=94460 max=100000) -baselines/cql-keyvalue: 98.93%/Running (details: min=0 cycle=98930 max=100000) -baselines/cql-keyvalue: 
100.00%/Finished (details: min=0 cycle=100000 max=100000) -``` - -We have a few new command line options here. - -- `tags=phase:main` is using a new block in our activity's yaml that contains both read and write queries. -- `threads=50` is an important one. The default for DSBench is to run with a single thread. -This is not adequate for workloads that will be running many operations, -so threads is used as a way to increase concurrency on the client side. -- `cyclerate=5000` is used to control the operations per second that are dispatched by DSBench. -This command line option is the primary means to rate limit the workload and here we are running at 5000 ops/sec. - -#### 4. Viewing Results - -Note in the above output, we see `Logging to logs/scenario_20190812_154431_028.log`. -By default DSBench records the metrics from the run in this file, we will go into detail about these metrics in the next section Viewing Results. \ No newline at end of file diff --git a/docsys/src/main/resources/docsys-testmarkdown/getting-started/GettingStartedViewingResults.md b/docsys/src/main/resources/docsys-testmarkdown/getting-started/GettingStartedViewingResults.md deleted file mode 100644 index 1f74ff296..000000000 --- a/docsys/src/main/resources/docsys-testmarkdown/getting-started/GettingStartedViewingResults.md +++ /dev/null @@ -1,77 +0,0 @@ -# Viewing Results -Coming off of our first run with DSBench, we ran a very simple workload against our database. -In that example, we saw that DSBench writes to a log file and it is in that log file where the most -basic form of metrics are displayed. - -### Log File Metrics - -For our previous run, we saw that DSBench was writing to `logs/scenario_20190812_154431_028.log` - -Below is the full section in that log that gives us our basic metrics. There is a lot to digest here, -for now we will only focus a subset of the most important metrics. 
-``` -2019-08-12 15:46:00,274 INFO [main] i.e.c.ScenarioResult [ScenarioResult.java:48] -- BEGIN METRICS DETAIL -- -2019-08-12 15:46:00,294 INFO [main] i.e.c.ScenarioResult [Slf4jReporter.java:373] type=GAUGE, name=baselines/cql-keyvalue.cycles.config.burstrate, value=5500.0 -2019-08-12 15:46:00,295 INFO [main] i.e.c.ScenarioResult [Slf4jReporter.java:373] type=GAUGE, name=baselines/cql-keyvalue.cycles.config.cyclerate, value=5000.0 -2019-08-12 15:46:00,295 INFO [main] i.e.c.ScenarioResult [Slf4jReporter.java:373] type=GAUGE, name=baselines/cql-keyvalue.cycles.waittime, value=3898782735 -2019-08-12 15:46:00,298 INFO [main] i.e.c.ScenarioResult [Slf4jReporter.java:373] type=HISTOGRAM, name=baselines/cql-keyvalue.resultset-size, count=100000, min=0, max=1, mean=8.0E-5, stddev=0.008943914131967056, median=0.0, p75=0.0, p95=0.0, p98=0.0, p99=0.0, p999=0.0 -2019-08-12 15:46:00,340 INFO [main] i.e.c.ScenarioResult [Slf4jReporter.java:373] type=HISTOGRAM, name=baselines/cql-keyvalue.skipped-tokens, count=0, min=0, max=0, mean=0.0, stddev=0.0, median=0.0, p75=0.0, p95=0.0, p98=0.0, p99=0.0, p999=0.0 -2019-08-12 15:46:00,341 INFO [main] i.e.c.ScenarioResult [Slf4jReporter.java:373] type=HISTOGRAM, name=baselines/cql-keyvalue.tries, count=100000, min=1, max=1, mean=1.0, stddev=0.0, median=1.0, p75=1.0, p95=1.0, p98=1.0, p99=1.0, p999=1.0 -2019-08-12 15:46:00,341 INFO [main] i.e.c.ScenarioResult [Slf4jReporter.java:373] type=METER, name=baselines/cql-keyvalue.rows, count=8, mean_rate=0.33513484972659807, m1=0.36684141626782935, m5=0.39333484605698305, m15=0.3977778345542248, rate_unit=events/second -2019-08-12 15:46:00,589 INFO [main] i.e.c.ScenarioResult [Slf4jReporter.java:373] type=TIMER, name=baselines/cql-keyvalue.bind, count=100000, min=1.582, max=23439.359, mean=12.56832522, stddev=341.12259029628433, median=3.969, p75=5.733, p95=14.857, p98=25.578, p99=35.727, p999=97.487, mean_rate=4142.45514275983, m1=3508.0300578687047, m5=3299.8619559559247, 
m15=3260.8242490944554, rate_unit=events/second, duration_unit=microseconds -2019-08-12 15:46:00,826 INFO [main] i.e.c.ScenarioResult [Slf4jReporter.java:373] type=TIMER, name=baselines/cql-keyvalue.cycles.servicetime, count=100000, min=348012.544, max=3849846.783, mean=2565961.07337728, stddev=796189.5358718627, median=2535587.839, p75=3299737.599, p95=3665297.407, p98=3743154.175, p99=3759669.247, p999=3807510.527, mean_rate=4133.36694607174, m1=3637.62940362701, m5=3458.3041653186974, m15=3424.659562378474, rate_unit=events/second, duration_unit=microseconds -2019-08-12 15:46:00,935 INFO [main] i.e.c.ScenarioResult [Slf4jReporter.java:373] type=TIMER, name=baselines/cql-keyvalue.execute, count=100000, min=3.486, max=12572.159, mean=21.37352134, stddev=147.5872262658514, median=12.455, p75=21.65, p95=45.701, p98=69.079, p99=105.123, p999=695.103, mean_rate=4084.755592762048, m1=3511.6350453271425, m5=3304.558546576714, m15=3265.7216557117335, rate_unit=events/second, duration_unit=microseconds -2019-08-12 15:46:00,943 INFO [main] i.e.c.ScenarioResult [Slf4jReporter.java:373] type=TIMER, name=baselines/cql-keyvalue.pages, count=0, min=0.0, max=0.0, mean=0.0, stddev=0.0, median=0.0, p75=0.0, p95=0.0, p98=0.0, p99=0.0, p999=0.0, mean_rate=0.0, m1=0.0, m5=0.0, m15=0.0, rate_unit=events/second, duration_unit=microseconds -2019-08-12 15:46:01,090 INFO [main] i.e.c.ScenarioResult [Slf4jReporter.java:373] type=TIMER, name=baselines/cql-keyvalue.phases.servicetime, count=100000, min=444.0, max=381059.071, mean=3798.41135488, stddev=10790.998109403186, median=1899.647, p75=3679.999, p95=10174.975, p98=15896.575, p99=24294.399, p999=136609.791, mean_rate=4089.258228031301, m1=3638.711481830029, m5=3459.480755773593, m15=3425.8517756084334, rate_unit=events/second, duration_unit=microseconds -2019-08-12 15:46:01,171 INFO [main] i.e.c.ScenarioResult [Slf4jReporter.java:373] type=TIMER, name=baselines/cql-keyvalue.read_input, count=10050, min=0.1, max=39045.119, 
mean=35.44877721393035, stddev=945.8304421638578, median=0.874, p75=1.039, p95=2.036, p98=4.114, p99=11.585, p999=16249.343, mean_rate=409.40825761884753, m1=367.86916182353934, m5=350.83483186356915, m15=347.63927014428833, rate_unit=events/second, duration_unit=microseconds -2019-08-12 15:46:01,310 INFO [main] i.e.c.ScenarioResult [Slf4jReporter.java:373] type=TIMER, name=baselines/cql-keyvalue.result, count=100000, min=233.48, max=358596.607, mean=3732.00338612, stddev=10254.850416061185, median=1874.815, p75=3648.767, p95=10115.071, p98=15855.615, p99=23916.543, p999=111292.415, mean_rate=4024.0234405430424, m1=3514.053841156124, m5=3307.431472596865, m15=3268.6786509004132, rate_unit=events/second, duration_unit=microseconds -2019-08-12 15:46:01,452 INFO [main] i.e.c.ScenarioResult [Slf4jReporter.java:373] type=TIMER, name=baselines/cql-keyvalue.result-success, count=100000, min=435.168, max=358645.759, mean=3752.40990808, stddev=10251.524945886964, median=1889.791, p75=3668.479, p95=10154.495, p98=15884.287, p99=24280.063, p999=111443.967, mean_rate=4003.3090048756894, m1=3523.40328629036, m5=3318.8463896065778, m15=3280.480326762243, rate_unit=events/second, duration_unit=microseconds -2019-08-12 15:46:01,460 INFO [main] i.e.c.ScenarioResult [Slf4jReporter.java:373] type=TIMER, name=baselines/cql-keyvalue.retry-delay, count=0, min=0.0, max=0.0, mean=0.0, stddev=0.0, median=0.0, p75=0.0, p95=0.0, p98=0.0, p99=0.0, p999=0.0, mean_rate=0.0, m1=0.0, m5=0.0, m15=0.0, rate_unit=events/second, duration_unit=microseconds -2019-08-12 15:46:01,605 INFO [main] i.e.c.ScenarioResult [Slf4jReporter.java:373] type=TIMER, name=baselines/cql-keyvalue.strides.servicetime, count=10000, min=10772.48, max=627572.735, mean=117668.1484032, stddev=72863.5379858271, median=106024.959, p75=144965.631, p95=236265.471, p98=304971.775, p99=450625.535, p999=613449.727, mean_rate=399.8614604956103, m1=355.11611665744687, m5=337.61623765618054, m15=334.0569514490176, 
rate_unit=events/second, duration_unit=microseconds -2019-08-12 15:46:01,702 INFO [main] i.e.c.ScenarioResult [Slf4jReporter.java:373] type=TIMER, name=baselines/cql-keyvalue.tokenfiller, count=22208, min=1011.328, max=71806.975, mean=1135.442975051788, stddev=784.3016082392345, median=1076.799, p75=1087.807, p95=1207.103, p98=1454.015, p99=2429.567, p999=8490.495, mean_rate=879.7500537541893, m1=833.0176653507624, m5=814.1230871081734, m15=810.3570012336148, rate_unit=events/second, duration_unit=microseconds -2019-08-12 15:46:01,703 INFO [main] i.e.c.ScenarioResult [ScenarioResult.java:56] -- END METRICS DETAIL -- -``` - -#### results -This is the primary metric that should be used to get a quick idea of the throughput and latency for a given run. It encapsulates the entire operation life cycle ( ie. bind, execute, get result back ). - -For this example we see that we averaged 3732 operations / second with 3.6ms 75th percentile latency and 23.9ms 99th percentile latency. Note the raw metrics are in microseconds. - -``` -2019-08-12 15:46:01,310 INFO [main] i.e.c.ScenarioResult [Slf4jReporter.java:373] type=TIMER, name=baselines/cql-keyvalue.result, count=100000, min=233.48, max=358596.607, mean=3732.00338612, stddev=10254.850416061185, median=1874.815, p75=3648.767, p95=10115.071, p98=15855.615, p99=23916.543, p999=111292.415, mean_rate=4024.0234405430424, m1=3514.053841156124, m5=3307.431472596865, m15=3268.6786509004132, rate_unit=events/second, duration_unit=microseconds -``` - -#### result-success -This metric shows whether there were any errors during the run. You should confirm that the count is equal to the number of cycles for the run. - -Here we see that all 100k of our cycles succeeded. Note that the metrics for throughput and latency here are slightly different than the `results` metric simply because this is a separate timer around success only results. 
- -``` -2019-08-12 15:46:01,452 INFO [main] i.e.c.ScenarioResult [Slf4jReporter.java:373] type=TIMER, name=baselines/cql-keyvalue.result-success, count=100000, min=435.168, max=358645.759, mean=3752.40990808, stddev=10251.524945886964, median=1889.791, p75=3668.479, p95=10154.495, p98=15884.287, p99=24280.063, p999=111443.967, mean_rate=4003.3090048756894, m1=3523.40328629036, m5=3318.8463896065778, m15=3280.480326762243, rate_unit=events/second, duration_unit=microseconds -``` - -#### resultset-size -For read workloads, this metric shows the size of result sent back to DSBench from the server. This is useful to confirm that you are reading rows that already exist in the database. - -TODO: talk about mix of read / writes and how that affects this metric -``` -2019-08-12 15:46:00,298 INFO [main] i.e.c.ScenarioResult [Slf4jReporter.java:373] type=HISTOGRAM, name=baselines/cql-keyvalue.resultset-size, count=100000, min=0, max=1, mean=8.0E-5, stddev=0.008943914131967056, median=0.0, p75=0.0, p95=0.0, p98=0.0, p99=0.0, p999=0.0 -``` - -#### tries -DSBench will retry failures 10 times by default, this is configurable via the `maxtries` command line option < link >. This metric shows a histogram of the number of tries that each operation required, in this example, there were no retries as the `count` is 100k. -``` -2019-08-12 15:46:00,341 INFO [main] i.e.c.ScenarioResult [Slf4jReporter.java:373] type=HISTOGRAM, name=baselines/cql-keyvalue.tries, count=100000, min=1, max=1, mean=1.0, stddev=0.0, median=1.0, p75=1.0, p95=1.0, p98=1.0, p99=1.0, p999=1.0 -``` - -### More Metrics - -DSBench extends many ways to report the metrics from a run. To get more information on the options, see the doc links below. -- Interpreting Metrics < link > -- Built-in Docker Dashboard < link > -- Reporting to CSV < link > -- Reporting to Graphite < link > -- Reporting to HDR < link > - -### Congratulations -You have completed your first run with DSBench! 
Let's head over to the Next Steps section < link > to talk about the possibilities that are now at our fingertips. \ No newline at end of file diff --git a/engine-core/src/main/java/io/nosqlbench/engine/core/UserException.java b/engine-api/src/main/java/io/nosqlbench/engine/api/exceptions/BasicError.java similarity index 67% rename from engine-core/src/main/java/io/nosqlbench/engine/core/UserException.java rename to engine-api/src/main/java/io/nosqlbench/engine/api/exceptions/BasicError.java index 1eeeb6039..eb0066e75 100644 --- a/engine-core/src/main/java/io/nosqlbench/engine/core/UserException.java +++ b/engine-api/src/main/java/io/nosqlbench/engine/api/exceptions/BasicError.java @@ -1,12 +1,12 @@ -package io.nosqlbench.engine.core; +package io.nosqlbench.engine.api.exceptions; /** * User exceptions are errors for which we know how to explain the cause to the user. * For these, we should not need to log or report stack traces to any channel, as * the cause of and thus the remedy for the error should be very obvious. 
*/ -public class UserException extends RuntimeException { - public UserException(String exception) { +public class BasicError extends RuntimeException { + public BasicError(String exception) { super(exception); } } diff --git a/engine-docs/pom.xml b/engine-docs/pom.xml index 7015e27bc..bff42ef63 100644 --- a/engine-docs/pom.xml +++ b/engine-docs/pom.xml @@ -31,6 +31,12 @@ virtdata-docsys 3.12.2-SNAPSHOT + + io.nosqlbench + virtdata-docsys + 3.12.2-SNAPSHOT + compile + diff --git a/engine-docs/src/main/java/io/nosqlbench/engine/docs/NBMarkdownManifest.java b/engine-docs/src/main/java/io/nosqlbench/engine/docs/NBMarkdownManifest.java index b489ea16b..104bde69b 100644 --- a/engine-docs/src/main/java/io/nosqlbench/engine/docs/NBMarkdownManifest.java +++ b/engine-docs/src/main/java/io/nosqlbench/engine/docs/NBMarkdownManifest.java @@ -1,9 +1,9 @@ package io.nosqlbench.engine.docs; -import io.nosqlbench.virtdata.annotations.Service; import io.nosqlbench.docsys.api.Docs; import io.nosqlbench.docsys.api.DocsBinder; import io.nosqlbench.docsys.api.DocsysDynamicManifest; +import io.nosqlbench.virtdata.annotations.Service; @Service(DocsysDynamicManifest.class) public class NBMarkdownManifest implements DocsysDynamicManifest { diff --git a/nb/appimage/skel/eb.desktop b/nb/appimage/skel/nb.desktop similarity index 74% rename from nb/appimage/skel/eb.desktop rename to nb/appimage/skel/nb.desktop index 0097a398a..a99914dbb 100644 --- a/nb/appimage/skel/eb.desktop +++ b/nb/appimage/skel/nb.desktop @@ -1,12 +1,12 @@ [Desktop Entry] Name=nosqlbench -Exec=eb %F -Icon=eb +Exec=nb %F +Icon=nb Type=Application Categories=Development;Network;Utility;Science; -Comment=nosqlbench Core Module +Comment=nosqlbench MimeType=text/yaml;application/yaml; -Name[en]=eb +Name[nb]=nb Terminal=false StartupNotify=true NoDisplay=false diff --git a/nb/appimage/skel/eb.png b/nb/appimage/skel/nb.png similarity index 100% rename from nb/appimage/skel/eb.png rename to nb/appimage/skel/nb.png diff --git 
a/nb/appimage/skel/usr/bin/eb b/nb/appimage/skel/usr/bin/nb similarity index 100% rename from nb/appimage/skel/usr/bin/eb rename to nb/appimage/skel/usr/bin/nb diff --git a/pom.xml b/pom.xml index 82539362f..db04fa45b 100644 --- a/pom.xml +++ b/pom.xml @@ -72,6 +72,17 @@ + + cqlmodules + + true + + + activitytype-cql + activitytype-cqlverify + + + virtdatamodules diff --git a/virtdata-userlibs/pom.xml b/virtdata-userlibs/pom.xml index 9de22bc4d..14304228b 100644 --- a/virtdata-userlibs/pom.xml +++ b/virtdata-userlibs/pom.xml @@ -75,6 +75,12 @@ 1.0.0m1 test + + io.nosqlbench + virtdata-docsys + 3.12.2-SNAPSHOT + compile +