introduced cql activity type and cleaned up package names

Jonathan Shook 2020-03-02 11:46:34 -06:00
parent 17d16bc7d1
commit f1ca10eb2c
142 changed files with 7899 additions and 446 deletions

activitytype-cql/pom.xml Normal file

@@ -0,0 +1,140 @@
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>io.nosqlbench</groupId>
<artifactId>mvn-defaults</artifactId>
<version>3.12.2-SNAPSHOT</version>
<relativePath>../mvn-defaults</relativePath>
</parent>
<artifactId>at-cql</artifactId>
<packaging>jar</packaging>
<name>${project.artifactId}</name>
<description>
A CQL ActivityType driver for http://nosqlbench.io/
</description>
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<dse-driver-version>1.9.0</dse-driver-version>
</properties>
<dependencies>
<!-- core dependencies -->
<dependency>
<groupId>io.nosqlbench</groupId>
<artifactId>engine-api</artifactId>
<version>3.12.2-SNAPSHOT</version>
</dependency>
<!-- <dependency>-->
<!-- <groupId>com.datastax.labs</groupId>-->
<!-- <artifactId>dsbench-virtdata</artifactId>-->
<!-- <version>3.12.1-SNAPSHOT</version>-->
<!-- <scope>compile</scope>-->
<!-- </dependency>-->
<!-- <dependency>-->
<!-- <groupId>com.datastax.labs</groupId>-->
<!-- <artifactId>dsbench-udts</artifactId>-->
<!-- <version>3.12.1-SNAPSHOT</version>-->
<!-- <scope>compile</scope>-->
<!-- </dependency>-->
<dependency>
<groupId>com.datastax.dse</groupId>
<artifactId>dse-java-driver-core</artifactId>
<version>${dse-driver-version}</version>
</dependency>
<dependency>
<groupId>com.datastax.dse</groupId>
<artifactId>dse-java-driver-extras</artifactId>
<version>${dse-driver-version}</version>
</dependency>
<dependency>
<groupId>com.datastax.dse</groupId>
<artifactId>dse-java-driver-mapping</artifactId>
<version>${dse-driver-version}</version>
</dependency>
<!-- For CQL compression option -->
<dependency>
<groupId>org.lz4</groupId>
<artifactId>lz4-java</artifactId>
<version>1.4.1</version>
</dependency>
<!-- For CQL compression option -->
<dependency>
<groupId>org.xerial.snappy</groupId>
<artifactId>snappy-java</artifactId>
<version>1.1.2.6</version>
</dependency>
<!-- <dependency>-->
<!-- <groupId>io.netty</groupId>-->
<!-- <artifactId>netty-transport-native-epoll</artifactId>-->
<!-- <version>4.1.36.Final</version>-->
<!-- <classifier>linux-x86_64</classifier>-->
<!-- </dependency>-->
<!-- <dependency>-->
<!-- <groupId>org.yaml</groupId>-->
<!-- <artifactId>snakeyaml</artifactId>-->
<!-- <version>1.23</version>-->
<!-- </dependency>-->
<!-- <dependency>-->
<!-- <groupId>org.apache.commons</groupId>-->
<!-- <artifactId>commons-lang3</artifactId>-->
<!-- <version>3.7</version>-->
<!-- </dependency>-->
<!-- <dependency>-->
<!-- <groupId>org.slf4j</groupId>-->
<!-- <artifactId>slf4j-api</artifactId>-->
<!-- <version>1.7.25</version>-->
<!-- </dependency>-->
<!-- test only scope -->
<dependency>
<groupId>org.testng</groupId>
<artifactId>testng</artifactId>
<version>6.13.1</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.assertj</groupId>
<artifactId>assertj-core-java8</artifactId>
<version>1.0.0m1</version>
<scope>test</scope>
</dependency>
</dependencies>
<profiles>
<profile>
<id>shade</id>
<activation>
<activeByDefault>true</activeByDefault>
</activation>
<build>
<plugins>
<plugin>
<artifactId>maven-shade-plugin</artifactId>
<configuration>
<finalName>${project.artifactId}</finalName>
</configuration>
</plugin>
</plugins>
</build>
</profile>
</profiles>
</project>


@@ -0,0 +1,47 @@
package com.datastax.driver.core;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;
import java.util.OptionalLong;
import java.util.Set;
public class M3PTokenFilter {
private final TokenRange[] ranges;
private final ProtocolVersion protocolVersion;
private final CodecRegistry codecRegistry;
private final Metadata clusterMetadata;
private final Token.Factory factory;
public M3PTokenFilter(Set<TokenRange> ranges, Cluster cluster) {
protocolVersion = cluster.getConfiguration().getProtocolOptions().getProtocolVersion();
codecRegistry = cluster.getConfiguration().getCodecRegistry();
clusterMetadata = cluster.getMetadata();
factory = Token.getFactory(clusterMetadata.partitioner);
List<TokenRange> rangeList = new ArrayList<>();
for (TokenRange range : ranges) {
if (!range.getStart().getType().equals(DataType.bigint())) {
throw new RuntimeException("This filter only works with bigint valued token types");
}
rangeList.add(range);
}
this.ranges=rangeList.toArray(new TokenRange[0]);
if (this.ranges.length<1) {
throw new RuntimeException("There were no tokens found. Please check your keyspace and cluster settings.");
}
}
public OptionalLong matches(Statement statement) {
ByteBuffer routingKey = statement.getRoutingKey(protocolVersion, codecRegistry);
Token token = factory.hash(routingKey);
for (TokenRange range : ranges) {
if (range.contains(token)) {
return OptionalLong.of((long)token.getValue());
}
}
return OptionalLong.empty();
}
}
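
A usage sketch for review, assuming an already-built Cluster named cluster and a Statement named stmt with a routing key (both hypothetical; TokenRangeUtil below supplies the ranges):

// Sketch only: check whether a statement's routing key hashes into a set of token ranges.
Set<TokenRange> ranges = new TokenRangeUtil(cluster).getTokenRangesFor("mykeyspace", "0");
M3PTokenFilter filter = new M3PTokenFilter(ranges, cluster);
OptionalLong token = filter.matches(stmt); // empty when the statement routes elsewhere
token.ifPresent(t -> System.out.println("in range, token=" + t));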


@@ -0,0 +1,60 @@
package com.datastax.driver.core;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.api.StatementFilter;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;
public class TokenRangeStmtFilter implements StatementFilter {
private final Metadata clusterMetadata;
private final ProtocolVersion protocolVersion;
private final CodecRegistry codecRegistry;
private final Token.Factory factory;
private TokenRange[] ranges;
public TokenRangeStmtFilter(Cluster cluster, String rangesSpec) {
clusterMetadata = cluster.getMetadata();
protocolVersion = cluster.getConfiguration().getProtocolOptions().getProtocolVersion();
codecRegistry = cluster.getConfiguration().getCodecRegistry();
factory = Token.getFactory(clusterMetadata.partitioner);
ranges = parseRanges(factory, rangesSpec);
}
private TokenRange[] parseRanges(Token.Factory factory, String rangesStr) {
String[] ranges = rangesStr.split(",");
List<TokenRange> tr = new ArrayList<>();
for (String range : ranges) {
String[] interval = range.split(":");
Token start = factory.fromString(interval[0]);
Token end = factory.fromString(interval[1]);
TokenRange tokenRange = new TokenRange(start, end, factory);
tr.add(tokenRange);
}
return tr.toArray(new TokenRange[tr.size()]);
}
@Override
public boolean matches(Statement statement) {
ByteBuffer routingKey = statement.getRoutingKey(protocolVersion, codecRegistry);
Token token = factory.hash(routingKey);
for (TokenRange range : ranges) {
if (range.contains(token)) {
return true;
}
}
return false;
}
@Override
public String toString() {
return "including token ranges: " +
Arrays.stream(ranges)
.map(String::valueOf)
.collect(Collectors.joining(","));
}
}
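
As parseRanges implies, the ranges spec is a comma-separated list of start:end token pairs. A hedged sketch with illustrative murmur3 token values, again assuming cluster and stmt are in scope:

// Sketch only: include statements routed into two explicit token ranges.
StatementFilter filter = new TokenRangeStmtFilter(cluster, "-9223372036854775808:-1,0:9223372036854775807");
if (filter.matches(stmt)) {
// execute the statement; otherwise skip it for this run
}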


@@ -0,0 +1,71 @@
package com.datastax.driver.core;
import java.io.BufferedWriter;
import java.io.FileWriter;
import java.io.IOException;
import java.util.Comparator;
import java.util.Set;
public class TokenRangeUtil {
private final Metadata clusterMetadata;
private final ProtocolVersion protocolVersion;
private final CodecRegistry codecRegistry;
private final Token.Factory factory;
private final Cluster cluster;
public TokenRangeUtil(Cluster cluster) {
this.cluster= cluster;
clusterMetadata = cluster.getMetadata();
protocolVersion = cluster.getConfiguration().getProtocolOptions().getProtocolVersion();
codecRegistry = cluster.getConfiguration().getCodecRegistry();
factory = Token.getFactory(clusterMetadata.partitioner);
}
public Set<TokenRange> getTokenRangesFor(String keyspace, String hostaddress) {
Host host=null;
if (hostaddress.matches("\\d+")) {
int hostenum = Integer.parseInt(hostaddress);
host = clusterMetadata.getAllHosts().stream()
.sorted(Comparator.comparing(h -> h.getAddress().toString()))
.skip(hostenum)
.findFirst()
.orElseThrow();
} else if (!hostaddress.isEmpty()) {
host = clusterMetadata.getAllHosts().stream()
.filter(h -> h.getAddress().toString().replaceAll("/","").equals(hostaddress))
.findFirst()
.orElseThrow();
} else {
throw new RuntimeException("You must specify a host enum in order or a host address.");
}
return clusterMetadata.getTokenRanges(keyspace,host);
}
public void printRanges(String tokensks) {
Set<Host> hosts = clusterMetadata.getAllHosts();
for (Host host : hosts) {
String address = host.getAddress().toString().substring(1);
BufferedWriter writer = null;
try {
writer = new BufferedWriter(new FileWriter("ranges-"+address));
String ranges = getTokenRangesFor(tokensks, address).toString();
writer.write(ranges);
writer.close();
} catch (IOException e) {
e.printStackTrace();
throw new RuntimeException("Can't write token range files");
}
}
}
public M3PTokenFilter getFilterFor(Set<TokenRange> ranges) {
return new M3PTokenFilter(ranges, this.cluster);
}
}
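
A sketch of the intended flow, assuming a connected Cluster named cluster; the output file names follow the ranges-<address> pattern hard-coded in printRanges:

TokenRangeUtil util = new TokenRangeUtil(cluster);
util.printRanges("mykeyspace"); // writes one ranges-<address> file per host
Set<TokenRange> forHost0 = util.getTokenRangesFor("mykeyspace", "0"); // host selected by sorted index
M3PTokenFilter filter = util.getFilterFor(forHost0);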


@@ -0,0 +1,12 @@
package io.nosqlbench.activitytype.cql.codecsupport;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.TYPE)
public @interface CQLUserTypeNames {
String[] value();
}


@@ -0,0 +1,12 @@
package io.nosqlbench.activitytype.cql.codecsupport;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.TYPE)
public @interface UDTCodecClasses {
Class<? extends UDTTransformCodec>[] value();
}


@@ -0,0 +1,33 @@
package io.nosqlbench.activitytype.cql.codecsupport;
import com.datastax.driver.core.CodecRegistry;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.UserType;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.List;
import java.util.ServiceLoader;
public class UDTCodecInjector {
private final static Logger logger = LoggerFactory.getLogger(UDTCodecInjector.class);
private List<UserCodecProvider> codecProviders = new ArrayList<>();
private List<UserType> userTypes = new ArrayList<>();
public void injectUserProvidedCodecs(Session session, boolean allowAcrossKeyspaces) {
CodecRegistry registry = session.getCluster().getConfiguration().getCodecRegistry();
ServiceLoader<UserCodecProvider> codecLoader = ServiceLoader.load(UserCodecProvider.class);
for (UserCodecProvider userCodecProvider : codecLoader) {
codecProviders.add(userCodecProvider);
}
for (UserCodecProvider codecProvider : codecProviders) {
codecProvider.registerCodecsForCluster(session, allowAcrossKeyspaces);
}
}
}


@@ -0,0 +1,12 @@
package io.nosqlbench.activitytype.cql.codecsupport;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.TYPE)
public @interface UDTJavaType {
Class<?> value();
}


@@ -0,0 +1,22 @@
package io.nosqlbench.activitytype.cql.codecsupport;
import com.datastax.driver.core.TypeCodec;
import com.datastax.driver.core.UDTValue;
import com.datastax.driver.core.UserType;
import com.datastax.driver.extras.codecs.MappingCodec;
public abstract class UDTTransformCodec<T> extends MappingCodec<T,UDTValue> {
protected UserType userType;
public UDTTransformCodec(UserType userType, Class<T> javaType) {
super(TypeCodec.userType(userType), javaType);
this.userType = userType;
}
public UserType getUserType() {
return userType;
}
}


@@ -0,0 +1,138 @@
package io.nosqlbench.activitytype.cql.codecsupport;
import com.datastax.driver.core.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.lang.reflect.Constructor;
import java.util.*;
import java.util.stream.Collectors;
public abstract class UserCodecProvider {
private final static Logger logger = LoggerFactory.getLogger(UserCodecProvider.class);
public List<UDTTransformCodec> registerCodecsForCluster(
Session session,
boolean allowAcrossKeyspaces
) {
List<UDTTransformCodec> typeCodecs = new ArrayList<>();
List<KeyspaceMetadata> ksMetas = new ArrayList<>(session.getCluster().getMetadata().getKeyspaces());
for (KeyspaceMetadata keyspace : ksMetas) {
List<UDTTransformCodec> keyspaceCodecs = registerCodecsForKeyspace(session, keyspace.getName());
for (UDTTransformCodec typeCodec : keyspaceCodecs) {
if (typeCodecs.contains(typeCodec) && !allowAcrossKeyspaces) {
throw new RuntimeException("codec " + typeCodec + " could be registered" +
"in multiple keyspaces, but this is not allowed.");
}
typeCodecs.add(typeCodec);
logger.debug("Found user-provided codec for ks:" + keyspace + ", udt:" + typeCodec);
}
}
return typeCodecs;
}
public List<UDTTransformCodec> registerCodecsForKeyspace(Session session, String keyspace) {
CodecRegistry registry = session.getCluster().getConfiguration().getCodecRegistry();
List<UDTTransformCodec> codecsForKeyspace = new ArrayList<>();
KeyspaceMetadata ksMeta = session.getCluster().getMetadata().getKeyspace(keyspace);
if (ksMeta==null) {
logger.warn("No metadata for " + keyspace);
return Collections.emptyList();
}
Collection<UserType> typesInKeyspace = ksMeta.getUserTypes();
List<Class<? extends UDTTransformCodec>> providedCodecClasses = getUDTCodecClasses();
Map<UserType, Class<? extends UDTTransformCodec>> codecMap = new HashMap<>();
for (Class<? extends UDTTransformCodec> udtCodecClass : providedCodecClasses) {
List<String> targetUDTTypes = getUDTTypeNames(udtCodecClass);
for (UserType keyspaceUserType : typesInKeyspace) {
String ksTypeName = keyspaceUserType.getTypeName();
String globalTypeName = (ksTypeName.contains(".") ? ksTypeName.split("\\.",2)[1] : ksTypeName);
if (targetUDTTypes.contains(ksTypeName) || targetUDTTypes.contains(globalTypeName)) {
codecMap.put(keyspaceUserType, udtCodecClass);
}
}
}
for (UserType userType : codecMap.keySet()) {
Class<? extends UDTTransformCodec> codecClass = codecMap.get(userType);
Class<?> udtJavaType = getUDTJavaType(codecClass);
UDTTransformCodec udtCodec = instantiate(userType, codecClass, udtJavaType);
codecsForKeyspace.add(udtCodec);
registry.register(udtCodec);
logger.info("registered codec:" + udtCodec);
}
return codecsForKeyspace;
}
private UDTTransformCodec instantiate(UserType key, Class<? extends UDTTransformCodec> codecClass, Class<?> javaType) {
try {
Constructor<? extends UDTTransformCodec> ctor = codecClass.getConstructor(UserType.class, Class.class);
UDTTransformCodec typeCodec = ctor.newInstance(key, javaType);
return typeCodec;
} catch (Exception e) {
e.printStackTrace();
throw new RuntimeException(e);
}
}
private List<Class<? extends UDTTransformCodec>> getUDTCodecClasses() {
UDTCodecClasses[] annotationsByType = this.getClass().getAnnotationsByType(UDTCodecClasses.class);
List<Class<? extends UDTTransformCodec>> codecClasses = Arrays.stream(annotationsByType)
.map(UDTCodecClasses::value)
.flatMap(Arrays::stream)
.collect(Collectors.toList());
return codecClasses;
}
/**
* Allows simple annotation of implementations of this class to use
* {@code @CQLUserTypeNames({"type1","type2",...})}
*
* @param codecClass the UDTTransformCodec class which is to be inspected
* @return The list of target UDT type names, as defined in CQL
*/
private List<String> getUDTTypeNames(Class<? extends UDTTransformCodec> codecClass) {
CQLUserTypeNames[] annotationsByType = codecClass.getAnnotationsByType(CQLUserTypeNames.class);
List<String> cqlTypeNames = new ArrayList<>();
for (CQLUserTypeNames cqlUserTypeNames : annotationsByType) {
cqlTypeNames.addAll(Arrays.asList(cqlUserTypeNames.value()));
}
return cqlTypeNames;
}
/**
* Allows simple annotation of implementations of this class to use
* {@code @UDTJavaType(POJOType.class)}
*
* @param codecClass the UDTTransformCodec class which is to be inspected
* @return The class type of the POJO which this codec maps to and from
*/
private Class<?> getUDTJavaType(Class<? extends UDTTransformCodec> codecClass) {
UDTJavaType[] annotationsByType = codecClass.getAnnotationsByType(UDTJavaType.class);
Class<?> javaType = Arrays.stream(annotationsByType)
.map(UDTJavaType::value)
.findFirst()
.orElseThrow(
() -> new RuntimeException("Unable to find UDTJavaType annotation for " + codecClass.getCanonicalName())
);
return (Class<?>) javaType;
}
}
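
To illustrate the annotation contract described above, a hypothetical codec and provider could look like the sketch below. AddressPojo, the address UDT, and both class names are illustrative only; the provider is discovered via a ServiceLoader entry (META-INF/services/io.nosqlbench.activitytype.cql.codecsupport.UserCodecProvider), as UDTCodecInjector expects, and the deserialize/serialize pair assumes the MappingCodec contract from the driver's extras module.

// Hypothetical POJO for the sketch.
public class AddressPojo {
public String street;
public AddressPojo(String street) { this.street = street; }
}

@CQLUserTypeNames({"address"})
@UDTJavaType(AddressPojo.class)
public class AddressCodec extends UDTTransformCodec<AddressPojo> {
public AddressCodec(UserType userType, Class<AddressPojo> javaType) {
super(userType, javaType);
}
@Override
protected AddressPojo deserialize(UDTValue value) { // UDT -> POJO
return value == null ? null : new AddressPojo(value.getString("street"));
}
@Override
protected UDTValue serialize(AddressPojo value) { // POJO -> UDT
return value == null ? null : userType.newValue().setString("street", value.street);
}
}

// Ties the codec classes to the ServiceLoader-visible entry point.
@UDTCodecClasses({AddressCodec.class})
public class AddressCodecProvider extends UserCodecProvider {}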


@@ -0,0 +1,124 @@
package io.nosqlbench.activitytype.cql.datamappers.functions.collectionclobs;
import io.nosqlbench.virtdata.annotations.Categories;
import io.nosqlbench.virtdata.annotations.Category;
import io.nosqlbench.virtdata.annotations.Example;
import io.nosqlbench.virtdata.annotations.ThreadSafeMapper;
import java.util.function.LongFunction;
import java.util.function.LongToIntFunction;
/**
* Create a string rendering of a {@code Map<String,String>} from a long input
* based on three functions:
* the first determines the map size, the second populates
* the map with key objects, and the third populates the map with
* value objects. The long input fed to the second and third functions
* is incremented between entries. Regardless of the object type provided
* by the second and third functions, {@link Object#toString()}
* is used to determine the key and value to add to the map.
*
* To create Maps of any key and value types, simply use
* {@link java.util.Map} with
* specific key and value mapping functions.
*/
@Categories({Category.collections})
@ThreadSafeMapper
public class StringMapClob implements LongFunction<String> {
private final static ThreadLocal<StringBuilder> tl_sb = ThreadLocal.withInitial(StringBuilder::new);
private final LongToIntFunction sizeFunc;
private final LongFunction[] keyFuncs;
private final LongFunction[] valueFuncs;
private final Mode mode;
private final static String BEFORE_RESULT = "{";
private final static String AFTER_RESULT = "}";
private final static String KEY_QUOTE ="'";
private final static String VAL_QUOTE = "'";
private final static String ASSIGNMENT = ": ";
private final static String BETWEEN_ENTRIES = ", ";
@Example({"StringMap(HashRange(3,7),NumberNameToString(),HashRange(1300,1700))",
"create a map of size 3-7 entries, with a key of type " +
"string and a value of type int (Integer by autoboxing)"})
public StringMapClob(LongToIntFunction sizeFunc,
LongFunction<Object> keyFunc,
LongFunction<Object> valueFunc) {
this.mode = Mode.VarSized;
this.sizeFunc = sizeFunc;
this.keyFuncs = new LongFunction[1];
keyFuncs[0] = keyFunc;
this.valueFuncs = new LongFunction[1];
valueFuncs[0] = valueFunc;
}
@Example({"StringMapClob(NumberNameToString(),HashRange(1300,1700),NumberNameToString(),HashRange(3,7))",
"create a map of size 2, with a specific function for each key and each value"})
@SafeVarargs
public StringMapClob(LongFunction<Object>... objfuncs) {
this.mode = Mode.Tuples;
if ((objfuncs.length % 2) != 0) {
throw new RuntimeException("An even number of functions must be provided.");
}
int size = objfuncs.length / 2;
sizeFunc = (l) -> size;
keyFuncs = new LongFunction[size];
valueFuncs = new LongFunction[size];
for (int i = 0; i < size; i++) {
keyFuncs[i] = objfuncs[i << 1];
valueFuncs[i] = objfuncs[(i << 1) + 1];
}
}
@Override
public String apply(long value) {
// "{key='value',key='value'}"
StringBuilder sb = tl_sb.get();
sb.setLength(0);
sb.append(BEFORE_RESULT);
int size = sizeFunc.applyAsInt(value);
switch (mode) {
case VarSized:
for (int i = 0; i < size; i++) {
Object keyObject = keyFuncs[0].apply(value + i);
Object valueObject = valueFuncs[0].apply(value + i);
sb.append(KEY_QUOTE).append(keyObject).append(KEY_QUOTE);
sb.append(ASSIGNMENT);
sb.append(VAL_QUOTE).append(valueObject).append(VAL_QUOTE);
sb.append(BETWEEN_ENTRIES);
}
break;
case Tuples:
for (int i = 0; i < keyFuncs.length; i++) {
Object keyObject = keyFuncs[i].apply(value + i);
Object valueObject = valueFuncs[i].apply(value + i);
sb.append(KEY_QUOTE).append(keyObject).append(KEY_QUOTE);
sb.append(ASSIGNMENT);
sb.append(VAL_QUOTE).append(valueObject).append(VAL_QUOTE);
sb.append(BETWEEN_ENTRIES);
}
break;
}
sb.setLength(sb.length()-BETWEEN_ENTRIES.length());
sb.append(AFTER_RESULT);
return sb.toString();
}
private enum Mode {
VarSized,
Tuples
}
}
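
A minimal sketch of the var-sized form, with plain lambdas standing in for the virtdata functions named in the examples:

StringMapClob clob = new StringMapClob(
l -> 3, // size: always three entries
l -> "key" + l, // key function, applied to value+i
l -> l * 7); // value function, applied to value+i
String s = clob.apply(42L); // "{'key42': '294', 'key43': '301', 'key44': '308'}"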


@@ -0,0 +1,41 @@
package io.nosqlbench.activitytype.cql.datamappers.functions.collections;
import io.nosqlbench.virtdata.annotations.Example;
import io.nosqlbench.virtdata.annotations.ThreadSafeMapper;
import io.nosqlbench.virtdata.api.DataMapper;
import io.nosqlbench.virtdata.api.VirtData;
import java.util.ArrayList;
import java.util.List;
import java.util.function.LongFunction;
/**
* This is an example of a mapping function that can create a list of objects
* from another internal mapping function.
*
* The input value for each function is incremented by one from the initial input value
* given to this overall function.
*
*/
@ThreadSafeMapper
public class ListMapper implements LongFunction<List<?>> {
private int size;
private DataMapper<String> elementMapper;
@Example({"ListMapper(5,NumberNameToString())","creates a list of number names"})
public ListMapper(int size, String genSpec) {
this.size = size;
elementMapper = VirtData.getMapper(genSpec,String.class);
}
@Override
public List<?> apply(long value) {
List<Object> list = new ArrayList<>(size);
for (int listpos = 0; listpos < size; listpos++) {
Object o = elementMapper.get(value + listpos);
list.add(o);
}
return list;
}
}
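
A usage sketch of the @Example above, assuming NumberNameToString maps 0 to "zero", 1 to "one", and so on:

ListMapper names = new ListMapper(5, "NumberNameToString()");
List<?> five = names.apply(0L); // ["zero", "one", "two", "three", "four"]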


@@ -0,0 +1,23 @@
package io.nosqlbench.activitytype.cql.datamappers.functions.contrib;
import io.nosqlbench.virtdata.annotations.ThreadSafeMapper;
import java.util.function.IntUnaryOperator;
@ThreadSafeMapper
public class WrappedClustering implements IntUnaryOperator {
@Override
public int applyAsInt(int operand) {
long longOperand = operand;
long longOperandTimes15 = longOperand * 15;
long integerMax = Integer.MAX_VALUE + 1L; // 2^31; the long literal avoids int overflow
long integerMin = Integer.MIN_VALUE;
long sign = (long) Math.pow((-1), longOperandTimes15/integerMax);
if (sign > 0)
return (int) (sign * (longOperandTimes15 % integerMax));
else
return (int) (integerMin - (sign * (longOperandTimes15 % integerMax)));
}
}


@@ -0,0 +1,114 @@
package io.nosqlbench.activitytype.cql.datamappers.functions.diagnostics;
import com.datastax.driver.core.DataType;
import com.datastax.driver.core.LocalDate;
import com.datastax.driver.core.TupleValue;
import com.datastax.driver.core.UDTValue;
import io.nosqlbench.virtdata.annotations.ThreadSafeMapper;
import java.math.BigDecimal;
import java.math.BigInteger;
import java.net.InetAddress;
import java.nio.ByteBuffer;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.function.Function;
/**
* Shows the compatible CQL type most associated with the incoming Java type.
*/
@ThreadSafeMapper
public class ToCqlType implements Function<Object, String> {
private final static Map<String, String> typemap = new HashMap<String, String>() {{
put("a", "b");
put(String.class.getCanonicalName(), DataType.text().getName().toString() +
" or " + DataType.ascii().getName().toString() +
" or " + DataType.varchar().getName().toString());
put(Long.class.getCanonicalName(), DataType.bigint().getName().toString() +
" or " + DataType.time().getName().toString() +
" or " + DataType.counter().getName().toString());
put(long.class.getCanonicalName(), DataType.bigint().getName().toString() +
" or " + DataType.counter().getName().toString());
put(ByteBuffer.class.getCanonicalName(), DataType.blob().getName().toString() +
",CUSTOM");
put(Boolean.class.getCanonicalName(), DataType.cboolean().getName().toString());
put(boolean.class.getCanonicalName(), DataType.cboolean().getName().toString());
put(BigDecimal.class.getCanonicalName(), DataType.decimal().getName().toString());
put(Double.class.getCanonicalName(),DataType.cdouble().getName().toString());
put(double.class.getCanonicalName(),DataType.cdouble().getName().toString());
put(Float.class.getCanonicalName(), DataType.cfloat().getName().toString());
put(float.class.getCanonicalName(), DataType.cfloat().getName().toString());
put(InetAddress.class.getCanonicalName(), DataType.inet().getName().toString());
put(Integer.class.getCanonicalName(),DataType.cint().getName().toString());
put(int.class.getCanonicalName(),DataType.cint().getName().toString());
put(java.util.Date.class.getCanonicalName(),DataType.timestamp().getName().toString());
put(java.util.UUID.class.getCanonicalName(),DataType.timeuuid().getName().toString()+" or "+DataType.uuid().getName().toString());
put(BigInteger.class.getCanonicalName(),DataType.varint().getName().toString());
put(Short.class.getCanonicalName(), DataType.smallint().getName().toString());
put(short.class.getCanonicalName(), DataType.smallint().getName().toString());
put(Byte.class.getCanonicalName(), DataType.tinyint().getName().toString());
put(byte.class.getCanonicalName(), DataType.tinyint().getName().toString());
put(LocalDate.class.getCanonicalName(), DataType.date().getName().toString());
put(UDTValue.class.getCanonicalName(), "<udt>");
put(TupleValue.class.getCanonicalName(),"<tuple>");
}};
private final ThreadLocal<StringBuilder> tlsb = ThreadLocal.withInitial(StringBuilder::new);
@Override
public String apply(Object o) {
String canonicalName = o.getClass().getCanonicalName();
String cqlTypeName = typemap.get(canonicalName);
StringBuilder sb = tlsb.get();
sb.setLength(0);
if (cqlTypeName!=null) {
return sb.append(canonicalName).append(" -> ").append(cqlTypeName).toString();
}
return findAlternates(o,canonicalName);
}
private String findAlternates(Object o, String canonicalName) {
StringBuilder sb = tlsb.get();
if (List.class.isAssignableFrom(o.getClass())) {
sb.append(canonicalName).append("<");
if (((List)o).size()>0) {
Object o1 = ((List) o).get(0);
String elementType = o1.getClass().getCanonicalName();
sb.append(elementType).append("> -> List<");
sb.append(typemap.getOrDefault(elementType,"UNKNOWN")).append(">");
return sb.toString();
}
return sb.append("?> -> List<?>").toString();
}
if (Map.class.isAssignableFrom(o.getClass())) {
sb.append(canonicalName).append("<");
if (((Map)o).size()>0) {
Map.Entry next = (Map.Entry) ((Map) o).entrySet().iterator().next();
String keyType = next.getKey().getClass().getCanonicalName();
String valType = next.getValue().getClass().getCanonicalName();
sb.append(keyType).append(",").append(valType).append("> -> Map<");
sb.append(typemap.getOrDefault(keyType,"UNKNOWN")).append(",");
sb.append(typemap.getOrDefault(valType,"UNKNOWN")).append(">");
return sb.toString();
}
return sb.append("?,?> -> Map<?,?>").toString();
}
if (Set.class.isAssignableFrom(o.getClass())) {
sb.append(canonicalName).append("<");
if (((Set)o).size()>0) {
Object o1=((Set)o).iterator().next();
String elementType = o1.getClass().getCanonicalName();
sb.append(elementType).append("> -> Set<");
sb.append(typemap.getOrDefault(elementType,"UNKNOWN")).append(">");
return sb.toString();
}
return sb.append("?> -> Set<?>").toString();
}
return typemap.getOrDefault(o.getClass().getSuperclass().getCanonicalName(), "UNKNOWN");
}
}
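
A sketch of the diagnostic output, following the typemap and findAlternates logic above:

ToCqlType toCql = new ToCqlType();
toCql.apply(5L); // "java.lang.Long -> bigint or time or counter"
Map<String, Integer> m = new HashMap<>();
m.put("a", 1);
toCql.apply(m); // "java.util.HashMap<java.lang.String,java.lang.Integer> -> Map<text or ascii or varchar,int>"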


@@ -0,0 +1,66 @@
package io.nosqlbench.activitytype.cql.datamappers.functions.geometry;
import com.datastax.driver.dse.geometry.Point;
import io.nosqlbench.virtdata.annotations.ThreadSafeMapper;
import java.util.function.LongFunction;
import java.util.function.LongToDoubleFunction;
/**
* Create a Distance generator which produces
* com.datastax.driver.dse.geometry.Distance objects.
*/
@ThreadSafeMapper
public class Distance implements LongFunction<com.datastax.driver.dse.geometry.Distance> {
private final io.nosqlbench.activitytype.cql.datamappers.functions.geometry.Point pointfunc;
private final LongToDoubleFunction rfunc;
public Distance(LongToDoubleFunction xfunc, LongToDoubleFunction yfunc, LongToDoubleFunction rfunc) {
pointfunc = new io.nosqlbench.activitytype.cql.datamappers.functions.geometry.Point(xfunc,yfunc);
this.rfunc = rfunc;
}
public Distance(double x, LongToDoubleFunction yfunc, LongToDoubleFunction rfunc) {
pointfunc = new io.nosqlbench.activitytype.cql.datamappers.functions.geometry.Point((u)->x,yfunc);
this.rfunc = rfunc;
}
public Distance(LongToDoubleFunction xfunc, double y, LongToDoubleFunction rfunc) {
pointfunc = new io.nosqlbench.activitytype.cql.datamappers.functions.geometry.Point(xfunc,(v)->y);
this.rfunc = rfunc;
}
public Distance(double x, double y, LongToDoubleFunction rfunc) {
pointfunc = new io.nosqlbench.activitytype.cql.datamappers.functions.geometry.Point((u)->x,(v)->y);
this.rfunc = rfunc;
}
public Distance(LongToDoubleFunction xfunc, LongToDoubleFunction yfunc, double r) {
pointfunc = new io.nosqlbench.activitytype.cql.datamappers.functions.geometry.Point(xfunc,yfunc);
this.rfunc = (w) -> r;
}
public Distance(double x, LongToDoubleFunction yfunc, double r) {
pointfunc = new io.nosqlbench.activitytype.cql.datamappers.functions.geometry.Point((u)->x,yfunc);
this.rfunc = (w) -> r;
}
public Distance(LongToDoubleFunction xfunc, double y, double r) {
pointfunc = new io.nosqlbench.activitytype.cql.datamappers.functions.geometry.Point(xfunc,(v)->y);
this.rfunc = (w) -> r;
}
public Distance(double x, double y, double r) {
pointfunc = new io.nosqlbench.activitytype.cql.datamappers.functions.geometry.Point((u) -> x, (v) -> y);
this.rfunc = (w) -> r;
}
@Override
public com.datastax.driver.dse.geometry.Distance apply(long value) {
Point apoint = pointfunc.apply(value);
double aradius = rfunc.applyAsDouble(value);
return new com.datastax.driver.dse.geometry.Distance(apoint,aradius);
}
}


@@ -0,0 +1,46 @@
package io.nosqlbench.activitytype.cql.datamappers.functions.geometry;
//import com.datastax.driver.dse.geometry.Point;
import io.nosqlbench.virtdata.annotations.ThreadSafeMapper;
import java.util.function.LongFunction;
import java.util.function.LongToDoubleFunction;
import java.util.function.LongToIntFunction;
@SuppressWarnings("Duplicates")
@ThreadSafeMapper
public class LineString implements LongFunction<com.datastax.driver.dse.geometry.LineString> {
private final LongFunction<com.datastax.driver.dse.geometry.Point> pointfunc;
private final LongToIntFunction lenfunc;
public LineString(LongToIntFunction lenfunc, LongFunction<com.datastax.driver.dse.geometry.Point> pointfunc) {
this.pointfunc = pointfunc;
this.lenfunc = lenfunc;
}
public LineString(LongToIntFunction lenfunc, LongToDoubleFunction xfunc, LongToDoubleFunction yfunc) {
this.lenfunc = lenfunc;
this.pointfunc=new Point(xfunc,yfunc);
}
public LineString(int len, LongFunction<com.datastax.driver.dse.geometry.Point> pointfunc) {
this.lenfunc = (i) -> len;
this.pointfunc = pointfunc;
}
@Override
public com.datastax.driver.dse.geometry.LineString apply(long value) {
int linelen = Math.max(lenfunc.applyAsInt(value),2);
com.datastax.driver.dse.geometry.Point p0 = pointfunc.apply(value);
com.datastax.driver.dse.geometry.Point p1 = pointfunc.apply(value+1);
com.datastax.driver.dse.geometry.Point[] points = new com.datastax.driver.dse.geometry.Point[linelen-2];
for (int i = 2; i < linelen; i++) {
points[i-2]=pointfunc.apply(value+i);
}
return new com.datastax.driver.dse.geometry.LineString(p0,p1,points);
}
}


@@ -0,0 +1,44 @@
package io.nosqlbench.activitytype.cql.datamappers.functions.geometry;
import io.nosqlbench.virtdata.annotations.ThreadSafeMapper;
import java.util.function.LongFunction;
import java.util.function.LongToDoubleFunction;
/**
* Create a Point generator which generates com.datastax.driver.dse.geometry.Point
* objects.
*/
@ThreadSafeMapper
public class Point implements LongFunction<com.datastax.driver.dse.geometry.Point> {
private final LongToDoubleFunction xfunc;
private final LongToDoubleFunction yfunc;
public Point(double x, double y) {
this.xfunc = (u) -> x;
this.yfunc = (v) -> y;
}
public Point(double x, LongToDoubleFunction yfunc) {
this.xfunc = (u) -> x;
this.yfunc = yfunc;
}
public Point(LongToDoubleFunction xfunc, double y) {
this.xfunc = xfunc;
this.yfunc = (v) -> y;
}
public Point(LongToDoubleFunction xfunc, LongToDoubleFunction yfunc) {
this.xfunc = xfunc;
this.yfunc = yfunc;
}
@Override
public com.datastax.driver.dse.geometry.Point apply(long value) {
return new com.datastax.driver.dse.geometry.Point(xfunc.applyAsDouble(value), yfunc.applyAsDouble(value));
}
}


@@ -0,0 +1,45 @@
package io.nosqlbench.activitytype.cql.datamappers.functions.geometry;
import com.datastax.driver.dse.geometry.Point;
import io.nosqlbench.virtdata.annotations.ThreadSafeMapper;
import java.util.function.LongFunction;
import java.util.function.LongToDoubleFunction;
import java.util.function.LongToIntFunction;
@SuppressWarnings("ALL")
@ThreadSafeMapper
public class Polygon implements LongFunction<com.datastax.driver.dse.geometry.Polygon> {
private final LongFunction<Point> pointfunc;
private final LongToIntFunction lenfunc;
public Polygon(LongToIntFunction lenfunc, LongFunction<Point> pointfunc) {
this.pointfunc = pointfunc;
this.lenfunc = lenfunc;
}
public Polygon(LongToIntFunction lenfunc, LongToDoubleFunction xfunc, LongToDoubleFunction yfunc) {
this.lenfunc = lenfunc;
this.pointfunc=new io.nosqlbench.activitytype.cql.datamappers.functions.geometry.Point(xfunc,yfunc);
}
public Polygon(int len, LongFunction<Point> pointfunc) {
this.lenfunc = (i) -> len;
this.pointfunc = pointfunc;
}
@Override
public com.datastax.driver.dse.geometry.Polygon apply(long value) {
int linelen = Math.max(lenfunc.applyAsInt(value),3);
Point p0 = pointfunc.apply(value);
Point p1 = pointfunc.apply(value+1);
Point p2 = pointfunc.apply(value+2);
Point[] points = new Point[linelen-3];
for (int i = 3; i < linelen; i++) {
points[i-3]=pointfunc.apply(value+i);
}
return new com.datastax.driver.dse.geometry.Polygon(p0,p1,p2,points);
}
}


@@ -0,0 +1,86 @@
package io.nosqlbench.activitytype.cql.datamappers.functions.geometry;
import com.datastax.driver.dse.geometry.Point;
import com.datastax.driver.dse.geometry.Polygon;
import io.nosqlbench.virtdata.annotations.Example;
import io.nosqlbench.virtdata.annotations.ThreadSafeMapper;
import io.nosqlbench.virtdata.library.curves4.discrete.long_int.Uniform;
import java.util.function.LongFunction;
/**
* This function will return a polygon in the form of a rectangle from the specified
* grid system. The coordinates define the top left and bottom right coordinates in
* (x1,y1),(x2,y2) order, while the number of rows and columns divides these ranges
* into the unit-length for each square.
* x1 must be less than x2. y1 must be greater than y2.
*
* This grid system can be used to construct a set of overlapping grids such that the
* likelihood of overlap is somewhat easy to reason about. For example, if you create
* one grid system as a reference grid, then attempt to map another grid system which
* half overlaps the original grid, you can easily determine that half the time, a
* random rectangle selected from the second grid will overlap a rectangle from the
* first, for simple even-numbered grids and the expected uniform sampling on the
* internal coordinate selector functions.
*/
@SuppressWarnings("ALL")
@ThreadSafeMapper
public class PolygonOnGrid implements LongFunction<Polygon> {
private final double rows;
private final double columns;
private final double x_topleft;
private final double y_topleft;
private final double x_bottomright;
private final double y_bottomright;
private final Uniform rowfunc;
private final Uniform colfunc;
private final double xwidth;
private final double yheight;
@Example({"PolygonOnGrid(1, 11, 11, 1, 10, 10)","Create a 10x10 grid with cells 1x1, spaced one off the y=0 and x=0 axes"})
public PolygonOnGrid(double x_topleft, double y_topleft, double x_bottomright, double y_bottomright, int rows, int columns) {
if (x_topleft>=x_bottomright) {
throw new RuntimeException("x_topleft should be less than x_bottomright");
}
if (y_topleft<=y_bottomright) {
throw new RuntimeException("y_topleft should be more than y_bottomright");
}
this.x_topleft = x_topleft;
this.y_topleft = y_topleft;
this.x_bottomright = x_bottomright;
this.y_bottomright = y_bottomright;
this.rows = rows;
this.columns = columns;
this.xwidth = (x_bottomright-x_topleft) / columns;
this.yheight = (y_topleft-y_bottomright) / rows;
this.rowfunc = new Uniform(0, rows - 1);
this.colfunc = new Uniform(0,columns-1);
}
@Override
public Polygon apply(long value) {
int row = rowfunc.applyAsInt(value);
int column = colfunc.applyAsInt(value+33);
double left=x_topleft + (column*xwidth);
double top =y_topleft - (row*yheight);
double right = left+xwidth;
double bottom = top - yheight;
Polygon polygon = new Polygon(
new Point(left, bottom),
new Point(left, top),
new Point(right, top),
new Point(right, bottom)
);
return polygon;
}
}
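
A usage sketch echoing the @Example: a 10x10 grid of unit cells offset one unit from each axis:

PolygonOnGrid grid = new PolygonOnGrid(1, 11, 11, 1, 10, 10);
Polygon cell = grid.apply(17L); // one pseudo-randomly selected unit cell from the grid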


@@ -0,0 +1,25 @@
package io.nosqlbench.activitytype.cql.datamappers.functions.long_localdate;
import com.datastax.driver.core.LocalDate;
import io.nosqlbench.virtdata.annotations.Example;
import io.nosqlbench.virtdata.annotations.ThreadSafeMapper;
import java.util.function.LongFunction;
/**
* Converts epoch millis to a
* com.datastax.driver.core.{@link LocalDate} object, interpreting
* the input as the number of milliseconds since January 1st, 1970 GMT.
*/
@ThreadSafeMapper
public class EpochMillisToCqlLocalDate implements LongFunction<LocalDate> {
@Example({"EpochMillisToJavaLocalDate()", "Yields the LocalDate for the millis in GMT"})
public EpochMillisToCqlLocalDate() {
}
@Override
public LocalDate apply(long value) {
return LocalDate.fromMillisSinceEpoch(value);
}
}


@@ -0,0 +1,43 @@
package io.nosqlbench.activitytype.cql.datamappers.functions.long_localdate;
import io.nosqlbench.virtdata.annotations.Example;
import io.nosqlbench.virtdata.annotations.ThreadSafeMapper;
import java.time.Instant;
import java.time.LocalDate;
import java.time.ZoneId;
import java.util.function.LongFunction;
/**
* Converts epoch millis to a java.time.{@link LocalDate} object,
* using either the system
* default timezone or the timezone provided. If the specified ZoneId is not
* the same as the time base of the epoch millis instant, then conversion
* errors will occur.
*
* Short form ZoneId values like 'CST' can be used, although US Domestic names
* which specify the daylight savings hours are not supported. The full list of
* short Ids is at <a href="https://docs.oracle.com/en/java/javase/12/docs/api/java.base/java/time/ZoneId.html#SHORT_IDS">JavaSE ZoneId Ids</a>
*
* Any timezone specifier may be used which can be read by {@link ZoneId#of(String)}
*/
@ThreadSafeMapper
public class EpochMillisToJavaLocalDate implements LongFunction<LocalDate> {
ZoneId timezone;
@Example({"EpochMillisToJavaLocalDate()","Yields the LocalDate for the system default ZoneId"})
public EpochMillisToJavaLocalDate() {
this.timezone = ZoneId.systemDefault();
}
@Example({"EpochMillisToJavaLocalDate('ECT')","Yields the LocalDate for the ZoneId entry for 'Europe/Paris'"})
public EpochMillisToJavaLocalDate(String zoneid) {
this.timezone = ZoneId.of(zoneid);
}
@Override
public LocalDate apply(long value) {
return Instant.ofEpochMilli(value).atZone(timezone).toLocalDate();
}
}
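
A sketch of the zone sensitivity described above; epoch millis 0 is 1970-01-01T00:00Z:

new EpochMillisToJavaLocalDate().apply(0L); // zone-dependent: 1970-01-01, or 1969-12-31 west of UTC
new EpochMillisToJavaLocalDate("Europe/Paris").apply(0L); // 1970-01-01 (UTC+1 at that instant)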


@@ -0,0 +1,43 @@
package io.nosqlbench.activitytype.cql.datamappers.functions.long_localdate;
import io.nosqlbench.virtdata.annotations.Example;
import io.nosqlbench.virtdata.annotations.ThreadSafeMapper;
import java.time.Instant;
import java.time.LocalDateTime;
import java.time.ZoneId;
import java.util.function.LongFunction;
/**
* Converts epoch millis to a
* java.time.{@link LocalDateTime} object, using either the system
* default timezone or the timezone provided. If the specified ZoneId is not
* the same as the time base of the epoch millis instant, then conversion
* errors will occur.
*
* Short form ZoneId values like 'CST' can be used, although US Domestic names
* which specify the daylight savings hours are not supported. The full list of
* short Ids is at <a href="https://docs.oracle.com/en/java/javase/12/docs/api/java.base/java/time/ZoneId.html#SHORT_IDS">JavaSE ZoneId Ids</a>
*
* Any timezone specifier may be used which can be read by {@link ZoneId#of(String)}
*/
@ThreadSafeMapper
public class EpochMillisToJavaLocalDateTime implements LongFunction<LocalDateTime> {
ZoneId timezone;
@Example({"EpochMillisToJavaLocalDateTime()","Yields the LocalDateTime for the system default ZoneId"})
public EpochMillisToJavaLocalDateTime() {
this.timezone = ZoneId.systemDefault();
}
@Example({"EpochMillisToJavaLocalDateTime('ECT')","Yields the LocalDateTime for the ZoneId entry for 'Europe/Paris'"})
public EpochMillisToJavaLocalDateTime(String zoneid) {
this.timezone = ZoneId.of(zoneid);
}
@Override
public LocalDateTime apply(long value) {
return Instant.ofEpochMilli(value).atZone(timezone).toLocalDateTime();
}
}


@@ -0,0 +1,22 @@
package io.nosqlbench.activitytype.cql.datamappers.functions.long_localdate;
import com.datastax.driver.core.LocalDate;
import io.nosqlbench.virtdata.annotations.Example;
import io.nosqlbench.virtdata.annotations.ThreadSafeMapper;
import java.util.function.LongFunction;
/**
* Days since Jan 1st 1970
*/
@ThreadSafeMapper
public class LongToLocalDateDays implements LongFunction<LocalDate> {
@Override
public LocalDate apply(long value) {
return LocalDate.fromDaysSinceEpoch((int) (value % Integer.MAX_VALUE));
}
@Example({"LongToLocalDateDays()","take the cycle number and turn it into a LocalDate based on days since 1970"})
public LongToLocalDateDays (){
}
}


@@ -0,0 +1,69 @@
/*
*
* Copyright 2015 Jonathan Shook
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package io.nosqlbench.activitytype.cql.datamappers.functions.long_string;
import io.nosqlbench.virtdata.annotations.Example;
import io.nosqlbench.virtdata.annotations.ThreadSafeMapper;
import io.nosqlbench.virtdata.api.VirtDataResources;
import org.apache.commons.csv.CSVParser;
import org.apache.commons.csv.CSVRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.List;
import java.util.function.LongFunction;
/**
* Select a value from a CSV file line by modulo division against the number
* of lines in the file. The second parameter is the field name, and this must
* be provided in the CSV header line as written.
*/
@ThreadSafeMapper
public class ModuloCSVLineToString implements LongFunction<String> {
private final static Logger logger = LoggerFactory.getLogger(ModuloCSVLineToString.class);
private List<String> lines = new ArrayList<>();
private String filename;
@Example({"ModuloCSVLineToString('data/myfile.csv','lat')","load values for 'lat' from the CSV file myfile.csv."})
public ModuloCSVLineToString(String filename, String fieldname) {
this.filename = filename;
CSVParser csvp = VirtDataResources.readFileCSV(filename);
int column = csvp.getHeaderMap().get(fieldname);
for (CSVRecord strings : csvp) {
lines.add(strings.get(column));
}
}
@Override
public String apply(long input) {
int itemIdx = (int) (input % lines.size());
String item = lines.get(itemIdx);
return item;
}
public String toString() {
return getClass().getSimpleName() + ":" + filename;
}
}
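
A usage sketch, assuming a hypothetical data/myfile.csv with a header line "lat,lon" and N data rows:

ModuloCSVLineToString lat = new ModuloCSVLineToString("data/myfile.csv", "lat");
String first = lat.apply(0L); // the 'lat' column of row 0
String again = lat.apply(7L); // row 7 % N, wrapping around the file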


@@ -0,0 +1,71 @@
/*
*
* Copyright 2015 Jonathan Shook
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package io.nosqlbench.activitytype.cql.datamappers.functions.long_string;
import io.nosqlbench.virtdata.annotations.Example;
import io.nosqlbench.virtdata.annotations.ThreadSafeMapper;
import io.nosqlbench.virtdata.api.VirtDataResources;
import org.apache.commons.csv.CSVParser;
import org.apache.commons.csv.CSVRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.List;
import java.util.UUID;
import java.util.function.LongFunction;
/**
* Select a value from a CSV file line by modulo division against the number
* of lines in the file. The second parameter is the field name, and this must
* be provided in the CSV header line as written.
*/
@ThreadSafeMapper
public class ModuloCSVLineToUUID implements LongFunction<UUID> {
private final static Logger logger = LoggerFactory.getLogger(ModuloCSVLineToUUID.class);
private List<String> lines = new ArrayList<>();
private String filename;
@Example({"ModuloCSVLineToUUID('data/myfile.csv','lat')","load values for 'lat' from the CSV file myfile.csv."})
public ModuloCSVLineToUUID(String filename, String fieldname) {
this.filename = filename;
CSVParser csvp = VirtDataResources.readFileCSV(filename);
int column = csvp.getHeaderMap().get(fieldname);
for (CSVRecord strings : csvp) {
lines.add(strings.get(column));
}
}
@Override
public UUID apply(long input) {
int itemIdx = (int) (input % lines.size());
String item = lines.get(itemIdx);
return UUID.fromString(item);
}
public String toString() {
return getClass().getSimpleName() + ":" + filename;
}
}


@@ -0,0 +1,23 @@
package io.nosqlbench.activitytype.cql.datamappers.functions.long_uuid;
import com.datastax.driver.core.utils.UUIDs;
import io.nosqlbench.virtdata.annotations.Categories;
import io.nosqlbench.virtdata.annotations.Category;
import io.nosqlbench.virtdata.annotations.ThreadSafeMapper;
import java.util.UUID;
import java.util.function.LongFunction;
/**
* Converts a long timestamp in epoch millis form into a Version 1 TimeUUID
* according to <a href="https://www.ietf.org/rfc/rfc4122.txt">RFC 4122</a>.
* This form uses {@link UUIDs#endOf(long)}
*/
@Categories({Category.datetime})
@ThreadSafeMapper
public class ToTimeUUIDMax implements LongFunction<UUID> {
@Override
public UUID apply(long value) {
return UUIDs.endOf(value);
}
}


@@ -0,0 +1,23 @@
package io.nosqlbench.activitytype.cql.datamappers.functions.long_uuid;
import com.datastax.driver.core.utils.UUIDs;
import io.nosqlbench.virtdata.annotations.Categories;
import io.nosqlbench.virtdata.annotations.Category;
import io.nosqlbench.virtdata.annotations.ThreadSafeMapper;
import java.util.UUID;
import java.util.function.LongFunction;
/**
* Converts a long timestamp in epoch millis form into a Version 1 TimeUUID
* according to <a href="https://www.ietf.org/rfc/rfc4122.txt">RFC 4122</a>.
* This form uses {@link UUIDs#startOf(long)}
*/
@Categories({Category.datetime})
@ThreadSafeMapper
public class ToTimeUUIDMin implements LongFunction<UUID> {
@Override
public UUID apply(long value) {
return UUIDs.startOf(value);
}
}


@@ -0,0 +1,98 @@
package io.nosqlbench.activitytype.cql.datamappers.functions.rainbow;
import java.nio.BufferUnderflowException;
import java.nio.ByteBuffer;
/**
* <p>This class provides <em>cursor-like</em> access to a set of data from
* a binary file using Java nio buffers. Calling {@link #next()} causes
* the next record to be loaded, after which the getter methods return
* the loaded values. You must call next() before accessing each record's fields.</p>
*
* <p>The {@link #next(int)} method may be used for absolute offset access.
* In this mode, no thread safety is imposed, as there is no chance of the
* internal buffer's position affecting the result.</p>
*
* <p>Buffers may be accessed as shared or not.</p>
*
*/
public class TokenMapFileAPIService {
// public static ThreadLocal<Map<String, BinaryCursorForTokenCycle>> tl_cll =
// ThreadLocal.withInitial(HashMap::new);
//
private final int recordCount;
private final ByteBuffer buffer;
private final int RECORD_LEN = Long.BYTES * 2;
private int recordPosition;
private long token;
private int TOKEN_OFFSET = 0;
private long cycle;
private int CYCLE_OFFSET = Long.BYTES;
private boolean loopdata;
/**
* Create a new binary cursor for data in a binary file which consists of a (long,long) tuple of
* token values (murmur3 partitioner tokens) and cycle values that correspond to them. The cycles
* are the ones responsible for producing the associated token values.
* @param datafile The data file to read from
* @param loopdata Whether or not to loop around to the beginning of the data. For positional reads this is also
* modulo-based, such that relatively prime sizes and increments will loop rather than simply repeat
* values at the start of the buffer
* @param instanced Whether or not to provide an instanced view into the byte buffer, where each thread can have
* its own read tracking state
* @param ascending Whether to keep the (long,long) tuples in file order; if false, the order is reversed when the file is read.
*/
public TokenMapFileAPIService(String datafile, boolean loopdata, boolean instanced, boolean ascending) {
this.loopdata = loopdata;
buffer = TokenMapFileSharedBuffers.getByteBuffer(datafile,instanced,ascending).asReadOnlyBuffer();
this.recordCount = (int) (buffer.capacity() / RECORD_LEN);
this.recordPosition = 0;
}
public synchronized void next() {
try {
token = buffer.getLong();
cycle = buffer.getLong();
} catch (BufferUnderflowException bue) {
if (loopdata) {
buffer.position(0);
next();
}
else {
throw bue;
}
}
}
/**
* Do a read of [token,cycle] record without incremental read state.
* @param position The logical record within the buffer to read
*/
public void next(int position) {
if (loopdata) {
position = position % recordCount;
}
position *= RECORD_LEN; // scale the logical record index to a byte offset in both modes
token = buffer.getLong(position+TOKEN_OFFSET);
cycle = buffer.getLong(position+CYCLE_OFFSET);
}
public long getToken() {
return token;
}
public long getCycle() {
return cycle;
}
// public static BinaryCursorForTokenCycle get(String mapname) {
// BinaryCursorForTokenCycle cursorLongLong = tl_cll.get().get(mapname);
// return cursorLongLong;
// }
}
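
A usage sketch; tokens.bin is a hypothetical file of back-to-back (long token, long cycle) records as described in the constructor javadoc:

TokenMapFileAPIService svc = new TokenMapFileAPIService("tokens.bin", true, true, true);
svc.next(); // cursor-style: load the next record
long token = svc.getToken();
long cycle = svc.getCycle();
svc.next(42); // positional: load logical record 42 without shared cursor state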


@@ -0,0 +1,22 @@
package io.nosqlbench.activitytype.cql.datamappers.functions.rainbow;
import java.util.function.IntToLongFunction;
public abstract class TokenMapFileBaseFunction implements IntToLongFunction {
protected static ThreadLocal<TokenMapFileAPIService> tl_DataSvc;
public TokenMapFileBaseFunction(String filename, boolean loopdata, boolean instanced, boolean ascending) {
tl_DataSvc = ThreadLocal.withInitial(() -> new TokenMapFileAPIService(filename, loopdata, instanced, ascending));
}
public TokenMapFileBaseFunction(String filename) {
this(filename, false, true, true);
}
// @Override
// public long applyAsLong(long operand) {
// BinaryCursorForTokenCycle bc;
// bc.next(operand);
// return 0;
// }
}


@@ -0,0 +1,17 @@
package io.nosqlbench.activitytype.cql.datamappers.functions.rainbow;
import io.nosqlbench.virtdata.annotations.ThreadSafeMapper;
@ThreadSafeMapper
public class TokenMapFileCycle extends TokenMapFileBaseFunction {
public TokenMapFileCycle(String filename, boolean loopdata, boolean ascending) {
super(filename, loopdata, false, ascending);
}
@Override
public long applyAsLong(int value) {
TokenMapFileAPIService datasvc = tl_DataSvc.get();
return datasvc.getCycle();
}
}


@@ -0,0 +1,18 @@
package io.nosqlbench.activitytype.cql.datamappers.functions.rainbow;
import io.nosqlbench.virtdata.annotations.ThreadSafeMapper;
@ThreadSafeMapper
public class TokenMapFileNextCycle extends TokenMapFileBaseFunction {
public TokenMapFileNextCycle(String filename, boolean loopdata, boolean ascending) {
super(filename, loopdata, false, ascending);
}
@Override
public long applyAsLong(int value) {
TokenMapFileAPIService datasvc = tl_DataSvc.get();
datasvc.next(value);
return datasvc.getCycle();
}
}


@@ -0,0 +1,18 @@
package io.nosqlbench.activitytype.cql.datamappers.functions.rainbow;
import io.nosqlbench.virtdata.annotations.ThreadSafeMapper;
@ThreadSafeMapper
public class TokenMapFileNextToken extends TokenMapFileBaseFunction {
public TokenMapFileNextToken(String filename, boolean loopdata, boolean ascending) {
super(filename, loopdata, false, ascending);
}
@Override
public long applyAsLong(int value) {
TokenMapFileAPIService datasvc = tl_DataSvc.get();
datasvc.next(value);
return datasvc.getToken();
}
}


@@ -0,0 +1,60 @@
package io.nosqlbench.activitytype.cql.datamappers.functions.rainbow;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.util.HashMap;
import java.util.Map;
public class TokenMapFileSharedBuffers {
public final static TokenMapFileSharedBuffers INSTANCE = new TokenMapFileSharedBuffers();
private final static Map<String,ByteBuffer> BUFFERS = new HashMap<>();
private TokenMapFileSharedBuffers() {}
/**
* Find and load the {@link ByteBuffer} which can be read at the specified
* location. This will only be loaded into memory once. All callers will
* get access to the same logical source data, whether or not the caller
* gets its own buffer tracking state (see {@link java.nio.Buffer}).
* If each caller will use the Buffer API for incremental reads, where
* callers could possibly read the same records, then separate instanced
* buffers are advised.
*
* <p>However, if you are planning to use position-oriented access to the
* buffer only, then it is not necessary to ask for instanced buffers. In
* some usage patterns, it may be desirable to provide a single logical
* view of buffer reader position across multiple threads. In this case,
* setting instanced to false is necessary.</p>
*
* @param filename The location of the source data for the buffer.
* @param instanced If true, each caller gets a wrapped buffer object with its own
* tracking state
* @param ascending If false, the record order is reversed as the file is loaded.
* @return An instance of a ByteBuffer
*/
public synchronized static ByteBuffer getByteBuffer(String filename, boolean instanced, boolean ascending) {
ByteBuffer foundBuffer = BUFFERS.computeIfAbsent(filename, f->load(f,ascending));
return instanced ? foundBuffer.asReadOnlyBuffer() : foundBuffer;
}
private static ByteBuffer load(String filename, boolean ascending) {
try {
RandomAccessFile image = new RandomAccessFile(filename, "rw");
ByteBuffer mbb = image.getChannel().map(FileChannel.MapMode.READ_WRITE, 0, image.length());
if (!ascending) {
int RECORD_LEN = Long.BYTES * 2;
ByteBuffer descendingByteBuffer = ByteBuffer.allocate(mbb.capacity());
for (int i = mbb.capacity()-RECORD_LEN; i >= 0 ; i-=RECORD_LEN) {
long v1 = mbb.getLong(i);
long v2 = mbb.getLong(i+Long.BYTES);
descendingByteBuffer.putLong(v1);
descendingByteBuffer.putLong(v2);
}
descendingByteBuffer.flip(); // rewind so the reversed copy reads from the start
mbb = descendingByteBuffer;
}
return mbb;
} catch (Exception e) {
throw new RuntimeException(e);
}
}
}


@@ -0,0 +1,17 @@
package io.nosqlbench.activitytype.cql.datamappers.functions.rainbow;
import io.nosqlbench.virtdata.annotations.ThreadSafeMapper;
@ThreadSafeMapper
public class TokenMapFileToken extends TokenMapFileBaseFunction {
public TokenMapFileToken(String filename, boolean loopdata, boolean ascending) {
super(filename, loopdata, false, ascending);
}
@Override
public long applyAsLong(int value) {
TokenMapFileAPIService datasvc = tl_DataSvc.get();
return datasvc.getToken();
}
}


@@ -0,0 +1,23 @@
package io.nosqlbench.activitytype.cql.datamappers.functions.string_string;
import io.nosqlbench.virtdata.annotations.ThreadSafeMapper;
import org.xerial.snappy.Snappy;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.function.Function;
@ThreadSafeMapper
public class SnappyComp implements Function<String, ByteBuffer> {
private Snappy snappy = new Snappy();
@Override
public ByteBuffer apply(String s) {
try {
return ByteBuffer.wrap(Snappy.compress(s));
} catch (IOException e) {
throw new RuntimeException(e);
}
}
}


@@ -0,0 +1,23 @@
package io.nosqlbench.activitytype.cql.ebdrivers.cql.api;
/**
* When an error filter allows us to see and handle an error in a specific way,
* the ErrorResponse determines exactly how we handle it. Each level represents
* a starting point in handling, including everything after the starting point.
* The first enum value is the most severe response.
*/
public enum ErrorResponse {
stop("S"), // Rethrow this error to the runtime, forcing it to handle the error or stop
warn("W"), // log a warning with some details about this error
retry("R"), // resubmit this operation up to the available tries
histogram("H"), // record this metric in a histogram
count("C"), // count this metric separately
ignore("I"); // do nothing
private String symbol;
ErrorResponse(String symbol) {
this.symbol = symbol;
}
}

View File

@ -0,0 +1,18 @@
package io.nosqlbench.activitytype.cql.ebdrivers.cql.api;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Statement;
/**
* An operator interface for performing a modular action on CQL ResultSets per-cycle.
*/
public interface ResultSetCycleOperator {
/**
* Perform an action on a result set for a specific cycle.
* @param resultSet The ResultSet for the given cycle
* @param statement The statement for the given cycle
* @param cycle The cycle for which the statement was submitted
* @return A value, only meaningful when used with aggregated operators
*/
int apply(ResultSet resultSet, Statement statement, long cycle);
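// Illustrative implementation sketch (hypothetical, not part of this commit):
//   public class RowCounter implements ResultSetCycleOperator {
//       public int apply(ResultSet resultSet, Statement statement, long cycle) {
//           // report how many rows are already available for this page
//           return resultSet.getAvailableWithoutFetching();
//       }
//   }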
}

View File

@ -0,0 +1,11 @@
package io.nosqlbench.activitytype.cql.ebdrivers.cql.api;
import com.datastax.driver.core.Row;
/**
* An operator interface for consuming ResultSets and producing some
* int that can be used as a status code in activities.
*/
public interface RowCycleOperator {
int apply(Row row, long cycle);
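// Illustrative implementation sketch (hypothetical, not part of this commit):
//   public class NullCheckOperator implements RowCycleOperator {
//       public int apply(Row row, long cycle) {
//           return row.isNull(0) ? 1 : 0; // non-zero values can signal a status
//       }
//   }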
}

View File

@ -0,0 +1,7 @@
package io.nosqlbench.activitytype.cql.ebdrivers.cql.api;
import com.datastax.driver.core.Statement;
public interface StatementFilter {
boolean matches(Statement statement);
}

View File

@ -0,0 +1,7 @@
package io.nosqlbench.activitytype.cql.ebdrivers.cql.api;
public enum VerifyApplied {
ignore,
error,
retry
}

View File

@ -0,0 +1,164 @@
package io.nosqlbench.activitytype.cql.ebdrivers.cql.core;
import com.datastax.driver.core.*;
import io.nosqlbench.engine.api.activityconfig.ParsedStmt;
import io.nosqlbench.engine.api.activityconfig.yaml.StmtDef;
import java.math.BigDecimal;
import java.net.InetAddress;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
public class CQLBindHelper {
private final static Pattern stmtToken = Pattern.compile("\\?(\\w+[-_\\d\\w]*)|\\{(\\w+[-_\\d\\w.]*)}");
public static Statement rebindUnappliedStatement(Statement statement, ColumnDefinitions defs, Row row) {
for (ColumnDefinitions.Definition def : defs) {
String name = def.getName();
if (!name.equals("[applied]")) {
DataType.Name typeName = def.getType().getName();
// each case must break; otherwise execution falls through to the default throw
switch (typeName) {
case ASCII: // ASCII(1, String.class)
((BoundStatement) statement).bind().setString(name, row.getString(name));
break;
case VARCHAR: // VARCHAR(13, String.class)
((BoundStatement) statement).bind().setString(name, row.getString(name));
break;
case TEXT: // TEXT(10, String.class)
((BoundStatement) statement).bind().setString(name, row.getString(name));
break;
case BIGINT: // BIGINT(2, Long.class)
((BoundStatement) statement).bind().setLong(name, row.getLong(name));
break;
case COUNTER: // COUNTER(5, Long.class)
((BoundStatement) statement).bind().setLong(name, row.getLong(name));
break;
case BLOB: // BLOB(3, ByteBuffer.class)
((BoundStatement) statement).bind().setBytes(name, row.getBytes(name));
break;
case CUSTOM: // CUSTOM(0, ByteBuffer.class)
throw new RuntimeException("The diagnostic binder does not understand custom types yet.");
case BOOLEAN: // BOOLEAN(4, Boolean.class)
((BoundStatement) statement).bind().setBool(name, row.getBool(name));
break;
case DECIMAL: // DECIMAL(6, BigDecimal.class)
((BoundStatement) statement).bind().setDecimal(name, row.getDecimal(name));
break;
case DOUBLE: // DOUBLE(7, Double.class)
((BoundStatement) statement).bind().setDouble(name, row.getDouble(name));
break;
case FLOAT: // FLOAT(8, Float.class)
((BoundStatement) statement).bind().setFloat(name, row.getFloat(name));
break;
case INET: // INET(16, InetAddress.class)
((BoundStatement) statement).bind().setInet(name, row.getInet(name));
break;
case INT: // INT(9, Integer.class)
((BoundStatement) statement).bind().setInt(name, row.getInt(name));
break;
case TIMESTAMP: // TIMESTAMP(11, Date.class)
((BoundStatement) statement).bind().setTimestamp(name, row.getTimestamp(name));
break;
case UUID: // UUID(12, UUID.class)
((BoundStatement) statement).bind().setUUID(name, row.getUUID(name));
break;
case TIMEUUID: // TIMEUUID(15, UUID.class)
((BoundStatement) statement).bind().setUUID(name, row.getUUID(name));
break;
case VARINT: // VARINT(14, BigInteger.class)
((BoundStatement) statement).bind().setInt(name, row.getInt(name));
break;
case UDT: // UDT(48, UDTValue.class)
((BoundStatement) statement).bind().setUDTValue(name, row.getUDTValue(name));
break;
case TUPLE: // TUPLE(49, TupleValue.class)
((BoundStatement) statement).bind().setTupleValue(name, row.getTupleValue(name));
break;
case SMALLINT:
((BoundStatement) statement).bind().setInt(name, row.getInt(name));
break;
case TINYINT:
((BoundStatement) statement).bind().setInt(name, row.getInt(name));
break;
case DATE:
((BoundStatement) statement).bind().setDate(name, row.getDate(name));
break;
case TIME:
((BoundStatement) statement).bind().setTime(name, row.getTime(name));
break;
default:
throw new RuntimeException("Unrecognized type:" + typeName);
}
}
}
return statement;
}
public static BoundStatement bindStatement(Statement statement, String name, Object value, DataType.Name typeName) {
switch (typeName) {
case ASCII: // ASCII(1, String.class)
return ((BoundStatement) statement).bind().setString(name, (String) value);
case VARCHAR: // VARCHAR(13, String.class)
return ((BoundStatement) statement).bind().setString(name, (String) value);
case TEXT: // TEXT(10, String.class)
return ((BoundStatement) statement).bind().setString(name, (String) value);
case BIGINT: // BIGINT(2, Long.class)
return ((BoundStatement) statement).bind().setLong(name, (long) value);
case COUNTER: // COUNTER(5, Long.class)
return ((BoundStatement) statement).bind().setLong(name, (long) value);
case BLOB: // BLOB(3, ByteBuffer.class)
return ((BoundStatement) statement).bind().setBytes(name, (ByteBuffer) value);
case CUSTOM: // CUSTOM(0, ByteBuffer.class)
throw new RuntimeException("The diagnostic binder does not understand custom types yet.");
case BOOLEAN: // BOOLEAN(4, Boolean.class)
return ((BoundStatement) statement).bind().setBool(name, (boolean) value);
case DECIMAL: // DECIMAL(6, BigDecimal.class)
return ((BoundStatement) statement).bind().setDecimal(name, (BigDecimal) value);
case DOUBLE: // DOUBLE(7, Double.class)
return ((BoundStatement) statement).bind().setDouble(name, (double) value);
case FLOAT: // FLOAT(8, Float.class)
return ((BoundStatement) statement).bind().setFloat(name, (float) value);
case INET: // INET(16, InetAddress.class)
return ((BoundStatement) statement).bind().setInet(name, (InetAddress) value);
case INT: // INT(9, Integer.class)
return ((BoundStatement) statement).bind().setInt(name, (int) value);
case TIMESTAMP: // TIMESTAMP(11, Date.class)
return ((BoundStatement) statement).bind().setTimestamp(name, (Date) value);
case UUID: // UUID(12, UUID.class)
return ((BoundStatement) statement).bind().setUUID(name, (UUID) value);
case TIMEUUID: // TIMEUUID(15, UUID.class)
return ((BoundStatement) statement).bind().setUUID(name, (UUID) value);
case VARINT: // VARINT(14, BigInteger.class)
return ((BoundStatement) statement).bind().setInt(name, (int) value);
case UDT: // UDT(48, UDTValue.class)
return ((BoundStatement) statement).bind().setUDTValue(name, (UDTValue) value);
case TUPLE: // TUPLE(49, TupleValue.class)
return ((BoundStatement) statement).bind().setTupleValue(name, (TupleValue) value);
case SMALLINT:
return ((BoundStatement) statement).bind().setInt(name, (int) value);
case TINYINT:
return ((BoundStatement) statement).bind().setInt(name, (int) value);
case DATE:
return ((BoundStatement) statement).bind().setDate(name, (LocalDate) value);
case TIME:
return ((BoundStatement) statement).bind().setTime(name, (long) value);
default:
throw new RuntimeException("Unrecognized type:" + typeName);
}
}
public static Map<String, String> parseAndGetSpecificBindings(StmtDef stmtDef, ParsedStmt parsed) {
List<String> spans = new ArrayList<>();
String statement = stmtDef.getStmt();
Set<String> extraBindings = new HashSet<>(stmtDef.getBindings().keySet());
Map<String, String> specificBindings = new LinkedHashMap<>();
Matcher m = stmtToken.matcher(statement);
int lastMatch = 0;
String remainder = "";
while (m.find(lastMatch)) {
String pre = statement.substring(lastMatch, m.start());
String form1 = m.group(1);
String form2 = m.group(2);
String tokenName = (form1 != null && !form1.isEmpty()) ? form1 : form2;
lastMatch = m.end();
spans.add(pre);
if (extraBindings.contains(tokenName)) {
if (specificBindings.get(tokenName) != null) {
String postfix = UUID.randomUUID().toString();
specificBindings.put(tokenName + postfix, stmtDef.getBindings().get(tokenName));
} else {
specificBindings.put(tokenName, stmtDef.getBindings().get(tokenName));
}
}
}
return specificBindings;
}
}

View File

@ -0,0 +1,198 @@
package io.nosqlbench.activitytype.cql.ebdrivers.cql.core;
import com.datastax.driver.core.*;
import com.datastax.driver.core.policies.*;
import io.netty.util.HashedWheelTimer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.net.InetSocketAddress;
import java.util.*;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
public class CQLOptions {
private final static Logger logger = LoggerFactory.getLogger(CQLOptions.class);
private final static Pattern CORE_AND_MAX_RQ_PATTERN = Pattern.compile("(?<core>\\d+)(:(?<max>\\d+)(:(?<rq>\\d+))?)?(,(?<rcore>\\d+)(:(?<rmax>\\d+)(:(?<rrq>\\d+))?)?)?(,?heartbeat_interval_s:(?<heartbeatinterval>\\d+))?(,?idle_timeout_s:(?<idletimeout>\\d+))?(,?pool_timeout_ms:(?<pooltimeout>\\d+))?");
private final static Pattern PERCENTILE_EAGER_PATTERN = Pattern.compile("^p(?<pctile>[^:]+)(:(?<executions>\\d+))?(:(?<tracked>\\d+)ms)?$");
private final static Pattern CONSTANT_EAGER_PATTERN = Pattern.compile("^((?<msThreshold>\\d++)ms)(:(?<executions>\\d+))?$");
private static ConstantSpeculativeExecutionPolicy constantPolicy(int threshold, int executions) {
return new ConstantSpeculativeExecutionPolicy(threshold, executions);
}
private static SpeculativeExecutionPolicy percentilePolicy(long tracked, double threshold, int executions) {
PerHostPercentileTracker tracker = newTracker(tracked);
return new PercentileSpeculativeExecutionPolicy(tracker, threshold, executions);
}
private static PerHostPercentileTracker newTracker(long millis) {
return PerHostPercentileTracker.builder(millis).build();
}
public static PoolingOptions poolingOptionsFor(String spec) {
Matcher matcher = CORE_AND_MAX_RQ_PATTERN.matcher(spec);
if (matcher.matches()) {
PoolingOptions poolingOptions = new PoolingOptions();
Optional.ofNullable(matcher.group("core")).map(Integer::valueOf)
.ifPresent(core -> poolingOptions.setCoreConnectionsPerHost(HostDistance.LOCAL, core));
Optional.ofNullable(matcher.group("max")).map(Integer::valueOf)
.ifPresent(max -> poolingOptions.setMaxConnectionsPerHost(HostDistance.LOCAL, max));
Optional.ofNullable(matcher.group("rq")).map(Integer::valueOf)
.ifPresent(rq -> poolingOptions.setMaxRequestsPerConnection(HostDistance.LOCAL, rq));
Optional.ofNullable(matcher.group("rcore")).map(Integer::valueOf)
.ifPresent(rcore -> poolingOptions.setCoreConnectionsPerHost(HostDistance.REMOTE, rcore));
Optional.ofNullable(matcher.group("rmax")).map(Integer::valueOf)
.ifPresent(rmax -> poolingOptions.setMaxConnectionsPerHost(HostDistance.REMOTE, rmax));
Optional.ofNullable(matcher.group("rrq")).map(Integer::valueOf)
.ifPresent(rrq -> poolingOptions.setMaxRequestsPerConnection(HostDistance.REMOTE, rrq));
Optional.ofNullable(matcher.group("heartbeatinterval")).map(Integer::valueOf)
.ifPresent(poolingOptions::setHeartbeatIntervalSeconds);
Optional.ofNullable(matcher.group("idletimeout")).map(Integer::valueOf)
.ifPresent(poolingOptions::setIdleTimeoutSeconds);
Optional.ofNullable(matcher.group("pooltimeout")).map(Integer::valueOf)
.ifPresent(poolingOptions::setPoolTimeoutMillis);
return poolingOptions;
}
throw new RuntimeException("No pooling options could be parsed from spec: " + spec);
}
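// Illustrative spec values accepted by CORE_AND_MAX_RQ_PATTERN above (derived from
// the regex itself, not from separate documentation):
//   "2:8:100"                          local core:max:max-requests-per-connection
//   "2:8:100,1:2:100"                  local settings, then remote settings
//   "2:8:100,heartbeat_interval_s:30,idle_timeout_s:120,pool_timeout_ms:5000"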
public static RetryPolicy retryPolicyFor(String spec) {
Set<String> retryBehaviors = Arrays.stream(spec.split(",")).map(String::toLowerCase).collect(Collectors.toSet());
RetryPolicy retryPolicy = DefaultRetryPolicy.INSTANCE;
if (retryBehaviors.contains("default")) {
return retryPolicy;
} // add other mutually-exclusive behaviors here with checks, if we want to extend beyond "default"
if (retryBehaviors.contains("logging")) {
retryPolicy = new LoggingRetryPolicy(retryPolicy);
}
return retryPolicy;
}
public static SocketOptions socketOptionsFor(String spec) {
String[] assignments = spec.split("[,;]");
Map<String, String> values = new HashMap<>();
for (String assignment : assignments) {
String[] namevalue = assignment.split("[:=]", 2);
String name = namevalue[0];
String value = namevalue[1];
values.put(name, value);
}
SocketOptions options = new SocketOptions();
Optional.ofNullable(values.get("read_timeout_ms")).map(Integer::parseInt).ifPresent(
options::setReadTimeoutMillis
);
Optional.ofNullable(values.get("connect_timeout_ms")).map(Integer::parseInt).ifPresent(
options::setConnectTimeoutMillis
);
Optional.ofNullable(values.get("keep_alive")).map(Boolean::parseBoolean).ifPresent(
options::setKeepAlive
);
Optional.ofNullable(values.get("reuse_address")).map(Boolean::parseBoolean).ifPresent(
options::setReuseAddress
);
Optional.ofNullable(values.get("so_linger")).map(Integer::parseInt).ifPresent(
options::setSoLinger
);
Optional.ofNullable(values.get("tcp_no_delay")).map(Boolean::parseBoolean).ifPresent(
options::setTcpNoDelay
);
Optional.ofNullable(values.get("receive_buffer_size")).map(Integer::parseInt).ifPresent(
options::setReceiveBufferSize
);
Optional.ofNullable(values.get("send_buffer_size")).map(Integer::parseInt).ifPresent(
options::setSendBufferSize
);
return options;
}
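// Illustrative spec (derived from the parsing above): comma- or semicolon-separated
// name:value or name=value pairs, e.g.
//   "read_timeout_ms=10000,connect_timeout_ms=5000,keep_alive=true,tcp_no_delay=true"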
public static SpeculativeExecutionPolicy defaultSpeculativePolicy() {
PerHostPercentileTracker tracker = PerHostPercentileTracker
.builder(15000)
.build();
PercentileSpeculativeExecutionPolicy defaultSpecPolicy =
new PercentileSpeculativeExecutionPolicy(tracker, 99.0, 5);
return defaultSpecPolicy;
}
public static SpeculativeExecutionPolicy speculativeFor(String spec) {
Matcher pctileMatcher = PERCENTILE_EAGER_PATTERN.matcher(spec);
Matcher constantMatcher = CONSTANT_EAGER_PATTERN.matcher(spec);
if (pctileMatcher.matches()) {
double pctile = Double.valueOf(pctileMatcher.group("pctile"));
if (pctile > 100.0 || pctile < 0.0) {
throw new RuntimeException("pctile must be between 0.0 and 100.0");
}
String executionsSpec = pctileMatcher.group("executions");
String trackedSpec = pctileMatcher.group("tracked");
int executions = (executionsSpec != null && !executionsSpec.isEmpty()) ? Integer.valueOf(executionsSpec) : 5;
int tracked = (trackedSpec != null && !trackedSpec.isEmpty()) ? Integer.valueOf(trackedSpec) : 15000;
logger.debug("speculative: Creating new percentile tracker policy from spec '" + spec + "'");
return percentilePolicy(tracked, pctile, executions);
} else if (constantMatcher.matches()) {
int threshold = Integer.valueOf(constantMatcher.group("msThreshold"));
String executionsSpec = constantMatcher.group("executions");
int executions = (executionsSpec != null && !executionsSpec.isEmpty()) ? Integer.valueOf(executionsSpec) : 5;
logger.debug("speculative: Creating new constant policy from spec '" + spec + "'");
return constantPolicy(threshold, executions);
} else {
throw new RuntimeException("Unable to parse pattern for speculative option: " + spec + ", it must be in " +
"an accepted form, like p99.0:5:15000, or p99.0:5, or 5000ms:5");
}
}
public static LoadBalancingPolicy whitelistFor(String s, LoadBalancingPolicy innerPolicy) {
String[] addrSpecs = s.split(",");
List<InetSocketAddress> sockAddrs = Arrays.stream(addrSpecs)
.map(CQLOptions::toSocketAddr)
.collect(Collectors.toList());
if (innerPolicy == null) {
innerPolicy = new RoundRobinPolicy();
}
return new WhiteListPolicy(innerPolicy, sockAddrs);
}
public static NettyOptions withTickDuration(String tick) {
logger.info("Cluster builder using custom tick duration value for HashedWheelTimer: " + tick + " milliseconds");
int tickDuration = Integer.valueOf(tick);
return new NettyOptions() {
public io.netty.util.Timer timer(ThreadFactory threadFactory) {
return new HashedWheelTimer(
threadFactory, tickDuration, TimeUnit.MILLISECONDS);
}
};
}
private static InetSocketAddress toSocketAddr(String addr) {
String[] addrs = addr.split(":", 2);
String inetHost = addrs[0];
String inetPort = (addrs.length == 2) ? addrs[1] : "9042";
return new InetSocketAddress(inetHost, Integer.valueOf(inetPort));
}
public static ProtocolOptions.Compression withCompression(String compspec) {
try {
return ProtocolOptions.Compression.valueOf(compspec);
} catch (IllegalArgumentException iae) {
throw new RuntimeException("Compression option '" + compspec + "' was specified, but only " +
Arrays.toString(ProtocolOptions.Compression.values()) + " are available.");
}
}
}

View File

@ -0,0 +1,359 @@
package io.nosqlbench.activitytype.cql.ebdrivers.cql.core;
import com.codahale.metrics.Timer;
import com.datastax.driver.core.*;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.api.ResultSetCycleOperator;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.api.RowCycleOperator;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.api.StatementFilter;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.ErrorStatus;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.HashedCQLErrorHandler;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.exceptions.CQLCycleException;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.exceptions.ChangeUnappliedCycleException;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.exceptions.MaxTriesExhaustedException;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.exceptions.UnexpectedPagingException;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.core.ReadyCQLStatement;
import com.google.common.util.concurrent.ListenableFuture;
import io.nosqlbench.engine.api.activityapi.core.ActivityDefObserver;
import io.nosqlbench.engine.api.activityapi.core.MultiPhaseAction;
import io.nosqlbench.engine.api.activityapi.core.SyncAction;
import io.nosqlbench.engine.api.activityapi.planning.OpSequence;
import io.nosqlbench.engine.api.activityimpl.ActivityDef;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.List;
import java.util.concurrent.TimeUnit;
@SuppressWarnings("Duplicates")
public class CqlAction implements SyncAction, MultiPhaseAction, ActivityDefObserver {
private final static Logger logger = LoggerFactory.getLogger(CqlAction.class);
private final int slot;
private final CqlActivity cqlActivity;
private final ActivityDef activityDef;
private List<RowCycleOperator> rowOps;
private List<ResultSetCycleOperator> cycleOps;
private List<StatementModifier> modifiers;
private StatementFilter statementFilter;
private OpSequence<ReadyCQLStatement> sequencer;
private int maxTries = 10; // how many times a statement will be attempted before giving up
private HashedCQLErrorHandler ebdseErrorHandler;
private int pagesFetched = 0;
private long totalRowsFetchedForQuery = 0L;
private ResultSet pagingResultSet;
private Statement pagingStatement;
private ReadyCQLStatement pagingReadyStatement;
private boolean showcql;
private long nanoStartTime;
private long retryDelay;
private long maxRetryDelay;
private boolean retryReplace;
public CqlAction(ActivityDef activityDef, int slot, CqlActivity cqlActivity) {
this.activityDef = activityDef;
this.cqlActivity = cqlActivity;
this.slot = slot;
onActivityDefUpdate(activityDef);
}
@Override
public void init() {
onActivityDefUpdate(activityDef);
this.sequencer = cqlActivity.getOpSequencer();
}
@Override
public int runCycle(long value) {
// In this activity type, we use the same phase
// logic for the initial phase (runCycle(...))
// as well as subsequent phases.
return runPhase(value);
}
public int runPhase(long cycleValue) {
HashedCQLErrorHandler.resetThreadStatusCode();
if (pagingResultSet == null) {
totalRowsFetchedForQuery = 0L;
Statement statement;
ResultSetFuture resultSetFuture;
ReadyCQLStatement readyCQLStatement;
int tries = 0;
try (Timer.Context bindTime = cqlActivity.bindTimer.time()) {
readyCQLStatement = sequencer.get(cycleValue);
statement = readyCQLStatement.bind(cycleValue);
if (statementFilter != null) {
if (!statementFilter.matches(statement)) {
cqlActivity.skippedTokensHisto.update(cycleValue);
return 0;
}
}
if (modifiers != null) {
for (StatementModifier modifier : modifiers) {
statement = modifier.modify(statement, cycleValue);
}
}
if (showcql) {
logger.info("CQL(cycle=" + cycleValue + "):\n" + readyCQLStatement.getQueryString(cycleValue));
}
}
nanoStartTime = System.nanoTime();
// attempt up to maxTries times; the check below makes the exhaustion case reachable
while (true) {
tries++;
if (tries > maxTries) {
throw new MaxTriesExhaustedException(cycleValue, maxTries);
}
if (tries > 1) {
try (Timer.Context retryTime = cqlActivity.retryDelayTimer.time()) {
Thread.sleep(Math.min((retryDelay << tries) / 1000, maxRetryDelay / 1000));
} catch (InterruptedException ignored) {
}
}
try (Timer.Context executeTime = cqlActivity.executeTimer.time()) {
resultSetFuture = cqlActivity.getSession().executeAsync(statement);
}
Timer.Context resultTime = cqlActivity.resultTimer.time();
try {
ResultSet resultSet = resultSetFuture.getUninterruptibly();
if (cycleOps != null) {
for (ResultSetCycleOperator cycleOp : cycleOps) {
cycleOp.apply(resultSet, statement, cycleValue);
}
}
ResultSetCycleOperator[] perStmtRSOperators = readyCQLStatement.getResultSetOperators();
if (perStmtRSOperators != null) {
for (ResultSetCycleOperator perStmtRSOperator : perStmtRSOperators) {
perStmtRSOperator.apply(resultSet, statement, cycleValue);
}
}
if (!resultSet.wasApplied()) {
Row row = resultSet.one();
ColumnDefinitions defs = row.getColumnDefinitions();
if (retryReplace) {
statement = CQLBindHelper.rebindUnappliedStatement(statement, defs, row);
}
logger.trace(readyCQLStatement.getQueryString(cycleValue));
// To make exception handling logic flow more uniformly
throw new ChangeUnappliedCycleException(
cycleValue, resultSet, readyCQLStatement.getQueryString(cycleValue)
);
}
int pageRows = resultSet.getAvailableWithoutFetching();
int remaining = pageRows;
RowCycleOperator[] perStmtRowOperators = readyCQLStatement.getRowCycleOperators();
if (rowOps == null && perStmtRowOperators==null) {
while (remaining-- > 0) {
Row row = resultSet.one();
// NOTE: This has been replaced by:
// params:
// rowops: savevars
// You must add this to the YAML for statements that are meant to capture vars
// HashMap<String, Object> bindings = SharedState.tl_ObjectMap.get();
// for (ColumnDefinitions.Definition cdef : row.getColumnDefinitions()) {
// bindings.put(cdef.getName(), row.getObject(cdef.getName()));
// }
//
}
} else {
while (remaining-- > 0) {
Row onerow = resultSet.one();
if (rowOps!=null) {
for (RowCycleOperator rowOp : rowOps) {
rowOp.apply(onerow, cycleValue);
}
}
if (perStmtRowOperators!=null) {
for (RowCycleOperator rowOp : perStmtRowOperators) {
rowOp.apply(onerow, cycleValue);
}
}
}
}
cqlActivity.rowsCounter.mark(pageRows);
totalRowsFetchedForQuery += pageRows;
if (resultSet.isFullyFetched()) {
long resultNanos = System.nanoTime() - nanoStartTime;
cqlActivity.resultSuccessTimer.update(resultNanos, TimeUnit.NANOSECONDS);
cqlActivity.resultSetSizeHisto.update(totalRowsFetchedForQuery);
readyCQLStatement.onSuccess(cycleValue, resultNanos, totalRowsFetchedForQuery);
} else {
if (cqlActivity.maxpages > 1) {
pagingResultSet = resultSet;
pagingStatement = statement;
pagingReadyStatement = readyCQLStatement;
pagesFetched = 1;
} else {
throw new UnexpectedPagingException(
cycleValue,
resultSet,
readyCQLStatement.getQueryString(cycleValue),
1,
cqlActivity.maxpages,
cqlActivity.getSession().getCluster().getConfiguration().getQueryOptions().getFetchSize()
);
}
}
break; // This is normal termination of this loop, when retries aren't needed
} catch (Exception e) {
long resultNanos = resultTime.stop();
resultTime = null;
readyCQLStatement.onError(cycleValue, resultNanos, e);
CQLCycleException cqlCycleException = new CQLCycleException(cycleValue, resultNanos, e, readyCQLStatement);
ErrorStatus errorStatus = ebdseErrorHandler.handleError(cycleValue, cqlCycleException);
if (!errorStatus.isRetryable()) {
cqlActivity.triesHisto.update(tries);
return errorStatus.getResultCode();
}
} finally {
if (resultTime != null) {
resultTime.stop();
}
}
}
cqlActivity.triesHisto.update(tries);
} else {
int tries = 0;
// attempt up to maxTries times; the check below makes the exhaustion case reachable
while (true) {
tries++;
if (tries > maxTries) {
throw new MaxTriesExhaustedException(cycleValue, maxTries);
}
ListenableFuture<ResultSet> pagingFuture;
try (Timer.Context pagingTime = cqlActivity.pagesTimer.time()) {
try (Timer.Context executeTime = cqlActivity.executeTimer.time()) {
pagingFuture = pagingResultSet.fetchMoreResults();
}
Timer.Context resultTime = cqlActivity.resultTimer.time();
try {
ResultSet resultSet = pagingFuture.get();
if (cycleOps != null) {
for (ResultSetCycleOperator cycleOp : cycleOps) {
cycleOp.apply(resultSet, pagingStatement, cycleValue);
}
}
ResultSetCycleOperator[] perStmtRSOperators = pagingReadyStatement.getResultSetOperators();
if (perStmtRSOperators != null) {
for (ResultSetCycleOperator perStmtRSOperator : perStmtRSOperators) {
perStmtRSOperator.apply(resultSet, pagingStatement, cycleValue);
}
}
pagesFetched++;
int pageRows = resultSet.getAvailableWithoutFetching();
int remaining = pageRows;
if (rowOps == null) {
while (remaining-- > 0) {
resultSet.one();
}
} else {
while (remaining-- > 0) {
for (RowCycleOperator rowOp : rowOps) {
rowOp.apply(resultSet.one(), cycleValue);
}
}
}
cqlActivity.rowsCounter.mark(pageRows);
totalRowsFetchedForQuery += pageRows;
if (resultSet.isFullyFetched()) {
long nanoTime = System.nanoTime() - nanoStartTime;
cqlActivity.resultSuccessTimer.update(nanoTime, TimeUnit.NANOSECONDS);
cqlActivity.resultSetSizeHisto.update(totalRowsFetchedForQuery);
pagingReadyStatement.onSuccess(cycleValue, nanoTime, totalRowsFetchedForQuery);
pagingResultSet = null;
} else {
if (pagesFetched > cqlActivity.maxpages) {
throw new UnexpectedPagingException(
cycleValue,
pagingResultSet,
pagingReadyStatement.getQueryString(cycleValue),
pagesFetched,
cqlActivity.maxpages,
cqlActivity.getSession().getCluster().getConfiguration().getQueryOptions().getFetchSize()
);
}
pagingResultSet = resultSet;
}
break; // This is normal termination of this loop, when retries aren't needed
} catch (Exception e) {
long resultNanos = resultTime.stop();
resultTime = null;
pagingReadyStatement.onError(cycleValue, resultNanos, e);
CQLCycleException cqlCycleException = new CQLCycleException(cycleValue, resultNanos, e, pagingReadyStatement);
ErrorStatus errorStatus = ebdseErrorHandler.handleError(cycleValue, cqlCycleException);
if (!errorStatus.isRetryable()) {
cqlActivity.triesHisto.update(tries);
return errorStatus.getResultCode();
}
} finally {
if (resultTime != null) {
resultTime.stop();
}
}
}
}
cqlActivity.triesHisto.update(tries);
}
return 0;
}
@Override
public boolean incomplete() {
return pagingResultSet != null;
}
@Override
public void onActivityDefUpdate(ActivityDef activityDef) {
this.maxTries = cqlActivity.getMaxTries();
this.retryDelay = cqlActivity.getRetryDelay();
this.maxRetryDelay = cqlActivity.getMaxRetryDelay();
this.retryReplace = cqlActivity.isRetryReplace();
this.showcql = cqlActivity.isShowCql();
this.ebdseErrorHandler = cqlActivity.getCqlErrorHandler();
this.statementFilter = cqlActivity.getStatementFilter();
this.rowOps = cqlActivity.getRowCycleOperators();
this.cycleOps = cqlActivity.getResultSetCycleOperators();
this.modifiers = cqlActivity.getStatementModifiers();
}
protected CqlActivity getCqlActivity() {
return cqlActivity;
}
}

View File

@ -0,0 +1,27 @@
package io.nosqlbench.activitytype.cql.ebdrivers.cql.core;
import io.nosqlbench.engine.api.activityapi.core.Action;
import io.nosqlbench.engine.api.activityapi.core.ActionDispenser;
public class CqlActionDispenser implements ActionDispenser {
public CqlActivity getCqlActivity() {
return cqlActivity;
}
private CqlActivity cqlActivity;
public CqlActionDispenser(CqlActivity activityContext) {
this.cqlActivity = activityContext;
}
public Action getAction(int slot) {
long async = cqlActivity.getActivityDef().getParams().getOptionalLong("async").orElse(0L);
if (async>0) {
return new CqlAsyncAction(cqlActivity, slot);
} else {
return new CqlAction(cqlActivity.getActivityDef(), slot, cqlActivity);
}
}
}

View File

@ -0,0 +1,655 @@
package io.nosqlbench.activitytype.cql.ebdrivers.cql.core;
import com.codahale.metrics.Histogram;
import com.codahale.metrics.Meter;
import com.codahale.metrics.Timer;
import com.datastax.driver.core.*;
import io.nosqlbench.activitytype.cql.codecsupport.UDTCodecInjector;
import com.datastax.driver.core.TokenRangeStmtFilter;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.api.ErrorResponse;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.api.ResultSetCycleOperator;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.api.RowCycleOperator;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.api.StatementFilter;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.EbdseCycleErrorHandler;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.HashedCQLErrorHandler;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.binders.CqlBinderTypes;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.core.*;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.rowoperators.RowCycleOperators;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.rowoperators.Save;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.rsoperators.ResultSetCycleOperators;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.rsoperators.TraceLogger;
import io.nosqlbench.engine.api.activityapi.core.Activity;
import io.nosqlbench.engine.api.activityapi.core.ActivityDefObserver;
import io.nosqlbench.engine.api.activityapi.planning.OpSequence;
import io.nosqlbench.engine.api.activityapi.planning.SequencePlanner;
import io.nosqlbench.engine.api.activityapi.planning.SequencerType;
import io.nosqlbench.engine.api.activityconfig.ParsedStmt;
import io.nosqlbench.engine.api.activityconfig.StatementsLoader;
import io.nosqlbench.engine.api.activityconfig.rawyaml.RawStmtDef;
import io.nosqlbench.engine.api.activityconfig.rawyaml.RawStmtsBlock;
import io.nosqlbench.engine.api.activityconfig.rawyaml.RawStmtsDoc;
import io.nosqlbench.engine.api.activityconfig.rawyaml.RawStmtsDocList;
import io.nosqlbench.engine.api.activityconfig.yaml.StmtDef;
import io.nosqlbench.engine.api.activityconfig.yaml.StmtsDocList;
import io.nosqlbench.engine.api.activityimpl.ActivityDef;
import io.nosqlbench.engine.api.activityimpl.ParameterMap;
import io.nosqlbench.engine.api.activityimpl.SimpleActivity;
import io.nosqlbench.engine.api.metrics.ActivityMetrics;
import io.nosqlbench.engine.api.metrics.ExceptionCountMetrics;
import io.nosqlbench.engine.api.metrics.ExceptionHistoMetrics;
import io.nosqlbench.engine.api.util.SimpleConfig;
import io.nosqlbench.engine.api.util.StrInterpolater;
import io.nosqlbench.engine.api.util.TagFilter;
import io.nosqlbench.engine.api.util.Unit;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.FileWriter;
import java.io.IOException;
import java.io.Writer;
import java.nio.charset.StandardCharsets;
import java.util.*;
@SuppressWarnings("Duplicates")
public class CqlActivity extends SimpleActivity implements Activity, ActivityDefObserver {
private final static Logger logger = LoggerFactory.getLogger(CqlActivity.class);
private final ExceptionCountMetrics exceptionCountMetrics;
private final ExceptionHistoMetrics exceptionHistoMetrics;
private final ActivityDef activityDef;
private final Map<String, Writer> namedWriters = new HashMap<>();
protected List<StmtDef> stmts;
Timer retryDelayTimer;
Timer bindTimer;
Timer executeTimer;
Timer resultTimer;
Timer resultSuccessTimer;
Timer pagesTimer;
Histogram triesHisto;
Histogram skippedTokensHisto;
Histogram resultSetSizeHisto;
int maxpages;
Meter rowsCounter;
private HashedCQLErrorHandler errorHandler;
private OpSequence<ReadyCQLStatement> opsequence;
private Session session;
private int maxTries;
private StatementFilter statementFilter;
private Boolean showcql;
private List<RowCycleOperator> rowCycleOperators;
private List<ResultSetCycleOperator> resultSetCycleOperators;
private List<StatementModifier> statementModifiers;
private Long maxTotalOpsInFlight;
private long retryDelay;
private long maxRetryDelay;
private boolean retryReplace;
private String pooling;
public CqlActivity(ActivityDef activityDef) {
super(activityDef);
this.activityDef = activityDef;
exceptionCountMetrics = new ExceptionCountMetrics(activityDef);
exceptionHistoMetrics = new ExceptionHistoMetrics(activityDef);
}
private void registerCodecs(Session session) {
UDTCodecInjector injector = new UDTCodecInjector();
injector.injectUserProvidedCodecs(session, true);
}
@Override
public synchronized void initActivity() {
logger.debug("initializing activity: " + this.activityDef.getAlias());
session = getSession();
if (getParams().getOptionalBoolean("usercodecs").orElse(false)) {
registerCodecs(session);
}
initSequencer();
setDefaultsFromOpSequence(this.opsequence);
retryDelayTimer = ActivityMetrics.timer(activityDef, "retry-delay");
bindTimer = ActivityMetrics.timer(activityDef, "bind");
executeTimer = ActivityMetrics.timer(activityDef, "execute");
resultTimer = ActivityMetrics.timer(activityDef, "result");
triesHisto = ActivityMetrics.histogram(activityDef, "tries");
pagesTimer = ActivityMetrics.timer(activityDef, "pages");
rowsCounter = ActivityMetrics.meter(activityDef, "rows");
skippedTokensHisto = ActivityMetrics.histogram(activityDef, "skipped-tokens");
resultSuccessTimer = ActivityMetrics.timer(activityDef, "result-success");
resultSetSizeHisto = ActivityMetrics.histogram(activityDef, "resultset-size");
onActivityDefUpdate(activityDef);
logger.debug("activity fully initialized: " + this.activityDef.getAlias());
}
public synchronized Session getSession() {
if (session == null) {
session = CQLSessionCache.get().getSession(this.getActivityDef());
}
return session;
}
private void initSequencer() {
SequencerType sequencerType = SequencerType.valueOf(
getParams().getOptionalString("seq").orElse("bucket")
);
SequencePlanner<ReadyCQLStatement> planner = new SequencePlanner<>(sequencerType);
StmtsDocList unfiltered = loadStmtsYaml();
// log tag filtering results
String tagfilter = activityDef.getParams().getOptionalString("tags").orElse("");
TagFilter tagFilter = new TagFilter(tagfilter);
unfiltered.getStmts().stream().map(tagFilter::matchesTaggedResult).forEach(r -> logger.info(r.getLog()));
stmts = unfiltered.getStmts(tagfilter);
if (stmts.size() == 0) {
throw new RuntimeException("There were no statements found for this activity that matched tag filter '" + tagfilter + "'.");
}
for (StmtDef stmtDef : stmts) {
ParsedStmt parsed = stmtDef.getParsed().orError();
boolean prepared = Boolean.valueOf(stmtDef.getParams().getOrDefault("prepared", "true"));
long ratio = Long.valueOf(stmtDef.getParams().getOrDefault("ratio", "1"));
Optional<ConsistencyLevel> cl = Optional.ofNullable(
stmtDef.getParams().getOrDefault("cl", null)).map(ConsistencyLevel::valueOf);
Optional<ConsistencyLevel> serial_cl = Optional.ofNullable(
stmtDef.getParams().getOrDefault("serial_cl", null)).map(ConsistencyLevel::valueOf);
Optional<Boolean> idempotent = Optional.ofNullable(stmtDef.getParams().getOrDefault("idempotent", null))
.map(Boolean::valueOf);
StringBuilder psummary = new StringBuilder();
boolean instrument = Optional.ofNullable(stmtDef.getParams()
.get("instrument")).map(Boolean::valueOf)
.orElse(getParams().getOptionalBoolean("instrument").orElse(false));
String logresultcsv = stmtDef.getParams().getOrDefault("logresultcsv","");
String logresultcsv_act = getParams().getOptionalString("logresultcsv").orElse("");
if (!logresultcsv_act.isEmpty() && !logresultcsv_act.toLowerCase().equals("true")) {
throw new RuntimeException("At the activity level, only logresultcsv=true is allowed, no other values.");
}
logresultcsv = !logresultcsv.isEmpty() ? logresultcsv : logresultcsv_act;
logresultcsv = !logresultcsv.toLowerCase().equals("true") ? logresultcsv : stmtDef.getName()+"--results.csv";
logger.debug("readying statement[" + (prepared ? "" : "un") + "prepared]:" + parsed.getStmt());
ReadyCQLStatementTemplate template;
String stmtForDriver = parsed.getPositionalStatement(s -> "?");
if (prepared) {
psummary.append(" prepared=>").append(prepared);
PreparedStatement prepare = getSession().prepare(stmtForDriver);
cl.ifPresent((conlvl) -> {
psummary.append(" consistency_level=>").append(conlvl);
prepare.setConsistencyLevel(conlvl);
});
serial_cl.ifPresent((scl) -> {
psummary.append(" serial_consistency_level=>").append(serial_cl);
prepare.setSerialConsistencyLevel(scl);
});
idempotent.ifPresent((i) -> {
psummary.append(" idempotent=").append(idempotent);
prepare.setIdempotent(i);
});
CqlBinderTypes binderType = CqlBinderTypes.valueOf(stmtDef.getParams()
.getOrDefault("binder", CqlBinderTypes.DEFAULT.toString()));
template = new ReadyCQLStatementTemplate(binderType, getSession(), prepare, ratio, parsed.getName());
} else {
SimpleStatement simpleStatement = new SimpleStatement(stmtForDriver);
cl.ifPresent((conlvl) -> {
psummary.append(" consistency_level=>").append(conlvl);
simpleStatement.setConsistencyLevel(conlvl);
});
serial_cl.ifPresent((scl) -> {
psummary.append(" serial_consistency_level=>").append(scl);
simpleStatement.setSerialConsistencyLevel(scl);
});
idempotent.ifPresent((i) -> {
psummary.append(" idempotent=>").append(i);
simpleStatement.setIdempotent(i);
});
template = new ReadyCQLStatementTemplate(getSession(), simpleStatement, ratio, parsed.getName());
}
Optional.ofNullable(stmtDef.getParams().getOrDefault("save", null))
.map(s -> s.split("[,; ]"))
.map(Save::new)
.ifPresent(save_op -> {
psummary.append(" save=>").append(save_op.toString());
template.addRowCycleOperators(save_op);
});
Optional.ofNullable(stmtDef.getParams().getOrDefault("rsoperators", null))
.map(s -> s.split(","))
.stream().flatMap(Arrays::stream)
.map(ResultSetCycleOperators::newOperator)
.forEach(rso -> {
psummary.append(" rsop=>").append(rso.toString());
template.addResultSetOperators(rso);
});
Optional.ofNullable(stmtDef.getParams().getOrDefault("rowoperators", null))
.map(s -> s.split(","))
.stream().flatMap(Arrays::stream)
.map(RowCycleOperators::newOperator)
.forEach(ro -> {
psummary.append(" rowop=>").append(ro.toString());
template.addRowCycleOperators(ro);
});
if (instrument) {
logger.info("Adding per-statement success and error and resultset-size timers to statement '" + parsed.getName() + "'");
template.instrument(this);
psummary.append(" instrument=>").append(instrument);
}
if (!logresultcsv.isEmpty()) {
logger.info("Adding per-statement result CSV logging to statement '" + parsed.getName() + "'");
template.logResultCsv(this,logresultcsv);
psummary.append(" logresultcsv=>").append(logresultcsv);
}
template.getContextualBindings().getBindingsTemplate().addFieldBindings(stmtDef.getParsed().getBindPoints());
if (psummary.length() > 0) {
logger.info("statement named '" + stmtDef.getName() + "' has custom settings:" + psummary.toString());
}
planner.addOp(template.resolve(), ratio);
}
opsequence = planner.resolve();
}
private StmtsDocList loadStmtsYaml() {
StmtsDocList doclist = null;
String yaml_loc = activityDef.getParams().getOptionalString("yaml").orElse("default");
StrInterpolater interp = new StrInterpolater(activityDef);
String yamlVersion = "unset";
if (yaml_loc.endsWith(":1") || yaml_loc.endsWith(":2")) {
yamlVersion = yaml_loc.substring(yaml_loc.length() - 1);
yaml_loc = yaml_loc.substring(0, yaml_loc.length() - 2);
}
switch (yamlVersion) {
case "1":
doclist = getVersion1StmtsDoc(interp, yaml_loc);
logger.warn("DEPRECATED-FORMAT: Loaded yaml " + yaml_loc + " with compatibility mode. " +
"This will be deprecated in a future release.");
logger.warn("DEPRECATED-FORMAT: Please refer to " +
"http://docs.engineblock.io/user-guide/standard_yaml/ for more details.");
break;
case "2":
doclist = StatementsLoader.load(logger, yaml_loc, interp, "activities");
break;
case "unset":
try {
logger.debug("You can suffix your yaml filename or url with the " +
"format version, such as :1 or :2. Assuming version 2.");
doclist = StatementsLoader.load(null, yaml_loc, interp, "activities");
} catch (Exception ignored) {
try {
doclist = getVersion1StmtsDoc(interp, yaml_loc);
logger.warn("DEPRECATED-FORMAT: Loaded yaml " + yaml_loc +
" with compatibility mode. This will be deprecated in a future release.");
logger.warn("DEPRECATED-FORMAT: Please refer to " +
"http://docs.engineblock.io/user-guide/standard_yaml/ for more details.");
} catch (Exception compatError) {
logger.warn("Tried to load yaml in compatibility mode, " +
"since it failed to load with the standard format, " +
"but found an error:" + compatError);
logger.warn("The following detailed errors are provided only " +
"for the standard format. To force loading version 1 with detailed logging, add" +
" a version qualifier to your yaml filename or url like ':1'");
// retrigger the error again, this time with logging enabled.
doclist = StatementsLoader.load(logger, yaml_loc, interp, "activities");
}
}
break;
default:
throw new RuntimeException("Unrecognized yaml format version, expected :1 or :2 " +
"at end of yaml file, but got " + yamlVersion + " instead.");
}
return doclist;
}
@Deprecated
private StmtsDocList getVersion1StmtsDoc(StrInterpolater interp, String yaml_loc) {
StmtsDocList unfiltered;
List<RawStmtsBlock> blocks = new ArrayList<>();
YamlCQLStatementLoader deprecatedLoader = new YamlCQLStatementLoader(interp);
AvailableCQLStatements rawDocs = deprecatedLoader.load(yaml_loc, "activities");
List<TaggedCQLStatementDefs> rawTagged = rawDocs.getRawTagged();
for (TaggedCQLStatementDefs rawdef : rawTagged) {
for (CQLStatementDef rawstmt : rawdef.getStatements()) {
RawStmtsBlock rawblock = new RawStmtsBlock();
// tags
rawblock.setTags(rawdef.getTags());
// params
Map<String, String> params = new HashMap<>(rawdef.getParams());
if (rawstmt.getConsistencyLevel() != null && !rawstmt.getConsistencyLevel().isEmpty())
params.put("cl", rawstmt.getConsistencyLevel());
if (!rawstmt.isPrepared()) params.put("prepared", "false");
if (rawstmt.getRatio() != 1L)
params.put("ratio", String.valueOf(rawstmt.getRatio()));
rawblock.setParams(params);
// stmts
List<RawStmtDef> stmtslist = new ArrayList<>();
stmtslist.add(new RawStmtDef(rawstmt.getName(), rawstmt.getStatement()));
rawblock.setRawStmtDefs(stmtslist);
// bindings
rawblock.setBindings(rawstmt.getBindings());
blocks.add(rawblock);
}
}
RawStmtsDoc rawStmtsDoc = new RawStmtsDoc();
rawStmtsDoc.setBlocks(blocks);
List<RawStmtsDoc> rawStmtsDocs = new ArrayList<>();
rawStmtsDocs.add(rawStmtsDoc);
RawStmtsDocList rawStmtsDocList = new RawStmtsDocList(rawStmtsDocs);
unfiltered = new StmtsDocList(rawStmtsDocList);
return unfiltered;
}
public ExceptionCountMetrics getExceptionCountMetrics() {
return exceptionCountMetrics;
}
@Override
public String toString() {
return "CQLActivity {" +
"activityDef=" + activityDef +
", session=" + session +
", opSequence=" + this.opsequence +
'}';
}
@Override
public void onActivityDefUpdate(ActivityDef activityDef) {
super.onActivityDefUpdate(activityDef);
clearResultSetCycleOperators();
clearRowCycleOperators();
clearStatementModifiers();
ParameterMap params = activityDef.getParams();
Optional<String> fetchSizeOption = params.getOptionalString("fetchsize");
Cluster cluster = getSession().getCluster();
if (fetchSizeOption.isPresent()) {
int fetchSize = fetchSizeOption.flatMap(Unit::bytesFor).map(Double::intValue).orElseThrow(() -> new RuntimeException(
"Unable to parse fetch size from " + fetchSizeOption.get()
));
if (fetchSize > 10000000 && fetchSize < 1000000000) {
logger.warn("Setting the fetchsize to " + fetchSize + " is unlikely to give good performance.");
} else if (fetchSize > 1000000000) {
throw new RuntimeException("Setting the fetch size to " + fetchSize + " is likely to cause instability.");
}
logger.trace("setting fetchSize to " + fetchSize);
cluster.getConfiguration().getQueryOptions().setFetchSize(fetchSize);
}
this.retryDelay = params.getOptionalLong("retrydelay").orElse(0L);
this.maxRetryDelay = params.getOptionalLong("maxretrydelay").orElse(500L);
this.retryReplace = params.getOptionalBoolean("retryreplace").orElse(false);
this.maxTries = params.getOptionalInteger("maxtries").orElse(10);
this.showcql = params.getOptionalBoolean("showcql").orElse(false);
this.maxpages = params.getOptionalInteger("maxpages").orElse(1);
this.statementFilter = params.getOptionalString("tokens")
.map(s -> new TokenRangeStmtFilter(cluster, s))
.orElse(null);
if (statementFilter != null) {
logger.info("filtering statements" + statementFilter);
}
errorHandler = configureErrorHandler();
params.getOptionalString("trace")
.map(SimpleConfig::new)
.map(TraceLogger::new)
.ifPresent(
tl -> {
addResultSetCycleOperator(tl);
addStatementModifier(tl);
});
this.maxTotalOpsInFlight = params.getOptionalLong("async").orElse(1L);
Optional<String> dynpooling = params.getOptionalString("pooling");
if (dynpooling.isPresent()) {
logger.info("dynamically updating pooling");
if (!dynpooling.get().equals(this.pooling)) {
PoolingOptions opts = CQLOptions.poolingOptionsFor(dynpooling.get());
logger.info("pooling=>" + dynpooling.get());
PoolingOptions cfg = getSession().getCluster().getConfiguration().getPoolingOptions();
// This looks funny, because we have to set max conns per host
// in an order that will appease the driver, as there is no "apply settings"
// to do that for us, so we raise max first if it goes higher, and we lower
// it last, if it goes lower
int prior_mcph_l = cfg.getMaxConnectionsPerHost(HostDistance.LOCAL);
int mcph_l = opts.getMaxConnectionsPerHost(HostDistance.LOCAL);
int ccph_l = opts.getCoreConnectionsPerHost(HostDistance.LOCAL);
if (prior_mcph_l < mcph_l) {
logger.info("setting mcph_l to " + mcph_l);
cfg.setMaxConnectionsPerHost(HostDistance.LOCAL, mcph_l);
}
logger.info("setting ccph_l to " + ccph_l);
cfg.setCoreConnectionsPerHost(HostDistance.LOCAL, ccph_l);
if (mcph_l < prior_mcph_l) {
logger.info("setting mcph_l to " + mcph_l);
cfg.setMaxConnectionsPerHost(HostDistance.LOCAL, mcph_l);
}
cfg.setMaxRequestsPerConnection(HostDistance.LOCAL, opts.getMaxRequestsPerConnection(HostDistance.LOCAL));
int prior_mcph_r = cfg.getMaxConnectionsPerHost(HostDistance.REMOTE);
int mcph_r = opts.getMaxConnectionsPerHost(HostDistance.REMOTE);
int ccph_r = opts.getCoreConnectionsPerHost(HostDistance.REMOTE);
if (mcph_r > 0) {
if (mcph_r > prior_mcph_r) cfg.setMaxConnectionsPerHost(HostDistance.REMOTE, mcph_r);
cfg.setCoreConnectionsPerHost(HostDistance.REMOTE, ccph_r);
if (prior_mcph_r > mcph_r) cfg.setMaxConnectionsPerHost(HostDistance.REMOTE, mcph_r);
if (opts.getMaxConnectionsPerHost(HostDistance.REMOTE) > 0) {
cfg.setMaxRequestsPerConnection(HostDistance.REMOTE, opts.getMaxRequestsPerConnection(HostDistance.REMOTE));
}
}
this.pooling = dynpooling.get();
}
}
}
// TODO: make error handler updates consistent under concurrent updates
private HashedCQLErrorHandler configureErrorHandler() {
HashedCQLErrorHandler newerrorHandler = new HashedCQLErrorHandler(exceptionCountMetrics);
String errors = activityDef.getParams()
.getOptionalString("errors")
.orElse("stop,retryable->retry,unverified->stop");
String[] handlerSpecs = errors.split(",");
for (String spec : handlerSpecs) {
String[] keyval = spec.split("=|->|:", 2);
if (keyval.length == 1) {
String verb = keyval[0];
newerrorHandler.setDefaultHandler(
new EbdseCycleErrorHandler(
ErrorResponse.valueOf(verb),
exceptionCountMetrics,
exceptionHistoMetrics,
!getParams().getOptionalLong("async").isPresent()
)
);
} else {
String pattern = keyval[0];
String verb = keyval[1];
if (newerrorHandler.getGroupNames().contains(pattern)) {
EbdseCycleErrorHandler handler =
new EbdseCycleErrorHandler(
ErrorResponse.valueOf(verb),
exceptionCountMetrics,
exceptionHistoMetrics,
!getParams().getOptionalLong("async").isPresent()
);
logger.info("Handling error group '" + pattern + "' with handler:" + handler);
newerrorHandler.setHandlerForGroup(pattern, handler);
} else {
EbdseCycleErrorHandler handler = new EbdseCycleErrorHandler(
ErrorResponse.valueOf(keyval[1]),
exceptionCountMetrics,
exceptionHistoMetrics,
!getParams().getOptionalLong("async").isPresent()
);
logger.info("Handling error pattern '" + pattern + "' with handler:" + handler);
newerrorHandler.setHandlerForPattern(keyval[0], handler);
}
}
}
return newerrorHandler;
}
public int getMaxTries() {
return maxTries;
}
public HashedCQLErrorHandler getCqlErrorHandler() {
return this.errorHandler;
}
public StatementFilter getStatementFilter() {
return statementFilter;
}
public void setStatementFilter(StatementFilter statementFilter) {
this.statementFilter = statementFilter;
}
public Boolean isShowCql() {
return showcql;
}
public OpSequence<ReadyCQLStatement> getOpSequencer() {
return opsequence;
}
public List<RowCycleOperator> getRowCycleOperators() {
return rowCycleOperators;
}
protected synchronized void addRowCycleOperator(RowCycleOperator rsOperator) {
if (rowCycleOperators == null) {
rowCycleOperators = new ArrayList<>();
}
rowCycleOperators.add(rsOperator);
}
private void clearRowCycleOperators() {
this.rowCycleOperators = null;
}
public List<ResultSetCycleOperator> getResultSetCycleOperators() {
return resultSetCycleOperators;
}
protected synchronized void addResultSetCycleOperator(ResultSetCycleOperator resultSetCycleOperator) {
if (this.resultSetCycleOperators == null) {
this.resultSetCycleOperators = new ArrayList<>();
}
this.resultSetCycleOperators.add(resultSetCycleOperator);
}
private void clearResultSetCycleOperators() {
this.resultSetCycleOperators = null;
}
public List<StatementModifier> getStatementModifiers() {
return this.statementModifiers;
}
protected synchronized void addStatementModifier(StatementModifier modifier) {
if (this.statementModifiers == null) {
this.statementModifiers = new ArrayList<>();
}
this.statementModifiers.add(modifier);
}
private void clearStatementModifiers() {
statementModifiers = null;
}
public long getMaxOpsInFlight(int slot) {
int threads = this.getActivityDef().getThreads();
return maxTotalOpsInFlight / threads + (slot < (maxTotalOpsInFlight % threads) ? 1 : 0);
}
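// Worked example (illustrative): with async=10 total ops in flight and threads=4,
// slots 0 and 1 each get 3 ops in flight and slots 2 and 3 each get 2, summing to 10.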
public long getRetryDelay() {
return retryDelay;
}
public void setRetryDelay(long retryDelay) {
this.retryDelay = retryDelay;
}
public long getMaxRetryDelay() {
return maxRetryDelay;
}
public void setMaxRetryDelay(long maxRetryDelay) {
this.maxRetryDelay = maxRetryDelay;
}
public boolean isRetryReplace() {
return retryReplace;
}
public void setRetryReplace(boolean retryReplace) {
this.retryReplace = retryReplace;
}
public synchronized Writer getNamedWriter(String name) {
Writer writer = namedWriters.computeIfAbsent(name, s -> {
try {
return new FileWriter(name, StandardCharsets.UTF_8);
} catch (IOException e) {
throw new RuntimeException(e);
}
});
this.registerAutoCloseable(writer);
return writer;
}
}

View File

@ -0,0 +1,47 @@
package io.nosqlbench.activitytype.cql.ebdrivers.cql.core;
import io.nosqlbench.engine.api.activityapi.core.ActionDispenser;
import io.nosqlbench.engine.api.activityapi.core.ActivityType;
import io.nosqlbench.engine.api.activityimpl.ActivityDef;
import io.nosqlbench.virtdata.annotations.Service;
import java.util.Optional;
@Service(ActivityType.class)
public class CqlActivityType implements ActivityType<CqlActivity> {
public String getName() {
return "cql";
}
@Override
public CqlActivity getActivity(ActivityDef activityDef) {
Optional<String> yaml = activityDef.getParams().getOptionalString("yaml");
// sanity check that we have a yaml parameter, which contains our statements and bindings
if (yaml.isEmpty()) {
throw new RuntimeException("Currently, the cql activity type requires yaml activity parameter.");
}
// allow shortcut: yaml parameter provide the default alias name
if (activityDef.getAlias().equals(ActivityDef.DEFAULT_ALIAS)) {
activityDef.getParams().set("alias",yaml.get());
}
return new CqlActivity(activityDef);
}
/**
* Returns the per-activity level dispenser. The ActionDispenser can then dispense
* per-thread actions within the activity instance.
* @param activity The activity instance which will parameterize this action
*/
@Override
public ActionDispenser getActionDispenser(CqlActivity activity) {
return new CqlActionDispenser(activity);
}
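// Illustrative activity parameters (an assumption based on the checks above):
//   yaml=myworkload.yaml alias=myalias
// The yaml parameter is required; when no alias is given, the yaml name is used
// as the alias via the shortcut above.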
}

View File

@ -0,0 +1,265 @@
package io.nosqlbench.activitytype.cql.ebdrivers.cql.core;
import com.codahale.metrics.Timer;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.ResultSetFuture;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.api.ErrorResponse;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.api.ResultSetCycleOperator;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.api.RowCycleOperator;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.api.StatementFilter;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.ErrorStatus;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.HashedCQLErrorHandler;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.exceptions.CQLCycleException;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.exceptions.ChangeUnappliedCycleException;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.exceptions.UnexpectedPagingException;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.core.ReadyCQLStatement;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import io.nosqlbench.engine.api.activityapi.core.BaseAsyncAction;
import io.nosqlbench.engine.api.activityapi.core.ops.fluent.opfacets.FailedOp;
import io.nosqlbench.engine.api.activityapi.core.ops.fluent.opfacets.StartedOp;
import io.nosqlbench.engine.api.activityapi.core.ops.fluent.opfacets.SucceededOp;
import io.nosqlbench.engine.api.activityapi.core.ops.fluent.opfacets.TrackedOp;
import io.nosqlbench.engine.api.activityapi.planning.OpSequence;
import io.nosqlbench.engine.api.activityimpl.ActivityDef;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.function.LongFunction;
@SuppressWarnings("Duplicates")
public class CqlAsyncAction extends BaseAsyncAction<CqlOpData, CqlActivity> {
private final static Logger logger = LoggerFactory.getLogger(CqlAsyncAction.class);
private final ActivityDef activityDef;
private List<RowCycleOperator> rowOps;
private List<ResultSetCycleOperator> cycleOps;
private List<StatementModifier> modifiers;
private StatementFilter statementFilter;
private OpSequence<ReadyCQLStatement> sequencer;
// how many times a statement will be attempted before giving up
private int maxTries = 10;
private HashedCQLErrorHandler cqlActivityErrorHandler;
// private int pagesFetched = 0;
// private long totalRowsFetchedForQuery = 0L;
// private ResultSet pagingResultSet;
// private Statement pagingStatement;
// private ReadyCQLStatement pagingReadyStatement;
private boolean showcql;
// private long opsInFlight = 0L;
// private long maxOpsInFlight = 1L;
// private long pendingResults = 0;
// private LinkedBlockingQueue<CqlOpContext> resultQueue = new LinkedBlockingQueue<>();
public CqlAsyncAction(CqlActivity activity, int slot) {
super(activity, slot);
onActivityDefUpdate(activity.getActivityDef());
this.activityDef = activity.getActivityDef();
}
@Override
public void init() {
onActivityDefUpdate(activityDef);
this.sequencer = activity.getOpSequencer();
}
@Override
public LongFunction<CqlOpData> getOpInitFunction() {
return (l) -> {
return new CqlOpData(l, this);
};
}
@Override
public void startOpCycle(TrackedOp<CqlOpData> opc) {
CqlOpData cqlop = opc.getData();
long cycle = opc.getCycle();
// bind timer covers all statement selection and binding, skipping, transforming logic
try (Timer.Context bindTime = activity.bindTimer.time()) {
cqlop.readyCQLStatement = sequencer.get(cycle);
cqlop.statement = cqlop.readyCQLStatement.bind(cycle);
// If a filter is defined, skip and count any statements that do not match it
if (statementFilter != null) {
if (!statementFilter.matches(cqlop.statement)) {
activity.skippedTokensHisto.update(cycle);
//opc.start().stop(-2);
cqlop.skipped = true;
opc.skip(0);
return;
}
}
// Transform the statement if there are any statement transformers defined for this CQL activity
if (modifiers != null) {
for (StatementModifier modifier : modifiers) {
cqlop.statement = modifier.modify(cqlop.statement, cycle);
}
}
// Optionally show the CQL in the log/console - only for diagnostic use
if (showcql) {
logger.info("CQL(cycle=" + cycle + "):\n" + cqlop.readyCQLStatement.getQueryString(cycle));
}
}
StartedOp<CqlOpData> startedOp = opc.start();
cqlop.startedOp = startedOp;
// The execute timer covers only the point at which EB hands the op to the driver to be executed
try (Timer.Context executeTime = activity.executeTimer.time()) {
cqlop.future = activity.getSession().executeAsync(cqlop.statement);
Futures.addCallback(cqlop.future, cqlop);
}
}
public void onSuccess(StartedOp<CqlOpData> sop) {
CqlOpData cqlop = sop.getData();
HashedCQLErrorHandler.resetThreadStatusCode();
if (cqlop.skipped) {
return;
}
try {
ResultSet resultSet = cqlop.resultSet;
cqlop.totalPagesFetchedForQuery++;
// Apply any defined ResultSetCycleOperators
if (cycleOps != null) {
for (ResultSetCycleOperator cycleOp : cycleOps) {
cycleOp.apply(resultSet, cqlop.statement, cqlop.cycle);
}
}
int pageRows = resultSet.getAvailableWithoutFetching();
int remaining = pageRows;
if (rowOps == null) {
while (remaining-- > 0) {
resultSet.one();
}
} else {
while (remaining-- > 0) {
for (RowCycleOperator rowOp : rowOps) {
rowOp.apply(resultSet.one(), cqlop.cycle);
}
}
}
cqlop.totalRowsFetchedForQuery += pageRows;
if (cqlop.totalPagesFetchedForQuery > activity.maxpages) {
throw new UnexpectedPagingException(
cqlop.cycle,
resultSet,
cqlop.readyCQLStatement.getQueryString(cqlop.cycle),
(int) cqlop.totalPagesFetchedForQuery,
activity.maxpages,
activity.getSession().getCluster().getConfiguration().getQueryOptions().getFetchSize()
);
}
if (!resultSet.wasApplied()) {
// To make exception handling logic flow more uniformly
throw new ChangeUnappliedCycleException(
cqlop.cycle, resultSet, cqlop.readyCQLStatement.getQueryString(cqlop.cycle)
);
}
if (!resultSet.isFullyFetched()) {
logger.trace("async paging request " + cqlop.totalPagesFetchedForQuery + " for cycle " + cqlop.cycle);
ListenableFuture<ResultSet> resultSetListenableFuture = resultSet.fetchMoreResults();
Futures.addCallback(resultSetListenableFuture, cqlop);
return;
}
SucceededOp<CqlOpData> success = sop.succeed(0);
cqlop.readyCQLStatement.onSuccess(cqlop.cycle, success.getServiceTimeNanos(), cqlop.totalRowsFetchedForQuery);
activity.triesHisto.update(cqlop.triesAttempted);
activity.rowsCounter.mark(cqlop.totalRowsFetchedForQuery);
activity.resultSuccessTimer.update(success.getServiceTimeNanos(), TimeUnit.NANOSECONDS);
activity.resultSetSizeHisto.update(cqlop.totalRowsFetchedForQuery);
activity.resultTimer.update(success.getServiceTimeNanos(), TimeUnit.NANOSECONDS);
} catch (Exception e) {
long currentServiceTime = sop.getCurrentServiceTimeNanos();
cqlop.readyCQLStatement.onError(cqlop.cycle, currentServiceTime, e);
CQLCycleException cqlCycleException = new CQLCycleException(cqlop.cycle, currentServiceTime, e, cqlop.readyCQLStatement);
ErrorStatus errorStatus = cqlActivityErrorHandler.handleError(cqlop.cycle, cqlCycleException);
if (errorStatus.isRetryable() && ++cqlop.triesAttempted < maxTries) {
ResultSetFuture resultSetFuture = activity.getSession().executeAsync(cqlop.statement);
sop.retry();
Futures.addCallback(resultSetFuture, cqlop);
return;
} else {
sop.fail(errorStatus.getResultCode());
if (errorStatus.getResponse() == ErrorResponse.stop) {
cqlop.throwable = cqlCycleException;
activity.getActivityController().stopActivityWithErrorAsync(cqlCycleException);
}
}
}
}
public void onFailure(StartedOp<CqlOpData> startedOp) {
CqlOpData cqlop = startedOp.getData();
long cycle = startedOp.getCycle();
long serviceTime = startedOp.getCurrentServiceTimeNanos();
// Even if this is retryable, we expose error events
cqlop.readyCQLStatement.onError(cycle, serviceTime, cqlop.throwable);
CQLCycleException cqlCycleException = new CQLCycleException(cycle, serviceTime, cqlop.throwable, cqlop.readyCQLStatement);
ErrorStatus errorStatus = cqlActivityErrorHandler.handleError(cycle, cqlCycleException);
if (errorStatus.getResponse() == ErrorResponse.stop) {
activity.getActivityController().stopActivityWithErrorAsync(cqlop.throwable);
return;
}
if (errorStatus.isRetryable() && ++cqlop.triesAttempted < maxTries) {
startedOp.retry();
try (Timer.Context executeTime = activity.executeTimer.time()) {
cqlop.future = activity.getSession().executeAsync(cqlop.statement);
Futures.addCallback(cqlop.future, cqlop);
return;
}
}
FailedOp<CqlOpData> failed = startedOp.fail(errorStatus.getResultCode());
activity.resultTimer.update(failed.getServiceTimeNanos(), TimeUnit.NANOSECONDS);
activity.triesHisto.update(cqlop.triesAttempted);
}
@Override
public void onActivityDefUpdate(ActivityDef activityDef) {
this.maxTries = activity.getMaxTries();
this.showcql = activity.isShowCql();
this.cqlActivityErrorHandler = activity.getCqlErrorHandler();
this.statementFilter = activity.getStatementFilter();
this.rowOps = activity.getRowCycleOperators();
this.cycleOps = activity.getResultSetCycleOperators();
this.modifiers = activity.getStatementModifiers();
}
public String toString() {
return "CqlAsyncAction["+this.slot+"]";
}
}

View File

@ -0,0 +1,52 @@
package io.nosqlbench.activitytype.cql.ebdrivers.cql.core;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.ResultSetFuture;
import com.datastax.driver.core.Statement;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.core.ReadyCQLStatement;
import com.google.common.util.concurrent.FutureCallback;
import io.nosqlbench.engine.api.activityapi.core.ops.fluent.opfacets.StartedOp;
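/**
 * Per-cycle state for one asynchronous CQL operation. The driver invokes the
 * {@link FutureCallback} methods on completion, and these delegate back to the
 * owning {@link CqlAsyncAction} along with the op handle that tracks timing and status.
 */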
public class CqlOpData implements FutureCallback<ResultSet> {
final long cycle;
// op state is managed via callbacks, we keep a ref here
StartedOp<CqlOpData> startedOp;
boolean skipped=false;
private CqlAsyncAction action;
int triesAttempted =0;
ReadyCQLStatement readyCQLStatement;
Statement statement;
ResultSetFuture future;
ResultSet resultSet;
long totalRowsFetchedForQuery;
long totalPagesFetchedForQuery;
public Throwable throwable;
public long resultAt;
private long errorAt;
public CqlOpData(long cycle, CqlAsyncAction action) {
this.cycle = cycle;
this.action = action;
}
@Override
public void onSuccess(ResultSet result) {
this.resultSet = result;
this.resultAt = System.nanoTime();
action.onSuccess(startedOp);
}
@Override
public void onFailure(Throwable throwable) {
this.throwable=throwable;
this.errorAt = System.nanoTime();
action.onFailure(startedOp);
}
}

View File

@ -0,0 +1,32 @@
package io.nosqlbench.activitytype.cql.ebdrivers.cql.core;
import com.datastax.driver.core.policies.AddressTranslator;
import com.datastax.driver.core.Cluster;
import java.net.InetSocketAddress;
public class ProxyTranslator implements AddressTranslator {
private final InetSocketAddress address;
public ProxyTranslator(InetSocketAddress host) {
this.address = host;
}
@Override
public void init(Cluster cluster) {
// Nothing to do
}
@Override
public InetSocketAddress translate(InetSocketAddress address) {
// Route every node through the single configured proxy endpoint
return this.address;
}
@Override
public void close() {
}
}

View File

@ -0,0 +1,11 @@
package io.nosqlbench.activitytype.cql.ebdrivers.cql.core;
import com.datastax.driver.core.Statement;
/**
* Provides a modular way for any CQL activities to modify statements before execution.
* Each active modifier returns a statement in turn.
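*
* A minimal sketch of an implementation (hypothetical, for illustration only):
* <pre>{@code
* StatementModifier idempotent = (stmt, cycle) -> stmt.setIdempotent(true);
* }</pre>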
*/
public interface StatementModifier {
Statement modify(Statement unmodified, long cycleNum);
}

View File

@ -0,0 +1,113 @@
package io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling;
import com.datastax.driver.core.exceptions.*;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.exceptions.*;
import io.nosqlbench.engine.api.activityapi.cyclelog.buffers.results.ResultReadable;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
* This enumerates all known exception classes, including supertypes,
* for the purposes of stable naming in error handling.
* This is current as of com.datastax.cassandra:cassandra-driver-core:3.2.0
*/
public enum CQLExceptionEnum implements ResultReadable {
FrameTooLongException(FrameTooLongException.class, 1),
CodecNotFoundException(CodecNotFoundException.class, 2),
DriverException(DriverException.class, 3),
AuthenticationException(AuthenticationException.class, 4),
TraceRetrievalException(TraceRetrievalException.class, 5),
UnsupportedProtocolVersionException(UnsupportedProtocolVersionException.class, 6),
NoHostAvailableException(NoHostAvailableException.class, 7),
QueryValidationException(QueryValidationException.class, 8),
InvalidQueryException(InvalidQueryException.class, 9),
InvalidConfigurationInQueryException(InvalidConfigurationInQueryException.class, 10),
UnauthorizedException(UnauthorizedException.class, 11),
SyntaxError(SyntaxError.class, 12),
AlreadyExistsException(AlreadyExistsException.class, 13),
UnpreparedException(UnpreparedException.class, 14),
InvalidTypeException(InvalidTypeException.class, 15),
QueryExecutionException(QueryExecutionException.class, 16),
UnavailableException(UnavailableException.class, 17),
BootstrappingException(BootstrappingException.class, 18),
OverloadedException(OverloadedException.class, 19),
TruncateException(TruncateException.class, 20),
QueryConsistencyException(QueryConsistencyException.class, 21),
WriteTimeoutException(WriteTimeoutException.class, 22),
WriteFailureException(WriteFailureException.class, 23),
ReadFailureException(ReadFailureException.class, 24),
ReadTimeoutException(ReadTimeoutException.class, 25),
FunctionExecutionException(FunctionExecutionException.class, 26),
DriverInternalError(DriverInternalError.class, 27),
ProtocolError(ProtocolError.class, 28),
ServerError(ServerError.class, 29),
BusyPoolException(BusyPoolException.class, 30),
ConnectionException(ConnectionException.class, 31),
TransportException(TransportException.class, 32),
OperationTimedOutException(OperationTimedOutException.class, 33),
PagingStateException(PagingStateException.class, 34),
UnresolvedUserTypeException(UnresolvedUserTypeException.class, 35),
UnsupportedFeatureException(UnsupportedFeatureException.class, 36),
BusyConnectionException(BusyConnectionException.class, 37),
ChangeUnappliedCycleException(ChangeUnappliedCycleException.class, 38),
ResultSetVerificationException(io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.exceptions.ResultSetVerificationException.class, 39),
RowVerificationException(io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.exceptions.RowVerificationException.class, 40),
UnexpectedPagingException(io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.exceptions.UnexpectedPagingException.class, 41),
EbdseCycleException(CqlCycleException.class, 42),
MaxTriesExhaustedException(io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.exceptions.MaxTriesExhaustedException.class,43);
private final static Logger logger = LoggerFactory.getLogger(CQLExceptionEnum.class);
private static Map<String, Integer> codesByName = getCodesByName();
private static String[] namesByCode = getNamesByCode();
private final Class<? extends Exception> exceptionClass;
private final int resultCode;
CQLExceptionEnum(Class<? extends Exception> clazz, int resultCode) {
this.exceptionClass = clazz;
this.resultCode = resultCode;
}
public Class<? extends Exception> getExceptionClass() {
return exceptionClass;
}
public int getResultCode() {
return resultCode;
}
public int getResult() {
return this.resultCode;
}
private static Map<String,Integer> getCodesByName() {
codesByName = new HashMap<>();
for (CQLExceptionEnum cqlExceptionEnum : CQLExceptionEnum.values()) {
codesByName.put(cqlExceptionEnum.toString(), cqlExceptionEnum.resultCode);
}
codesByName.put("NONE",0);
return codesByName;
}
private static String[] getNamesByCode() {
List<String> namesByCode = new ArrayList<>();
namesByCode.add("NONE");
for (CQLExceptionEnum cqlExceptionEnum : CQLExceptionEnum.values()) {
int code = cqlExceptionEnum.resultCode;
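// result codes are sparse, so pad any intermediate slots with "UNKNOWN" up to this code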
for (int i = namesByCode.size(); i <= code ; i++) {
namesByCode.add("UNKNOWN");
}
namesByCode.set(code, cqlExceptionEnum.toString());
}
return namesByCode.toArray(new String[0]);
}
}

View File

@ -0,0 +1,101 @@
package io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.api.ErrorResponse;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.exceptions.CQLCycleException;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.exceptions.CQLExceptionDetailer;
import io.nosqlbench.engine.api.activityapi.errorhandling.CycleErrorHandler;
import io.nosqlbench.engine.api.metrics.ExceptionCountMetrics;
import io.nosqlbench.engine.api.metrics.ExceptionHistoMetrics;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A contextualized error handler that can catch a cycle-specific error.
* In this class, the error handlers return a boolean, which indicates
* to the call whether or not to retry the operation. This handler implements
* the error handling stack approach, which allows the user to select an
* entry point in the stack, with all lesser impacting handler rules
* applied from most impacting to least impacting order.
*
* For simplicity, the handler stack is fixed as described below. It is not
* possible to rearrange the verbs. Some care has been given to making sure
* that the selected handlers are complete and intuitive.
*
* The standard handler stack looks like this:
*
* <ol>
* <li>stop - log and throw an exception, which should escape to the
* next level of exception handling, the level which causes ebdse
* to stop running. In this case, and only in this case, the remaining
* handlers in the stack are not used.
* are not reached.</li>
* <li>warn - log an exception without stopping execution.</li>
* <li>retry - retry an operation up to a limit, IFF it is retryable</li>
* <li>count - count, in metrics, the number of this particular error type</li>
* <li>ignore - do nothing</li>
* </ol>
*
* As indicated above, if you specify "warn" for a particular error type, this means
* that also retry, count, will apply, as well as ignore, in that order. "ignore" is
* simply a no-op that allows you to specify it as the minimum case.
*/
@SuppressWarnings("Duplicates")
public class EbdseCycleErrorHandler implements CycleErrorHandler<Throwable, ErrorStatus> {
private static final Logger logger = LoggerFactory.getLogger(EbdseCycleErrorHandler.class);
private ErrorResponse errorResponse;
private ExceptionCountMetrics exceptionCountMetrics;
private final ExceptionHistoMetrics exceptionHistoMetrics;
private boolean throwExceptionOnStop=false;
public EbdseCycleErrorHandler(
ErrorResponse errorResponse,
ExceptionCountMetrics exceptionCountMetrics,
ExceptionHistoMetrics exceptionHistoMetrics,
boolean throwExceptionOnStop) {
this.errorResponse = errorResponse;
this.exceptionCountMetrics = exceptionCountMetrics;
this.exceptionHistoMetrics = exceptionHistoMetrics;
this.throwExceptionOnStop = throwExceptionOnStop;
}
@Override
public ErrorStatus handleError(long cycle, Throwable contextError) {
CQLCycleException cce = (CQLCycleException) contextError;
Throwable error = cce.getCause();
boolean retry = false;
switch (errorResponse) {
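// Cases fall through intentionally: choosing a response level applies it and
// every less-impacting level below it, per the handler stack described above.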
case stop:
logger.error("error with cycle " + cycle + ": statement: " + cce.getStatement() + " errmsg: " +
CQLExceptionDetailer.messageFor(cycle, error));
if (throwExceptionOnStop) {
throw new RuntimeException(error);
}
case warn:
logger.warn("error with cycle " + cycle + ": statement: " + cce.getStatement() + " errmsg: " +
CQLExceptionDetailer.messageFor(cycle, error));
case retry:
retry = true;
case histogram:
exceptionHistoMetrics.update(error,cce.getDurationNanos());
case count:
exceptionCountMetrics.count(error);
case ignore:
default:
break;
}
return new ErrorStatus(errorResponse, retry,-1);
}
@Override
public ErrorStatus handleError(long cycle, Throwable contextError, String errMsg) {
return handleError(cycle,contextError);
}
public String toString() {
return this.errorResponse.toString();
}
}

View File

@ -0,0 +1,31 @@
package io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.api.ErrorResponse;
public class ErrorStatus {
private boolean retryable;
private int resultCode;
private ErrorResponse response;
public ErrorStatus(ErrorResponse response, boolean retryable, int resultCode) {
this.response = response;
this.retryable = retryable;
this.resultCode = resultCode;
}
public boolean isRetryable() {
return retryable;
}
public int getResultCode() {
return resultCode;
}
public void setResultCode(int resultCode) {
this.resultCode = resultCode;
}
public ErrorResponse getResponse() {
return response;
}
}

View File

@ -0,0 +1,80 @@
package io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling;
import com.datastax.driver.core.exceptions.*;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.exceptions.*;
import java.util.LinkedHashMap;
import java.util.Map;
/**
* This enumerates all known exception classes, including supertypes,
* for the purposes of stable naming in error handling.
* This is current as of com.datastax.cassandra:cassandra-driver-core:3.2.0
*/
public class ExceptionMap {
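// Maps each exception class to its direct superclass, in insertion order, so that
// error handling can resolve a specific exception to a stable, more general category.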
private final static Map<Class<? extends Exception>, Class<? extends Exception>> map
= new LinkedHashMap<Class<? extends Exception>, Class<? extends Exception>>() {
{
put(FrameTooLongException.class, DriverException.class);
put(CodecNotFoundException.class, DriverException.class);
put(AuthenticationException.class, DriverException.class);
put(TraceRetrievalException.class, DriverException.class);
put(UnsupportedProtocolVersionException.class, DriverException.class);
put(NoHostAvailableException.class, DriverException.class);
put(QueryValidationException.class, DriverException.class);
put(InvalidQueryException.class, QueryValidationException.class);
put(InvalidConfigurationInQueryException.class, InvalidQueryException.class);
put(UnauthorizedException.class, QueryValidationException.class);
put(SyntaxError.class, QueryValidationException.class);
put(AlreadyExistsException.class, QueryValidationException.class);
put(UnpreparedException.class, QueryValidationException.class);
put(InvalidTypeException.class, DriverException.class);
put(QueryExecutionException.class, DriverException.class);
put(UnavailableException.class, QueryValidationException.class);
put(BootstrappingException.class, QueryValidationException.class);
put(OverloadedException.class, QueryValidationException.class);
put(TruncateException.class, QueryValidationException.class);
put(QueryConsistencyException.class, QueryValidationException.class);
put(WriteTimeoutException.class, QueryConsistencyException.class);
put(WriteFailureException.class, QueryConsistencyException.class);
put(ReadFailureException.class, QueryConsistencyException.class);
put(ReadTimeoutException.class, QueryConsistencyException.class);
put(FunctionExecutionException.class, QueryValidationException.class);
put(DriverInternalError.class, DriverException.class);
put(ProtocolError.class, DriverInternalError.class);
put(ServerError.class, DriverInternalError.class);
put(BusyPoolException.class, DriverException.class);
put(ConnectionException.class, DriverException.class);
put(TransportException.class, ConnectionException.class);
put(OperationTimedOutException.class, ConnectionException.class);
put(PagingStateException.class, DriverException.class);
put(UnresolvedUserTypeException.class, DriverException.class);
put(UnsupportedFeatureException.class, DriverException.class);
put(BusyConnectionException.class, DriverException.class);
put(ChangeUnappliedCycleException.class, CqlCycleException.class);
put(ResultSetVerificationException.class, CqlCycleException.class);
put(RowVerificationException.class, CqlCycleException.class);
put(UnexpectedPagingException.class, CqlCycleException.class);
put(CqlCycleException.class, RuntimeException.class);
}
};
public Class<? extends Exception> put(
Class<? extends Exception> exceptionClass,
Class<? extends Exception> parentClass) {
if (exceptionClass.getSuperclass() != parentClass) {
throw new RuntimeException("Sanity check failed: " + parentClass +
" is not the direct superclass of " + exceptionClass);
}
return map.put(exceptionClass, parentClass);
}
public static Map<Class<? extends Exception>, Class<? extends Exception>> getMap() {
return map;
}
}

View File

@ -0,0 +1,82 @@
package io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling;
import com.datastax.driver.core.exceptions.*;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.exceptions.CQLCycleException;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.exceptions.ChangeUnappliedCycleException;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.exceptions.ResultSetVerificationException;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.exceptions.RowVerificationException;
import io.nosqlbench.engine.api.activityapi.errorhandling.CycleErrorHandler;
import io.nosqlbench.engine.api.activityapi.errorhandling.HashedErrorHandler;
import io.nosqlbench.engine.api.metrics.ExceptionCountMetrics;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class HashedCQLErrorHandler extends HashedErrorHandler<Throwable, ErrorStatus> {
private static final Logger logger = LoggerFactory.getLogger(HashedCQLErrorHandler.class);
// private static Set<Class<? extends Throwable>> UNVERIFIED_ERRORS = new HashSet<Class<? extends Throwable>>() {{
// add(RowVerificationException.class);
// add(ResultSetVerificationException.class);
// }};
private ExceptionCountMetrics exceptionCountMetrics;
private static ThreadLocal<Integer> tlResultCode = ThreadLocal.withInitial(() -> (0));
public HashedCQLErrorHandler(ExceptionCountMetrics exceptionCountMetrics) {
this.exceptionCountMetrics = exceptionCountMetrics;
this.setGroup("retryable",
NoHostAvailableException.class,
UnavailableException.class,
OperationTimedOutException.class,
OverloadedException.class,
WriteTimeoutException.class,
ReadTimeoutException.class
);
this.setGroup(
"unapplied",
ChangeUnappliedCycleException.class
);
this.setGroup("unverified",
RowVerificationException.class,
ResultSetVerificationException.class
);
// realerrors is everything else but the above
}
private static class UncaughtErrorHandler implements CycleErrorHandler<Throwable, ErrorStatus> {
@Override
public ErrorStatus handleError(long cycle, Throwable error, String errMsg) {
throw new RuntimeException(
"An exception was thrown in cycle " + cycle + " that has no configured error handler: " + errMsg + ", error:" + error
);
}
}
@Override
public ErrorStatus handleError(long cycle, Throwable throwable, String errMsg) {
int resultCode = 127;
if (throwable instanceof CQLCycleException) {
CQLCycleException cce = (CQLCycleException) throwable;
Throwable cause = cce.getCause();
try {
String simpleName = cause.getClass().getSimpleName();
CQLExceptionEnum cqlExceptionEnum = CQLExceptionEnum.valueOf(simpleName);
resultCode = cqlExceptionEnum.getResult();
} catch (Throwable t) {
logger.warn("unrecognized exception while mapping status code via Enum: " + throwable.getClass());
}
} else {
logger.warn("un-marshaled exception while mapping status code: " + throwable.getClass());
}
ErrorStatus errorStatus = super.handleError(cycle, throwable, errMsg);
errorStatus.setResultCode(resultCode);
return errorStatus;
}
public static int getThreadStatusCode() {
return tlResultCode.get();
}
public static void resetThreadStatusCode() {
tlResultCode.set(0);
}
}

View File

@ -0,0 +1,38 @@
package io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.exceptions;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.core.ReadyCQLStatement;
/**
 * An internal exception type that is used to save exception
 * context from within a CQL activity cycle.
 */
public class CQLCycleException extends Exception {
private final long cycleValue;
private final long durationNanos;
private final ReadyCQLStatement readyCQLStatement;
public CQLCycleException(long cycleValue, long durationNanos, Throwable e, ReadyCQLStatement readyCQLStatement) {
super(e);
this.cycleValue = cycleValue;
this.durationNanos = durationNanos;
this.readyCQLStatement = readyCQLStatement;
}
public long getCycleValue() {
return cycleValue;
}
public long getDurationNanos() {
return durationNanos;
}
public ReadyCQLStatement getReadyCQLStatement() {
return readyCQLStatement;
}
public String getStatement() {
return readyCQLStatement.getQueryString(cycleValue);
}
}

View File

@ -0,0 +1,25 @@
package io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.exceptions;
import com.datastax.driver.core.exceptions.ReadTimeoutException;
import com.datastax.driver.core.exceptions.WriteTimeoutException;
public class CQLExceptionDetailer {
public static String messageFor(long cycle, Throwable e) {
if (e instanceof ReadTimeoutException) {
ReadTimeoutException rte = (ReadTimeoutException) e;
return rte.getMessage() +
", coordinator: " + rte.getHost() +
", wasDataRetrieved: " + rte.wasDataRetrieved();
}
if (e instanceof WriteTimeoutException) {
WriteTimeoutException wte = (WriteTimeoutException) e;
return wte.getMessage() +
", coordinator: " + wte.getHost();
}
return e.getMessage();
}
}

View File

@ -0,0 +1,56 @@
package io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.exceptions;
import com.datastax.driver.core.BoundStatement;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.SimpleStatement;
import com.datastax.driver.core.Statement;
public abstract class CQLResultSetException extends CqlCycleException {
private final Statement statement;
private final ResultSet resultSet;
public CQLResultSetException(long cycle, ResultSet resultSet, Statement statement, String message, Throwable cause) {
super(cycle,message,cause);
this.resultSet = resultSet;
this.statement = statement;
}
public CQLResultSetException(long cycle, ResultSet resultSet, Statement statement) {
super(cycle);
this.resultSet = resultSet;
this.statement = statement;
}
public CQLResultSetException(long cycle, ResultSet resultSet, Statement statement, String message) {
super(cycle,message);
this.resultSet = resultSet;
this.statement=statement;
}
public CQLResultSetException(long cycle, ResultSet resultSet, Statement statement, Throwable cause) {
super(cycle,cause);
this.resultSet = resultSet;
this.statement = statement;
}
public Statement getStatement() {
return statement;
}
public ResultSet getResultSet() {
return resultSet;
}
protected static String getQueryString(Statement stmt) {
if (stmt instanceof BoundStatement) {
return ((BoundStatement)stmt).preparedStatement().getQueryString();
} else if (stmt instanceof SimpleStatement) {
return ((SimpleStatement) stmt).getQueryString();
} else {
return "UNKNOWN Statement type:" + stmt.getClass().getSimpleName();
}
}
}

View File

@ -0,0 +1,26 @@
package io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.exceptions;
import com.datastax.driver.core.ResultSet;
/**
 * Thrown when a conditional (lightweight transaction) statement completes
 * without being applied. This was added to nosqlbench because the error
 * handling logic was starting to look a bit contrived. Because we need to be
 * able to respond to different result outcomes, it is simpler to have a
 * single type of error-handling logic for all outcomes.
 */
public class ChangeUnappliedCycleException extends CqlCycleException {
private final ResultSet resultSet;
private final String queryString;
public ChangeUnappliedCycleException(long cycle, ResultSet resultSet, String queryString) {
super(cycle, "Operation was not applied:" + queryString);
this.resultSet = resultSet;
this.queryString = queryString;
}
public ResultSet getResultSet() {
return resultSet;
}
public String getQueryString() { return queryString; }
}

View File

@ -0,0 +1,38 @@
package io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.exceptions;
public abstract class CqlCycleException extends RuntimeException {
private long cycle;
public CqlCycleException(long cycle, Throwable cause) {
super(cause);
this.cycle = cycle;
}
public CqlCycleException(long cycle, String message) {
super(message);
this.cycle = cycle;
}
public CqlCycleException(long cycle, String message, Throwable cause) {
super(message, cause);
this.cycle = cycle;
}
public CqlCycleException(long cycle) {
super();
this.cycle = cycle;
}
@Override
public String getMessage() {
return "cycle:" + cycle + " caused by:" + super.getMessage();
}
public long getCycle() {
return cycle;
}
}

View File

@ -0,0 +1,20 @@
package io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.exceptions;
public class MaxTriesExhaustedException extends CqlCycleException {
private int maxtries;
public MaxTriesExhaustedException(long cycle, int maxtries) {
super(cycle);
this.maxtries = maxtries;
}
public int getMaxTries() {
return maxtries;
}
@Override
public String getMessage() {
return "Exhausted max tries (" + getMaxTries() + ") on cycle " + getCycle() + ".";
}
}

View File

@ -0,0 +1,17 @@
package io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.exceptions;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Statement;
public class ResultSetVerificationException extends CQLResultSetException {
public ResultSetVerificationException(
long cycle, ResultSet resultSet, Statement statement, Throwable cause) {
super(cycle, resultSet, statement, cause);
}
public ResultSetVerificationException(
long cycle, ResultSet resultSet, Statement statement, String s) {
super(cycle, resultSet, statement, s + ", \nquery string:\n" + getQueryString(statement));
}
}

View File

@ -0,0 +1,33 @@
package io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.exceptions;
import com.datastax.driver.core.Row;
import java.util.Map;
/**
* This exception is thrown when read verification fails.
*/
public class RowVerificationException extends CqlCycleException {
private Map<String, Object> expected;
private Row row;
public RowVerificationException(long cycle, Row row, Map<String, Object> expected, String detail) {
super(cycle, detail);
this.expected = expected;
this.row = row;
}
@Override
public String getMessage() {
// the cycle prefix is already included by CqlCycleException#getMessage
return super.getMessage();
}
public Map<String,Object> getExpectedValues() {
return expected;
}
public Row getRow() {
return row;
}
}

View File

@ -0,0 +1,55 @@
package io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.exceptions;
import com.datastax.driver.core.ResultSet;
/**
* <p>This is not a core exception. It was added to the CQL activity type
* driver for nosqlbench specifically to catch the following unexpected
* condition:
* Paging would be needed to read all the results from a read query, but the user
* is not expecting to intentionally check and iterate the result sets for paging.
* <p>
* This should only be thrown if a result set would need paging, but configuration
 * options specify that it should not be expected. Rather than assume paging is completely
* expected or unexpected, we simply assume that only 1 page is allowed, being the
* first page, or what is thought of as "not paging".
* <p>If this error is thrown, and paging is expected, then the user can adjust
* fetchsize or maxpages in order to open up paging to the degree that is allowable or
* expected.
*/
public class UnexpectedPagingException extends CqlCycleException {
private final ResultSet resultSet;
private final String queryString;
private final int fetchSize;
private int fetchedPages;
private int maxpages;
public UnexpectedPagingException(
long cycle,
ResultSet resultSet,
String queryString,
int fetchedPages,
int maxpages,
int fetchSize) {
super(cycle);
this.resultSet = resultSet;
this.queryString = queryString;
this.fetchedPages = fetchedPages;
this.maxpages = maxpages;
this.fetchSize = fetchSize;
}
public ResultSet getResultSet() {
return resultSet;
}
public String getMessage() {
StringBuilder sb = new StringBuilder();
sb.append("Additional paging would be required to read the results from this query fully" +
", but the user has not explicitly indicated that paging was expected.")
.append(" fetched/allowed: ").append(fetchedPages).append("/").append(maxpages)
.append(" fetchSize(").append(fetchSize).append("): ").append(queryString);
return sb.toString();
}
}

View File

@ -0,0 +1,65 @@
package io.nosqlbench.activitytype.cql.ebdrivers.cql.filtering;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.CQLExceptionEnum;
import io.nosqlbench.engine.api.activityapi.cyclelog.buffers.results.ResultReadable;
import io.nosqlbench.engine.api.activityapi.cyclelog.filters.ResultFilterDispenser;
import io.nosqlbench.engine.api.activityapi.cyclelog.filters.ResultValueFilterType;
import io.nosqlbench.engine.api.activityapi.cyclelog.filters.tristate.EnumReadableMappingFilter;
import io.nosqlbench.engine.api.activityapi.cyclelog.filters.tristate.TristateFilter;
import io.nosqlbench.engine.api.util.ConfigTuples;
import io.nosqlbench.virtdata.annotations.Service;
import java.util.function.Predicate;
@Service(ResultValueFilterType.class)
public class CQLResultFilterType implements ResultValueFilterType {
@Override
public String getName() {
return "cql";
}
@Override
public ResultFilterDispenser getDispenser(String config) {
return new Dispenser(config);
}
private class Dispenser implements ResultFilterDispenser {
private final ConfigTuples conf;
private final EnumReadableMappingFilter<CQLExceptionEnum> enumFilter;
private final Predicate<ResultReadable> filter;
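// Each config section begins with in(clude) or ex(clude) followed by an exception name,
// e.g. something like "include:ReadTimeoutException" (illustrative; the exact tuple
// syntax is defined by ConfigTuples).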
public Dispenser(String config) {
this.conf = new ConfigTuples(config);
ConfigTuples inout = conf.getAllMatching("in.*", "ex.*");
// Default policy is opposite of leading rule
TristateFilter.Policy defaultPolicy = TristateFilter.Policy.Discard;
if (conf.get(0).get(0).startsWith("ex")) {
defaultPolicy = TristateFilter.Policy.Keep;
}
this.enumFilter =
new EnumReadableMappingFilter<>(CQLExceptionEnum.values(), TristateFilter.Policy.Ignore);
for (ConfigTuples.Section section : inout) {
if (section.get(0).startsWith("in")) {
this.enumFilter.addPolicy(section.get(1), TristateFilter.Policy.Keep);
} else if (section.get(0).startsWith("ex")) {
this.enumFilter.addPolicy(section.get(1), TristateFilter.Policy.Discard);
} else {
throw new RuntimeException("Section must start with in(clude) or ex(clude), but instead it is " + section);
}
}
this.filter = this.enumFilter.toDefaultingPredicate(defaultPolicy);
}
@Override
public Predicate<ResultReadable> getResultFilter() {
return filter;
}
}
}

View File

@ -0,0 +1,27 @@
package io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.binders;
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.Statement;
import io.nosqlbench.virtdata.api.ValuesArrayBinder;
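/**
 * Selects the {@link ValuesArrayBinder} used to bind generated values to a prepared
 * statement: direct_array passes all values positionally in a single call, unset_aware
 * serializes value-by-value and leaves unset values untouched, and diagnostic binds
 * column-by-column with detailed error reporting. For example,
 * {@code CqlBinderTypes.DEFAULT.get(session)} yields the unset-aware binder.
 */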
public enum CqlBinderTypes {
direct_array,
unset_aware,
diagnostic;
public final static CqlBinderTypes DEFAULT = unset_aware;
public ValuesArrayBinder<PreparedStatement, Statement> get(Session session) {
if (this==direct_array) {
return new DirectArrayValuesBinder();
} else if (this== unset_aware) {
return new UnsettableValuesBinder(session);
} else if (this==diagnostic) {
return new DiagnosticPreparedBinder();
} else {
throw new RuntimeException("Impossible-ish statement branch");
}
}
}

View File

@ -0,0 +1,48 @@
package io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.binders;
import com.datastax.driver.core.*;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.core.CQLBindHelper;
import io.nosqlbench.virtdata.api.ValuesArrayBinder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.List;
/**
 * This binder is not meant to be used as the primary binder. It gives detailed
 * diagnostics, but in order to do so it does much more processing per operation.
 * Other binders delegate to this one from an exception handler when needed, in
 * order to explain in more detail what is happening for users.
 */
public class DiagnosticPreparedBinder implements ValuesArrayBinder<PreparedStatement, Statement> {
public static final Logger logger = LoggerFactory.getLogger(DiagnosticPreparedBinder.class);
@Override
public Statement bindValues(PreparedStatement prepared, Object[] values) {
ColumnDefinitions columnDefinitions = prepared.getVariables();
BoundStatement bound = prepared.bind();
List<ColumnDefinitions.Definition> columnDefList;
if (columnDefinitions.asList().size() == values.length) {
columnDefList = columnDefinitions.asList();
} else {
throw new RuntimeException("The number of named anchors in your statement does not match the number of bindings provided.");
}
int i = 0;
for (Object value : values) {
if (columnDefList.size() <= i) {
logger.error("Binding mismatch: more values than column definitions (" + values.length + " values, " + columnDefList.size() + " columns)");
}
ColumnDefinitions.Definition columnDef = columnDefList.get(i);
String colName = columnDef.getName();
DataType.Name type = columnDef.getType().getName();
try {
bound = CQLBindHelper.bindStatement(bound, colName, value, type);
} catch (ClassCastException e) {
logger.error(String.format("Unable to bind column %s to cql type %s with value %s", colName, type, value));
throw e;
}
i++;
}
return bound;
}
}

View File

@ -0,0 +1,37 @@
package io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.binders;
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.Statement;
import io.nosqlbench.virtdata.api.ValuesArrayBinder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Arrays;
/**
* This is now the main binder again, but if there are any exceptions, it delegates to the diagnostic
* one in order to explain what happened. This is to allow for higher performance in the general
* case, but with better user support when something goes wrong.
*
* If you want to force the client to use the array passing method of initializing a statement,
 * use this one, known as 'directarray'. This gives up the ability to model unset values,
 * for no clear benefit. Thus the {@link CqlBinderTypes#unset_aware} binder
 * will become the default.
*/
public class DirectArrayValuesBinder implements ValuesArrayBinder<PreparedStatement, Statement> {
public final static Logger logger = LoggerFactory.getLogger(DirectArrayValuesBinder.class);
@Override
public Statement bindValues(PreparedStatement preparedStatement, Object[] objects) {
try {
return preparedStatement.bind(objects);
} catch (Exception e) {
StringBuilder sb = new StringBuilder();
sb.append("Error binding objects to prepared statement directly, falling back to diagnostic binding layer:");
sb.append(Arrays.toString(objects));
logger.warn(sb.toString(),e);
DiagnosticPreparedBinder diag = new DiagnosticPreparedBinder();
return diag.bindValues(preparedStatement, objects);
}
}
}

View File

@ -0,0 +1,19 @@
package io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.binders;
import com.datastax.driver.core.SimpleStatement;
import com.datastax.driver.core.Statement;
import io.nosqlbench.virtdata.api.ValuesArrayBinder;
/**
* This binder is not meant to be used with anything but DDL or statements
* which should not be trying to parameterize values in general. If this changes,
* support will be added for parameterized values here.
*/
public class SimpleStatementValuesBinder
implements ValuesArrayBinder<SimpleStatement, Statement> {
@Override
public Statement bindValues(SimpleStatement context, Object[] values) {
return new SimpleStatement(context.getQueryString(), values);
}
}

View File

@ -0,0 +1,73 @@
package io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.binders;
import com.datastax.driver.core.*;
import io.nosqlbench.virtdata.api.VALUE;
import io.nosqlbench.virtdata.api.ValuesArrayBinder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.nio.ByteBuffer;
import java.util.List;
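/**
 * Binds values to a prepared statement one column at a time, serializing each value
 * through the cluster's codec registry, so that values marked as unset are simply
 * skipped and remain unset at the protocol level.
 */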
public class UnsettableValuesBinder implements ValuesArrayBinder<PreparedStatement, Statement> {
private final static Logger logger = LoggerFactory.getLogger(UnsettableValuesBinder.class);
private final Session session;
private final CodecRegistry codecRegistry;
private final ProtocolVersion protocolVersion;
public UnsettableValuesBinder(Session session) {
this.session = session;
this.codecRegistry = session.getCluster().getConfiguration().getCodecRegistry();
this.protocolVersion = this.session.getCluster().getConfiguration().getProtocolOptions().getProtocolVersion();
}
// TODO: Allow for warning when nulls are passed and they aren't expected
@Override
public Statement bindValues(PreparedStatement preparedStatement, Object[] objects) {
int i=-1;
try {
BoundStatement boundStmt = preparedStatement.bind();
List<ColumnDefinitions.Definition> defs = preparedStatement.getVariables().asList();
for (i = 0; i < objects.length; i++) {
Object value = objects[i];
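// values marked as unset are skipped entirely, leaving the column unbound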
if (VALUE.unset != value) {
if (null==value) {
boundStmt.setToNull(i);
} else {
DataType cqlType = defs.get(i).getType();
TypeCodec<Object> codec = codecRegistry.codecFor(cqlType, value);
ByteBuffer serialized = codec.serialize(value, protocolVersion);
boundStmt.setBytesUnsafe(i,serialized);
}
}
}
return boundStmt;
} catch (Exception e) {
String typNam = (objects[i]==null ? "NULL" : objects[i].getClass().getCanonicalName());
logger.error("Error binding column " + preparedStatement.getVariables().asList().get(i).getName() + " with class " + typNam + ": " + e.getMessage(), e);
throw e;
// StringBuilder sb = new StringBuilder();
// sb.append("Error binding objects to prepared statement directly, falling back to diagnostic binding layer:");
// sb.append(Arrays.toString(objects));
// logger.warn(sb.toString(),e);
// DiagnosticPreparedBinder diag = new DiagnosticPreparedBinder();
// return diag.bindValues(preparedStatement, objects);
}
}
// static void setObject(Session session, BoundStatement bs, int index, Object value) {
//
// DataType cqlType = bs.preparedStatement().getVariables().getType(index);
//
// CodecRegistry codecRegistry = session.getCluster().getConfiguration().getCodecRegistry();
// ProtocolVersion protocolVersion =
// session.getCluster().getConfiguration().getProtocolOptions().getProtocolVersion();
//
// TypeCodec<Object> codec = codecRegistry.codecFor(cqlType, value);
// bs.setBytesUnsafe(index, codec.serialize(value, protocolVersion));
// }
}

View File

@ -0,0 +1,50 @@
package io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.core;
import io.nosqlbench.engine.api.util.TagFilter;
import java.util.*;
import java.util.stream.Collectors;
public class AvailableCQLStatements {
private List<TaggedCQLStatementDefs> availableDefs = new ArrayList<>();
public AvailableCQLStatements(List<TaggedCQLStatementDefs> allStatementDef) {
this.availableDefs = allStatementDef;
}
public List<TaggedCQLStatementDefs> getRawTagged() {
return availableDefs;
}
public Map<String, String> getFilteringDetails(String tagSpec) {
Map<String, String> details = new LinkedHashMap<>();
TagFilter ts = new TagFilter(tagSpec);
for (TaggedCQLStatementDefs availableDef : availableDefs) {
TagFilter.Result result = ts.matchesTaggedResult(availableDef);
String names = availableDef.getStatements().stream()
.map(CQLStatementDef::getName).collect(Collectors.joining(","));
details.put(names, result.getLog());
}
return details;
}
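/**
 * @return a statement parser for each tagged statement definition matching the tag filter spec
 */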
public List<CQLStatementDefParser> getMatching(String tagSpec) {
TagFilter ts = new TagFilter(tagSpec);
return availableDefs.stream()
.filter(ts::matchesTagged)
.map(TaggedCQLStatementDefs::getStatements)
.flatMap(Collection::stream)
.map(p -> new CQLStatementDefParser(p.getName(), p.getStatement()))
.collect(Collectors.toList());
}
public List<CQLStatementDefParser> getAll() {
return getMatching("");
}
}

View File

@ -0,0 +1,339 @@
package io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.core;
import com.datastax.driver.core.*;
import com.datastax.driver.core.policies.*;
import com.datastax.driver.dse.DseCluster;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.core.CQLOptions;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.core.ProxyTranslator;
import io.nosqlbench.engine.api.activityapi.core.Shutdownable;
import io.nosqlbench.engine.api.activityimpl.ActivityDef;
import io.nosqlbench.engine.api.exceptions.BasicError;
import io.nosqlbench.engine.api.metrics.ActivityMetrics;
import io.nosqlbench.engine.api.scripting.NashornEvaluator;
import io.nosqlbench.engine.api.util.SSLKsFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.*;
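/**
 * Caches one driver session per logical cluster id (the 'clusterid' activity parameter,
 * defaulting to a single shared session). Sessions are built lazily from activity
 * parameters and reused across all threads of the activities that share the id.
 */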
public class CQLSessionCache implements Shutdownable {
private final static Logger logger = LoggerFactory.getLogger(CQLSessionCache.class);
private final static String DEFAULT_SESSION_ID = "default";
private static CQLSessionCache instance = new CQLSessionCache();
private Map<String, Session> sessionCache = new HashMap<>();
private CQLSessionCache() {
}
public static CQLSessionCache get() {
return instance;
}
public void stopSession(ActivityDef activityDef) {
String key = activityDef.getParams().getOptionalString("clusterid").orElse(DEFAULT_SESSION_ID);
Session session = sessionCache.get(key);
session.getCluster().close();
session.close();
}
public Session getSession(ActivityDef activityDef) {
String key = activityDef.getParams().getOptionalString("clusterid").orElse(DEFAULT_SESSION_ID);
return sessionCache.computeIfAbsent(key, (cid) -> createSession(activityDef, key));
}
// cbopts=\".withLoadBalancingPolicy(LatencyAwarePolicy.builder(new TokenAwarePolicy(new DCAwareRoundRobinPolicy(\"dc1-us-east\", 0, false))).build()).withRetryPolicy(new LoggingRetryPolicy(DefaultRetryPolicy.INSTANCE))\"
private Session createSession(ActivityDef activityDef, String sessid) {
String host = activityDef.getParams().getOptionalString("host").orElse("localhost");
int port = activityDef.getParams().getOptionalInteger("port").orElse(9042);
String driverType = activityDef.getParams().getOptionalString("driver").orElse("dse");
Cluster.Builder builder =
driverType.toLowerCase().equals("dse") ? DseCluster.builder() :
driverType.toLowerCase().equals("oss") ? Cluster.builder() : null;
if (builder==null) {
throw new RuntimeException("The driver type '" + driverType + "' is not recognized");
}
logger.info("Using driver type '" + driverType.toUpperCase() + "'");
Optional<String> scb = activityDef.getParams()
.getOptionalString("secureconnectbundle");
scb.map(File::new)
.ifPresent(builder::withCloudSecureConnectBundle);
activityDef.getParams()
.getOptionalString("insights").map(Boolean::parseBoolean)
.ifPresent(builder::withMonitorReporting);
String[] contactPoints = activityDef.getParams().getOptionalString("host")
.map(h -> h.split(",")).orElse(null);
if (contactPoints != null) {
builder.addContactPoints(contactPoints);
} else if (scb.isEmpty()) {
throw new BasicError("you must provide your contact " +
"points:\n host=<host1,host2,...>");
}
activityDef.getParams().getOptionalInteger("port").ifPresent(builder::withPort);
builder.withCompression(ProtocolOptions.Compression.NONE);
Optional<String> usernameOpt = activityDef.getParams().getOptionalString("username");
Optional<String> passwordOpt = activityDef.getParams().getOptionalString("password");
Optional<String> passfileOpt = activityDef.getParams().getOptionalString("passfile");
if (usernameOpt.isPresent()) {
String username = usernameOpt.get();
String password;
if (passwordOpt.isPresent()) {
password = passwordOpt.get();
} else if (passfileOpt.isPresent()) {
Path path = Paths.get(passfileOpt.get());
try {
password = Files.readAllLines(path).get(0);
} catch (IOException e) {
String error = "Error while reading password from file:" + passfileOpt;
logger.error(error, e);
throw new RuntimeException(e);
}
} else {
String error = "username is present, but neither password nor passfile are defined.";
logger.error(error);
throw new RuntimeException(error);
}
builder.withCredentials(username, password);
}
Optional<String> clusteropts = activityDef.getParams().getOptionalString("cbopts");
if (clusteropts.isPresent()) {
try {
logger.info("applying cbopts:" + clusteropts.get());
NashornEvaluator<DseCluster.Builder> clusterEval = new NashornEvaluator<>(DseCluster.Builder.class);
clusterEval.put("builder", builder);
String importEnv =
"load(\"nashorn:mozilla_compat.js\");\n" +
" importPackage(com.google.common.collect.Lists);\n" +
" importPackage(com.google.common.collect.Maps);\n" +
" importPackage(com.datastax.driver);\n" +
" importPackage(com.datastax.driver.core);\n" +
" importPackage(com.datastax.driver.core.policies);\n" +
"builder" + clusteropts.get() + "\n";
clusterEval.script(importEnv);
builder = clusterEval.eval();
logger.info("successfully applied:" + clusteropts.get());
} catch (Exception e) {
logger.error("Unable to evaluate: " + clusteropts.get() + " in script context:" + e.getMessage());
throw e;
}
}
SpeculativeExecutionPolicy speculativePolicy = activityDef.getParams()
.getOptionalString("speculative")
.map(speculative -> {
logger.info("speculative=>" + speculative);
return speculative;
})
.map(CQLOptions::speculativeFor)
.orElse(CQLOptions.defaultSpeculativePolicy());
builder.withSpeculativeExecutionPolicy(speculativePolicy);
activityDef.getParams().getOptionalString("socketoptions")
.map(sockopts -> {
logger.info("socketoptions=>" + sockopts);
return sockopts;
})
.map(CQLOptions::socketOptionsFor)
.ifPresent(builder::withSocketOptions);
activityDef.getParams().getOptionalString("pooling")
.map(pooling -> {
logger.info("pooling=>" + pooling);
return pooling;
})
.map(CQLOptions::poolingOptionsFor)
.ifPresent(builder::withPoolingOptions);
activityDef.getParams().getOptionalString("whitelist")
.map(whitelist -> {
logger.info("whitelist=>" + whitelist);
return whitelist;
})
.map(p -> CQLOptions.whitelistFor(p, null))
.ifPresent(builder::withLoadBalancingPolicy);
activityDef.getParams().getOptionalString("tickduration")
.map(tickduration -> {
logger.info("tickduration=>" + tickduration);
return tickduration;
})
.map(CQLOptions::withTickDuration)
.ifPresent(builder::withNettyOptions);
activityDef.getParams().getOptionalString("compression")
.map(compression -> {
logger.info("compression=>" + compression);
return compression;
})
.map(CQLOptions::withCompression)
.ifPresent(builder::withCompression);
if (activityDef.getParams().getOptionalString("ssl").isPresent()) {
logger.info("Cluster builder proceeding with SSL but no Client Auth");
Object context = SSLKsFactory.get().getContext(activityDef);
SSLOptions sslOptions;
if (context instanceof javax.net.ssl.SSLContext) {
sslOptions = RemoteEndpointAwareJdkSSLOptions.builder()
.withSSLContext((javax.net.ssl.SSLContext) context).build();
} else if (context instanceof io.netty.handler.ssl.SslContext) {
sslOptions =
new RemoteEndpointAwareNettySSLOptions((io.netty.handler.ssl.SslContext) context);
} else {
throw new RuntimeException("Unrecognized ssl context object type: " + context.getClass().getCanonicalName());
}
builder.withSSL(sslOptions);
}
// JdkSSLOptions sslOptions = RemoteEndpointAwareJdkSSLOptions
// .builder()
// .withSSLContext(context)
// .build();
// builder.withSSL(sslOptions);
//
// }
//
// boolean sslEnabled = activityDef.getParams().getOptionalBoolean("ssl").orElse(false);
// boolean jdkSslEnabled = activityDef.getParams().getOptionalBoolean("jdkssl").orElse(false);
// if (jdkSslEnabled){
// sslEnabled = true;
// }
//
// // used for OpenSSL
// boolean openSslEnabled = activityDef.getParams().getOptionalBoolean("openssl").orElse(false);
//
// if (sslEnabled && openSslEnabled) {
// logger.error("You cannot enable both OpenSSL and JDKSSL, please pick one and try again!");
// System.exit(2);
// }
//
// if (sslEnabled) {
// logger.info("Cluster builder proceeding with SSL but no Client Auth");
// SSLContext context = SSLKsFactory.get().getContext(activityDef);
// JdkSSLOptions sslOptions = RemoteEndpointAwareJdkSSLOptions
// .builder()
// .withSSLContext(context)
// .build();
// builder.withSSL(sslOptions);
// }
// else if (openSslEnabled) {
// logger.info("Cluster builder proceeding with SSL and Client Auth");
// String keyPassword = activityDef.getParams().getOptionalString("keyPassword").orElse(null);
// String caCertFileLocation = activityDef.getParams().getOptionalString("caCertFilePath").orElse(null);
// String certFileLocation = activityDef.getParams().getOptionalString("certFilePath").orElse(null);
// String keyFileLocation = activityDef.getParams().getOptionalString("keyFilePath").orElse(null);
//
//
// try {
//
// KeyStore ks = KeyStore.getInstance("JKS", "SUN");
// ks.load(null, keyPassword.toCharArray());
//
// X509Certificate cert = (X509Certificate) CertificateFactory.
// getInstance("X509").
// generateCertificate(new FileInputStream(caCertFileLocation));
//
// //set alias to cert
// ks.setCertificateEntry(cert.getSubjectX500Principal().getName(), cert);
//
// TrustManagerFactory tMF = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm());
// tMF.init(ks);
//
//
// SslContext sslContext = SslContextBuilder
// .forClient()
// /* configured with the TrustManagerFactory that has the cert from the ca.cert
// * This tells the driver to trust the server during the SSL handshake */
// .trustManager(tMF)
// /* These are needed because the server is configured with require_client_auth
// * In this case the client's public key must be in the truststore on each DSE
// * server node and the CA configured */
// .keyManager(new File(certFileLocation), new File(keyFileLocation))
// .build();
//
// RemoteEndpointAwareNettySSLOptions sslOptions = new RemoteEndpointAwareNettySSLOptions(sslContext);
//
// // Cluster builder with sslOptions
// builder.withSSL(sslOptions);
//
// } catch (Exception e) {
// throw new RuntimeException(e);
// }
// }
RetryPolicy retryPolicy = activityDef.getParams()
.getOptionalString("retrypolicy")
.map(CQLOptions::retryPolicyFor).orElse(DefaultRetryPolicy.INSTANCE);
if (retryPolicy instanceof LoggingRetryPolicy) {
logger.info("using LoggingRetryPolicy");
}
builder.withRetryPolicy(retryPolicy);
if (!activityDef.getParams().getOptionalBoolean("jmxreporting").orElse(false)) {
builder.withoutJMXReporting();
}
// Proxy Translator and Whitelist for use with DS Cloud on-demand single-endpoint setup
if (activityDef.getParams().getOptionalBoolean("single-endpoint").orElse(false)) {
InetSocketAddress inetHost = new InetSocketAddress(host, port);
final List<InetSocketAddress> whiteList = new ArrayList<>();
whiteList.add(inetHost);
LoadBalancingPolicy whitelistPolicy = new WhiteListPolicy(new RoundRobinPolicy(), whiteList);
builder.withAddressTranslator(new ProxyTranslator(inetHost)).withLoadBalancingPolicy(whitelistPolicy);
}
Cluster cl = builder.build();
// Apply default idempotence, if set
activityDef.getParams().getOptionalBoolean("defaultidempotence").map(
b -> cl.getConfiguration().getQueryOptions().setDefaultIdempotence(b)
);
Session session = cl.newSession();
// This also forces init of metadata
logger.info("cluster-metadata-allhosts:\n" + session.getCluster().getMetadata().getAllHosts());
if (activityDef.getParams().getOptionalBoolean("drivermetrics").orElse(false)) {
String driverPrefix = "driver." + sessid;
driverPrefix = activityDef.getParams().getOptionalString("driverprefix").orElse(driverPrefix) + ".";
ActivityMetrics.mountSubRegistry(driverPrefix, cl.getMetrics().getRegistry());
}
return session;
}
@Override
public void shutdown() {
for (Session session : sessionCache.values()) {
Cluster cluster = session.getCluster();
session.close();
cluster.close();
}
}
}

View File

@ -0,0 +1,105 @@
package io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.core;
import com.datastax.driver.core.ConsistencyLevel;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.*;
import java.util.stream.Collectors;
public class CQLStatementDef {
private final static Logger logger = LoggerFactory.getLogger(CQLStatementDef.class);
private Map<String,String> params = new HashMap<>();
private String name = "";
private String statement = "";
private boolean prepared = true;
private String cl = ConsistencyLevel.LOCAL_ONE.name();
private Map<String, String> bindings = new HashMap<>();
public CQLStatementDef() {
}
public String getGenSpec(String s) {
return bindings.get(s);
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public String getStatement() {
return statement;
}
public void setStatement(String statement) {
this.statement = statement;
}
public Map<String, String> getBindings() {
return bindings;
}
public void setBindings(Map<String, String> bindings) {
this.bindings = bindings;
}
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append(" name:").append(this.getName()).append("\n");
sb.append(" statement: |").append("\n");
String formattedStmt = Arrays.stream(getStatement().split("\\r*\n"))
        .map(s -> "  " + s)
        .collect(Collectors.joining("\n"));
sb.append(formattedStmt).append("\n");
if (bindings.size() > 0) {
sb.append(" bindings:\n");
Optional<Integer> maxLen = this.bindings.keySet().stream().map(String::length).reduce(Integer::max);
for (String bindName : this.bindings.keySet()) {
sb
.append(String.format(" %-" + (maxLen.orElse(20) + 2) + "s", bindName)).append(" : ")
.append(bindings.get(bindName))
.append("\n");
}
}
return sb.toString();
}
public boolean isPrepared() {
return prepared;
}
public void setPrepared(boolean prepared) {
this.prepared = prepared;
}
public String getConsistencyLevel() {
return this.cl;
}
public void setConsistencyLevel(String consistencyLevel) {
this.cl = consistencyLevel;
}
public void setCl(String consistencyLevel) {
setConsistencyLevel(consistencyLevel);
}
public Map<String, String> getParams() {
return params;
}
public void setParams(Map<String, String> params) {
this.params = params;
}
public long getRatio() {
return Long.parseLong(Optional.ofNullable(params.get("ratio")).orElse("1"));
}
}


@ -0,0 +1,161 @@
package io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.core;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
public class CQLStatementDefParser {
private final static Logger logger = LoggerFactory.getLogger(CQLStatementDefParser.class);
// private final static Pattern templateToken = Pattern.compile("<<(\\w+(:(.+?))?)>>");
private final static Pattern stmtToken = Pattern.compile("\\?(\\w+[-_\\d\\w]*)|\\{(\\w+[-_\\d\\w.]*)}");
private final static String UNSET_VALUE = "UNSET-VALUE";
private final String stmt;
private final String name;
private CQLStatementDef deprecatedDef; // deprecated, to be removed
public void setBindings(Map<String, String> bindings) {
this.bindings = bindings;
}
private Map<String, String> bindings;
public CQLStatementDef getDeprecatedDef() {
return deprecatedDef;
}
public void setDeprecatedDef(CQLStatementDef deprecatedDef) {
this.deprecatedDef = deprecatedDef;
}
public CQLStatementDefParser(String name, String stmt) {
this.stmt = stmt;
this.name = name;
this.bindings = new HashMap<>(); // was 'this.bindings = bindings;', a self-assignment that left the field null
}
public Map<String,String> getBindings() {
return bindings;
}
/**
* @return bindableNames in order as specified in the parameter placeholders
*/
public List<String> getBindableNames() {
Matcher m = stmtToken.matcher(stmt);
List<String> bindNames = new ArrayList<>();
while (m.find()) {
String form1 = m.group(1);
String form2 = m.group(2);
bindNames.add( (form1!=null && !form1.isEmpty()) ? form1 : form2 );
}
return bindNames;
}
public String getName() {
return name;
}
public String getParsedStatementOrError(Set<String> namedBindings) {
ParseResult result = getParseResult(namedBindings);
if (result.hasError()) {
throw new RuntimeException("Statement template has errors:\n" + result.toString());
}
return result.getStatement();
}
public ParseResult getParseResult(Set<String> namedBindings) {
HashSet<String> missingAnchors = new HashSet<>(namedBindings);
HashSet<String> missingBindings = new HashSet<>();
String statement = this.stmt;
StringBuilder cooked = new StringBuilder();
Matcher m = stmtToken.matcher(statement);
int lastMatch = 0;
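// Scan for named anchors of the form ?name or {name}, replacing each with a
// positional '?' while tracking unknown anchors and unused bindings.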
while (m.find(lastMatch)) {
String pre = statement.substring(lastMatch, m.start());
String form1 = m.group(1);
String form2 = m.group(2);
String tokenName = (form1!=null && !form1.isEmpty()) ? form1 : form2;
lastMatch = m.end();
cooked.append(pre);
cooked.append("?");
if (!namedBindings.contains(tokenName)) {
    missingBindings.add(tokenName);
} else {
    missingAnchors.remove(tokenName);
}
}
// add remainder of unmatched
cooked.append(statement.substring(lastMatch));
logger.info("Parsed statement as: " + cooked.toString().replaceAll("\\n","\\\\n"));
return new ParseResult(cooked.toString(),name,bindings,missingBindings,missingAnchors);
}
public static class ParseResult {
private Set<String> missingGenerators;
private Set<String> missingAnchors;
private String statement;
private Map<String,String> bindings;
private String name;
public ParseResult(String stmt, String name, Map<String,String> bindings, Set<String> missingGenerators, Set<String> missingAnchors) {
this.missingGenerators = missingGenerators;
this.missingAnchors = missingAnchors;
this.statement = stmt;
this.name = name;
this.bindings = bindings; // this assignment was missing, leaving getBindings() to return null
}
public String toString() {
String generatorsSummary = (this.missingGenerators.size() > 0) ?
"\nundefined generators:" + this.missingGenerators.stream().collect(Collectors.joining(",", "[", "]")) : "";
return "STMT:" + statement + "\n" + generatorsSummary;
}
public String getName() {
return name;
}
public Map<String,String> getBindings() {
return bindings;
}
public boolean hasError() {
return missingGenerators.size() > 0;
}
public String getStatement() {
return statement;
}
public Set<String> getMissingAnchors() {
return missingAnchors;
}
public Set<String> getMissingGenerators() {
return missingGenerators;
}
}
}


@ -0,0 +1,37 @@
package io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.core;
import java.util.*;
public class CQLStatementGroups {
private Map<String,List<CQLStatementDefParser>> statementGroups = new HashMap<>();
public CQLStatementGroups(Map<String,List<CQLStatementDefParser>> statementGroups) {
this.statementGroups = statementGroups;
}
public List<CQLStatementDefParser> getGroups(String... groupNames) {
List<CQLStatementDefParser> statements = new ArrayList<CQLStatementDefParser>();
for (String groupName : groupNames) {
List<CQLStatementDefParser> adding = statementGroups.getOrDefault(groupName, Collections.emptyList());
statements.addAll(adding);
}
return statements;
}
public String toString() {
StringBuilder sb = new StringBuilder();
List<String> groups = new ArrayList<String>(statementGroups.keySet());
Collections.sort(groups);
sb.append("groups:\n");
for (String group : groups) {
// sb.append("section:").append(section).append("\n");
for (CQLStatementDefParser statementDef : statementGroups.get(group)) {
sb.append(statementDef.toString());
}
sb.append("\n");
}
return sb.toString();
}
}


@ -0,0 +1,182 @@
package io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.core;
import com.codahale.metrics.Histogram;
import com.codahale.metrics.Timer;
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.SimpleStatement;
import com.datastax.driver.core.Statement;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.api.ResultSetCycleOperator;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.api.RowCycleOperator;
import io.nosqlbench.virtdata.api.ContextualArrayBindings;
import java.io.IOException;
import java.io.Writer;
import java.util.concurrent.TimeUnit;
/**
* A ReadyCQLStatement instantiates new statements to be executed at some mix ratio.
* It optionally holds metrics objects for a named statement.
*/
public class ReadyCQLStatement {
private String name;
private ContextualArrayBindings<?, Statement> contextualBindings;
private long ratio;
private ResultSetCycleOperator[] resultSetOperators = null;
private RowCycleOperator[] rowCycleOperators = null;
private Timer successTimer;
private Timer errorTimer;
private Histogram rowsFetchedHisto;
private Writer resultCsvWriter;
public ReadyCQLStatement(ContextualArrayBindings<?, Statement> contextualBindings, long ratio, String name) {
this.contextualBindings = contextualBindings;
this.ratio = ratio;
this.name = name;
}
public ReadyCQLStatement withMetrics(Timer successTimer, Timer errorTimer, Histogram rowsFetchedHisto) {
this.successTimer = successTimer;
this.errorTimer = errorTimer;
this.rowsFetchedHisto = rowsFetchedHisto;
return this;
}
public Statement bind(long value) {
return contextualBindings.bind(value);
}
public ResultSetCycleOperator[] getResultSetOperators() {
return resultSetOperators;
}
public ContextualArrayBindings getContextualBindings() {
return this.contextualBindings;
}
public String getQueryString(long value) {
Object stmt = contextualBindings.getContext();
if (stmt instanceof PreparedStatement) {
String queryString = ((PreparedStatement)stmt).getQueryString();
StringBuilder sb = new StringBuilder(queryString.length()*2);
sb.append("(prepared) ");
return getQueryStringValues(value, queryString, sb);
} else if (stmt instanceof SimpleStatement) {
String queryString = ((SimpleStatement) stmt).getQueryString();
StringBuilder sb = new StringBuilder();
sb.append("(simple) ");
return getQueryStringValues(value, queryString, sb);
}
if (stmt instanceof String) {
return (String)stmt;
}
throw new RuntimeException("context object not recognized for query string:" + stmt.getClass().getCanonicalName());
}
private String getQueryStringValues(long value, String queryString, StringBuilder sb) {
if (!queryString.endsWith("\n")) {
sb.append("\n");
}
sb.append(queryString).append(" VALUES[");
Object[] all = contextualBindings.getBindings().getAll(value);
String delim="";
for (Object o : all) {
sb.append(delim);
delim=",";
sb.append(o.toString());
}
sb.append("]");
return sb.toString();
}
public long getRatio() {
return ratio;
}
public void setRatio(long ratio) {
this.ratio = ratio;
}
/**
* This method should be called when an associated statement is executed successfully.
* @param cycleValue The cycle associated with the execution.
* @param nanoTime The nanoTime duration of the execution.
* @param rowsFetched The number of rows fetched for this cycle
*/
public void onSuccess(long cycleValue, long nanoTime, long rowsFetched) {
if (successTimer!=null) {
successTimer.update(nanoTime, TimeUnit.NANOSECONDS);
}
if (rowsFetchedHisto!=null) {
rowsFetchedHisto.update(rowsFetched);
}
if (resultCsvWriter!=null) {
try {
synchronized(resultCsvWriter) {
// <cycle>,(SUCCESS|FAILURE),<nanos>,<rowsfetched>,<errorname>\n
resultCsvWriter
.append(String.valueOf(cycleValue)).append(",")
.append("SUCCESS,")
.append(String.valueOf(nanoTime)).append(",")
.append(String.valueOf(rowsFetched))
.append(",NONE")
.append("\n");
}
} catch (IOException e) {
throw new RuntimeException(e);
}
}
}
/**
* This method should be called when an associated statement is executed unsuccessfully.
* It should be called only once per cycle in the case of execution error.
* @param cycleValue The cycle associated with the erred execution.
* @param resultNanos The nanoTime duration of the execution.
* @param t The associated throwable
*/
public void onError(long cycleValue, long resultNanos, Throwable t) {
if (errorTimer!=null) {
errorTimer.update(resultNanos, TimeUnit.NANOSECONDS);
}
if (resultCsvWriter!=null) {
try {
synchronized(resultCsvWriter) {
// <cycle>,(SUCCESS|FAILURE),<nanos>,<rowsfetched>,<errorname>\n
resultCsvWriter
.append(String.valueOf(cycleValue)).append(",")
.append("FAILURE,")
.append(String.valueOf(resultNanos)).append(",")
.append("0,")
.append(t.getClass().getSimpleName())
.append("\n");
}
} catch (IOException e) {
throw new RuntimeException(e);
}
}
}
public ReadyCQLStatement withResultSetCycleOperators(ResultSetCycleOperator[] resultSetCycleOperators) {
this.resultSetOperators = resultSetCycleOperators;
return this;
}
public ReadyCQLStatement withRowCycleOperators(RowCycleOperator[] rowCycleOperators) {
this.rowCycleOperators = rowCycleOperators;
return this;
}
public RowCycleOperator[] getRowCycleOperators() {
return this.rowCycleOperators;
}
public ReadyCQLStatement withResultCsvWriter(Writer resultCsvWriter) {
this.resultCsvWriter = resultCsvWriter;
return this;
}
}


@ -0,0 +1,109 @@
package io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.core;
import com.codahale.metrics.Histogram;
import com.codahale.metrics.Timer;
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.SimpleStatement;
import com.datastax.driver.core.Statement;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.api.ResultSetCycleOperator;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.api.RowCycleOperator;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.core.CqlActivity;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.binders.CqlBinderTypes;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.binders.SimpleStatementValuesBinder;
import io.nosqlbench.engine.api.metrics.ActivityMetrics;
import io.nosqlbench.virtdata.api.BindingsTemplate;
import io.nosqlbench.virtdata.api.ContextualBindingsArrayTemplate;
import io.nosqlbench.virtdata.api.ValuesArrayBinder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.Writer;
public class ReadyCQLStatementTemplate {
private final static Logger logger = LoggerFactory.getLogger(ReadyCQLStatementTemplate.class);
private final Session session;
private ContextualBindingsArrayTemplate<?, Statement> template;
private long ratio;
private String name;
private ResultSetCycleOperator[] resultSetCycleOperators;
private RowCycleOperator[] rowCycleOperators;
private Timer successTimer;
private Timer errorTimer;
private Histogram rowsFetchedHisto;
private Writer resultCsvWriter;
public ReadyCQLStatementTemplate(CqlBinderTypes binderType, Session session, PreparedStatement preparedStmt, long ratio, String name) {
this.session = session;
this.name = name;
ValuesArrayBinder<PreparedStatement, Statement> binder = binderType.get(session);
logger.trace("Using binder_type=>" + binder.toString());
template = new ContextualBindingsArrayTemplate<>(
preparedStmt,
new BindingsTemplate(),
binder
);
this.ratio = ratio;
}
public ReadyCQLStatementTemplate(Session session, SimpleStatement simpleStatement, long ratio, String name) {
this.session = session;
this.name = name;
template = new ContextualBindingsArrayTemplate<>(
simpleStatement,
new BindingsTemplate(),
new SimpleStatementValuesBinder()
);
this.ratio = ratio;
}
public ReadyCQLStatement resolve() {
return new ReadyCQLStatement(template.resolveBindings(), ratio, name)
.withMetrics(this.successTimer, this.errorTimer, this.rowsFetchedHisto)
.withResultSetCycleOperators(resultSetCycleOperators)
.withRowCycleOperators(rowCycleOperators)
.withResultCsvWriter(resultCsvWriter);
}
public ContextualBindingsArrayTemplate<?, Statement> getContextualBindings() {
return template;
}
public String getName() {
return name;
}
public void instrument(CqlActivity activity) {
this.successTimer = ActivityMetrics.timer(activity.getActivityDef(), name + "--success");
this.errorTimer = ActivityMetrics.timer(activity.getActivityDef(), name + "--error");
this.rowsFetchedHisto = ActivityMetrics.histogram(activity.getActivityDef(), name + "--resultset-size");
}
public void logResultCsv(CqlActivity activity, String name) {
this.resultCsvWriter = activity.getNamedWriter(name);
}
public void addResultSetOperators(ResultSetCycleOperator... addingOperators) {
resultSetCycleOperators = (resultSetCycleOperators==null) ? new ResultSetCycleOperator[0]: resultSetCycleOperators;
ResultSetCycleOperator[] newOperators = new ResultSetCycleOperator[resultSetCycleOperators.length + addingOperators.length];
System.arraycopy(resultSetCycleOperators,0,newOperators,0,resultSetCycleOperators.length);
System.arraycopy(addingOperators,0,newOperators,resultSetCycleOperators.length,addingOperators.length);
this.resultSetCycleOperators=newOperators;
}
public void addRowCycleOperators(RowCycleOperator... addingOperators) {
rowCycleOperators = (rowCycleOperators==null) ? new RowCycleOperator[0]: rowCycleOperators;
RowCycleOperator[] newOperators = new RowCycleOperator[rowCycleOperators.length + addingOperators.length];
System.arraycopy(rowCycleOperators,0,newOperators,0,rowCycleOperators.length);
System.arraycopy(addingOperators, 0, newOperators,rowCycleOperators.length,addingOperators.length);
this.rowCycleOperators = newOperators;
}
}


@ -0,0 +1,25 @@
package io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.core;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
public class ReadyCQLStatementsTemplate {
private List<ReadyCQLStatementTemplate> readyStatementList = new ArrayList<>();
public void addTemplate(ReadyCQLStatementTemplate t) {
this.readyStatementList.add(t);
}
public List<ReadyCQLStatement> resolve() {
return readyStatementList.stream()
.map(ReadyCQLStatementTemplate::resolve)
.collect(Collectors.toList());
}
public int size() {
return readyStatementList.size();
}
}


@ -0,0 +1,57 @@
package io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.core;
import io.nosqlbench.engine.api.util.Tagged;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
public class TaggedCQLStatementDefs implements Tagged {
private List<CQLStatementDef> statements = new ArrayList<>();
private Map<String,String> tags = new HashMap<>();
private Map<String,String> params = new HashMap<>();
public TaggedCQLStatementDefs(Map<String,String> tags, Map<String,String> params, List<CQLStatementDef> statements) {
this.tags = tags;
this.params = params;
this.statements = statements;
}
public TaggedCQLStatementDefs(Map<String,String> tags, List<CQLStatementDef> statements) {
this.tags = tags;
this.statements = statements;
}
public TaggedCQLStatementDefs(List<CQLStatementDef> statements) {
this.statements = statements;
}
public TaggedCQLStatementDefs() {
}
public List<CQLStatementDef> getStatements() {
return statements;
}
public void setStatements(List<CQLStatementDef> statements) {
this.statements = statements;
}
public Map<String, String> getTags() {
return tags;
}
public void setTags(Map<String, String> tags) {
this.tags = tags;
}
public Map<String, String> getParams() {
return params;
}
public void setParams(Map<String, String> params) {
this.params = params;
}
}


@ -0,0 +1,81 @@
package io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.core;
import io.nosqlbench.engine.api.activityimpl.ActivityInitializationError;
import io.nosqlbench.engine.api.util.NosqlBenchFiles;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.yaml.snakeyaml.TypeDescription;
import org.yaml.snakeyaml.Yaml;
import org.yaml.snakeyaml.constructor.Constructor;
import java.io.BufferedReader;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.function.Function;
import java.util.stream.Collectors;
@SuppressWarnings("ALL")
public class YamlCQLStatementLoader {
private final static Logger logger = LoggerFactory.getLogger(YamlCQLStatementLoader.class);
List<Function<String, String>> transformers = new ArrayList<>();
public YamlCQLStatementLoader() {
}
public YamlCQLStatementLoader(Function<String, String>... transformers) {
this.transformers.addAll(Arrays.asList(transformers));
}
public AvailableCQLStatements load(String fromPath, String... searchPaths) {
InputStream stream = NosqlBenchFiles.findRequiredStreamOrFile(fromPath,
"yaml", searchPaths);
String data = "";
try (BufferedReader buffer = new BufferedReader(new InputStreamReader(stream))) {
data = buffer.lines().collect(Collectors.joining("\n"));
} catch (Exception e) {
throw new RuntimeException("Error while reading yaml stream data:" + e);
}
for (Function<String, String> xform : transformers) {
try {
logger.debug("Applying string transformer to yaml data:" + xform);
data = xform.apply(data);
} catch (Exception e) {
RuntimeException t = new ActivityInitializationError("Error applying string transform to input", e);
logger.error(t.getMessage(), t);
throw t;
}
}
Yaml yaml = getCustomYaml();
try {
Iterable<Object> objects = yaml.loadAll(data);
List<TaggedCQLStatementDefs> stmtListList = new ArrayList<>();
for (Object object : objects) {
TaggedCQLStatementDefs tsd = (TaggedCQLStatementDefs) object;
stmtListList.add(tsd);
}
return new AvailableCQLStatements(stmtListList);
} catch (Exception e) {
logger.error("Error loading yaml from " + fromPath, e);
throw e;
}
}
private Yaml getCustomYaml() {
Constructor constructor = new Constructor(TaggedCQLStatementDefs.class);
TypeDescription tds = new TypeDescription(TaggedCQLStatementDefs.class);
tds.putListPropertyType("statements", CQLStatementDef.class);
constructor.addTypeDescription(tds);
return new Yaml(constructor);
}
}


@ -0,0 +1,17 @@
package io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.rowoperators;
import com.datastax.driver.core.Row;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.api.RowCycleOperator;
/**
 * Print each row to stdout
 */
public class Print implements RowCycleOperator {
@Override
public int apply(Row row, long cycle) {
System.out.println("ROW:" + row);
return 0;
}
}


@ -0,0 +1,34 @@
package io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.rowoperators;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.api.RowCycleOperator;
public enum RowCycleOperators {
saverows(SaveRows.class),
savevars(SaveVars.class),
print(Print.class);
private final Class<? extends RowCycleOperator> implClass;
RowCycleOperators(Class<? extends RowCycleOperator> implClass) {
    this.implClass = implClass;
}
public Class<? extends RowCycleOperator> getImplementation() {
return implClass;
}
public RowCycleOperator getInstance() {
try {
return getImplementation().getConstructor().newInstance();
} catch (Exception e) {
throw new RuntimeException(e);
}
}
public static RowCycleOperator newOperator(String name) {
return RowCycleOperators.valueOf(name).getInstance();
}
}


@ -0,0 +1,47 @@
package io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.rowoperators;
import com.datastax.driver.core.ColumnDefinitions;
import com.datastax.driver.core.Row;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.api.RowCycleOperator;
import io.nosqlbench.virtdata.library.basics.core.threadstate.SharedState;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.stream.Collectors;
/**
* Save specific variables to the thread local object map
*/
public class Save implements RowCycleOperator {
private final static Logger logger = LoggerFactory.getLogger(Save.class);
ThreadLocal<HashMap<String, Object>> tl_objectMap = SharedState.tl_ObjectMap;
private String[] varnames;
public Save(String... varnames) {
this.varnames = varnames;
}
@Override
public int apply(Row row, long cycle) {
try {
HashMap<String, Object> tlvars= tl_objectMap.get();
for (String varname : varnames) {
Object object = row.getObject(varname);
tlvars.put(varname,object);
}
} catch (Exception e) {
List<ColumnDefinitions.Definition> definitions = row.getColumnDefinitions().asList();
logger.error("Unable to save '" + Arrays.toString(varnames) + "' from " +
definitions.stream().map(ColumnDefinitions.Definition::getName)
.collect(Collectors.joining(",","[","]")) + ": " + e.getMessage(),e);
throw e;
}
return 0;
}
}


@ -0,0 +1,18 @@
package io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.rowoperators;
import com.datastax.driver.core.Row;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.api.RowCycleOperator;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.rsoperators.PerThreadCQLData;
import java.util.LinkedList;
public class SaveRows implements RowCycleOperator {
@Override
public int apply(Row row, long cycle) {
LinkedList<Row> rows = PerThreadCQLData.rows.get();
rows.add(row);
return 0;
}
}


@ -0,0 +1,27 @@
package io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.rowoperators;
import com.datastax.driver.core.ColumnDefinitions;
import com.datastax.driver.core.Row;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.api.RowCycleOperator;
import io.nosqlbench.virtdata.library.basics.core.threadstate.SharedState;
import java.util.HashMap;
import java.util.List;
public class SaveVars implements RowCycleOperator {
ThreadLocal<HashMap<String, Object>> tl_objectMap = SharedState.tl_ObjectMap;
@Override
public int apply(Row row, long cycle) {
HashMap<String, Object> tlvars= tl_objectMap.get();
List<ColumnDefinitions.Definition> cdlist = row.getColumnDefinitions().asList();
for (ColumnDefinitions.Definition definition : cdlist) {
String name = definition.getName();
Object object = row.getObject(name);
tlvars.put(name,object);
}
return 0;
}
}


@ -0,0 +1,22 @@
package io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.rsoperators;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Statement;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.api.ResultSetCycleOperator;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.errorhandling.exceptions.ResultSetVerificationException;
public class AssertSingleRowResultSet implements ResultSetCycleOperator {
@Override
public int apply(ResultSet resultSet, Statement statement, long cycle) {
int rowsIncoming = resultSet.getAvailableWithoutFetching();
if (rowsIncoming<1) {
throw new ResultSetVerificationException(cycle, resultSet, statement, "no row in result set, expected exactly 1");
}
if (rowsIncoming>1) {
throw new ResultSetVerificationException(cycle, resultSet, statement, "more than one row in result set, expected exactly 1");
}
return rowsIncoming;
}
}


@ -0,0 +1,15 @@
package io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.rsoperators;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Statement;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.api.ResultSetCycleOperator;
import io.nosqlbench.virtdata.library.basics.core.threadstate.SharedState;
public class ClearVars implements ResultSetCycleOperator {
@Override
public int apply(ResultSet resultSet, Statement statement, long cycle) {
SharedState.tl_ObjectMap.get().clear();
return 0;
}
}


@ -0,0 +1,36 @@
package io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.rsoperators;
import com.datastax.driver.core.*;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.api.ResultSetCycleOperator;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class CqlResultSetLogger implements ResultSetCycleOperator {
private final static Logger logger = LoggerFactory.getLogger(CqlResultSetLogger.class);
private static String getQueryString(Statement stmt) {
if (stmt instanceof PreparedStatement) {
return "(prepared) " + ((PreparedStatement) stmt).getQueryString();
} else if (stmt instanceof SimpleStatement) {
return "(simple) " + ((SimpleStatement) stmt).getQueryString();
} else if (stmt instanceof BoundStatement) {
return "(bound) " + ((BoundStatement) stmt).preparedStatement().getQueryString();
} else {
return "(unknown) " + stmt.toString();
}
}
@Override
public int apply(ResultSet resultSet, Statement statement, long cycle) {
logger.debug("result-set-logger: "
+ " cycle=" + cycle
+ " rows=" + resultSet.getAvailableWithoutFetching()
+ " fetched=" + resultSet.isFullyFetched()
+ " statement=" + getQueryString(statement).stripTrailing()
);
for (Row row : resultSet) {
logger.trace(row.toString());
}
return 0;
}
}


@ -0,0 +1,9 @@
package io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.rsoperators;
import com.datastax.driver.core.Row;
import java.util.LinkedList;
public class PerThreadCQLData {
public final static ThreadLocal<LinkedList<Row>> rows = ThreadLocal.withInitial(LinkedList::new);
}


@ -0,0 +1,23 @@
package io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.rsoperators;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Statement;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.api.ResultSetCycleOperator;
import io.nosqlbench.virtdata.library.basics.core.threadstate.SharedState;
import java.util.HashMap;
public class PopVars implements ResultSetCycleOperator {
@Override
public int apply(ResultSet resultSet, Statement statement, long cycle) {
Object o = SharedState.tl_ObjectStack.get().pollLast();
if (o instanceof HashMap) {
    SharedState.tl_ObjectMap.set((HashMap) o);
    return 0;
} else {
    throw new RuntimeException("Tried to pop thread local data from stack, but there was none.");
}
}
}


@ -0,0 +1,14 @@
package io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.rsoperators;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Statement;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.api.ResultSetCycleOperator;
public class Print implements ResultSetCycleOperator {
@Override
public int apply(ResultSet resultSet, Statement statement, long cycle) {
System.out.println("RS:"+ resultSet.toString());
return 0;
}
}


@ -0,0 +1,20 @@
package io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.rsoperators;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Statement;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.api.ResultSetCycleOperator;
import io.nosqlbench.virtdata.library.basics.core.threadstate.SharedState;
import java.util.HashMap;
public class PushVars implements ResultSetCycleOperator {
@Override
public int apply(ResultSet resultSet, Statement statement, long cycle) {
HashMap<String, Object> existingVars = SharedState.tl_ObjectMap.get();
HashMap<String, Object> topush = new HashMap<>(existingVars);
SharedState.tl_ObjectStack.get().addLast(topush);
return 0;
}
}


@ -0,0 +1,40 @@
package io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.rsoperators;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.api.ResultSetCycleOperator;
public enum ResultSetCycleOperators {
pushvars(PushVars.class),
popvars(PopVars.class),
clearvars(ClearVars.class),
trace(TraceLogger.class),
log(CqlResultSetLogger.class),
assert_singlerow(AssertSingleRowResultSet.class),
print(Print.class);
private final Class<? extends ResultSetCycleOperator> implClass;
ResultSetCycleOperators(Class<? extends ResultSetCycleOperator> implClass) {
    this.implClass = implClass;
}
public Class<? extends ResultSetCycleOperator> getImplementation() {
return implClass;
}
public ResultSetCycleOperator getInstance() {
try {
return getImplementation().getConstructor().newInstance();
} catch (Exception e) {
throw new RuntimeException(e);
}
}
public static ResultSetCycleOperator newOperator(String name) {
return ResultSetCycleOperators.valueOf(name).getInstance();
}
}


@ -0,0 +1,16 @@
package io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.rsoperators;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Row;
import com.datastax.driver.core.Statement;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.api.ResultSetCycleOperator;
import java.util.LinkedList;
public class RowCapture implements ResultSetCycleOperator {
@Override
public int apply(ResultSet resultSet, Statement statement, long cycle) {
    // Assuming the intent is to capture rows into the per-thread row state
    // (the original body was a no-op), drain the result set; this consumes it.
    LinkedList<Row> rows = PerThreadCQLData.rows.get();
    for (Row row : resultSet) {
        rows.add(row);
    }
    return 0;
}
}


@ -0,0 +1,97 @@
package io.nosqlbench.activitytype.cql.ebdrivers.cql.statements.rsoperators;
import com.datastax.driver.core.ExecutionInfo;
import com.datastax.driver.core.QueryTrace;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Statement;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.api.ResultSetCycleOperator;
import io.nosqlbench.activitytype.cql.ebdrivers.cql.core.StatementModifier;
import io.nosqlbench.engine.api.util.SimpleConfig;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.FileDescriptor;
import java.io.FileWriter;
import java.io.IOException;
import java.text.SimpleDateFormat;
import java.util.Date;
public class TraceLogger implements ResultSetCycleOperator, StatementModifier {
private final static Logger logger = LoggerFactory.getLogger(TraceLogger.class);
private static SimpleDateFormat sdf = new SimpleDateFormat("HH:mm:ss.SSS");
private final long modulo;
private final String filename;
private final FileWriter writer;
private final ThreadLocal<StringBuilder> tlsb = ThreadLocal.withInitial(StringBuilder::new);
public TraceLogger(SimpleConfig conf) {
this(
conf.getLong("modulo").orElse(1L),
conf.getString("filename").orElse("tracelog")
);
}
public TraceLogger(long modulo, String filename) {
this.modulo = modulo;
this.filename = filename;
try {
if (filename.equals("stdout")) {
writer = new FileWriter(FileDescriptor.out);
} else {
writer = new FileWriter(filename);
}
} catch (IOException e) {
throw new RuntimeException(e);
}
}
@Override
public int apply(ResultSet rs, Statement statement, long cycle) {
if ((cycle%modulo)!=0) {
return 0;
}
ExecutionInfo ei = rs.getExecutionInfo();
QueryTrace qt = ei.getQueryTrace();
StringBuilder sb = tlsb.get();
sb.setLength(0);
sb.append("\n---------------------------- QueryTrace Summary ---------------------------\n");
sb.append("\n Coordinator: ").append(qt.getCoordinator());
sb.append("\n Cycle: ").append(cycle);
sb.append("\nServer-side query duration (us): ").append(qt.getDurationMicros());
sb.append("\n Request type: ").append(qt.getRequestType());
sb.append("\n Start time: ").append(qt.getStartedAt());
sb.append("\n Trace UUID: ").append(qt.getTraceId());
sb.append("\n Params: ").append(qt.getParameters());
sb.append("\n--------------------------------------------------------------------------\n");
sb.append("\n---------------------------- QueryTrace Events ---------------------------\n");
for (QueryTrace.Event event : qt.getEvents()) {
sb.append("\n Date: ").append(sdf.format(new Date(event.getTimestamp())));
sb.append("\n Source: ").append(event.getSource());
sb.append("\nSourceElapsedMicros: ").append(event.getSourceElapsedMicros());
sb.append("\n Thread: ").append(event.getThreadName());
sb.append("\n Description: ").append(event.getDescription()).append("\n");
}
sb.append("\n--------------------------------------------------------------------------\n");
try {
writer.append(sb.toString());
writer.flush();
} catch (IOException e) {
throw new RuntimeException(e);
}
return 0;
}
@Override
public Statement modify(Statement statement, long cycle) {
if ((cycle%modulo)==0) {
statement.enableTracing();
}
return statement;
}
}


@ -0,0 +1 @@
io.nosqlbench.virtdata.processors.ServiceProcessor


@ -0,0 +1,98 @@
# cql activity type - advanced features
This is an addendum to the standard CQL Activity Type docs. For that, see "cql".
Use the features in this guide carefully. They are less commonly used than the
main CQL features, and are documented more sparsely.
### ResultSet and Row operators
Within the CQL activity type, in synchronous mode (activities without
the async= parameter), you can attach operators to a
given statement such that it will get per-statement handling. These
operators are ways of interrogating the result of an operation, saving
values, or managing other side-effects for specific types of testing.
When enabled for a statement, operators are applied in this order:
1. Activity-level ResultSet operators are applied in specified order.
2. Statement-level ResultSet operators are applied in specified order.
3. Activity-level Row operators are applied in specified order.
4. Statement-level Row operators are applied in specified order.
The result set handling does not take any extra steps to make
a copy of the data. When a row is read from the result set,
it is consumed from it. Thus, if you want to do anything with
row data, you must apply a row operator as explained below.
### CQL Statement Parameters
- **rsoperators** - If provided as a CQL statement param, then the
list of operator names that follow, separated by a comma, will
be used to attach ResultSet operators to the given statement.
Such operators act on the whole result set of a statement.
- **rowoperators** - If provided as a CQL statement param, then the
list of operator names that follow, separated by a comma, will
be used to attach Row operators to the given statement.
## Available ResultSet Operators
- pushvars - Push a copy of the current thread local variables onto
the thread-local stack. This does nothing with the ResultSet data,
but is meant to be used for stateful management of these in
conjunction with the row operators below.
- popvars - Pop the last thread local variable set from the thread-local
stack into vars, replacing the previous content. This does nothing
with the ResultSet data.
- clearvars - Clears the contents of the thread local variables. This
does nothing with the ResultSet data.
- trace - Flags a statement to be traced on the server-side and then logs
the details of the trace to the trace log file.
- log - Logs basic data to the main log. This is useful to verify that
operators are loading and triggering as expected.
- assert_singlerow - Throws an exception (ResultSetVerificationException)
if the ResultSet has more or less than one row.
Examples:
```
statements:
- s1: |
a statement
rsoperators: pushvars, clearvars
```
## Available Row Operators
- savevars - Copies the values of the row into the thread-local variables.
- saverows - Copies the rows into a special CQL-only thread local row state.
Examples:
```
statements:
- s2: |
a statement
rowoperators: saverows
```
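Both kinds of operators can be attached to the same statement. Following the
ordering rules above, the ResultSet operators run before the Row operators.
A combined sketch:
```
statements:
 - s3: |
     a statement
   rsoperators: log
   rowoperators: savevars
```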
## Injecting additional Queries
It is possible to inject new operations to an activity. However, such
operations are _indirect_ to cycles, since they must be based on the results
of other operations. As such, they will not be represented in cycle output or
other advanced features. This is a specific feature for the CQL activity --
implemented internal to the way a CQL cycle is processed. A future version
of EngineBlock will provide a more uniform way to achieve this result across
activity types. For now, remember that this is a CQL-only capability.
- subquery-statement - Adds additional operations to the current cycle, based
  on the contents of the CQL-only thread-local row state. The value of this
  parameter is the name of a statement in the current YAML. Each row is
  consumed from the saved row list, and a new operation is added to the
  current cycle for it.
- subquery-concurrency - Allow subqueries to execute with concurrency, up to
the level specified.
default: 1
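A sketch of how these parameters might be combined, assuming they are set as
statement params alongside the row operators (statement names are illustrative):
```
statements:
 - s1: |
     a statement
   rowoperators: saverows
   subquery-statement: s2
   subquery-concurrency: 2
 - s2: |
     a follow-up statement
```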


@ -0,0 +1,397 @@
# cql activity type
This is an activity type which allows for the execution of CQL statements.
This particular activity type is wired synchronously within each client
thread, however the async API is used in order to expose fine-grained
metrics about op binding, op submission, and waiting for a result.
### Example activity definitions
Run a cql activity named 'cql1', with definitions from activities/cqldefs.yaml
~~~
... type=cql alias=cql1 yaml=cqldefs
~~~
Run a cql activity defined by cqldefs.yaml, but with shortcut naming
~~~
... type=cql yaml=cqldefs
~~~
Only run statement groups which match a tag regex
~~~
... type=cql yaml=cqldefs tags=group:'ddl.*'
~~~
Run the matching 'dml' statements, with 100 cycles, from [1000..1100)
~~~
... type=cql yaml=cqldefs tags=group:'dml.*' cycles=1000..1100
~~~
This last example shows that the cycle range is [inclusive..exclusive),
to allow for stacking test intervals. This is standard across all
activity types.
### CQL ActivityType Parameters
- **driver** - default: dse - The type of driver to use, either dse, or
oss. If you need DSE-specific features, use the dse driver. If you are
connecting to an OSS Apache Cassandra cluster, you must use the oss
driver. The oss driver option is only available in ebdse.
- **host** - The host or hosts to use for connection points to
the cluster. If you specify multiple values here, use commas
with no spaces.
Examples:
- `host=192.168.1.25`
- `host=192.168.1.25,testhost42`
- **yaml** - The file which holds the schema and statement defs.
(no default, required)
- **port** - The port to connect with
- **cl** - An override to consistency levels for the activity. If
this option is used, then all consistency levels will be replaced
by this one for the current activity, and a log line explaining
the difference with respect to the yaml will be emitted.
This is not a dynamic parameter. It will only be applied at
activity start.
- **cbopts** - default: none - this is how you customize the cluster
settings for the client, including policies, compression, etc. This
is a string of *Java*-like method calls just as you would use them
in the Cluster.Builder fluent API. They are evaluated inline
with the default Cluster.Builder options not covered below.
Example: cbopts=".withCompression(ProtocolOptions.Compression.NONE)"
- **whitelist** default: none - Applies a whitelist policy to the load balancing
policy in the driver. If used, a WhitelistPolicy(RoundRobinPolicy())
will be created and added to the cluster builder on startup.
Examples:
- whitelist=127.0.0.1
- whitelist=127.0.0.1:9042,127.0.0.2:1234
- **retrypolicy** default: none - Applies a retry policy in the driver
The only option supported for this version is `retrypolicy=logging`,
which uses the default retry policy, but with logging added.
- **pooling** default: none - Applies the connection pooling options
to the policy.
Examples:
- `pooling=4:10`
keep between 4 and 10 connections to LOCAL hosts
- `pooling=4:10,2:5`
keep 4-10 connections to LOCAL hosts and 2-5 to REMOTE
- `pooling=4:10:2000`
keep between 4-10 connections to LOCAL hosts with
up to 2000 requests per connection
- `pooling=5:10:2000,2:4:1000` keep between 5-10 connections to
LOCAL hosts with up to 2000 requests per connection, and 2-4
connection to REMOTE hosts with up to 1000 requests per connection
Additionally, you may provide the following options on pooling. Any
of these that are provided must appear in this order:
`,heartbeat_interval_s:n,idle_timeout_s:n,pool_timeout_ms:n`, so a
full example with all options set would appear as:
`pooling=5:10:2000,2:4:1000,heartbeat_interval_s:30,idle_timeout_s:120,pool_timeout_ms:5`
- **socketoptions** default: none - Applies any of the valid socket
options to the client when the session is built. Each of the options
uses the long form of the name, with either a numeric or boolean
value. Individual sub-parameters should be separated by a comma, and
the parameter names and values can be separated by either equals or a
colon. All of these values may be changed:
- read_timeout_ms
- connect_timeout_ms
- keep_alive
- reuse_address
- so_linger
- tcp_no_delay
- receive_buffer_size
- send_buffer_size
Examples:
- `socketoptions=read_timeout_ms=23423,connect_timeout_ms=4444`
- `socketoptions=tcp_no_delay=true`
- **tokens** default: unset - Only executes statements that fall within
any of the specified token ranges. Others are counted in metrics
as skipped-tokens, with a histogram value of the cycle number.
Examples:
- tokens=1:10000,100000:1000000
- tokens=1:123456
- **maxtries** - default: 10 - how many times an operation may be
attempted before it is disregarded
- **maxpages** - default: 1 - how many pages can be read from a query which
is larger than the fetchsize. If more than this number of pages
is required for such a query, then an UnexpectedPaging exception
is passed to the error handler as explained below.
- **fetchsize** - controls the driver parameter of the same name.
Suffixed units can be used here, such as "50K". If this parameter
is not present, then the driver option is not set.
- **cycles** - standard, however the cql activity type will default
this to however many statements are included in the current
activity, after tag filtering, etc.
- **username** - the user to authenticate as. This option requires
that one of **password** or **passfile** also be defined.
- **password** - the password to authenticate with. This will be
ignored if passfile is also present.
- **passfile** - the file to read the password from. The first
line of this file is used as the password.
- **ssl** - enable ssl if you want transport level encryption.
Examples:
- `ssl=true`
enable ssl
- `ssl=false`
disable ssl (the default)
- **keystore** - specify the keystore location for SSL.
Examples:
- `keystore=JKS` (the default)
- **kspass** - specify the password to the keystore for SSL.
Examples:
- `kspass=mypass`
- **tlsversion** - specify the TLS version to use for SSL.
Examples:
- `tlsversion=TLSv1.2` (the default)
- **jmxreporting** - enable JMX reporting if needed.
Examples:
- `jmxreporting=true`
- `jmxreporting=false` (the default)
- **alias** - this is a standard engineblock parameter, however
the cql type will use the yaml value also as the alias value
when not specified.
- **errors** - error handler configuration.
(default errors=stop,retryable->retry,unverified->stop)
Examples:
- errors=stop,WriteTimeoutException=histogram
- errors=count
- errors=warn,retryable=count
See the separate help on 'cqlerrors' for detailed
configuration options.
- **defaultidempotence** - sets default idempotence on the
driver options, but only if it has a value.
(default unset, valid values: true or false)
- **speculative** - sets the speculative retry policy on the cluster.
(default unset)
This can be in one of the following forms:
- pT:E:L - where :L is optional and
T is a floating point threshold between 0.0 and 100.0 and
E is an allowed number of concurrent speculative executions and
L is the maximum latency tracked in the tracker instance
(L defaults to 15000 when left out)
Examples:
- p99.8:5:15000ms - 99.8 percentile, 5 executions, 15000ms max tracked
- p98:2:10000ms - 98.0 percentile, 2 executions allowed, 10s max tracked
- Tms:E - where :E is optional and
T is a constant threshold latency and
E is the allowed number of concurrent speculative retries
(E default to 5 when left out)
Examples:
- 100ms:5 - constant threshold of 100ms and 5 allowed executions
- **seq** - selects the statement sequencer used with statement ratios.
(default: bucket)
(options: concat | bucket | interval)
The concat sequencer repeats each statement in order until the ratio
is achieved.
The bucket sequencer uses simple round-robin distribution to plan
statement ratios, a simple but unbalanced form of interleaving.
The interval sequencer apportions statements over time and then by
order of appearance for ties. This has the effect of interleaving
statements from an activity more evenly, but is less obvious in how
it works.
All of the sequencers create deterministic schedules which use an internal
lookup table for indexing into a list of possible statements.
- **trace** - enables a trace on a subset of operations. This is disabled
by default.
Examples:
`trace=modulo:100,filename:trace.log`
The above traces every 100th cycle to a file named trace.log.
`trace=modulo:1000,filename:stdout`
The above traces every 1000th cycle to stdout.
If the trace log is not specified, then 'tracelog' is assumed.
If the filename is specified as stdout, then traces are dumped to stdout.
- **clusterid** - names the configuration to be used for this activity. Within
a given scenario, any activities that use the same name for clusterid will
share a session and cluster.
default: 'default'
- **drivermetrics** - enable reporting of driver metrics.
default: false
- **driverprefix** - set the metrics name that will prefix all CQL driver metrics.
default: 'driver.*clusterid*.'
The clusterid specified is included so that separate cluster and session
contexts can be reported independently for advanced tests.
- **usercodecs** - enable the loading of user codec libraries
for more details see: com.datastax.codecs.framework.UDTCodecInjector in the ebdse
code base. This is for dynamic codec loading with user-provided codecs mapped
via the internal UDT APIs.
default: false
- **secureconnectbundle** - used to connect to CaaS, accepts a path to the secure connect bundle
that is downloaded from the CaaS UI.
Examples:
- `secureconnectbundle=/tmp/secure-connect-my_db.zip`
- `secureconnectbundle="/home/automaton/secure-connect-my_db.zip"`
- **insights** - Set to false to disable the driver from sending insights monitoring information
- `insights=false`
- **tickduration** - sets the tickDuration (milliseconds) of HashedWheelTimer of the
java driver. This timer is used to schedule speculative requests.
Examples:
- `tickduration=10`
- `tickduration=100` (driver default value)
- **compression** - sets the transport compression to use for this
activity. Valid values are 'LZ4' and 'SNAPPY'. Both types are bundled
with EBDSE.
### CQL YAML Parameters
A uniform YAML configuration format was introduced with engineblock 2.0.
As part of this format, statement parameters were added for the CQL Activity Type.
These parameters will be consolidated with the above parameters in time, but for
now **they are limited to a YAML params block**:
params:
ratio: 1
# Sets the statement ratio within the operation sequencer
# scheme. Integers only.
# When preparing the operation order (AKA sequencing),
# this determines the relative frequency of the associated statements.
cl: ONE
# Sets the consistency level, using any of the standard
# identifiers from com.datastax.driver.core.ConsistencyLevel,
# any one of:
# LOCAL_QUORUM, ANY, ONE, TWO, THREE, QUORUM, ALL,
# EACH_QUORUM, SERIAL, LOCAL_SERIAL, LOCAL_ONE
prepared: true
# By default, all statements are prepared. If you are
# creating schema, set this to false.
idempotent: false
# For statements that are known to be idempotent, set this
# to true
instrument: false
# If a statement has instrument set to true, then
# individual Timer metrics will be tracked for
# that statement for both successes and errors,
# using the given statement name.
logresultcsv: true
OR
logresultcsv: myfilename.csv
# If a statement has logresultcsv set to true,
# then individual operations will be logged to a CSV file.
# In this case the CSV file will be named as
# <statement-name>--results.csv.
# If the value is present and not "true", then the value will
# be used as the name of the file.
#
# The format of the file is:
# <cycle>,(SUCCESS|FAILURE),<nanos>,<rows-fetched>,(<error-class,NONE)
# NOTES:
# 1) BE CAREFUL with this setting. A single logged line per
# result is not useful for high-speed testing as it will
# impose IO loads on the client to slow it down.
# 2) BE CAREFUL with the name. It is best to just pick good
# names for your statement defs so that everything remains
# coherent and nothing gets accidentally overwritten.
# 3) If logresultcsv is provided at the activity level, it
# applies to all statements, and the only valid values
# there are true and false.
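For example, a document section that applies several of these params to its
statements might look like this minimal sketch (names are illustrative):
~~~
---
tags:
 group: dml
params:
 cl: LOCAL_QUORUM
 instrument: true
statements:
 - write-sample: |
     a write statement
   ratio: 2
 - read-sample: |
     a read statement
   ratio: 1
~~~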
### Generic Parameters
*provided by the runtime*
- **targetrate** - The target rate in ops/s
- **linkinput** - if the name of another activity is specified, this activity
will only go as fast as that one.
- **tags** - optional filter for matching tags in yaml sections (detailed help
link needed)
- **threads** - the number of client threads driving this activity
### Metrics
- alias.cycles - (provided by engineblock) A timer around the whole cycle
- alias.phase - (provided by engineblock) A timer around additional phases
within a cycle. For this driver, it captures all the work in the client
around fetching additional pages for paged reads.
- alias.bind - A timer which tracks the performance of the statement
binding logic, including the generation of data immediately prior
- alias.execute - A timer which tracks the performance of op submission
only. This is the async execution call, broken out as a separate step.
- alias.result - A timer which tracks the performance of an op result only.
This is the async get on the future, broken out as a separate step.
- alias.tries - A histogram of how many tries were required to get a
completed operation
- alias.pages - A timer which tracks the performance of paging, specific
to more than 1-page query results. i.e., if all reads return within 1
page, this metric will not have any data.
- alias.strides - A timer around each stride of operations within a thread
- alias.skipped-tokens - A histogram that records the count and cycle values
of skipped tokens.
- alias.result-success - A timer that records rate and histograms of the time
it takes from submitting a query to completely reading the result
set that it returns, across all pages. This metric is only counted
for non-exceptional results, while the result metric above includes
all operations.
##### Metrics Details
The cycles metric captures data on the outside of each operation, but it also
includes any internal processing time needed by the client. Within the
cycles metric, bind, execute, and result all occur in sequence. There may
be multiple values recorded for execute and result for a single bind event.
This is because a bind exception is final, but an execute and result may
both be retried. The tries metric captures how many tries were required. It
is a histogram only. If the metric for tries is 1 across the board, then
no operation had to be retried.
As for a normal single page read result, both the execute and result timers
are included within the code block wrapped by the pages metric.
### YAML Format
The YAML file for a CQL activity has the following structure:
1. One or more document sections, separated with '---' and a newline.
   1. An optional tag map
   2. One or more statements
      1. a descriptive name
      2. prepared: false, if you want to modify the default (prepared:true)
      3. statement CQL
      4. statement data bindings
Each section is a separate yaml document internally to the yaml file. The
tags that are provided allow for subgroups of statements to be activated.
All statements in a matching document (when filtered by tags) are included
in the statement rotation.
If no tags are provided in a document section, then it will be matched by
all possible tag filters. Conversely, if no tag filter is applied in
the activity definition, all tagged documents will match.
Data bindings specify how values are generated to plug into each operation. More
details on data bindings are available in the activity usage guide.
### Parameter Templating
Double angle brackets may be used to drop parameters into the YAML
arbitrarily. When the YAML file is loaded, and only then, these parameters
are interpolated from activity parameters like those above. This allows you
to create activity templates that can be customized simply by providing
additional parameters to the activity. There are two forms,
\<\<some_var_name:default_value\>\> and \<\<some_var_name\>\>. The first
form contains a default value. In any case, if one of these parameters is
encountered and a qualifying value is not found, an error will be thrown.
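For example, with a hypothetical parameter named limit:
~~~
statements:
 - read: |
     select * from <<keyspace:baselines>>.iot limit <<limit:10>>
~~~
Providing limit=100 as an activity parameter would substitute 100; otherwise
the default of 10 is used. The keyspace parameter behaves the same way, with
a default of 'baselines'.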
### YAML Location
The YAML file referenced in the yaml= parameter will be searched for in the following places, in this order:
1. A URL, if it starts with 'http:' or 'https:'
2. The local filesystem, if it exists there
3. The internal classpath and assets in the jar.
The '.yaml' suffix is not required in the yaml= parameter, however it is
required on the actual file. As well, the logical search path "activities/"
will be used if necessary to locate the file, both on the filesystem and in
the classpath.
There is a basic example below that can be copied as a starting template.
## YAML Examples
Please see the bundled activities with ebdse for examples.
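A minimal starting template, as referenced above (a sketch only; the keyspace,
table, and binding recipes are illustrative and should be adapted):
~~~
---
tags:
 group: ddl
statements:
 - create-table: |
     create table if not exists <<keyspace:testks>>.testtable (
      key text primary key,
      value text
     );
   prepared: false
---
tags:
 group: dml
statements:
 - write: |
     insert into <<keyspace:testks>>.testtable (key, value)
     values ({key},{value});
   bindings:
    key: Mod(1000000); ToString()
    value: Hash(); ToString()
~~~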


@ -0,0 +1,198 @@
# cql error handling
The error handling facility utilizes a type-aware error handler
provided by nosqlbench. However, it is much more modular and configurable
than most error handlers found in other testing tools. The trade-off here
is that so many options may bewilder newer users. If that describes you, then
simply use one of these basic recipes in your activity parameters:
# error and stop on any exception
# incidentally, this is the same as the deprecated diagnose=true option
errors=stop
# error and stop for (usually) unrecoverable errors
# warn and retry everything else (this is actually the default)
errors=stop,retryable->retry
# record histograms for WriteTimeoutException, error and stop
# for everything else.
errors=stop,WriteTimeoutException:histogram
As you can see, the error handling format is pretty basic. Behind this basic
format is a modular and flexible configuration scheme that should allow for
either simple or advanced testing setups. The errors value is simply a list of
error-to-handler-verb mappings, but it also allows a simple verb to be specified to
cover all error types. Going from left to right, each mapping is applied in
order. You can use any of ':', '->', or '=' for the error to verb assignment
operator.
Anytime you assign a value to the *errors* parameter for a cql activity, you are
replacing the default 'stop,retryable->retry,unverified->stop' configuration.
That is, each time this value is assigned, a new error handler is configured and
installed according to the new value.
### errors= parameter format

The errors parameter contains a comma-separated list of one or more
handler assignments, where the error can be in any of these forms:

- a group name ( "unapplied" | "retryable" | "unverified" )
- a single exception name like 'WriteTimeoutException', or a substring of
  it that is long enough to avoid ambiguity (only one match allowed)
- a regex, like '.*WriteTimeout.*' (multiple matches allowed)

The verb can be any of the named starting points in the error handler
stack, as explained below.
As a special case, if a handler assignment consists of only a single word,
then it is taken as the default handler verb. The default is applied as a
last resort to any error which does not match another handler by class type
or parent class type. This allows for simple hard-wiring of a default
handler for all non-specific errors, in the form:
    # force the test to stop with any error, even retryable ones
    errors=stop
### Error Handler Verbs

When an error occurs, you can control how it is handled for the most part.
This is the error handler stack:

- **stop** - log an error, then rethrow the causing exception,
  causing ebdse to shut down the current scenario.
- **warn** - log a warning, with details about the error
  and the associated statement.
- **retry** - retry the operation if the number of retries hasn't been
  used up *and* the causing exception falls in the set of
  *retryable* errors.
- **histogram** - keep a histogram of the exception counts, under the
  name errorhistos.classname, using the simple class name. The magnitude
  recorded in these histograms is how long the operation was pending
  before the related error occurred.
- **count** - keep a count in metrics for the exception, under the name
  errorcounts.classname, using the simple class name.
- **ignore** - do nothing; do not even retry or count.
The handling verbs above are ordered from most invasive at the top to
least invasive at the bottom. With the exception of the **stop** handler,
selecting a verb applies it and every handler below it in the stack. For
now, the error handling stack is exactly as above. You can't modify it,
although it may be made configurable in the future.

One way to choose the right handler is to ask "How serious is this type of
error to the test results if it happens?" In general, it is best to be
conservative and choose a more aggressive setting, unless you specifically
want to measure how often a given error happens, for example.
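For example, a sketch of the two mindsets (OverloadedException here stands
in for whichever error you happen to be studying):

    # conservative: stop for anything unexpected, retry the retryable set
    errors=stop,retryable->retry

    # measuring: keep the conservative defaults, but count a specific
    # error under study instead of stopping on it
    errors=stop,retryable->retry,OverloadedException:count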
Each exception type has one and only one error handler at all times. No
matter how you set an error handler for a class, only the most recently
assigned handler stack will be active for it. This is important to keep in
mind when you make multiple assignments to potentially overlapping sets of
error types. In any case, the default 'stop' handler will always catch an
error that does not otherwise have a more specific handler assigned to it.
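For example (a sketch), the regex below first assigns the histogram handler
to every exception with 'Timeout' in its name (such as WriteTimeoutException
and ReadTimeoutException), and the later, more specific assignment then
replaces the handler stack for WriteTimeoutException only:

    errors=stop,.*Timeout.*:histogram,WriteTimeoutException:warn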
##### Error Types

The errors that can be handled are simply all the exception types that can
be thrown by either the DataStax Java Driver for DSE *or* the ebdse client
itself. This includes errors that indicate a potentially intermittent
failure condition, as well as errors that are more permanent in nature,
like WriteFailure, which would continue to occur on subsequent retries
without some form of intervention. The ebdse application also generates
some additional exceptions that capture common error cases which the Java
driver doesn't or shouldn't have a special case for, but which may be
important for ebdse testing purposes.

In ebdse, every error handler is specific to a particular kind of exception
that you would catch in a typical application using DSE, although you can
tell a handler to take care of a whole category of problems as long as you
know the right name to use.
##### Assigning by Java Exception Type

Error handlers can be assigned to a common parent type in order to also
handle all of its known subtypes. Hence, the default on the top line of the
hierarchy below applies to all of the driver exceptions that do not have a
more specific handler assigned, either directly or by a closer parent.
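For example (a sketch), this assigns the count handler to
QueryValidationException, which also covers subtypes like
InvalidQueryException and SyntaxError, while everything else keeps the
stop default:

    errors=stop,QueryValidationException:count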
##### Assigning by Error Group Name

Error types for which you would commonly assign the same handling behavior
are also grouped under predefined names. If a handler is assigned to one of
the group names, then the handler is assigned to each of the exceptions in
the group individually. For example, 'errors=retryable=stop' assigns the
stop handler to every exception in the retryable group.
### Recognized Exceptions
The whole hierarchy of exceptions as of DSE Driver 3.2.0 is as follows,
with the default configuration shown.
    DriverException -> stop
        FrameTooLongException
        CodecNotFoundException
        AuthenticationException
        TraceRetrievalException
        UnsupportedProtocolVersionException
        NoHostAvailableException -> retry (group: retryable)
        QueryValidationException (abstract)
            InvalidQueryException
                InvalidConfigurationInQueryException
            UnauthorizedException
            SyntaxError
            AlreadyExistsException
            UnpreparedException
        InvalidTypeException
        QueryExecutionException (abstract)
            UnavailableException
            BootstrappingException -> retry (group: retryable)
            OverloadedException -> retry (group: retryable)
            TruncateException
            QueryConsistencyException (abstract)
                WriteTimeoutException -> retry (group: retryable)
                WriteFailureException -> retry (group: retryable)
                ReadFailureException
                ReadTimeoutException
            FunctionExecutionException
        DriverInternalError
            ProtocolError
            ServerError
        BusyPoolException
        ConnectionException
            TransportException
            OperationTimedOutException -> retry (group: retryable)
        PagingStateException
        UnresolvedUserTypeException
        UnsupportedFeatureException
        BusyConnectionException
    EbdseException (abstract) -> stop
        CQLResultSetException (abstract)
            UnexpectedPagingException
            ResultSetVerificationException
            RowVerificationException
        ChangeUnappliedCycleException (group:unapplied)
        RetriesExhaustedCycleException -> count
##### Additional Exceptions
The following exceptions are synthesized directly by ebdse, but get
handled alongside the normal exceptions as explained above.
1. ChangeUnappliedCycleException - The change-unapplied condition is
   important to detect when it is not expected, although some testing may
   intentionally send changes that can't be applied. For this reason, it is
   kept as a separately controllable error group "unapplied".
2. UnexpectedPagingException - This exception is meant to keep users from
   being surprised when there is paging activity in the workload, as this
   can have other implications for tuning and performance. See the details
   on the **maxpages** parameter, and the *fetch size* parameter in the
   Java driver, for more information.
3. Unverified\* exceptions - For data set verification; these exceptions
   indicate when a cqlverify activity has found rows that differ from what
   was expected.
4. RetriesExhaustedCycleException - Indicates that all retries were
   exhausted before a given operation could complete successfully.
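For example (a sketch), a test that intentionally sends changes which may
not be applied could downgrade the "unapplied" group to counting while
keeping the rest of the defaults:

    errors=stop,retryable->retry,unverified->stop,unapplied->count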
@ -0,0 +1,42 @@
    DriverException -> stop
        FrameTooLongException
        CodecNotFoundException
        AuthenticationException
        TraceRetrievalException
        UnsupportedProtocolVersionException
        NoHostAvailableException
        QueryValidationException (abstract)
            InvalidQueryException
                InvalidConfigurationInQueryException
            UnauthorizedException
            SyntaxError
            AlreadyExistsException
            UnpreparedException
        InvalidTypeException
        QueryExecutionException (abstract) -> retry
            UnavailableException
            BootstrappingException
            OverloadedException
            TruncateException
            QueryConsistencyException (abstract)
                WriteTimeoutException
                WriteFailureException
                ReadFailureException
                ReadTimeoutException
            FunctionExecutionException
        DriverInternalError
            ProtocolError
            ServerError
        BusyPoolException
        ConnectionException
            TransportException
            OperationTimedOutException
        PagingStateException
        UnresolvedUserTypeException
        UnsupportedFeatureException
        BusyConnectionException
    EbdseCycleException (abstract) -> stop
        ChangeUnappliedCycleException
        ResultSetVerificationException
        RowVerificationException (abstract)
        UnexpectedPagingException