partial staging of cqld4

Jonathan Shook 2020-05-05 20:53:24 -05:00
parent 65cbbe7fb4
commit 8b0bfef235
98 changed files with 7063 additions and 20 deletions

View File

@ -2,8 +2,8 @@ name: release
on:
push:
tags:
- invoke-release
paths:
- RELEASENOTES.**
jobs:
release:
@ -15,20 +15,18 @@ jobs:
java-version: '14'
java-package: jdk
architecture: x64
- name: avoid release loop
run: scripts/avoid-release-loop.sh
env:
GIT_RELEASE_BOT_NAME: "nb-droid"
- name: capture tty
run: |
echo "::set-env name=TTY::"$(tty)
echo "::set-env name=GPG_TTY::"$(tty)
- name: initialize gpg
# env:
# GPG_TTY: ${TTY}
run: |
set -x
echo "${{ secrets.GITHUB_GPG_KEY }}" | base64 -d > private.key
@ -36,7 +34,7 @@ jobs:
rm ./private.key
echo "gnupg files:"
ls -l ~/.gnupg/
- name: set git username
run: git config --global user.email "${{ secrets.NBDROID_EMAIL }}"
- name: set git email
@ -55,7 +53,7 @@ jobs:
with:
path: ~/.m2
key: ${{ runner.os }}-m2-${{ hashFiles('**/pom.xml') }}
restore-keys: ${{ runner.os }}-m2
- name: read versions
run: |
@ -68,12 +66,12 @@ jobs:
NEXT_SNAPSHOT="${BASE_VERSION}.${NEXT_MINOR_VERSION}-SNAPSHOT"
RELEASE_TAGNAME="nosqlbench-${RELEASE_VERSION}"
echo "::set-env name=NEXT_SNAPSHOT::${NEXT_SNAPSHOT}"
echo "::set-env name=RELEASE_VERSION::${RELEASE_VERSION}"
echo "::set-env name=RELEASE_VERSION::${RELEASE_VERSION}"
echo "::set-env name=RELEASE_TAGNAME::${RELEASE_TAGNAME}"
- name: prepare release
run: scripts/release-prepare.sh
env:
RELEASE_BRANCH_NAME: "master"
GIT_RELEASE_BOT_NAME: "nb-droid"
GIT_RELEASE_BOT_EMAIL: ${{ secrets.GIT_RELEASE_BOT_EMAIL }}
@ -90,7 +88,7 @@ jobs:
- name: perform release
run: scripts/release-perform.sh
continue-on-error: true
env:
RELEASE_BRANCH_NAME: "master"
GIT_RELEASE_BOT_NAME: "nb-droid"
GIT_RELEASE_BOT_EMAIL: ${{ secrets.GIT_RELEASE_BOT_EMAIL }}
@ -103,18 +101,18 @@ jobs:
MAVEN_REPO_SERVER_ID: ${{ secrets.MAVEN_REPO_SERVER_ID }}
MAVEN_REPO_SERVER_USERNAME: ${{ secrets.MVN_REPO_PRIVATE_REPO_USER }}
MAVEN_REPO_SERVER_PASSWORD: ${{ secrets.MVN_REPO_PRIVATE_REPO_PASSWORD }}
- name: upload artifacts
run: |
pwd
ls -l
mkdir staging && cp nb/target/nb.jar nb/target/nb staging
- uses: actions/upload-artifact@v1
with:
name: binaries
path: staging
- name: upload guidebook
run: mkdir guidebook && cp -R nb/target/guidebook guidebook
- uses: actions/upload-artifact@v1
@ -170,7 +168,7 @@ jobs:
# uses: actions/upload-release-asset@v1
# env:
# GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
# with:
# asset_path: nb/target/nb
# asset_name: nb
# asset_content_type: application/octet-stream
@ -219,6 +217,6 @@ jobs:
git push
fi

driver-cqld4/pom.xml Normal file
View File

@ -0,0 +1,115 @@
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>io.nosqlbench</groupId>
<artifactId>mvn-defaults</artifactId>
<version>3.12.100-SNAPSHOT</version>
<relativePath>../mvn-defaults</relativePath>
</parent>
<artifactId>driver-cqld4</artifactId>
<packaging>jar</packaging>
<name>${project.artifactId}</name>
<description>
A CQL ActivityType driver for http://nosqlbench.io/
</description>
<dependencies>
<!-- core dependencies -->
<dependency>
<groupId>io.nosqlbench</groupId>
<artifactId>engine-api</artifactId>
<version>3.12.100-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>com.datastax.oss</groupId>
<artifactId>java-driver-core</artifactId>
<version>4.6.0</version>
</dependency>
<dependency>
<groupId>com.datastax.oss</groupId>
<artifactId>java-driver-query-builder</artifactId>
<version>4.6.0</version>
</dependency>
<dependency>
<groupId>com.datastax.dse</groupId>
<artifactId>dse-java-driver-core</artifactId>
<version>2.4.0</version>
</dependency>
<!-- <dependency>-->
<!-- <groupId>com.datastax.dse</groupId>-->
<!-- <artifactId>dse-java-driver-core</artifactId>-->
<!-- </dependency>-->
<!-- <dependency>-->
<!-- <groupId>com.datastax.dse</groupId>-->
<!-- <artifactId>dse-java-driver-extras</artifactId>-->
<!-- </dependency>-->
<!-- <dependency>-->
<!-- <groupId>com.datastax.dse</groupId>-->
<!-- <artifactId>dse-java-driver-mapping</artifactId>-->
<!-- </dependency>-->
<!-- For CQL compression option -->
<dependency>
<groupId>org.lz4</groupId>
<artifactId>lz4-java</artifactId>
</dependency>
<!-- For CQL compression option -->
<dependency>
<groupId>org.xerial.snappy</groupId>
<artifactId>snappy-java</artifactId>
</dependency>
<!-- <dependency>-->
<!-- <groupId>io.netty</groupId>-->
<!-- <artifactId>netty-transport-native-epoll</artifactId>-->
<!-- <version>4.1.36.Final</version>-->
<!-- <classifier>linux-x86_64</classifier>-->
<!-- </dependency>-->
<!-- <dependency>-->
<!-- <groupId>org.yaml</groupId>-->
<!-- <artifactId>snakeyaml</artifactId>-->
<!-- <version>1.23</version>-->
<!-- </dependency>-->
<!-- <dependency>-->
<!-- <groupId>org.apache.commons</groupId>-->
<!-- <artifactId>commons-lang3</artifactId>-->
<!-- <version>3.7</version>-->
<!-- </dependency>-->
<!-- <dependency>-->
<!-- <groupId>org.slf4j</groupId>-->
<!-- <artifactId>slf4j-api</artifactId>-->
<!-- <version>1.7.25</version>-->
<!-- </dependency>-->
<!-- test only scope -->
<dependency>
<groupId>org.assertj</groupId>
<artifactId>assertj-core</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<scope>test</scope>
</dependency>
</dependencies>
</project>

View File

@ -0,0 +1,47 @@
package com.datastax.driver.core;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;
import java.util.OptionalLong;
import java.util.Set;
public class M3PTokenFilter {
private final TokenRange[] ranges;
private final ProtocolVersion protocolVersion;
private final CodecRegistry codecRegistry;
private final Metadata clusterMetadata;
private final Token.Factory factory;
public M3PTokenFilter(Set<TokenRange> ranges, Cluster cluster) {
protocolVersion = cluster.getConfiguration().getProtocolOptions().getProtocolVersion();
codecRegistry = cluster.getConfiguration().getCodecRegistry();
clusterMetadata = cluster.getMetadata();
factory = Token.getFactory(clusterMetadata.partitioner);
List<TokenRange> rangeList = new ArrayList<>();
for (TokenRange range : ranges) {
if (!range.getStart().getType().equals(DataType.bigint())) {
throw new RuntimeException("This filter only works with bigint valued token types");
}
rangeList.add(range);
}
this.ranges = rangeList.toArray(new TokenRange[0]);
if (this.ranges.length < 1) {
throw new RuntimeException("There were no tokens found. Please check your keyspace and cluster settings.");
}
}
public OptionalLong matches(Statement statement) {
ByteBuffer routingKey = statement.getRoutingKey(protocolVersion, codecRegistry);
Token token = factory.hash(routingKey);
for (TokenRange range : ranges) {
if (range.contains(token)) {
return OptionalLong.of((long)token.getValue());
}
}
return OptionalLong.empty();
}
}

View File

@ -0,0 +1,60 @@
package com.datastax.driver.core;
import io.nosqlbench.activitytype.cqld4.api.StatementFilter;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;
public class TokenRangeStmtFilter implements StatementFilter {
private final Metadata clusterMetadata;
private final ProtocolVersion protocolVersion;
private final CodecRegistry codecRegistry;
private final Token.Factory factory;
private TokenRange[] ranges;
public TokenRangeStmtFilter(Cluster cluster, String rangesSpec) {
clusterMetadata = cluster.getMetadata();
protocolVersion = cluster.getConfiguration().getProtocolOptions().getProtocolVersion();
codecRegistry = cluster.getConfiguration().getCodecRegistry();
factory = Token.getFactory(clusterMetadata.partitioner);
ranges = parseRanges(factory, rangesSpec);
}
private TokenRange[] parseRanges(Token.Factory factory, String rangesStr) {
String[] ranges = rangesStr.split(",");
List<TokenRange> tr = new ArrayList<>();
for (String range : ranges) {
String[] interval = range.split(":");
Token start = factory.fromString(interval[0]);
Token end = factory.fromString(interval[1]);
TokenRange tokenRange = new TokenRange(start, end, factory);
tr.add(tokenRange);
}
return tr.toArray(new TokenRange[tr.size()]);
}
@Override
public boolean matches(Statement statement) {
ByteBuffer routingKey = statement.getRoutingKey(protocolVersion, codecRegistry);
Token token = factory.hash(routingKey);
for (TokenRange range : ranges) {
if (range.contains(token)) {
return true;
}
}
return false;
}
@Override
public String toString() {
return "including token ranges: " +
Arrays.stream(ranges)
.map(String::valueOf)
.collect(Collectors.joining(","));
}
}

View File

@ -0,0 +1,71 @@
package com.datastax.driver.core;
import java.io.BufferedWriter;
import java.io.FileWriter;
import java.io.IOException;
import java.util.Comparator;
import java.util.Set;
public class TokenRangeUtil {
private final Metadata clusterMetadata;
private final ProtocolVersion protocolVersion;
private final CodecRegistry codecRegistry;
private final Token.Factory factory;
private final Cluster cluster;
public TokenRangeUtil(Cluster cluster) {
this.cluster = cluster;
clusterMetadata = cluster.getMetadata();
protocolVersion = cluster.getConfiguration().getProtocolOptions().getProtocolVersion();
codecRegistry = cluster.getConfiguration().getCodecRegistry();
factory = Token.getFactory(clusterMetadata.partitioner);
}
public Set<TokenRange> getTokenRangesFor(String keyspace, String hostaddress) {
Host host=null;
if (hostaddress.matches("\\d+")) {
int hostenum = Integer.parseInt(hostaddress);
host = clusterMetadata.getAllHosts().stream()
.sorted(Comparator.comparing(h -> h.getAddress().toString()))
.skip(hostenum)
.findFirst()
.orElseThrow();
} else if (!hostaddress.isEmpty()) {
host = clusterMetadata.getAllHosts().stream()
.filter(h -> h.getAddress().toString().replaceAll("/","").equals(hostaddress))
.findFirst()
.orElseThrow();
} else {
throw new RuntimeException("You must specify a host enum in order or a host address.");
}
return clusterMetadata.getTokenRanges(keyspace,host);
}
public void printRanges(String tokensks) {
Set<Host> hosts = clusterMetadata.getAllHosts();
for (Host host : hosts) {
String address = host.getAddress().toString().substring(1);
BufferedWriter writer = null;
try {
writer = new BufferedWriter(new FileWriter("ranges-"+address));
String ranges = getTokenRangesFor(tokensks, address).toString();
writer.write(ranges);
writer.close();
} catch (IOException e) {
throw new RuntimeException("Can't write token range files", e);
}
}
}
public M3PTokenFilter getFilterFor(Set<TokenRange> ranges) {
return new M3PTokenFilter(ranges, this.cluster);
}
}
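
A hedged usage sketch of the two helpers above; the keyspace name "baselines" and the host enumeration index "0" are hypothetical, not part of this commit:

import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.M3PTokenFilter;
import com.datastax.driver.core.Statement;
import com.datastax.driver.core.TokenRange;
import com.datastax.driver.core.TokenRangeUtil;
import java.util.OptionalLong;
import java.util.Set;

public class TokenFilterSketch {
    // Returns the owned token when the statement's routing key falls in the
    // ranges owned by the first host (enumeration index "0") for the
    // hypothetical keyspace "baselines"; empty otherwise.
    static OptionalLong ownedToken(Cluster cluster, Statement statement) {
        TokenRangeUtil util = new TokenRangeUtil(cluster);
        Set<TokenRange> ranges = util.getTokenRangesFor("baselines", "0");
        M3PTokenFilter filter = util.getFilterFor(ranges);
        return filter.matches(statement);
    }
}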

View File

@ -0,0 +1,23 @@
package io.nosqlbench.activitytype.cqld4.api;
/**
 * When an error filter allows us to see and handle an error in a specific way,
 * the ErrorResponse determines exactly how we handle it. Each level represents
 * a starting point in handling, including everything after the starting point.
 * The first enum value is the most severe response.
 */
public enum ErrorResponse {
stop("S"), // Rethrow this error to the runtime, forcing it to handle the error or stop
warn("W"), // log a warning with some details about this error
retry("R"), // resubmit this operation up to the available tries
histogram("H"), // record this metric in a histogram
count("C"), // count this metric separately
ignore("I"); // do nothing
private String symbol;
ErrorResponse(String symbol) {
this.symbol = symbol;
}
}
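
Because each level includes everything after its starting point, a handler can realize these semantics with deliberate switch fall-through. A minimal sketch under that reading; the console output and the retry flag are illustrative stand-ins, not this module's actual error handler:

import io.nosqlbench.activitytype.cqld4.api.ErrorResponse;

public class ErrorResponseSketch {
    // Cumulative handling: each case intentionally falls through to the
    // less-severe cases below it.
    static boolean handle(ErrorResponse response, long cycle, RuntimeException error) {
        boolean retryable = false;
        switch (response) {
            case stop:      throw error;                              // most severe: rethrow to the runtime
            case warn:      System.err.println("cycle " + cycle + ": " + error);
            case retry:     retryable = true;                         // fall-through is intentional
            case histogram: /* update an error histogram with the cycle here */
            case count:     /* increment a per-error counter here */
            case ignore:    break;                                    // least severe: nothing further
        }
        return retryable;
    }
}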

View File

@ -0,0 +1,18 @@
package io.nosqlbench.activitytype.cqld4.api;
import com.datastax.oss.driver.api.core.cql.ResultSet;
import com.datastax.oss.driver.api.core.cql.Statement;
/**
 * An operator interface for performing a modular action on CQL ResultSets per-cycle.
 */
public interface ResultSetCycleOperator {
/**
 * Perform an action on a result set for a specific cycle.
 *
 * @param resultSet The ResultSet for the given cycle
 * @param statement The statement for the given cycle
 * @param cycle The cycle for which the statement was submitted
 * @return A value, only meaningful when used with aggregated operators
 */
int apply(ResultSet resultSet, Statement statement, long cycle);
}
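
A minimal example implementation, assuming only the driver 4.x paging API; the class name is illustrative, not part of this commit:

import com.datastax.oss.driver.api.core.cql.ResultSet;
import com.datastax.oss.driver.api.core.cql.Statement;
import io.nosqlbench.activitytype.cqld4.api.ResultSetCycleOperator;

// Example operator: reports the number of rows available in the current
// page as the per-cycle status value.
public class PageSizeOperator implements ResultSetCycleOperator {
    @Override
    public int apply(ResultSet resultSet, Statement statement, long cycle) {
        // counts rows already fetched into the current page, without paging further
        return resultSet.getAvailableWithoutFetching();
    }
}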

View File

@ -0,0 +1,11 @@
package io.nosqlbench.activitytype.cqld4.api;
import com.datastax.oss.driver.api.core.cql.Row;
/**
 * An operator interface for consuming rows and producing an
 * int that can be used as a status code in activities.
 */
public interface RowCycleOperator {
int apply(Row row, long cycle);
}
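
A trivial example implementation; the class name and the status convention are assumptions for illustration:

import com.datastax.oss.driver.api.core.cql.Row;
import io.nosqlbench.activitytype.cqld4.api.RowCycleOperator;

// Example operator: status 0 when the first column of the row is present,
// 1 otherwise.
public class FirstColumnPresent implements RowCycleOperator {
    @Override
    public int apply(Row row, long cycle) {
        return row.getObject(0) != null ? 0 : 1;
    }
}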

View File

@ -0,0 +1,7 @@
package io.nosqlbench.activitytype.cqld4.api;
import com.datastax.oss.driver.api.core.cql.Statement;
public interface StatementFilter {
boolean matches(Statement statement);
}

View File

@ -0,0 +1,7 @@
package io.nosqlbench.activitytype.cqld4.api;
public enum VerifyApplied {
ignore,
error,
retry
}

View File

@ -0,0 +1,12 @@
package io.nosqlbench.activitytype.cqld4.codecsupport;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.TYPE)
public @interface CQLUserTypeNames {
String[] value();
}

View File

@ -0,0 +1,12 @@
package io.nosqlbench.activitytype.cqld4.codecsupport;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.TYPE)
public @interface UDTCodecClasses {
Class<? extends UDTTransformCodec>[] value();
}

View File

@ -0,0 +1,33 @@
package io.nosqlbench.activitytype.cqld4.codecsupport;
import com.datastax.driver.core.CodecRegistry;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.UserType;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.List;
import java.util.ServiceLoader;
public class UDTCodecInjector {
private final static Logger logger = LoggerFactory.getLogger(UDTCodecInjector.class);
private List<UserCodecProvider> codecProviders = new ArrayList<>();
private List<UserType> userTypes = new ArrayList<>();
public void injectUserProvidedCodecs(Session session, boolean allowAcrossKeyspaces) {
CodecRegistry registry = session.getCluster().getConfiguration().getCodecRegistry();
ServiceLoader<UserCodecProvider> codecLoader = ServiceLoader.load(UserCodecProvider.class);
for (UserCodecProvider userCodecProvider : codecLoader) {
codecProviders.add(userCodecProvider);
}
for (UserCodecProvider codecProvider : codecProviders) {
codecProvider.registerCodecsForCluster(session, allowAcrossKeyspaces);
}
}
}

View File

@ -0,0 +1,12 @@
package io.nosqlbench.activitytype.cqld4.codecsupport;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.TYPE)
public @interface UDTJavaType {
Class<?> value();
}

View File

@ -0,0 +1,22 @@
package io.nosqlbench.activitytype.cqld4.codecsupport;
import com.datastax.driver.core.TypeCodec;
import com.datastax.driver.core.UDTValue;
import com.datastax.driver.core.UserType;
import com.datastax.driver.extras.codecs.MappingCodec;
public abstract class UDTTransformCodec<T> extends MappingCodec<T,UDTValue> {
protected UserType userType;
public UDTTransformCodec(UserType userType, Class<T> javaType) {
super(TypeCodec.userType(userType), javaType);
this.userType = userType;
}
public UserType getUserType() {
return userType;
}
}

View File

@ -0,0 +1,138 @@
package io.nosqlbench.activitytype.cqld4.codecsupport;
import com.datastax.driver.core.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.lang.reflect.Constructor;
import java.util.*;
import java.util.stream.Collectors;
public abstract class UserCodecProvider {
private final static Logger logger = LoggerFactory.getLogger(UserCodecProvider.class);
public List<UDTTransformCodec> registerCodecsForCluster(
Session session,
boolean allowAcrossKeyspaces
) {
List<UDTTransformCodec> typeCodecs = new ArrayList<>();
List<KeyspaceMetadata> ksMetas = new ArrayList<>(session.getCluster().getMetadata().getKeyspaces());
for (KeyspaceMetadata keyspace : ksMetas) {
List<UDTTransformCodec> keyspaceCodecs = registerCodecsForKeyspace(session, keyspace.getName());
for (UDTTransformCodec typeCodec : keyspaceCodecs) {
if (typeCodecs.contains(typeCodec) && !allowAcrossKeyspaces) {
throw new RuntimeException("codec " + typeCodec + " could be registered" +
"in multiple keyspaces, but this is not allowed.");
}
typeCodecs.add(typeCodec);
logger.debug("Found user-provided codec for ks:" + keyspace + ", udt:" + typeCodec);
}
}
return typeCodecs;
}
public List<UDTTransformCodec> registerCodecsForKeyspace(Session session, String keyspace) {
CodecRegistry registry = session.getCluster().getConfiguration().getCodecRegistry();
List<UDTTransformCodec> codecsForKeyspace = new ArrayList<>();
KeyspaceMetadata ksMeta = session.getCluster().getMetadata().getKeyspace(keyspace);
if (ksMeta==null) {
logger.warn("No metadata for " + keyspace);
return Collections.emptyList();
}
Collection<UserType> typesInKeyspace = ksMeta.getUserTypes();
List<Class<? extends UDTTransformCodec>> providedCodecClasses = getUDTCodecClasses();
Map<UserType, Class<? extends UDTTransformCodec>> codecMap = new HashMap<>();
for (Class<? extends UDTTransformCodec> udtCodecClass : providedCodecClasses) {
List<String> targetUDTTypes = getUDTTypeNames(udtCodecClass);
for (UserType keyspaceUserType : typesInKeyspace) {
String ksTypeName = keyspaceUserType.getTypeName();
String globalTypeName = (ksTypeName.contains(".") ? ksTypeName.split("\\.",2)[1] : ksTypeName);
if (targetUDTTypes.contains(ksTypeName) || targetUDTTypes.contains(globalTypeName)) {
codecMap.put(keyspaceUserType, udtCodecClass);
}
}
}
for (UserType userType : codecMap.keySet()) {
Class<? extends UDTTransformCodec> codecClass = codecMap.get(userType);
Class<?> udtJavaType = getUDTJavaType(codecClass);
UDTTransformCodec udtCodec = instantiate(userType, codecClass, udtJavaType);
codecsForKeyspace.add(udtCodec);
registry.register(udtCodec);
logger.info("registered codec:" + udtCodec);
}
return codecsForKeyspace;
}
private UDTTransformCodec instantiate(UserType key, Class<? extends UDTTransformCodec> codecClass, Class<?> javaType) {
try {
Constructor<? extends UDTTransformCodec> ctor = codecClass.getConstructor(UserType.class, Class.class);
UDTTransformCodec typeCodec = ctor.newInstance(key, javaType);
return typeCodec;
} catch (Exception e) {
e.printStackTrace();
throw new RuntimeException(e);
}
}
private List<Class<? extends UDTTransformCodec>> getUDTCodecClasses() {
UDTCodecClasses[] annotationsByType = this.getClass().getAnnotationsByType(UDTCodecClasses.class);
List<Class<? extends UDTTransformCodec>> codecClasses = Arrays.stream(annotationsByType)
.map(UDTCodecClasses::value)
.flatMap(Arrays::stream)
.collect(Collectors.toList());
return codecClasses;
}
/**
 * Allows simple annotation of implementations of this class to use
 * {@code @CQLUserTypeNames({"type1","type2",...})}
 *
 * @param codecClass the UDTTransformCodec class which is to be inspected
 * @return The list of target UDT type names, as defined in CQL
 */
private List<String> getUDTTypeNames(Class<? extends UDTTransformCodec> codecClass) {
CQLUserTypeNames[] annotationsByType = codecClass.getAnnotationsByType(CQLUserTypeNames.class);
List<String> cqlTypeNames = new ArrayList<>();
for (CQLUserTypeNames cqlUserTypeNames : annotationsByType) {
cqlTypeNames.addAll(Arrays.asList(cqlUserTypeNames.value()));
}
return cqlTypeNames;
}
/**
 * Allows simple annotation of implementations of this class to use
 * {@code @UDTJavaType(POJOType.class)}
 *
 * @param codecClass the UDTTransformCodec class which is to be inspected
 * @return The class type of the POJO which this codec maps to and from
 */
private Class<?> getUDTJavaType(Class<? extends UDTTransformCodec> codecClass) {
UDTJavaType[] annotationsByType = codecClass.getAnnotationsByType(UDTJavaType.class);
Class<?> javaType = Arrays.stream(annotationsByType)
.map(UDTJavaType::value)
.findFirst()
.orElseThrow(
() -> new RuntimeException("Unable to find UDTJavaType annotation for " + codecClass.getCanonicalName())
);
return (Class<?>) javaType;
}
}
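
Putting the three annotations together, a hypothetical codec and provider pair could look like the following; the UDT name "address", the Address POJO, and its single "street" field are invented for illustration:

import com.datastax.driver.core.UDTValue;
import com.datastax.driver.core.UserType;
import io.nosqlbench.activitytype.cqld4.codecsupport.*;

// Hypothetical POJO mapped to a CQL UDT named "address".
class Address {
    public String street;
    public Address(String street) { this.street = street; }
}

@CQLUserTypeNames({"address"})
@UDTJavaType(Address.class)
class AddressCodec extends UDTTransformCodec<Address> {
    public AddressCodec(UserType userType, Class<Address> javaType) {
        super(userType, javaType);
    }
    @Override
    protected Address deserialize(UDTValue value) {
        return value == null ? null : new Address(value.getString("street"));
    }
    @Override
    protected UDTValue serialize(Address address) {
        return address == null ? null : getUserType().newValue().setString("street", address.street);
    }
}

// Discovered via ServiceLoader, so it must also be listed under
// META-INF/services/io.nosqlbench.activitytype.cqld4.codecsupport.UserCodecProvider
@UDTCodecClasses({AddressCodec.class})
class AddressCodecProvider extends UserCodecProvider {
}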

View File

@ -0,0 +1,168 @@
package io.nosqlbench.activitytype.cqld4.core;
import com.datastax.oss.driver.api.core.cql.ColumnDefinition;
import com.datastax.oss.driver.api.core.cql.ColumnDefinitions;
import com.datastax.oss.driver.api.core.cql.Row;
import com.datastax.oss.driver.api.core.cql.Statement;
import io.nosqlbench.engine.api.activityconfig.ParsedStmt;
import io.nosqlbench.engine.api.activityconfig.yaml.StmtDef;
import java.math.BigDecimal;
import java.net.InetAddress;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
public class CQLBindHelper {
private final static Pattern stmtToken = Pattern.compile("\\?(\\w+[-_\\d\\w]*)|\\{(\\w+[-_\\d\\w.]*)}");
public static Statement rebindUnappliedStatement(Statement statement, ColumnDefinitions defs, Row row) {
for (ColumnDefinitions.Definition def : defs) {
String name = def.getName();
def.getType();
if (!name.equals("[applied]")) {
DataType.Name typeName = def.getType().getName();
// each case must break; without the breaks, every call would fall
// through to the default case and throw
switch (typeName) {
case ASCII: // ASCII(1, String.class)
((BoundStatement) statement).bind().setString(name, row.getString(name));
break;
case VARCHAR: // VARCHAR(13, String.class)
((BoundStatement) statement).bind().setString(name, row.getString(name));
break;
case TEXT: // TEXT(10, String.class)
((BoundStatement) statement).bind().setString(name, row.getString(name));
break;
case BIGINT: // BIGINT(2, Long.class)
((BoundStatement) statement).bind().setLong(name, row.getLong(name));
break;
case COUNTER: // COUNTER(5, Long.class)
((BoundStatement) statement).bind().setLong(name, row.getLong(name));
break;
case BLOB: // BLOB(3, ByteBuffer.class)
((BoundStatement) statement).bind().setBytes(name, row.getBytes(name));
break;
case CUSTOM: // CUSTOM(0, ByteBuffer.class)
throw new RuntimeException("The diagnostic binder does not understand custom types yet.");
case BOOLEAN: // BOOLEAN(4, Boolean.class)
((BoundStatement) statement).bind().setBool(name, row.getBool(name));
break;
case DECIMAL: // DECIMAL(6, BigDecimal.class)
((BoundStatement) statement).bind().setDecimal(name, row.getDecimal(name));
break;
case DOUBLE: // DOUBLE(7, Double.class)
((BoundStatement) statement).bind().setDouble(name, row.getDouble(name));
break;
case FLOAT: // FLOAT(8, Float.class)
((BoundStatement) statement).bind().setFloat(name, row.getFloat(name));
break;
case INET: // INET(16, InetAddress.class)
((BoundStatement) statement).bind().setInet(name, row.getInet(name));
break;
case INT: // INT(9, Integer.class)
((BoundStatement) statement).bind().setInt(name, row.getInt(name));
break;
case TIMESTAMP: // TIMESTAMP(11, Date.class)
((BoundStatement) statement).bind().setTimestamp(name, row.getTimestamp(name));
break;
case UUID: // UUID(12, UUID.class)
((BoundStatement) statement).bind().setUUID(name, row.getUUID(name));
break;
case TIMEUUID: // TIMEUUID(15, UUID.class)
((BoundStatement) statement).bind().setUUID(name, row.getUUID(name));
break;
case VARINT: // VARINT(14, BigInteger.class)
((BoundStatement) statement).bind().setInt(name, row.getInt(name));
break;
case UDT: // UDT(48, UDTValue.class)
((BoundStatement) statement).bind().setUDTValue(name, row.getUDTValue(name));
break;
case TUPLE: // TUPLE(49, TupleValue.class)
((BoundStatement) statement).bind().setTupleValue(name, row.getTupleValue(name));
break;
case SMALLINT:
((BoundStatement) statement).bind().setInt(name, row.getInt(name));
break;
case TINYINT:
((BoundStatement) statement).bind().setInt(name, row.getInt(name));
break;
case DATE:
((BoundStatement) statement).bind().setDate(name, row.getDate(name));
break;
case TIME:
((BoundStatement) statement).bind().setTime(name, row.getTime(name));
break;
default:
throw new RuntimeException("Unrecognized type:" + typeName);
}
}
}
return statement;
}
public static BoundStatement bindStatement(Statement statement, String name, Object value, DataType.Name typeName) {
switch (typeName) {
case ASCII: // ASCII(1, String.class)
return ((BoundStatement) statement).bind().setString(name, (String) value);
case VARCHAR: // VARCHAR(13, String.class)
return ((BoundStatement) statement).bind().setString(name, (String) value);
case TEXT: // TEXT(10, String.class)
return ((BoundStatement) statement).bind().setString(name, (String) value);
case BIGINT: // BIGINT(2, Long.class)
return ((BoundStatement) statement).bind().setLong(name, (long) value);
case COUNTER: // COUNTER(5, Long.class)
return ((BoundStatement) statement).bind().setLong(name, (long) value);
case BLOB: // BLOB(3, ByteBuffer.class)
return ((BoundStatement) statement).bind().setBytes(name, (ByteBuffer) value);
case CUSTOM: // CUSTOM(0, ByteBuffer.class)
throw new RuntimeException("The diagnostic binder does not understand custom types yet.");
case BOOLEAN: // BOOLEAN(4, Boolean.class)
return ((BoundStatement) statement).bind().setBool(name, (boolean) value);
case DECIMAL: // DECIMAL(6, BigDecimal.class)
return ((BoundStatement) statement).bind().setDecimal(name, (BigDecimal) value);
case DOUBLE: // DOUBLE(7, Double.class)
return ((BoundStatement) statement).bind().setDouble(name, (double) value);
case FLOAT: // FLOAT(8, Float.class)
return ((BoundStatement) statement).bind().setFloat(name, (float) value);
case INET: // INET(16, InetAddress.class)
return ((BoundStatement) statement).bind().setInet(name, (InetAddress) value);
case INT: // INT(9, Integer.class)
return ((BoundStatement) statement).bind().setInt(name, (int) value);
case TIMESTAMP: // TIMESTAMP(11, Date.class)
return ((BoundStatement) statement).bind().setTimestamp(name, (Date) value);
case UUID: // UUID(12, UUID.class)
return ((BoundStatement) statement).bind().setUUID(name, (UUID) value);
case TIMEUUID: // TIMEUUID(15, UUID.class)
return ((BoundStatement) statement).bind().setUUID(name, (UUID) value);
case VARINT: // VARINT(14, BigInteger.class)
return ((BoundStatement) statement).bind().setInt(name, (int) value);
case UDT: // UDT(48, UDTValue.class)
return ((BoundStatement) statement).bind().setUDTValue(name, (UDTValue) value);
case TUPLE: // TUPLE(49, TupleValue.class
return ((BoundStatement) statement).bind().setTupleValue(name, (TupleValue) value);
case SMALLINT:
return ((BoundStatement) statement).bind().setInt(name, (int) value);
case TINYINT:
return ((BoundStatement) statement).bind().setInt(name, (int) value);
case DATE:
return ((BoundStatement) statement).bind().setDate(name, (LocalDate) value);
case TIME:
return ((BoundStatement) statement).bind().setTime(name, (long) value);
default:
throw new RuntimeException("Unrecognized type:" + typeName);
}
}
public static Map<String, String> parseAndGetSpecificBindings(StmtDef stmtDef, ParsedStmt parsed) {
List<String> spans = new ArrayList<>();
String statement = stmtDef.getStmt();
Set<String> extraBindings = new HashSet<>(stmtDef.getBindings().keySet());
Map<String, String> specificBindings = new LinkedHashMap<>();
Matcher m = stmtToken.matcher(statement);
int lastMatch = 0;
String remainder = "";
while (m.find(lastMatch)) {
String pre = statement.substring(lastMatch, m.start());
String form1 = m.group(1);
String form2 = m.group(2);
String tokenName = (form1 != null && !form1.isEmpty()) ? form1 : form2;
lastMatch = m.end();
spans.add(pre);
if (extraBindings.contains(tokenName)) {
if (specificBindings.get(tokenName) != null) {
String postfix = UUID.randomUUID().toString();
specificBindings.put(tokenName + postfix, stmtDef.getBindings().get(tokenName));
} else {
specificBindings.put(tokenName, stmtDef.getBindings().get(tokenName));
}
}
}
return specificBindings;
}
}
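
The stmtToken pattern above recognizes two anchor forms, ?name and {name}. A small standalone demonstration with an invented statement:

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class TokenDemo {
    public static void main(String[] args) {
        Pattern stmtToken = Pattern.compile("\\?(\\w+[-_\\d\\w]*)|\\{(\\w+[-_\\d\\w.]*)}");
        Matcher m = stmtToken.matcher("select * from ks.users where id=?user_id and status={status}");
        while (m.find()) {
            // group(1) captures the ?name form, group(2) the {name} form
            System.out.println(m.group(1) != null ? m.group(1) : m.group(2));
        }
        // prints: user_id, then status
    }
}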

View File

@ -0,0 +1,217 @@
package io.nosqlbench.activitytype.cqld4.core;
import com.datastax.driver.core.*;
import com.datastax.driver.core.policies.*;
import io.netty.util.HashedWheelTimer;
import io.nosqlbench.nb.api.errors.BasicError;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.net.InetSocketAddress;
import java.util.*;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
public class CQLOptions {
private final static Logger logger = LoggerFactory.getLogger(CQLOptions.class);
private final static Pattern CORE_AND_MAX_RQ_PATTERN = Pattern.compile("(?<core>\\d+)(:(?<max>\\d+)(:(?<rq>\\d+))?)?(,(?<rcore>\\d+)(:(?<rmax>\\d+)(:(?<rrq>\\d+))?)?)?(,?heartbeat_interval_s:(?<heartbeatinterval>\\d+))?(,?idle_timeout_s:(?<idletimeout>\\d+))?(,?pool_timeout_ms:(?<pooltimeout>\\d+))?");
private final static Pattern PERCENTILE_EAGER_PATTERN = Pattern.compile("^p(?<pctile>[^:]+)(:(?<executions>\\d+))?(:(?<tracked>\\d+)ms)?$");
private final static Pattern CONSTANT_EAGER_PATTERN = Pattern.compile("^((?<msThreshold>\\d++)ms)(:(?<executions>\\d+))?$");
private static ConstantSpeculativeExecutionPolicy constantPolicy(int threshold, int executions) {
return new ConstantSpeculativeExecutionPolicy(threshold, executions);
}
private static SpeculativeExecutionPolicy percentilePolicy(long tracked, double threshold, int executions) {
PerHostPercentileTracker tracker = newTracker(tracked);
return new PercentileSpeculativeExecutionPolicy(tracker, threshold, executions);
}
private static PerHostPercentileTracker newTracker(long millis) {
return PerHostPercentileTracker.builder(millis).build();
}
public static PoolingOptions poolingOptionsFor(String spec) {
Matcher matcher = CORE_AND_MAX_RQ_PATTERN.matcher(spec);
if (matcher.matches()) {
PoolingOptions poolingOptions = new PoolingOptions();
Optional.ofNullable(matcher.group("core")).map(Integer::valueOf)
.ifPresent(core -> poolingOptions.setCoreConnectionsPerHost(HostDistance.LOCAL, core));
Optional.ofNullable(matcher.group("max")).map(Integer::valueOf)
.ifPresent(max -> poolingOptions.setMaxConnectionsPerHost(HostDistance.LOCAL, max));
Optional.ofNullable(matcher.group("rq")).map(Integer::valueOf)
.ifPresent(rq -> poolingOptions.setMaxRequestsPerConnection(HostDistance.LOCAL, rq));
Optional.ofNullable(matcher.group("rcore")).map(Integer::valueOf)
.ifPresent(rcore -> poolingOptions.setCoreConnectionsPerHost(HostDistance.REMOTE, rcore));
Optional.ofNullable(matcher.group("rmax")).map(Integer::valueOf)
.ifPresent(rmax -> poolingOptions.setMaxConnectionsPerHost(HostDistance.REMOTE, rmax));
Optional.ofNullable(matcher.group("rrq")).map(Integer::valueOf)
.ifPresent(rrq -> poolingOptions.setMaxRequestsPerConnection(HostDistance.REMOTE, rrq));
Optional.ofNullable(matcher.group("heartbeatinterval")).map(Integer::valueOf)
.ifPresent(poolingOptions::setHeartbeatIntervalSeconds);
Optional.ofNullable(matcher.group("idletimeout")).map(Integer::valueOf)
.ifPresent(poolingOptions::setIdleTimeoutSeconds);
Optional.ofNullable(matcher.group("pooltimeout")).map(Integer::valueOf)
.ifPresent(poolingOptions::setPoolTimeoutMillis);
return poolingOptions;
}
throw new RuntimeException("No pooling options could be parsed from spec: " + spec);
}
public static RetryPolicy retryPolicyFor(String spec) {
Set<String> retryBehaviors = Arrays.stream(spec.split(",")).map(String::toLowerCase).collect(Collectors.toSet());
RetryPolicy retryPolicy = DefaultRetryPolicy.INSTANCE;
if (retryBehaviors.contains("default")) {
return retryPolicy;
} // add other mutually-exclusive behaviors here with checks, if we want to extend beyond "default"
if (retryBehaviors.contains("logging")) {
retryPolicy = new LoggingRetryPolicy(retryPolicy);
}
return retryPolicy;
}
public static ReconnectionPolicy reconnectPolicyFor(String spec) {
if (spec.startsWith("exponential(")) {
String argsString = spec.substring(12);
String[] args = argsString.substring(0, argsString.length() - 1).split("[,;]");
if (args.length != 2) {
throw new BasicError("Invalid reconnectionpolicy, try reconnectionpolicy=exponential(<baseDelay>, <maxDelay>)");
}
long baseDelay = Long.parseLong(args[0]);
long maxDelay = Long.parseLong(args[1]);
return new ExponentialReconnectionPolicy(baseDelay, maxDelay);
} else if (spec.startsWith("constant(")) {
String argsString = spec.substring(9);
long constantDelayMs = Long.parseLong(argsString.substring(0, argsString.length() - 1));
return new ConstantReconnectionPolicy(constantDelayMs);
}
throw new BasicError("Invalid reconnectionpolicy, try reconnectionpolicy=exponential(<baseDelay>, <maxDelay>) or constant(<constantDelayMs>)");
}
public static SocketOptions socketOptionsFor(String spec) {
String[] assignments = spec.split("[,;]");
Map<String, String> values = new HashMap<>();
for (String assignment : assignments) {
String[] namevalue = assignment.split("[:=]", 2);
String name = namevalue[0];
String value = namevalue[1];
values.put(name, value);
}
SocketOptions options = new SocketOptions();
Optional.ofNullable(values.get("read_timeout_ms")).map(Integer::parseInt).ifPresent(
options::setReadTimeoutMillis
);
Optional.ofNullable(values.get("connect_timeout_ms")).map(Integer::parseInt).ifPresent(
options::setConnectTimeoutMillis
);
Optional.ofNullable(values.get("keep_alive")).map(Boolean::parseBoolean).ifPresent(
options::setKeepAlive
);
Optional.ofNullable(values.get("reuse_address")).map(Boolean::parseBoolean).ifPresent(
options::setReuseAddress
);
Optional.ofNullable(values.get("so_linger")).map(Integer::parseInt).ifPresent(
options::setSoLinger
);
Optional.ofNullable(values.get("tcp_no_delay")).map(Boolean::parseBoolean).ifPresent(
options::setTcpNoDelay
);
Optional.ofNullable(values.get("receive_buffer_size")).map(Integer::parseInt).ifPresent(
options::setReceiveBufferSize
);
Optional.ofNullable(values.get("send_buffer_size")).map(Integer::parseInt).ifPresent(
options::setSendBufferSize
);
return options;
}
public static SpeculativeExecutionPolicy defaultSpeculativePolicy() {
PerHostPercentileTracker tracker = PerHostPercentileTracker
.builder(15000)
.build();
PercentileSpeculativeExecutionPolicy defaultSpecPolicy =
new PercentileSpeculativeExecutionPolicy(tracker, 99.0, 5);
return defaultSpecPolicy;
}
public static SpeculativeExecutionPolicy speculativeFor(String spec) {
Matcher pctileMatcher = PERCENTILE_EAGER_PATTERN.matcher(spec);
Matcher constantMatcher = CONSTANT_EAGER_PATTERN.matcher(spec);
if (pctileMatcher.matches()) {
double pctile = Double.valueOf(pctileMatcher.group("pctile"));
if (pctile > 100.0 || pctile < 0.0) {
throw new RuntimeException("pctile must be between 0.0 and 100.0");
}
String executionsSpec = pctileMatcher.group("executions");
String trackedSpec = pctileMatcher.group("tracked");
int executions = (executionsSpec != null && !executionsSpec.isEmpty()) ? Integer.valueOf(executionsSpec) : 5;
int tracked = (trackedSpec != null && !trackedSpec.isEmpty()) ? Integer.valueOf(trackedSpec) : 15000;
logger.debug("speculative: Creating new percentile tracker policy from spec '" + spec + "'");
return percentilePolicy(tracked, pctile, executions);
} else if (constantMatcher.matches()) {
int threshold = Integer.valueOf(constantMatcher.group("msThreshold"));
String executionsSpec = constantMatcher.group("executions");
int executions = (executionsSpec != null && !executionsSpec.isEmpty()) ? Integer.valueOf(executionsSpec) : 5;
logger.debug("speculative: Creating new constant policy from spec '" + spec + "'");
return constantPolicy(threshold, executions);
} else {
throw new RuntimeException("Unable to parse pattern for speculative option: " + spec + ", it must be in " +
"an accepted form, like p99.0:5:15000, or p99.0:5, or 5000ms:5");
}
}
public static LoadBalancingPolicy whitelistFor(String s, LoadBalancingPolicy innerPolicy) {
String[] addrSpecs = s.split(",");
List<InetSocketAddress> sockAddrs = Arrays.stream(addrSpecs)
.map(CQLOptions::toSocketAddr)
.collect(Collectors.toList());
if (innerPolicy == null) {
innerPolicy = new RoundRobinPolicy();
}
return new WhiteListPolicy(innerPolicy, sockAddrs);
}
public static NettyOptions withTickDuration(String tick) {
logger.info("Cluster builder using custom tick duration value for HashedWheelTimer: " + tick + " milliseconds");
int tickDuration = Integer.valueOf(tick);
return new NettyOptions() {
public io.netty.util.Timer timer(ThreadFactory threadFactory) {
return new HashedWheelTimer(
threadFactory, tickDuration, TimeUnit.MILLISECONDS);
}
};
}
private static InetSocketAddress toSocketAddr(String addr) {
String[] addrs = addr.split(":", 2);
String inetHost = addrs[0];
String inetPort = (addrs.length == 2) ? addrs[1] : "9042";
return new InetSocketAddress(inetHost, Integer.valueOf(inetPort));
}
public static ProtocolOptions.Compression withCompression(String compspec) {
try {
return ProtocolOptions.Compression.valueOf(compspec);
} catch (IllegalArgumentException iae) {
throw new RuntimeException("Compression option '" + compspec + "' was specified, but only " +
Arrays.toString(ProtocolOptions.Compression.values()) + " are available.");
}
}
}
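
Going by the patterns above, spec strings along these lines should parse; the values are illustrative only:

import com.datastax.driver.core.PoolingOptions;
import com.datastax.driver.core.SocketOptions;
import com.datastax.driver.core.policies.ReconnectionPolicy;
import com.datastax.driver.core.policies.SpeculativeExecutionPolicy;
import io.nosqlbench.activitytype.cqld4.core.CQLOptions;

public class CQLOptionsDemo {
    public static void main(String[] args) {
        // core:max:requests for LOCAL, then REMOTE, then pool tuning keys
        PoolingOptions pool = CQLOptions.poolingOptionsFor("5:10:1024,2:4:256,heartbeat_interval_s:30");
        SpeculativeExecutionPolicy bypct = CQLOptions.speculativeFor("p99.0:5:15000ms"); // percentile form
        SpeculativeExecutionPolicy byms = CQLOptions.speculativeFor("5000ms:5");         // constant form
        ReconnectionPolicy recon = CQLOptions.reconnectPolicyFor("exponential(100,30000)");
        SocketOptions sock = CQLOptions.socketOptionsFor("read_timeout_ms=20000,tcp_no_delay=true");
        System.out.println(pool + " " + bypct + " " + byms + " " + recon + " " + sock);
    }
}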

View File

@ -0,0 +1,359 @@
package io.nosqlbench.activitytype.cqld4.core;
import com.codahale.metrics.Timer;
import com.datastax.oss.driver.api.core.cql.ResultSet;
import com.datastax.oss.driver.api.core.cql.Statement;
import io.nosqlbench.activitytype.cqld4.api.ResultSetCycleOperator;
import io.nosqlbench.activitytype.cqld4.api.RowCycleOperator;
import io.nosqlbench.activitytype.cqld4.api.StatementFilter;
import io.nosqlbench.activitytype.cqld4.errorhandling.ErrorStatus;
import io.nosqlbench.activitytype.cqld4.errorhandling.HashedCQLErrorHandler;
import io.nosqlbench.activitytype.cqld4.errorhandling.exceptions.CQLCycleWithStatementException;
import io.nosqlbench.activitytype.cqld4.errorhandling.exceptions.ChangeUnappliedCycleException;
import io.nosqlbench.activitytype.cqld4.errorhandling.exceptions.MaxTriesExhaustedException;
import io.nosqlbench.activitytype.cqld4.errorhandling.exceptions.UnexpectedPagingException;
import io.nosqlbench.activitytype.cqld4.statements.core.ReadyCQLStatement;
import io.nosqlbench.engine.api.activityapi.core.ActivityDefObserver;
import io.nosqlbench.engine.api.activityapi.core.MultiPhaseAction;
import io.nosqlbench.engine.api.activityapi.core.SyncAction;
import io.nosqlbench.engine.api.activityapi.planning.OpSequence;
import io.nosqlbench.engine.api.activityimpl.ActivityDef;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.List;
import java.util.concurrent.TimeUnit;
@SuppressWarnings("Duplicates")
public class CqlAction implements SyncAction, MultiPhaseAction, ActivityDefObserver {
private final static Logger logger = LoggerFactory.getLogger(CqlAction.class);
private final int slot;
private final CqlActivity cqlActivity;
private final ActivityDef activityDef;
private List<RowCycleOperator> rowOps;
private List<ResultSetCycleOperator> cycleOps;
private List<StatementModifier> modifiers;
private StatementFilter statementFilter;
private OpSequence<ReadyCQLStatement> sequencer;
private int maxTries = 10; // how many times a statement will be attempted per cycle before giving up
private HashedCQLErrorHandler ebdseErrorHandler;
private int pagesFetched = 0;
private long totalRowsFetchedForQuery = 0L;
private ResultSet pagingResultSet;
private Statement pagingStatement;
private ReadyCQLStatement pagingReadyStatement;
private boolean showcql;
private long nanoStartTime;
private long retryDelay;
private long maxRetryDelay;
private boolean retryReplace;
public CqlAction(ActivityDef activityDef, int slot, CqlActivity cqlActivity) {
this.activityDef = activityDef;
this.cqlActivity = cqlActivity;
this.slot = slot;
onActivityDefUpdate(activityDef);
}
@Override
public void init() {
onActivityDefUpdate(activityDef);
this.sequencer = cqlActivity.getOpSequencer();
}
@Override
public int runCycle(long value) {
// In this activity type, we use the same phase
// logic for the initial phase (runCycle(...))
// as well as subsequent phases.
return runPhase(value);
}
public int runPhase(long cycleValue) {
HashedCQLErrorHandler.resetThreadStatusCode();
if (pagingResultSet == null) {
totalRowsFetchedForQuery = 0L;
Statement statement;
ResultSetFuture resultSetFuture;
ReadyCQLStatement readyCQLStatement;
int tries = 0;
try (Timer.Context bindTime = cqlActivity.bindTimer.time()) {
readyCQLStatement = sequencer.get(cycleValue);
statement = readyCQLStatement.bind(cycleValue);
if (statementFilter != null) {
if (!statementFilter.matches(statement)) {
cqlActivity.skippedTokensHisto.update(cycleValue);
return 0;
}
}
if (modifiers != null) {
for (StatementModifier modifier : modifiers) {
statement = modifier.modify(statement, cycleValue);
}
}
if (showcql) {
logger.info("CQL(cycle=" + cycleValue + "):\n" + readyCQLStatement.getQueryString(cycleValue));
}
}
nanoStartTime = System.nanoTime();
while (tries < maxTries) {
tries++;
if (tries > maxTries) {
throw new MaxTriesExhaustedException(cycleValue, maxTries);
}
if (tries > 1) {
try (Timer.Context retryTime = cqlActivity.retryDelayTimer.time()) {
Thread.sleep(Math.min((retryDelay << tries) / 1000, maxRetryDelay / 1000));
} catch (InterruptedException ignored) {
}
}
try (Timer.Context executeTime = cqlActivity.executeTimer.time()) {
resultSetFuture = cqlActivity.getSession().executeAsync(statement);
}
Timer.Context resultTime = cqlActivity.resultTimer.time();
try {
ResultSet resultSet = resultSetFuture.getUninterruptibly();
if (cycleOps != null) {
for (ResultSetCycleOperator cycleOp : cycleOps) {
cycleOp.apply(resultSet, statement, cycleValue);
}
}
ResultSetCycleOperator[] perStmtRSOperators = readyCQLStatement.getResultSetOperators();
if (perStmtRSOperators != null) {
for (ResultSetCycleOperator perStmtRSOperator : perStmtRSOperators) {
perStmtRSOperator.apply(resultSet, statement, cycleValue);
}
}
if (!resultSet.wasApplied()) {
Row row = resultSet.one();
ColumnDefinitions defs = row.getColumnDefinitions();
if (retryReplace) {
statement = CQLBindHelper.rebindUnappliedStatement(statement, defs, row);
}
logger.trace(readyCQLStatement.getQueryString(cycleValue));
// To make exception handling logic flow more uniformly
throw new ChangeUnappliedCycleException(
cycleValue, resultSet, readyCQLStatement.getQueryString(cycleValue)
);
}
int pageRows = resultSet.getAvailableWithoutFetching();
int remaining = pageRows;
RowCycleOperator[] perStmtRowOperators = readyCQLStatement.getRowCycleOperators();
if (rowOps == null && perStmtRowOperators==null) {
while (remaining-- > 0) {
Row row = resultSet.one();
// NOTE: This has been replaced by:
// params:
// rowops: savevars
// You must add this to the YAML for statements that are meant to capture vars
// HashMap<String, Object> bindings = SharedState.tl_ObjectMap.get();
// for (ColumnDefinitions.Definition cdef : row.getColumnDefinitions()) {
// bindings.put(cdef.getName(), row.getObject(cdef.getName()));
// }
//
}
} else {
while (remaining-- > 0) {
Row onerow = resultSet.one();
if (rowOps!=null) {
for (RowCycleOperator rowOp : rowOps) {
rowOp.apply(onerow, cycleValue);
}
}
if (perStmtRowOperators!=null) {
for (RowCycleOperator rowOp : perStmtRowOperators) {
rowOp.apply(onerow, cycleValue);
}
}
}
}
cqlActivity.rowsCounter.mark(pageRows);
totalRowsFetchedForQuery += pageRows;
if (resultSet.isFullyFetched()) {
long resultNanos = System.nanoTime() - nanoStartTime;
cqlActivity.resultSuccessTimer.update(resultNanos, TimeUnit.NANOSECONDS);
cqlActivity.resultSetSizeHisto.update(totalRowsFetchedForQuery);
readyCQLStatement.onSuccess(cycleValue, resultNanos, totalRowsFetchedForQuery);
} else {
if (cqlActivity.maxpages > 1) {
pagingResultSet = resultSet;
pagingStatement = statement;
pagingReadyStatement = readyCQLStatement;
pagesFetched = 1;
} else {
throw new UnexpectedPagingException(
cycleValue,
resultSet,
readyCQLStatement.getQueryString(cycleValue),
1,
cqlActivity.maxpages,
cqlActivity.getSession().getCluster().getConfiguration().getQueryOptions().getFetchSize()
);
}
}
break; // This is normal termination of this loop, when retries aren't needed
} catch (Exception e) {
long resultNanos = resultTime.stop();
resultTime = null;
readyCQLStatement.onError(cycleValue, resultNanos, e);
CQLCycleWithStatementException cqlCycleException = new CQLCycleWithStatementException(cycleValue, resultNanos, e, readyCQLStatement);
ErrorStatus errorStatus = ebdseErrorHandler.handleError(cycleValue, cqlCycleException);
if (!errorStatus.isRetryable()) {
cqlActivity.triesHisto.update(tries);
return errorStatus.getResultCode();
}
} finally {
if (resultTime != null) {
resultTime.stop();
}
}
}
cqlActivity.triesHisto.update(tries);
} else {
int tries = 0;
while (tries < maxTries) {
tries++;
if (tries > maxTries) {
throw new MaxTriesExhaustedException(cycleValue, maxTries);
}
ListenableFuture<ResultSet> pagingFuture;
try (Timer.Context pagingTime = cqlActivity.pagesTimer.time()) {
try (Timer.Context executeTime = cqlActivity.executeTimer.time()) {
pagingFuture = pagingResultSet.fetchMoreResults();
}
Timer.Context resultTime = cqlActivity.resultTimer.time();
try {
ResultSet resultSet = pagingFuture.get();
if (cycleOps != null) {
for (ResultSetCycleOperator cycleOp : cycleOps) {
cycleOp.apply(resultSet, pagingStatement, cycleValue);
}
}
ResultSetCycleOperator[] perStmtRSOperators = pagingReadyStatement.getResultSetOperators();
if (perStmtRSOperators != null) {
for (ResultSetCycleOperator perStmtRSOperator : perStmtRSOperators) {
perStmtRSOperator.apply(resultSet, pagingStatement, cycleValue);
}
}
pagesFetched++;
int pageRows = resultSet.getAvailableWithoutFetching();
int remaining = pageRows;
if (rowOps == null) {
while (remaining-- > 0) {
resultSet.one();
}
} else {
while (remaining-- > 0) {
for (RowCycleOperator rowOp : rowOps) {
rowOp.apply(resultSet.one(), cycleValue);
}
}
}
cqlActivity.rowsCounter.mark(pageRows);
totalRowsFetchedForQuery += pageRows;
if (resultSet.isFullyFetched()) {
long nanoTime = System.nanoTime() - nanoStartTime;
cqlActivity.resultSuccessTimer.update(nanoTime, TimeUnit.NANOSECONDS);
cqlActivity.resultSetSizeHisto.update(totalRowsFetchedForQuery);
pagingReadyStatement.onSuccess(cycleValue, nanoTime, totalRowsFetchedForQuery);
pagingResultSet = null;
} else {
if (pagesFetched > cqlActivity.maxpages) {
throw new UnexpectedPagingException(
cycleValue,
pagingResultSet,
pagingReadyStatement.getQueryString(cycleValue),
pagesFetched,
cqlActivity.maxpages,
cqlActivity.getSession().getCluster().getConfiguration().getQueryOptions().getFetchSize()
);
}
pagingResultSet = resultSet;
}
break; // This is normal termination of this loop, when retries aren't needed
} catch (Exception e) {
long resultNanos = resultTime.stop();
resultTime = null;
pagingReadyStatement.onError(cycleValue, resultNanos, e);
CQLCycleWithStatementException cqlCycleException = new CQLCycleWithStatementException(cycleValue, resultNanos, e, pagingReadyStatement);
ErrorStatus errorStatus = ebdseErrorHandler.handleError(cycleValue, cqlCycleException);
if (!errorStatus.isRetryable()) {
cqlActivity.triesHisto.update(tries);
return errorStatus.getResultCode();
}
} finally {
if (resultTime != null) {
resultTime.stop();
}
}
}
}
cqlActivity.triesHisto.update(tries);
}
return 0;
}
@Override
public boolean incomplete() {
return pagingResultSet != null;
}
@Override
public void onActivityDefUpdate(ActivityDef activityDef) {
this.maxTries = cqlActivity.getMaxTries();
this.retryDelay = cqlActivity.getRetryDelay();
this.maxRetryDelay = cqlActivity.getMaxRetryDelay();
this.retryReplace = cqlActivity.isRetryReplace();
this.showcql = cqlActivity.isShowCql();
this.ebdseErrorHandler = cqlActivity.getCqlErrorHandler();
this.statementFilter = cqlActivity.getStatementFilter();
this.rowOps = cqlActivity.getRowCycleOperators();
this.cycleOps = cqlActivity.getResultSetCycleOperators();
this.modifiers = cqlActivity.getStatementModifiers();
}
protected CqlActivity getCqlActivity() {
return cqlActivity;
}
}

View File

@ -0,0 +1,27 @@
package io.nosqlbench.activitytype.cqld4.core;
import io.nosqlbench.engine.api.activityapi.core.Action;
import io.nosqlbench.engine.api.activityapi.core.ActionDispenser;
public class CqlActionDispenser implements ActionDispenser {
public CqlActivity getCqlActivity() {
return cqlActivity;
}
private CqlActivity cqlActivity;
public CqlActionDispenser(CqlActivity activityContext) {
this.cqlActivity = activityContext;
}
public Action getAction(int slot) {
long async = cqlActivity.getActivityDef().getParams().getOptionalLong("async").orElse(0L);
if (async > 0) {
return new CqlAsyncAction(cqlActivity, slot);
} else {
return new CqlAction(cqlActivity.getActivityDef(), slot, cqlActivity);
}
}
}

View File

@ -0,0 +1,664 @@
package io.nosqlbench.activitytype.cqld4.core;
import com.codahale.metrics.Histogram;
import com.codahale.metrics.Meter;
import com.codahale.metrics.Timer;
import com.datastax.driver.core.*;
import com.datastax.oss.driver.api.core.session.Session;
import io.nosqlbench.activitytype.cqld4.codecsupport.UDTCodecInjector;
import com.datastax.driver.core.TokenRangeStmtFilter;
import io.nosqlbench.activitytype.cqld4.api.ErrorResponse;
import io.nosqlbench.activitytype.cqld4.api.ResultSetCycleOperator;
import io.nosqlbench.activitytype.cqld4.api.RowCycleOperator;
import io.nosqlbench.activitytype.cqld4.api.StatementFilter;
import io.nosqlbench.activitytype.cqld4.errorhandling.NBCycleErrorHandler;
import io.nosqlbench.activitytype.cqld4.errorhandling.HashedCQLErrorHandler;
import io.nosqlbench.activitytype.cqld4.statements.binders.CqlBinderTypes;
import io.nosqlbench.activitytype.cqld4.statements.rowoperators.RowCycleOperators;
import io.nosqlbench.activitytype.cqld4.statements.rowoperators.Save;
import io.nosqlbench.activitytype.cqld4.statements.rsoperators.ResultSetCycleOperators;
import io.nosqlbench.activitytype.cqld4.statements.rsoperators.TraceLogger;
import io.nosqlbench.activitytype.cqld4.statements.core.*;
import io.nosqlbench.engine.api.activityapi.core.Activity;
import io.nosqlbench.engine.api.activityapi.core.ActivityDefObserver;
import io.nosqlbench.engine.api.activityapi.planning.OpSequence;
import io.nosqlbench.engine.api.activityapi.planning.SequencePlanner;
import io.nosqlbench.engine.api.activityapi.planning.SequencerType;
import io.nosqlbench.engine.api.activityconfig.ParsedStmt;
import io.nosqlbench.engine.api.activityconfig.StatementsLoader;
import io.nosqlbench.engine.api.activityconfig.rawyaml.RawStmtDef;
import io.nosqlbench.engine.api.activityconfig.rawyaml.RawStmtsBlock;
import io.nosqlbench.engine.api.activityconfig.rawyaml.RawStmtsDoc;
import io.nosqlbench.engine.api.activityconfig.rawyaml.RawStmtsDocList;
import io.nosqlbench.engine.api.activityconfig.yaml.StmtDef;
import io.nosqlbench.engine.api.activityconfig.yaml.StmtsDocList;
import io.nosqlbench.engine.api.activityimpl.ActivityDef;
import io.nosqlbench.engine.api.activityimpl.ParameterMap;
import io.nosqlbench.engine.api.activityimpl.SimpleActivity;
import io.nosqlbench.engine.api.metrics.ActivityMetrics;
import io.nosqlbench.engine.api.metrics.ExceptionCountMetrics;
import io.nosqlbench.engine.api.metrics.ExceptionHistoMetrics;
import io.nosqlbench.engine.api.util.SimpleConfig;
import io.nosqlbench.engine.api.templating.StrInterpolator;
import io.nosqlbench.engine.api.util.TagFilter;
import io.nosqlbench.engine.api.util.Unit;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.FileWriter;
import java.io.IOException;
import java.io.Writer;
import java.nio.charset.StandardCharsets;
import java.util.*;
@SuppressWarnings("Duplicates")
public class CqlActivity extends SimpleActivity implements Activity, ActivityDefObserver {
private final static Logger logger = LoggerFactory.getLogger(CqlActivity.class);
private final ExceptionCountMetrics exceptionCountMetrics;
private final ExceptionHistoMetrics exceptionHistoMetrics;
private final ActivityDef activityDef;
private final Map<String, Writer> namedWriters = new HashMap<>();
protected List<StmtDef> stmts;
Timer retryDelayTimer;
Timer bindTimer;
Timer executeTimer;
Timer resultTimer;
Timer resultSuccessTimer;
Timer pagesTimer;
Histogram triesHisto;
Histogram skippedTokensHisto;
Histogram resultSetSizeHisto;
int maxpages;
Meter rowsCounter;
private HashedCQLErrorHandler errorHandler;
private OpSequence<ReadyCQLStatement> opsequence;
private Session session;
private int maxTries;
private StatementFilter statementFilter;
private Boolean showcql;
private List<RowCycleOperator> rowCycleOperators;
private List<ResultSetCycleOperator> resultSetCycleOperators;
private List<StatementModifier> statementModifiers;
private Long maxTotalOpsInFlight;
private long retryDelay;
private long maxRetryDelay;
private boolean retryReplace;
private String pooling;
public CqlActivity(ActivityDef activityDef) {
super(activityDef);
this.activityDef = activityDef;
exceptionCountMetrics = new ExceptionCountMetrics(activityDef);
exceptionHistoMetrics = new ExceptionHistoMetrics(activityDef);
}
private void registerCodecs(Session session) {
UDTCodecInjector injector = new UDTCodecInjector();
injector.injectUserProvidedCodecs(session, true);
}
@Override
public synchronized void initActivity() {
logger.debug("initializing activity: " + this.activityDef.getAlias());
session = getSession();
if (getParams().getOptionalBoolean("usercodecs").orElse(false)) {
registerCodecs(session);
}
initSequencer();
setDefaultsFromOpSequence(this.opsequence);
retryDelayTimer = ActivityMetrics.timer(activityDef, "retry-delay");
bindTimer = ActivityMetrics.timer(activityDef, "bind");
executeTimer = ActivityMetrics.timer(activityDef, "execute");
resultTimer = ActivityMetrics.timer(activityDef, "result");
triesHisto = ActivityMetrics.histogram(activityDef, "tries");
pagesTimer = ActivityMetrics.timer(activityDef, "pages");
rowsCounter = ActivityMetrics.meter(activityDef, "rows");
skippedTokensHisto = ActivityMetrics.histogram(activityDef, "skipped-tokens");
resultSuccessTimer = ActivityMetrics.timer(activityDef, "result-success");
resultSetSizeHisto = ActivityMetrics.histogram(activityDef, "resultset-size");
onActivityDefUpdate(activityDef);
logger.debug("activity fully initialized: " + this.activityDef.getAlias());
}
public synchronized Session getSession() {
if (session == null) {
session = CQLSessionCache.get().getSession(this.getActivityDef());
}
return session;
}
private void initSequencer() {
Session session = getSession();
Map<String,Object> fconfig = Map.of("cluster",session.getCluster());
SequencerType sequencerType = SequencerType.valueOf(
getParams().getOptionalString("seq").orElse("bucket")
);
SequencePlanner<ReadyCQLStatement> planner = new SequencePlanner<>(sequencerType);
StmtsDocList unfiltered = loadStmtsYaml();
// log tag filtering results
String tagfilter = activityDef.getParams().getOptionalString("tags").orElse("");
TagFilter tagFilter = new TagFilter(tagfilter);
unfiltered.getStmts().stream().map(tagFilter::matchesTaggedResult).forEach(r -> logger.info(r.getLog()));
stmts = unfiltered.getStmts(tagfilter);
if (stmts.size() == 0) {
throw new RuntimeException("There were no unfiltered statements found for this activity.");
}
for (StmtDef stmtDef : stmts) {
ParsedStmt parsed = stmtDef.getParsed().orError();
boolean prepared = Boolean.valueOf(stmtDef.getParams().getOrDefault("prepared", "true"));
boolean parametrized = Boolean.valueOf(stmtDef.getParams().getOrDefault("parametrized", "false"));
long ratio = Long.valueOf(stmtDef.getParams().getOrDefault("ratio", "1"));
Optional<ConsistencyLevel> cl = Optional.ofNullable(
stmtDef.getParams().getOrDefault("cl", null)).map(ConsistencyLevel::valueOf);
Optional<ConsistencyLevel> serial_cl = Optional.ofNullable(
stmtDef.getParams().getOrDefault("serial_cl", null)).map(ConsistencyLevel::valueOf);
Optional<Boolean> idempotent = Optional.ofNullable(stmtDef.getParams().getOrDefault("idempotent", null))
.map(Boolean::valueOf);
StringBuilder psummary = new StringBuilder();
boolean instrument = Optional.ofNullable(stmtDef.getParams()
.get("instrument")).map(Boolean::valueOf)
.orElse(getParams().getOptionalBoolean("instrument").orElse(false));
String logresultcsv = stmtDef.getParams().getOrDefault("logresultcsv","");
String logresultcsv_act = getParams().getOptionalString("logresultcsv").orElse("");
if (!logresultcsv_act.isEmpty() && !logresultcsv_act.toLowerCase().equals("true")) {
throw new RuntimeException("At the activity level, only logresultcsv=true is allowed, no other values.");
}
logresultcsv = !logresultcsv.isEmpty() ? logresultcsv : logresultcsv_act;
logresultcsv = !logresultcsv.toLowerCase().equals("true") ? logresultcsv : stmtDef.getName()+"--results.csv";
logger.debug("readying statement[" + (prepared ? "" : "un") + "prepared]:" + parsed.getStmt());
ReadyCQLStatementTemplate template;
String stmtForDriver = parsed.getPositionalStatement(s -> "?");
if (prepared) {
psummary.append(" prepared=>").append(prepared);
PreparedStatement prepare = getSession().prepare(stmtForDriver);
cl.ifPresent((conlvl) -> {
psummary.append(" consistency_level=>").append(conlvl);
prepare.setConsistencyLevel(conlvl);
});
serial_cl.ifPresent((scl) -> {
psummary.append(" serial_consistency_level=>").append(serial_cl);
prepare.setSerialConsistencyLevel(scl);
});
idempotent.ifPresent((i) -> {
psummary.append(" idempotent=").append(idempotent);
prepare.setIdempotent(i);
});
CqlBinderTypes binderType = CqlBinderTypes.valueOf(stmtDef.getParams()
.getOrDefault("binder", CqlBinderTypes.DEFAULT.toString()));
template = new ReadyCQLStatementTemplate(fconfig, binderType, getSession(), prepare, ratio,
parsed.getName());
} else {
SimpleStatement simpleStatement = new SimpleStatement(stmtForDriver);
cl.ifPresent((conlvl) -> {
psummary.append(" consistency_level=>").append(conlvl);
simpleStatement.setConsistencyLevel(conlvl);
});
serial_cl.ifPresent((scl) -> {
psummary.append(" serial_consistency_level=>").append(scl);
simpleStatement.setSerialConsistencyLevel(scl);
});
idempotent.ifPresent((i) -> {
psummary.append(" idempotent=>").append(i);
simpleStatement.setIdempotent(i);
});
template = new ReadyCQLStatementTemplate(fconfig, getSession(), simpleStatement, ratio,
parsed.getName(), parametrized);
}
Optional.ofNullable(stmtDef.getParams().getOrDefault("save", null))
.map(s -> s.split("[,; ]"))
.map(Save::new)
.ifPresent(save_op -> {
psummary.append(" save=>").append(save_op.toString());
template.addRowCycleOperators(save_op);
});
Optional.ofNullable(stmtDef.getParams().getOrDefault("rsoperators", null))
.map(s -> s.split(","))
.stream().flatMap(Arrays::stream)
.map(ResultSetCycleOperators::newOperator)
.forEach(rso -> {
psummary.append(" rsop=>").append(rso.toString());
template.addResultSetOperators(rso);
});
Optional.ofNullable(stmtDef.getParams().getOrDefault("rowoperators", null))
.map(s -> s.split(","))
.stream().flatMap(Arrays::stream)
.map(RowCycleOperators::newOperator)
.forEach(ro -> {
psummary.append(" rowop=>").append(ro.toString());
template.addRowCycleOperators(ro);
});
if (instrument) {
logger.info("Adding per-statement success and error and resultset-size timers to statement '" + parsed.getName() + "'");
template.instrument(this);
psummary.append(" instrument=>").append(instrument);
}
if (!logresultcsv.isEmpty()) {
logger.info("Adding per-statement result CSV logging to statement '" + parsed.getName() + "'");
template.logResultCsv(this,logresultcsv);
psummary.append(" logresultcsv=>").append(logresultcsv);
}
template.getContextualBindings().getBindingsTemplate().addFieldBindings(stmtDef.getParsed().getBindPoints());
if (psummary.length() > 0) {
logger.info("statement named '" + stmtDef.getName() + "' has custom settings:" + psummary.toString());
}
planner.addOp(template.resolve(), ratio);
}
opsequence = planner.resolve();
}
private StmtsDocList loadStmtsYaml() {
StmtsDocList doclist = null;
String yaml_loc = activityDef.getParams().getOptionalString("yaml", "workload").orElse("default");
StrInterpolator interp = new StrInterpolator(activityDef);
String yamlVersion = "unset";
if (yaml_loc.endsWith(":1") || yaml_loc.endsWith(":2")) {
yamlVersion = yaml_loc.substring(yaml_loc.length() - 1);
yaml_loc = yaml_loc.substring(0, yaml_loc.length() - 2);
}
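// Illustrative examples of the suffix handling above (file names are hypothetical):
//   yaml=myworkload.yaml:1 -> yamlVersion="1", yaml_loc="myworkload.yaml" (compatibility loader)
//   yaml=myworkload.yaml:2 -> yamlVersion="2", yaml_loc="myworkload.yaml" (standard loader)
//   yaml=myworkload.yaml   -> yamlVersion="unset", so the standard format is tried first below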
switch (yamlVersion) {
case "1":
doclist = getVersion1StmtsDoc(interp, yaml_loc);
logger.warn("DEPRECATED-FORMAT: Loaded yaml " + yaml_loc + " with compatibility mode. " +
"This will be deprecated in a future release.");
logger.warn("DEPRECATED-FORMAT: Please refer to " +
"http://docs.engineblock.io/user-guide/standard_yaml/ for more details.");
break;
case "2":
doclist = StatementsLoader.load(logger, yaml_loc, interp, "activities");
break;
case "unset":
try {
logger.debug("You can suffix your yaml filename or url with the " +
"format version, such as :1 or :2. Assuming version 2.");
doclist = StatementsLoader.load(null, yaml_loc, interp, "activities");
} catch (Exception ignored) {
try {
doclist = getVersion1StmtsDoc(interp, yaml_loc);
logger.warn("DEPRECATED-FORMAT: Loaded yaml " + yaml_loc +
" with compatibility mode. This will be deprecated in a future release.");
logger.warn("DEPRECATED-FORMAT: Please refer to " +
"http://docs.engineblock.io/user-guide/standard_yaml/ for more details.");
} catch (Exception compatError) {
logger.warn("Tried to load yaml in compatibility mode, " +
"since it failed to load with the standard format, " +
"but found an error:" + compatError);
logger.warn("The following detailed errors are provided only " +
"for the standard format. To force loading version 1 with detailed logging, add" +
" a version qualifier to your yaml filename or url like ':1'");
// retrigger the error again, this time with logging enabled.
doclist = StatementsLoader.load(logger, yaml_loc, interp, "activities");
}
}
break;
default:
throw new RuntimeException("Unrecognized yaml format version, expected :1 or :2 " +
"at end of yaml file, but got " + yamlVersion + " instead.");
}
return doclist;
}
@Deprecated
private StmtsDocList getVersion1StmtsDoc(StrInterpolator interp, String yaml_loc) {
StmtsDocList unfiltered;
List<RawStmtsBlock> blocks = new ArrayList<>();
YamlCQLStatementLoader deprecatedLoader = new YamlCQLStatementLoader(interp);
AvailableCQLStatements rawDocs = deprecatedLoader.load(yaml_loc, "activities");
List<TaggedCQLStatementDefs> rawTagged = rawDocs.getRawTagged();
for (TaggedCQLStatementDefs rawdef : rawTagged) {
for (CQLStatementDef rawstmt : rawdef.getStatements()) {
RawStmtsBlock rawblock = new RawStmtsBlock();
// tags
rawblock.setTags(rawdef.getTags());
// params
Map<String, String> params = new HashMap<>(rawdef.getParams());
if (rawstmt.getConsistencyLevel() != null && !rawstmt.getConsistencyLevel().isEmpty())
params.put("cl", rawstmt.getConsistencyLevel());
if (!rawstmt.isPrepared()) params.put("prepared", "false");
if (rawstmt.getRatio() != 1L)
params.put("ratio", String.valueOf(rawstmt.getRatio()));
rawblock.setParams(params);
// stmts
List<RawStmtDef> stmtslist = new ArrayList<>();
stmtslist.add(new RawStmtDef(rawstmt.getName(), rawstmt.getStatement()));
rawblock.setRawStmtDefs(stmtslist);
// bindings
rawblock.setBindings(rawstmt.getBindings());
blocks.add(rawblock);
}
}
RawStmtsDoc rawStmtsDoc = new RawStmtsDoc();
rawStmtsDoc.setBlocks(blocks);
List<RawStmtsDoc> rawStmtsDocs = new ArrayList<>();
rawStmtsDocs.add(rawStmtsDoc);
RawStmtsDocList rawStmtsDocList = new RawStmtsDocList(rawStmtsDocs);
unfiltered = new StmtsDocList(rawStmtsDocList);
return unfiltered;
}
public ExceptionCountMetrics getExceptionCountMetrics() {
return exceptionCountMetrics;
}
@Override
public String toString() {
return "CQLActivity {" +
"activityDef=" + activityDef +
", session=" + session +
", opSequence=" + this.opsequence +
'}';
}
@Override
public void onActivityDefUpdate(ActivityDef activityDef) {
super.onActivityDefUpdate(activityDef);
clearResultSetCycleOperators();
clearRowCycleOperators();
clearStatementModifiers();
ParameterMap params = activityDef.getParams();
Optional<String> fetchSizeOption = params.getOptionalString("fetchsize");
Cluster cluster = getSession().getCluster();
if (fetchSizeOption.isPresent()) {
int fetchSize = fetchSizeOption.flatMap(Unit::bytesFor).map(Double::intValue).orElseThrow(() -> new RuntimeException(
"Unable to parse fetch size from " + fetchSizeOption.get()
));
if (fetchSize > 10_000_000 && fetchSize < 1_000_000_000) {
logger.warn("Setting the fetchsize to " + fetchSize + " is unlikely to give good performance.");
} else if (fetchSize >= 1_000_000_000) {
throw new RuntimeException("Setting the fetch size to " + fetchSize + " is likely to cause instability.");
}
logger.trace("setting fetchSize to " + fetchSize);
cluster.getConfiguration().getQueryOptions().setFetchSize(fetchSize);
}
this.retryDelay = params.getOptionalLong("retrydelay").orElse(0L);
this.maxRetryDelay = params.getOptionalLong("maxretrydelay").orElse(500L);
this.retryReplace = params.getOptionalBoolean("retryreplace").orElse(false);
this.maxTries = params.getOptionalInteger("maxtries").orElse(10);
this.showcql = params.getOptionalBoolean("showcql").orElse(false);
this.maxpages = params.getOptionalInteger("maxpages").orElse(1);
this.statementFilter = params.getOptionalString("tokens")
.map(s -> new TokenRangeStmtFilter(cluster, s))
.orElse(null);
if (statementFilter != null) {
logger.info("filtering statements" + statementFilter);
}
errorHandler = configureErrorHandler();
params.getOptionalString("trace")
.map(SimpleConfig::new)
.map(TraceLogger::new)
.ifPresent(
tl -> {
addResultSetCycleOperator(tl);
addStatementModifier(tl);
});
this.maxTotalOpsInFlight = params.getOptionalLong("async").orElse(1L);
Optional<String> dynpooling = params.getOptionalString("pooling");
if (dynpooling.isPresent()) {
logger.info("dynamically updating pooling");
if (!dynpooling.get().equals(this.pooling)) {
PoolingOptions opts = CQLOptions.poolingOptionsFor(dynpooling.get());
logger.info("pooling=>" + dynpooling.get());
PoolingOptions cfg = getSession().getCluster().getConfiguration().getPoolingOptions();
// Max connections per host must be changed in an order the driver will accept,
// since there is no single "apply settings" call: raise the max first if it is
// increasing, and lower it last if it is decreasing.
int prior_mcph_l = cfg.getMaxConnectionsPerHost(HostDistance.LOCAL);
int mcph_l = opts.getMaxConnectionsPerHost(HostDistance.LOCAL);
int ccph_l = opts.getCoreConnectionsPerHost(HostDistance.LOCAL);
if (prior_mcph_l < mcph_l) {
logger.info("setting mcph_l to " + mcph_l);
cfg.setMaxConnectionsPerHost(HostDistance.LOCAL, mcph_l);
}
logger.info("setting ccph_l to " + ccph_l);
cfg.setCoreConnectionsPerHost(HostDistance.LOCAL, ccph_l);
if (mcph_l < prior_mcph_l) {
logger.info("setting mcph_l to " + mcph_l);
cfg.setMaxConnectionsPerHost(HostDistance.LOCAL, mcph_l);
}
cfg.setMaxRequestsPerConnection(HostDistance.LOCAL, opts.getMaxRequestsPerConnection(HostDistance.LOCAL));
int prior_mcph_r = cfg.getMaxConnectionsPerHost(HostDistance.REMOTE);
int mcph_r = opts.getMaxConnectionsPerHost(HostDistance.REMOTE);
int ccph_r = opts.getCoreConnectionsPerHost(HostDistance.REMOTE);
if (mcph_r > 0) {
if (mcph_r > prior_mcph_r) cfg.setMaxConnectionsPerHost(HostDistance.REMOTE, mcph_r);
cfg.setCoreConnectionsPerHost(HostDistance.REMOTE, ccph_r);
if (prior_mcph_r > mcph_r) cfg.setMaxConnectionsPerHost(HostDistance.REMOTE, mcph_r);
if (opts.getMaxConnectionsPerHost(HostDistance.REMOTE) > 0) {
cfg.setMaxRequestsPerConnection(HostDistance.REMOTE, opts.getMaxRequestsPerConnection(HostDistance.REMOTE));
}
}
this.pooling = dynpooling.get();
}
}
}
// TODO: make error handler updates consistent under concurrent updates
private HashedCQLErrorHandler configureErrorHandler() {
HashedCQLErrorHandler newerrorHandler = new HashedCQLErrorHandler(exceptionCountMetrics);
String errors = activityDef.getParams()
.getOptionalString("errors")
.orElse("stop,retryable->retry,unverified->stop");
String[] handlerSpecs = errors.split(",");
for (String spec : handlerSpecs) {
String[] keyval = spec.split("=|->|:", 2);
if (keyval.length == 1) {
String verb = keyval[0];
newerrorHandler.setDefaultHandler(
new NBCycleErrorHandler(
ErrorResponse.valueOf(verb),
exceptionCountMetrics,
exceptionHistoMetrics,
!getParams().getOptionalLong("async").isPresent()
)
);
} else {
String pattern = keyval[0];
String verb = keyval[1];
if (newerrorHandler.getGroupNames().contains(pattern)) {
NBCycleErrorHandler handler =
new NBCycleErrorHandler(
ErrorResponse.valueOf(verb),
exceptionCountMetrics,
exceptionHistoMetrics,
!getParams().getOptionalLong("async").isPresent()
);
logger.info("Handling error group '" + pattern + "' with handler:" + handler);
newerrorHandler.setHandlerForGroup(pattern, handler);
} else {
NBCycleErrorHandler handler = new NBCycleErrorHandler(
ErrorResponse.valueOf(verb),
exceptionCountMetrics,
exceptionHistoMetrics,
!getParams().getOptionalLong("async").isPresent()
);
logger.info("Handling error pattern '" + pattern + "' with handler: " + handler);
newerrorHandler.setHandlerForPattern(pattern, handler);
}
}
}
return newerrorHandler;
}
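// Illustrative breakdown of the default spec "stop,retryable->retry,unverified->stop" parsed above:
//   "stop"             -> no delimiter, so it becomes the default handler verb
//   "retryable->retry" -> the "retryable" group (defined in HashedCQLErrorHandler) is handled with retry
//   "unverified->stop" -> the "unverified" group is handled with stop
// The delimiters "=", "->", and ":" are interchangeable in these specs.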
public int getMaxTries() {
return maxTries;
}
public HashedCQLErrorHandler getCqlErrorHandler() {
return this.errorHandler;
}
public StatementFilter getStatementFilter() {
return statementFilter;
}
public void setStatementFilter(StatementFilter statementFilter) {
this.statementFilter = statementFilter;
}
public Boolean isShowCql() {
return showcql;
}
public OpSequence<ReadyCQLStatement> getOpSequencer() {
return opsequence;
}
public List<RowCycleOperator> getRowCycleOperators() {
return rowCycleOperators;
}
protected synchronized void addRowCycleOperator(RowCycleOperator rowCycleOperator) {
if (rowCycleOperators == null) {
rowCycleOperators = new ArrayList<>();
}
rowCycleOperators.add(rowCycleOperator);
}
private void clearRowCycleOperators() {
this.rowCycleOperators = null;
}
public List<ResultSetCycleOperator> getResultSetCycleOperators() {
return resultSetCycleOperators;
}
protected synchronized void addResultSetCycleOperator(ResultSetCycleOperator resultSetCycleOperator) {
if (this.resultSetCycleOperators == null) {
this.resultSetCycleOperators = new ArrayList<>();
}
this.resultSetCycleOperators.add(resultSetCycleOperator);
}
private void clearResultSetCycleOperators() {
this.resultSetCycleOperators = null;
}
public List<StatementModifier> getStatementModifiers() {
return this.statementModifiers;
}
protected synchronized void addStatementModifier(StatementModifier modifier) {
if (this.statementModifiers == null) {
this.statementModifiers = new ArrayList<>();
}
this.statementModifiers.add(modifier);
}
private void clearStatementModifiers() {
statementModifiers = null;
}
public long getMaxOpsInFlight(int slot) {
int threads = this.getActivityDef().getThreads();
return maxTotalOpsInFlight / threads + (slot < (maxTotalOpsInFlight % threads) ? 1 : 0);
}
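// Worked example for the division above (values are hypothetical): with async=10 and
// threads=4, maxTotalOpsInFlight/threads = 2 with remainder 2, so slots 0 and 1 get
// 3 ops in flight and slots 2 and 3 get 2, distributing the remainder to the lower slots.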
public long getRetryDelay() {
return retryDelay;
}
public void setRetryDelay(long retryDelay) {
this.retryDelay = retryDelay;
}
public long getMaxRetryDelay() {
return maxRetryDelay;
}
public void setMaxRetryDelay(long maxRetryDelay) {
this.maxRetryDelay = maxRetryDelay;
}
public boolean isRetryReplace() {
return retryReplace;
}
public void setRetryReplace(boolean retryReplace) {
this.retryReplace = retryReplace;
}
public synchronized Writer getNamedWriter(String name) {
return namedWriters.computeIfAbsent(name, s -> {
try {
FileWriter writer = new FileWriter(name, StandardCharsets.UTF_8);
// register for cleanup only when the writer is first created, so
// repeated lookups do not re-register the same writer
this.registerAutoCloseable(writer);
return writer;
} catch (IOException e) {
throw new RuntimeException(e);
}
});
}
}

View File

@@ -0,0 +1,87 @@
package io.nosqlbench.activitytype.cqld4.core;
import com.datastax.driver.core.LocalDate;
import com.datastax.driver.core.TupleValue;
import com.datastax.driver.core.UDTValue;
import io.nosqlbench.engine.api.activityapi.core.ActionDispenser;
import io.nosqlbench.engine.api.activityapi.core.ActivityType;
import io.nosqlbench.engine.api.activityimpl.ActivityDef;
import io.nosqlbench.nb.annotations.Service;
import java.math.BigDecimal;
import java.math.BigInteger;
import java.net.InetAddress;
import java.nio.ByteBuffer;
import java.time.Instant;
import java.time.LocalTime;
import java.util.*;
@Service(ActivityType.class)
public class CqlActivityType implements ActivityType<CqlActivity> {
public String getName() {
return "cqld4";
}
@Override
public CqlActivity getActivity(ActivityDef activityDef) {
Optional<String> yaml = activityDef.getParams().getOptionalString("yaml", "workload");
// sanity check that we have a yaml parameter, which contains our statements and bindings
if (yaml.isEmpty()) {
throw new RuntimeException("Currently, the cql activity type requires yaml/workload activity parameter.");
}
// allow shortcut: the yaml parameter provides the default alias name
if (activityDef.getAlias().equals(ActivityDef.DEFAULT_ALIAS)) {
activityDef.getParams().set("alias",yaml.get());
}
return new CqlActivity(activityDef);
}
/**
* Returns the per-activity level dispenser. The ActionDispenser can then dispense
* per-thread actions within the activity instance.
* @param activity The activity instance which will parameterize this action
*/
@Override
public ActionDispenser getActionDispenser(CqlActivity activity) {
return new CqlActionDispenser(activity);
}
@Override
public Map<String, Class<?>> getTypeMap() {
Map<String,Class<?>> typemap = new LinkedHashMap<>();
typemap.put("ascii",String.class);
typemap.put("bigint",long.class);
typemap.put("blob", ByteBuffer.class);
typemap.put("boolean",boolean.class);
typemap.put("counter",long.class);
typemap.put("date", LocalDate.class);
typemap.put("decimal", BigDecimal.class);
typemap.put("double",double.class);
// typemap.put("duration",CqlDuration.class);
typemap.put("float",float.class);
typemap.put("inet", InetAddress.class);
typemap.put("int",int.class);
typemap.put("list", List.class);
typemap.put("map",Map.class);
typemap.put("set", Set.class);
typemap.put("smallint",short.class);
typemap.put("text",String.class);
typemap.put("time", LocalTime.class);
typemap.put("timestamp", Instant.class);
typemap.put("tinyint",byte.class);
typemap.put("tuple", TupleValue.class);
typemap.put("<udt>", UDTValue.class);
typemap.put("uuid",UUID.class);
typemap.put("timeuuid",UUID.class);
typemap.put("varchar",String.class);
typemap.put("varint", BigInteger.class);
return typemap;
}
}

View File

@@ -0,0 +1,265 @@
package io.nosqlbench.activitytype.cqld4.core;
import com.codahale.metrics.Timer;
import com.datastax.oss.driver.api.core.cql.ResultSet;
import com.datastax.driver.core.ResultSetFuture;
import io.nosqlbench.activitytype.cqld4.api.ErrorResponse;
import io.nosqlbench.activitytype.cqld4.api.ResultSetCycleOperator;
import io.nosqlbench.activitytype.cqld4.api.RowCycleOperator;
import io.nosqlbench.activitytype.cqld4.api.StatementFilter;
import io.nosqlbench.activitytype.cqld4.errorhandling.ErrorStatus;
import io.nosqlbench.activitytype.cqld4.errorhandling.HashedCQLErrorHandler;
import io.nosqlbench.activitytype.cqld4.errorhandling.exceptions.CQLCycleWithStatementException;
import io.nosqlbench.activitytype.cqld4.errorhandling.exceptions.ChangeUnappliedCycleException;
import io.nosqlbench.activitytype.cqld4.errorhandling.exceptions.UnexpectedPagingException;
import io.nosqlbench.activitytype.cqld4.statements.core.ReadyCQLStatement;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import io.nosqlbench.engine.api.activityapi.core.BaseAsyncAction;
import io.nosqlbench.engine.api.activityapi.core.ops.fluent.opfacets.FailedOp;
import io.nosqlbench.engine.api.activityapi.core.ops.fluent.opfacets.StartedOp;
import io.nosqlbench.engine.api.activityapi.core.ops.fluent.opfacets.SucceededOp;
import io.nosqlbench.engine.api.activityapi.core.ops.fluent.opfacets.TrackedOp;
import io.nosqlbench.engine.api.activityapi.planning.OpSequence;
import io.nosqlbench.engine.api.activityimpl.ActivityDef;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.function.LongFunction;
@SuppressWarnings("Duplicates")
public class CqlAsyncAction extends BaseAsyncAction<CqlOpData, CqlActivity> {
private final static Logger logger = LoggerFactory.getLogger(CqlAsyncAction.class);
private final ActivityDef activityDef;
private List<RowCycleOperator> rowOps;
private List<ResultSetCycleOperator> cycleOps;
private List<StatementModifier> modifiers;
private StatementFilter statementFilter;
private OpSequence<ReadyCQLStatement> sequencer;
// how many cycles a statement will be attempted for before giving up
private int maxTries = 10;
private HashedCQLErrorHandler cqlActivityErrorHandler;
// private int pagesFetched = 0;
// private long totalRowsFetchedForQuery = 0L;
// private ResultSet pagingResultSet;
// private Statement pagingStatement;
// private ReadyCQLStatement pagingReadyStatement;
private boolean showcql;
// private long opsInFlight = 0L;
// private long maxOpsInFlight = 1L;
// private long pendingResults = 0;
// private LinkedBlockingQueue<CqlOpContext> resultQueue = new LinkedBlockingQueue<>();
public CqlAsyncAction(CqlActivity activity, int slot) {
super(activity, slot);
onActivityDefUpdate(activity.getActivityDef());
this.activityDef = activity.getActivityDef();
}
@Override
public void init() {
onActivityDefUpdate(activityDef);
this.sequencer = activity.getOpSequencer();
}
@Override
public LongFunction<CqlOpData> getOpInitFunction() {
return l -> new CqlOpData(l, this);
}
@Override
public void startOpCycle(TrackedOp<CqlOpData> opc) {
CqlOpData cqlop = opc.getData();
long cycle = opc.getCycle();
// bind timer covers all statement selection and binding, skipping, transforming logic
try (Timer.Context bindTime = activity.bindTimer.time()) {
cqlop.readyCQLStatement = sequencer.get(cycle);
cqlop.statement = cqlop.readyCQLStatement.bind(cycle);
// If a filter is defined, skip and count any statements that do not match it
if (statementFilter != null) {
if (!statementFilter.matches(cqlop.statement)) {
activity.skippedTokensHisto.update(cycle);
//opc.start().stop(-2);
cqlop.skipped = true;
opc.skip(0);
return;
}
}
// Transform the statement if there are any statement transformers defined for this CQL activity
if (modifiers != null) {
for (StatementModifier modifier : modifiers) {
cqlop.statement = modifier.modify(cqlop.statement, cycle);
}
}
// Maybe show the CQL in log/console - only for diagnostic use
if (showcql) {
logger.info("CQL(cycle=" + cycle + "):\n" + cqlop.readyCQLStatement.getQueryString(cycle));
}
}
StartedOp<CqlOpData> startedOp = opc.start();
cqlop.startedOp = startedOp;
// The execute timer covers only the point at which EB hands the op to the driver to be executed
try (Timer.Context executeTime = activity.executeTimer.time()) {
cqlop.future = activity.getSession().executeAsync(cqlop.statement);
Futures.addCallback(cqlop.future, cqlop);
}
}
public void onSuccess(StartedOp<CqlOpData> sop) {
CqlOpData cqlop = sop.getData();
HashedCQLErrorHandler.resetThreadStatusCode();
if (cqlop.skipped) {
return;
}
try {
ResultSet resultSet = cqlop.resultSet;
cqlop.totalPagesFetchedForQuery++;
// Apply any defined ResultSetCycleOperators
if (cycleOps != null) {
for (ResultSetCycleOperator cycleOp : cycleOps) {
cycleOp.apply(resultSet, cqlop.statement, cqlop.cycle);
}
}
int pageRows = resultSet.getAvailableWithoutFetching();
int remaining = pageRows;
if (rowOps == null) {
while (remaining-- > 0) {
resultSet.one();
}
} else {
while (remaining-- > 0) {
for (RowCycleOperator rowOp : rowOps) {
rowOp.apply(resultSet.one(), cqlop.cycle);
}
}
}
cqlop.totalRowsFetchedForQuery += pageRows;
if (cqlop.totalPagesFetchedForQuery > activity.maxpages) {
throw new UnexpectedPagingException(
cqlop.cycle,
resultSet,
cqlop.readyCQLStatement.getQueryString(cqlop.cycle),
(int) cqlop.totalPagesFetchedForQuery,
activity.maxpages,
activity.getSession().getCluster().getConfiguration().getQueryOptions().getFetchSize()
);
}
if (!resultSet.wasApplied()) {
// To make exception handling logic flow more uniformly
throw new ChangeUnappliedCycleException(
cqlop.cycle, resultSet, cqlop.readyCQLStatement.getQueryString(cqlop.cycle)
);
}
if (!resultSet.isFullyFetched()) {
logger.trace("async paging request " + cqlop.totalPagesFetchedForQuery + " for cycle " + cqlop.cycle);
ListenableFuture<ResultSet> resultSetListenableFuture = resultSet.fetchMoreResults();
Futures.addCallback(resultSetListenableFuture, cqlop);
return;
}
SucceededOp<CqlOpData> success = sop.succeed(0);
cqlop.readyCQLStatement.onSuccess(cqlop.cycle, success.getServiceTimeNanos(), cqlop.totalRowsFetchedForQuery);
activity.triesHisto.update(cqlop.triesAttempted);
activity.rowsCounter.mark(cqlop.totalRowsFetchedForQuery);
activity.resultSuccessTimer.update(success.getServiceTimeNanos(), TimeUnit.NANOSECONDS);
activity.resultSetSizeHisto.update(cqlop.totalRowsFetchedForQuery);
activity.resultTimer.update(success.getServiceTimeNanos(), TimeUnit.NANOSECONDS);
} catch (Exception e) {
long currentServiceTime = sop.getCurrentServiceTimeNanos();
cqlop.readyCQLStatement.onError(cqlop.cycle, currentServiceTime, e);
CQLCycleWithStatementException cqlCycleException = new CQLCycleWithStatementException(cqlop.cycle, currentServiceTime, e, cqlop.readyCQLStatement);
ErrorStatus errorStatus = cqlActivityErrorHandler.handleError(cqlop.cycle, cqlCycleException);
if (errorStatus.isRetryable() && ++cqlop.triesAttempted < maxTries) {
ResultSetFuture resultSetFuture = activity.getSession().executeAsync(cqlop.statement);
sop.retry();
Futures.addCallback(resultSetFuture, cqlop);
return;
} else {
sop.fail(errorStatus.getResultCode());
if (errorStatus.getResponse() == ErrorResponse.stop) {
cqlop.throwable = cqlCycleException;
activity.getActivityController().stopActivityWithErrorAsync(cqlCycleException);
}
}
}
}
public void onFailure(StartedOp<CqlOpData> startedOp) {
CqlOpData cqlop = startedOp.getData();
long serviceTime = startedOp.getCurrentServiceTimeNanos();
// Even if this is retryable, we expose error events
cqlop.readyCQLStatement.onError(startedOp.getCycle(),serviceTime,cqlop.throwable);
CQLCycleWithStatementException cqlCycleException = new CQLCycleWithStatementException(cqlop.cycle, serviceTime, cqlop.throwable, cqlop.readyCQLStatement);
ErrorStatus errorStatus = cqlActivityErrorHandler.handleError(cqlop.cycle, cqlCycleException);
if (errorStatus.getResponse() == ErrorResponse.stop) {
activity.getActivityController().stopActivityWithErrorAsync(cqlop.throwable);
return;
}
if (errorStatus.isRetryable() && cqlop.triesAttempted < maxTries) {
startedOp.retry();
try (Timer.Context executeTime = activity.executeTimer.time()) {
cqlop.future = activity.getSession().executeAsync(cqlop.statement);
Futures.addCallback(cqlop.future, cqlop);
return;
}
}
FailedOp<CqlOpData> failed = startedOp.fail(errorStatus.getResultCode());
activity.resultTimer.update(failed.getServiceTimeNanos(), TimeUnit.NANOSECONDS);
activity.triesHisto.update(cqlop.triesAttempted);
}
@Override
public void onActivityDefUpdate(ActivityDef activityDef) {
this.maxTries = activity.getMaxTries();
this.showcql = activity.isShowCql();
this.cqlActivityErrorHandler = activity.getCqlErrorHandler();
this.statementFilter = activity.getStatementFilter();
this.rowOps = activity.getRowCycleOperators();
this.cycleOps = activity.getResultSetCycleOperators();
this.modifiers = activity.getStatementModifiers();
}
public String toString() {
return "CqlAsyncAction["+this.slot+"]";
}
}

View File

@@ -0,0 +1,52 @@
package io.nosqlbench.activitytype.cqld4.core;
import com.datastax.oss.driver.api.core.cql.ResultSet;
import com.datastax.driver.core.ResultSetFuture;
import com.datastax.oss.driver.api.core.cql.Statement;
import io.nosqlbench.activitytype.cqld4.statements.core.ReadyCQLStatement;
import com.google.common.util.concurrent.FutureCallback;
import io.nosqlbench.engine.api.activityapi.core.ops.fluent.opfacets.StartedOp;
public class CqlOpData implements FutureCallback<ResultSet> {
final long cycle;
// op state is managed via callbacks, we keep a ref here
StartedOp<CqlOpData> startedOp;
boolean skipped=false;
private CqlAsyncAction action;
int triesAttempted =0;
ReadyCQLStatement readyCQLStatement;
Statement statement;
ResultSetFuture future;
ResultSet resultSet;
long totalRowsFetchedForQuery;
long totalPagesFetchedForQuery;
public Throwable throwable;
public long resultAt;
private long errorAt;
public CqlOpData(long cycle, CqlAsyncAction action) {
this.cycle = cycle;
this.action = action;
}
@Override
public void onSuccess(ResultSet result) {
this.resultSet = result;
this.resultAt = System.nanoTime();
action.onSuccess(startedOp);
}
@Override
public void onFailure(Throwable throwable) {
this.throwable=throwable;
this.errorAt = System.nanoTime();
action.onFailure(startedOp);
}
}

View File

@@ -0,0 +1,32 @@
package io.nosqlbench.activitytype.cqld4.core;
import com.datastax.driver.core.policies.AddressTranslator;
import com.datastax.driver.core.Cluster;
import java.net.InetSocketAddress;
public class ProxyTranslator implements AddressTranslator {
private int hostsIndex = 0;
private InetSocketAddress address;
public ProxyTranslator(InetSocketAddress host){
this.address= host;
}
@Override
public void init(Cluster cluster) {
// Nothing to do
}
@Override
public InetSocketAddress translate(InetSocketAddress address) {
return address;
}
@Override
public void close() {
}
}

View File

@@ -0,0 +1,11 @@
package io.nosqlbench.activitytype.cqld4.core;
import com.datastax.oss.driver.api.core.cql.Statement;
/**
* Provides a modular way for any CQL activities to modify statements before execution.
* Each active modifier returns a statement in turn.
*/
public interface StatementModifier {
Statement modify(Statement unmodified, long cycleNum);
}

View File

@@ -0,0 +1,113 @@
package io.nosqlbench.activitytype.cqld4.errorhandling;
import com.datastax.driver.core.exceptions.*;
import io.nosqlbench.activitytype.cqld4.errorhandling.exceptions.*;
import io.nosqlbench.engine.api.activityapi.cyclelog.buffers.results.ResultReadable;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
* This enumerates all known exception classes, including supertypes,
* for the purposes of stable naming in error handling.
* This is current as of com.datastax.cassandra:cassandra-driver-core:3.2.0
*/
public enum CQLExceptionEnum implements ResultReadable {
FrameTooLongException(FrameTooLongException.class, 1),
CodecNotFoundException(CodecNotFoundException.class, 2),
DriverException(DriverException.class, 3),
AuthenticationException(AuthenticationException.class, 4),
TraceRetrievalException(TraceRetrievalException.class, 5),
UnsupportedProtocolVersionException(UnsupportedProtocolVersionException.class, 6),
NoHostAvailableException(NoHostAvailableException.class, 7),
QueryValidationException(QueryValidationException.class, 8),
InvalidQueryException(InvalidQueryException.class, 9),
InvalidConfigurationInQueryException(InvalidConfigurationInQueryException.class, 10),
UnauthorizedException(UnauthorizedException.class, 11),
SyntaxError(SyntaxError.class, 12),
AlreadyExistsException(AlreadyExistsException.class, 13),
UnpreparedException(UnpreparedException.class, 14),
InvalidTypeException(InvalidTypeException.class, 15),
QueryExecutionException(QueryExecutionException.class, 16),
UnavailableException(UnavailableException.class, 17),
BootstrappingException(BootstrappingException.class, 18),
OverloadedException(OverloadedException.class, 19),
TruncateException(TruncateException.class, 20),
QueryConsistencyException(QueryConsistencyException.class, 21),
WriteTimeoutException(WriteTimeoutException.class, 22),
WriteFailureException(WriteFailureException.class, 23),
ReadFailureException(ReadFailureException.class, 24),
ReadTimeoutException(ReadTimeoutException.class, 25),
FunctionExecutionException(FunctionExecutionException.class, 26),
DriverInternalError(DriverInternalError.class, 27),
ProtocolError(ProtocolError.class, 28),
ServerError(ServerError.class, 29),
BusyPoolException(BusyPoolException.class, 30),
ConnectionException(ConnectionException.class, 31),
TransportException(TransportException.class, 32),
OperationTimedOutException(OperationTimedOutException.class, 33),
PagingStateException(PagingStateException.class, 34),
UnresolvedUserTypeException(UnresolvedUserTypeException.class, 35),
UnsupportedFeatureException(UnsupportedFeatureException.class, 36),
BusyConnectionException(BusyConnectionException.class, 37),
ChangeUnappliedCycleException(io.nosqlbench.activitytype.cqld4.errorhandling.exceptions.ChangeUnappliedCycleException.class, 38),
ResultSetVerificationException(io.nosqlbench.activitytype.cqld4.errorhandling.exceptions.ResultSetVerificationException.class, 39),
RowVerificationException(io.nosqlbench.activitytype.cqld4.errorhandling.exceptions.RowVerificationException.class, 40),
UnexpectedPagingException(io.nosqlbench.activitytype.cqld4.errorhandling.exceptions.UnexpectedPagingException.class, 41),
EbdseCycleException(CqlGenericCycleException.class, 42),
MaxTriesExhaustedException(io.nosqlbench.activitytype.cqld4.errorhandling.exceptions.MaxTriesExhaustedException.class,43);
private final static Logger logger = LoggerFactory.getLogger(CQLExceptionEnum.class);
private static Map<String, Integer> codesByName = getCodesByName();
private static String[] namesByCode = getNamesByCode();
private final Class<? extends Exception> exceptionClass;
private final int resultCode;
CQLExceptionEnum(Class<? extends Exception> clazz, int resultCode) {
this.exceptionClass = clazz;
this.resultCode = resultCode;
}
public Class<? extends Exception> getExceptionClass() {
return exceptionClass;
}
public int getResultCode() {
return resultCode;
}
public int getResult() {
return this.resultCode;
}
private static Map<String,Integer> getCodesByName() {
codesByName = new HashMap<>();
for (CQLExceptionEnum cqlExceptionEnum : CQLExceptionEnum.values()) {
codesByName.put(cqlExceptionEnum.toString(), cqlExceptionEnum.resultCode);
}
codesByName.put("NONE",0);
return codesByName;
}
private static String[] getNamesByCode() {
List<String> namesByCode = new ArrayList<>();
namesByCode.add("NONE");
for (CQLExceptionEnum cqlExceptionEnum : CQLExceptionEnum.values()) {
int code = cqlExceptionEnum.resultCode;
for (int i = namesByCode.size(); i <= code ; i++) {
namesByCode.add("UNKNOWN");
}
namesByCode.set(code, cqlExceptionEnum.toString());
}
return namesByCode.toArray(new String[0]);
}
}

View File

@ -0,0 +1,31 @@
package io.nosqlbench.activitytype.cqld4.errorhandling;
import io.nosqlbench.activitytype.cqld4.api.ErrorResponse;
public class ErrorStatus {
private boolean retryable;
private int resultCode;
private ErrorResponse response;
public ErrorStatus(ErrorResponse response, boolean retryable, int resultCode) {
this.response = response;
this.retryable = retryable;
this.resultCode = resultCode;
}
public boolean isRetryable() {
return retryable;
}
public int getResultCode() {
return resultCode;
}
public void setResultCode(int resultCode) {
this.resultCode = resultCode;
}
public ErrorResponse getResponse() {
return response;
}
}

View File

@@ -0,0 +1,80 @@
package io.nosqlbench.activitytype.cqld4.errorhandling;
import com.datastax.driver.core.exceptions.*;
import io.nosqlbench.activitytype.cqld4.errorhandling.exceptions.*;
import java.util.LinkedHashMap;
import java.util.Map;
/**
* This enumerates all known exception classes, including supertypes,
* for the purposes of stable naming in error handling.
* This is current as of com.datastax.cassandra:cassandra-driver-core:3.2.0
*/
public class ExceptionMap {
private final static Map<Class<? extends Exception>, Class<? extends Exception>> map
= new LinkedHashMap<Class<? extends Exception>, Class<? extends Exception>>() {
{
put(FrameTooLongException.class, DriverException.class);
put(CodecNotFoundException.class, DriverException.class);
put(AuthenticationException.class, DriverException.class);
put(TraceRetrievalException.class, DriverException.class);
put(UnsupportedProtocolVersionException.class, DriverException.class);
put(NoHostAvailableException.class, DriverException.class);
put(QueryValidationException.class, DriverException.class);
put(InvalidQueryException.class, QueryValidationException.class);
put(InvalidConfigurationInQueryException.class, InvalidQueryException.class);
put(UnauthorizedException.class, QueryValidationException.class);
put(SyntaxError.class, QueryValidationException.class);
put(AlreadyExistsException.class, QueryValidationException.class);
put(UnpreparedException.class, QueryValidationException.class);
put(InvalidTypeException.class, DriverException.class);
put(QueryExecutionException.class, DriverException.class);
put(UnavailableException.class, QueryValidationException.class);
put(BootstrappingException.class, QueryValidationException.class);
put(OverloadedException.class, QueryValidationException.class);
put(TruncateException.class, QueryValidationException.class);
put(QueryConsistencyException.class, QueryValidationException.class);
put(WriteTimeoutException.class, QueryConsistencyException.class);
put(WriteFailureException.class, QueryConsistencyException.class);
put(ReadFailureException.class, QueryConsistencyException.class);
put(ReadTimeoutException.class, QueryConsistencyException.class);
put(FunctionExecutionException.class, QueryValidationException.class);
put(DriverInternalError.class, DriverException.class);
put(ProtocolError.class, DriverInternalError.class);
put(ServerError.class, DriverInternalError.class);
put(BusyPoolException.class, DriverException.class);
put(ConnectionException.class, DriverException.class);
put(TransportException.class, ConnectionException.class);
put(OperationTimedOutException.class, ConnectionException.class);
put(PagingStateException.class, DriverException.class);
put(UnresolvedUserTypeException.class, DriverException.class);
put(UnsupportedFeatureException.class, DriverException.class);
put(BusyConnectionException.class, DriverException.class);
put(ChangeUnappliedCycleException.class, CqlGenericCycleException.class);
put(ResultSetVerificationException.class, CqlGenericCycleException.class);
put(RowVerificationException.class, CqlGenericCycleException.class);
put(UnexpectedPagingException.class, CqlGenericCycleException.class);
put(CqlGenericCycleException.class, RuntimeException.class);
}
};
public Class<? extends Exception> put(
Class<? extends Exception> exceptionClass,
Class<? extends Exception> parentClass) {
if (exceptionClass.getSuperclass() != parentClass) {
throw new RuntimeException("Sanity check failed: " + parentClass +
" is not the superclass of " + exceptionClass);
}
return map.put(exceptionClass, parentClass);
}
public static Map<Class<? extends Exception>, Class<? extends Exception>> getMap() {
return map;
}
}

View File

@@ -0,0 +1,86 @@
package io.nosqlbench.activitytype.cqld4.errorhandling;
import com.datastax.oss.driver.api.core.NoNodeAvailableException;
import com.datastax.oss.driver.api.core.servererrors.OverloadedException;
import com.datastax.oss.driver.api.core.servererrors.ReadTimeoutException;
import com.datastax.oss.driver.api.core.servererrors.UnavailableException;
import com.datastax.oss.driver.api.core.servererrors.WriteTimeoutException;
import io.nosqlbench.activitytype.cqld4.errorhandling.exceptions.CQLCycleWithStatementException;
import io.nosqlbench.activitytype.cqld4.errorhandling.exceptions.ChangeUnappliedCycleException;
import io.nosqlbench.activitytype.cqld4.errorhandling.exceptions.ResultSetVerificationException;
import io.nosqlbench.activitytype.cqld4.errorhandling.exceptions.RowVerificationException;
import io.nosqlbench.engine.api.activityapi.errorhandling.CycleErrorHandler;
import io.nosqlbench.engine.api.activityapi.errorhandling.HashedErrorHandler;
import io.nosqlbench.engine.api.metrics.ExceptionCountMetrics;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class HashedCQLErrorHandler extends HashedErrorHandler<Throwable, ErrorStatus> {
private static final Logger logger = LoggerFactory.getLogger(HashedCQLErrorHandler.class);
// private static Set<Class<? extends Throwable>> UNVERIFIED_ERRORS = new HashSet<Class<? extends Throwable>>() {{
// add(RowVerificationException.class);
// add(ResultSetVerificationException.class);
// }};
private ExceptionCountMetrics exceptionCountMetrics;
private static ThreadLocal<Integer> tlResultCode = ThreadLocal.withInitial(() -> (0));
public HashedCQLErrorHandler(ExceptionCountMetrics exceptionCountMetrics) {
this.exceptionCountMetrics = exceptionCountMetrics;
this.setGroup("retryable",
NoNodeAvailableException.class,
UnavailableException.class,
OperationTimedOutException.class,
OverloadedException.class,
WriteTimeoutException.class,
ReadTimeoutException.class
);
this.setGroup(
"unapplied",
ChangeUnappliedCycleException.class
);
this.setGroup("unverified",
RowVerificationException.class,
ResultSetVerificationException.class
);
// realerrors is everything else but the above
}
private static class UncaughtErrorHandler implements CycleErrorHandler<Throwable, ErrorStatus> {
@Override
public ErrorStatus handleError(long cycle, Throwable error, String errMsg) {
throw new RuntimeException(
"An exception was thrown in cycle " + cycle + " that has no assigned error handler: " + errMsg + ", error: " + error
);
}
}
@Override
public ErrorStatus handleError(long cycle, Throwable throwable, String errMsg) {
int resultCode = 127;
if (throwable instanceof CQLCycleWithStatementException) {
CQLCycleWithStatementException cce = (CQLCycleWithStatementException) throwable;
Throwable cause = cce.getCause();
try {
String simpleName = cause.getClass().getSimpleName();
CQLExceptionEnum cqlExceptionEnum = CQLExceptionEnum.valueOf(simpleName);
resultCode = cqlExceptionEnum.getResult();
} catch (Throwable t) {
logger.warn("unrecognized exception while mapping status code via Enum: " + throwable.getClass());
}
} else {
logger.warn("un-marshaled exception while mapping status code: " + throwable.getClass());
}
ErrorStatus errorStatus = super.handleError(cycle, throwable, errMsg);
errorStatus.setResultCode(resultCode);
return errorStatus;
}
public static int getThreadStatusCode() {
return tlResultCode.get();
}
public static void resetThreadStatusCode() {
tlResultCode.set(0);
}
}

View File

@@ -0,0 +1,101 @@
package io.nosqlbench.activitytype.cqld4.errorhandling;
import io.nosqlbench.activitytype.cqld4.api.ErrorResponse;
import io.nosqlbench.activitytype.cqld4.errorhandling.exceptions.CQLCycleWithStatementException;
import io.nosqlbench.activitytype.cqld4.errorhandling.exceptions.CQLExceptionDetailer;
import io.nosqlbench.engine.api.activityapi.errorhandling.CycleErrorHandler;
import io.nosqlbench.engine.api.metrics.ExceptionCountMetrics;
import io.nosqlbench.engine.api.metrics.ExceptionHistoMetrics;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A contextualized error handler that can catch a cycle-specific error.
* In this class, the error handlers return a boolean which indicates
* to the caller whether or not to retry the operation. This handler implements
* the error handling stack approach, which allows the user to select an
* entry point in the stack, with all lesser-impacting handler rules
* then applied in order from most to least impacting.
*
* For simplicity, the handler stack is fixed as described below. It is not
* possible to rearrange the verbs. Some care has been given to making sure
* that the selected handlers are complete and intuitive.
*
* The standard handler stack looks like this:
*
* <ol>
* <li>stop - log and throw an exception, which should escape to the
* next level of exception handling, the level which causes ebdse
* to stop running. In this case, and only in this case, the remaining
* handlers in the stack are not reached.</li>
* <li>warn - log an exception without stopping execution.</li>
* <li>retry - retry an operation up to a limit, IFF it is retryable</li>
* <li>count - count, in metrics, the number of this particular error type</li>
* <li>ignore - do nothing</li>
* </ol>
*
* As indicated above, if you specify "warn" for a particular error type, this means
* that retry, histogram, count, and ignore also apply, in that order. "ignore" is
* simply a no-op that allows you to specify it as the minimum case.
*/
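// Illustrative reading of the stack (spec values are hypothetical): a spec like
// "retryable->warn" logs a warning for a matching error and then also applies retry
// (if retryable), histogram, count, and ignore, since each verb includes those below it.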
@SuppressWarnings("Duplicates")
public class NBCycleErrorHandler implements CycleErrorHandler<Throwable, ErrorStatus> {
private static final Logger logger = LoggerFactory.getLogger(NBCycleErrorHandler.class);
private ErrorResponse errorResponse;
private ExceptionCountMetrics exceptionCountMetrics;
private final ExceptionHistoMetrics exceptionHistoMetrics;
private boolean throwExceptionOnStop=false;
public NBCycleErrorHandler(
ErrorResponse errorResponse,
ExceptionCountMetrics exceptionCountMetrics,
ExceptionHistoMetrics exceptionHistoMetrics,
boolean throwExceptionOnStop) {
this.errorResponse = errorResponse;
this.exceptionCountMetrics = exceptionCountMetrics;
this.exceptionHistoMetrics = exceptionHistoMetrics;
this.throwExceptionOnStop = throwExceptionOnStop;
}
@Override
public ErrorStatus handleError(long cycle, Throwable contextError) {
CQLCycleWithStatementException cce = (CQLCycleWithStatementException) contextError;
Throwable error = cce.getCause();
boolean retry = false;
switch (errorResponse) {
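// Intentional fall-through: each case also applies every lesser-impact case below it,
// which is what implements the handler-stack behavior described in the class javadoc.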
case stop:
logger.error("error with cycle " + cycle + ": statement: " + cce.getStatement() + " errmsg: " +
CQLExceptionDetailer.messageFor(cycle, error));
if (throwExceptionOnStop) {
throw new RuntimeException(error);
}
case warn:
logger.warn("error with cycle " + cycle + ": statement: " + cce.getStatement() + " errmsg: " +
CQLExceptionDetailer.messageFor(cycle, error));
case retry:
retry = true;
case histogram:
exceptionHistoMetrics.update(error,cce.getDurationNanos());
case count:
exceptionCountMetrics.count(error);
case ignore:
default:
break;
}
return new ErrorStatus(errorResponse, retry,-1);
}
@Override
public ErrorStatus handleError(long cycle, Throwable contextError, String errMsg) {
return handleError(cycle,contextError);
}
public String toString() {
return this.errorResponse.toString();
}
}

View File

@@ -0,0 +1,38 @@
package io.nosqlbench.activitytype.cqld4.errorhandling.exceptions;
import io.nosqlbench.activitytype.cqld4.statements.core.ReadyCQLStatement;
/**
* An internal exception type that is used to save exception
* context from within a CQL activity cycle.
*/
public class CQLCycleWithStatementException extends Exception {
private final long cycleValue;
private final long durationNanos;
private final ReadyCQLStatement readyCQLStatement;
public CQLCycleWithStatementException(long cycleValue, long durationNanos, Throwable e, ReadyCQLStatement readyCQLStatement) {
super(e);
this.cycleValue = cycleValue;
this.durationNanos = durationNanos;
this.readyCQLStatement = readyCQLStatement;
}
public long getCycleValue() {
return cycleValue;
}
public long getDurationNanos() {
return durationNanos;
}
public ReadyCQLStatement getReadyCQLStatement() {
return readyCQLStatement;
}
public String getStatement() {
return readyCQLStatement.getQueryString(cycleValue);
}
}

View File

@@ -0,0 +1,25 @@
package io.nosqlbench.activitytype.cqld4.errorhandling.exceptions;
import com.datastax.driver.core.exceptions.ReadTimeoutException;
import com.datastax.driver.core.exceptions.WriteTimeoutException;
public class CQLExceptionDetailer {
public static String messageFor(long cycle, Throwable e) {
if (e instanceof ReadTimeoutException) {
ReadTimeoutException rte = (ReadTimeoutException) e;
return rte.getMessage() +
", coordinator: " + rte.getHost() +
", wasDataRetrieved: " + rte.wasDataRetrieved();
}
if (e instanceof WriteTimeoutException) {
WriteTimeoutException wte = (WriteTimeoutException) e;
return wte.getMessage() +
", coordinator: " + wte.getHost();
}
return e.getMessage();
}
}

View File

@@ -0,0 +1,56 @@
package io.nosqlbench.activitytype.cqld4.errorhandling.exceptions;
import com.datastax.driver.core.BoundStatement;
import com.datastax.oss.driver.api.core.cql.ResultSet;
import com.datastax.driver.core.SimpleStatement;
import com.datastax.oss.driver.api.core.cql.Statement;
public abstract class CQLResultSetException extends CqlGenericCycleException {
private final Statement statement;
private final ResultSet resultSet;
public CQLResultSetException(long cycle, ResultSet resultSet, Statement statement, String message, Throwable cause) {
super(cycle,message,cause);
this.resultSet = resultSet;
this.statement = statement;
}
public CQLResultSetException(long cycle, ResultSet resultSet, Statement statement) {
super(cycle);
this.resultSet = resultSet;
this.statement = statement;
}
public CQLResultSetException(long cycle, ResultSet resultSet, Statement statement, String message) {
super(cycle,message);
this.resultSet = resultSet;
this.statement=statement;
}
public CQLResultSetException(long cycle, ResultSet resultSet, Statement statement, Throwable cause) {
super(cycle,cause);
this.resultSet = resultSet;
this.statement = statement;
}
public Statement getStatement() {
return statement;
}
public ResultSet getResultSet() {
return resultSet;
}
protected static String getQueryString(Statement stmt) {
if (stmt instanceof BoundStatement) {
return ((BoundStatement)stmt).preparedStatement().getQueryString();
} else if (stmt instanceof SimpleStatement) {
return ((SimpleStatement) stmt).getQueryString();
} else {
return "UNKNOWN Statement type:" + stmt.getClass().getSimpleName();
}
}
}

View File

@@ -0,0 +1,26 @@
package io.nosqlbench.activitytype.cqld4.errorhandling.exceptions;
import com.datastax.oss.driver.api.core.cql.ResultSet;
/**
* This was added to nosqlbench because the error handling logic was
* starting to look contrived. Since we need to be able to respond
* to different result outcomes, it is simpler to have a single type
* of error-handling logic for all outcomes.
*/
public class ChangeUnappliedCycleException extends CqlGenericCycleException {
private final ResultSet resultSet;
private final String queryString;
public ChangeUnappliedCycleException(long cycle, ResultSet resultSet, String queryString) {
super(cycle, "Operation was not applied:" + queryString);
this.resultSet = resultSet;
this.queryString = queryString;
}
public ResultSet getResultSet() {
return resultSet;
}
public String getQueryString() { return queryString; }
}

View File

@@ -0,0 +1,38 @@
package io.nosqlbench.activitytype.cqld4.errorhandling.exceptions;
public abstract class CqlGenericCycleException extends RuntimeException {
private long cycle;
public CqlGenericCycleException(long cycle, Throwable cause) {
super(cause);
this.cycle = cycle;
}
public CqlGenericCycleException(long cycle, String message) {
super(message);
this.cycle = cycle;
}
public CqlGenericCycleException(long cycle, String message, Throwable cause) {
super(message, cause);
this.cycle = cycle;
}
public CqlGenericCycleException(long cycle) {
super();
this.cycle = cycle;
}
@Override
public String getMessage() {
return "cycle:" + cycle + " caused by:" + super.getMessage();
}
public long getCycle() {
return cycle;
}
}

View File

@@ -0,0 +1,20 @@
package io.nosqlbench.activitytype.cqld4.errorhandling.exceptions;
public class MaxTriesExhaustedException extends CqlGenericCycleException {
private int maxtries;
public MaxTriesExhaustedException(long cycle, int maxtries) {
super(cycle);
this.maxtries = maxtries;
}
public int getMaxTries() {
return maxtries;
}
@Override
public String getMessage() {
return "Exhausted max tries (" + getMaxTries() + ") on cycle " + getCycle() + ".";
}
}

View File

@@ -0,0 +1,17 @@
package io.nosqlbench.activitytype.cqld4.errorhandling.exceptions;
import com.datastax.oss.driver.api.core.cql.ResultSet;
import com.datastax.oss.driver.api.core.cql.Statement;
public class ResultSetVerificationException extends CQLResultSetException {
public ResultSetVerificationException(
long cycle, ResultSet resultSet, Statement statement, Throwable cause) {
super(cycle, resultSet, statement, cause);
}
public ResultSetVerificationException(
long cycle, ResultSet resultSet, Statement statement, String s) {
super(cycle, resultSet, statement, s + ", \nquery string:\n" + getQueryString(statement));
}
}

View File

@@ -0,0 +1,33 @@
package io.nosqlbench.activitytype.cqld4.errorhandling.exceptions;
import com.datastax.oss.driver.api.core.cql.Row;
import java.util.Map;
/**
* This exception is thrown when read verification fails.
*/
public class RowVerificationException extends CqlGenericCycleException {
private Map<String, Object> expected;
private Row row;
public RowVerificationException(long cycle, Row row, Map<String, Object> expected, String detail) {
super(cycle, detail);
this.expected = expected;
this.row = row;
}
@Override
public String getMessage() {
return "cycle:" + getCycle() + ": " + super.getMessage();
}
public Map<String,Object> getExpectedValues() {
return expected;
}
public Row getRow() {
return row;
}
}

View File

@@ -0,0 +1,55 @@
package io.nosqlbench.activitytype.cqld4.errorhandling.exceptions;
import com.datastax.oss.driver.api.core.cql.ResultSet;
/**
* <p>This is not a core exception. It was added to the CQL activity type
* driver for nosqlbench specifically to catch the following unexpected
* condition:
* Paging would be needed to read all the results from a read query, but the user
* is not expecting to intentionally check and iterate the result sets for paging.
* <p>
* This should only be thrown if a result set would need paging, but configuration
* options specify that it should not. Rather than assume paging is completely
* expected or unexpected, we simply assume that only 1 page is allowed, being the
* first page, or what is thought of as "not paging".
* <p>If this error is thrown, and paging is expected, then the user can adjust
* fetchsize or maxpages in order to open up paging to the degree that is allowable or
* expected.
*/
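// Illustrative remedy (parameter values are hypothetical): if paging is actually expected,
// raising maxpages (e.g. maxpages=10) or raising fetchsize so that fewer pages are needed
// allows the operation to complete without this exception being thrown.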
public class UnexpectedPagingException extends CqlGenericCycleException {
private final ResultSet resultSet;
private final String queryString;
private final int fetchSize;
private int fetchedPages;
private int maxpages;
public UnexpectedPagingException(
long cycle,
ResultSet resultSet,
String queryString,
int fetchedPages,
int maxpages,
int fetchSize) {
super(cycle);
this.resultSet = resultSet;
this.queryString = queryString;
this.fetchedPages = fetchedPages;
this.maxpages = maxpages;
this.fetchSize = fetchSize;
}
public ResultSet getResultSet() {
return resultSet;
}
public String getMessage() {
StringBuilder sb = new StringBuilder();
sb.append("Additional paging would be required to read the results from this query fully" +
", but the user has not explicitly indicated that paging was expected.")
.append(" fetched/allowed: ").append(fetchedPages).append("/").append(maxpages)
.append(" fetchSize(").append(fetchSize).append("): ").append(queryString);
return sb.toString();
}
}

View File

@@ -0,0 +1,65 @@
package io.nosqlbench.activitytype.cqld4.filtering;
import io.nosqlbench.activitytype.cqld4.errorhandling.CQLExceptionEnum;
import io.nosqlbench.engine.api.activityapi.cyclelog.buffers.results.ResultReadable;
import io.nosqlbench.engine.api.activityapi.cyclelog.filters.ResultFilterDispenser;
import io.nosqlbench.engine.api.activityapi.cyclelog.filters.ResultValueFilterType;
import io.nosqlbench.engine.api.activityapi.cyclelog.filters.tristate.EnumReadableMappingFilter;
import io.nosqlbench.engine.api.activityapi.cyclelog.filters.tristate.TristateFilter;
import io.nosqlbench.engine.api.util.ConfigTuples;
import io.nosqlbench.nb.annotations.Service;
import java.util.function.Predicate;
@Service(ResultValueFilterType.class)
public class CQLResultFilterType implements ResultValueFilterType {
@Override
public String getName() {
return "cql";
}
@Override
public ResultFilterDispenser getDispenser(String config) {
return new Dispenser(config);
}
private class Dispenser implements ResultFilterDispenser {
private final ConfigTuples conf;
private final EnumReadableMappingFilter<CQLExceptionEnum> enumFilter;
private final Predicate<ResultReadable> filter;
public Dispenser(String config) {
this.conf = new ConfigTuples(config);
ConfigTuples inout = conf.getAllMatching("in.*", "ex.*");
// Default policy is opposite of leading rule
TristateFilter.Policy defaultPolicy = TristateFilter.Policy.Discard;
if (conf.get(0).get(0).startsWith("ex")) {
defaultPolicy = TristateFilter.Policy.Keep;
}
this.enumFilter =
new EnumReadableMappingFilter<>(CQLExceptionEnum.values(), TristateFilter.Policy.Ignore);
for (ConfigTuples.Section section : inout) {
if (section.get(0).startsWith("in")) {
this.enumFilter.addPolicy(section.get(1), TristateFilter.Policy.Keep);
} else if (section.get(0).startsWith("ex")) {
this.enumFilter.addPolicy(section.get(1), TristateFilter.Policy.Discard);
} else {
throw new RuntimeException("Section must start with in(clude) or ex(clude), but instead it is " + section);
}
}
this.filter = this.enumFilter.toDefaultingPredicate(defaultPolicy);
}
@Override
public Predicate<ResultReadable> getResultFilter() {
return filter;
}
}
}
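// Hypothetical usage sketch, inferred only from the section matching above: a config whose
// leading section starts with "in" (e.g. "include:WriteTimeoutException") keeps matching
// results and discards everything else by default, while a leading "ex..." section flips
// the default policy to Keep and discards only the named results.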

View File

@@ -0,0 +1,27 @@
package io.nosqlbench.activitytype.cqld4.statements.binders;
import com.datastax.oss.driver.api.core.cql.PreparedStatement;
import com.datastax.oss.driver.api.core.cql.Statement;
import com.datastax.oss.driver.api.core.session.Session;
import io.nosqlbench.virtdata.core.bindings.ValuesArrayBinder;
public enum CqlBinderTypes {
direct_array,
unset_aware,
diagnostic;
public final static CqlBinderTypes DEFAULT = unset_aware;
public ValuesArrayBinder<PreparedStatement, Statement> get(Session session) {
    if (this == direct_array) {
        return new DirectArrayValuesBinder();
    } else if (this == unset_aware) {
        return new UnsettableValuesBinder(session);
    } else if (this == diagnostic) {
        return new DiagnosticPreparedBinder();
    } else {
        throw new RuntimeException("Unknown binder type: " + this);
    }
}
}
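A minimal usage sketch for this enum; the session, prepared statement, and values are hypothetical:

// resolve the default binder for a session and bind one cycle's values
ValuesArrayBinder<PreparedStatement, Statement> binder = CqlBinderTypes.DEFAULT.get(session);
Statement bound = binder.bindValues(preparedStatement, new Object[]{"sensor-7", 42});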

View File

@ -0,0 +1,53 @@
package io.nosqlbench.activitytype.cqld4.statements.binders;
import com.datastax.oss.driver.api.core.cql.*;
import com.datastax.oss.driver.api.core.type.DataType;
import io.nosqlbench.activitytype.cqld4.core.CQLBindHelper;
import io.nosqlbench.virtdata.core.bindings.ValuesArrayBinder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.List;
/**
 * This binder is not meant to be used as the primary binder by default.
 * It gives detailed diagnostics, but does a lot of extra processing to do so.
 * Other binders delegate to this one from their exception handlers when needed,
 * in order to explain to users in more detail what is happening.
 */
public class DiagnosticPreparedBinder implements ValuesArrayBinder<PreparedStatement, Statement> {
public static final Logger logger = LoggerFactory.getLogger(DiagnosticPreparedBinder.class);
@Override
public Statement bindValues(PreparedStatement prepared, Object[] values) {
ColumnDefinitions columnDefinitions = prepared.getVariableDefinitions();
BoundStatement bound = prepared.bind();
List<ColumnDefinition> columnDefList = new ArrayList<>();
columnDefinitions.forEach(columnDefList::add);
if (columnDefList.size() != values.length) {
    throw new RuntimeException("The number of named anchors in your statement does not match the number of bindings provided.");
}
int i = 0;
for (Object value : values) {
ColumnDefinition columnDef = columnDefList.get(i);
String colName = columnDef.getName().toString();
DataType type = columnDef.getType();
try {
bound = CQLBindHelper.bindStatement(bound, colName, value, type);
} catch (ClassCastException e) {
logger.error(String.format("Unable to bind column %s to cql type %s with value %s", colName, type, value));
throw e;
}
i++;
}
return bound;
}
}

View File

@ -0,0 +1,37 @@
package io.nosqlbench.activitytype.cqld4.statements.binders;
import com.datastax.oss.driver.api.core.cql.PreparedStatement;
import com.datastax.oss.driver.api.core.cql.Statement;
import io.nosqlbench.virtdata.core.bindings.ValuesArrayBinder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Arrays;
/**
 * This binder binds the values array directly to the prepared statement. If any
 * exception occurs, it delegates to the diagnostic binder in order to explain
 * what happened. This allows for higher performance in the general case, with
 * better user support when something goes wrong.
 *
 * If you want to force the client to use the array-passing method of initializing
 * a statement, use this binder, known as 'direct_array'. Doing so gives up the
 * ability to model unset values, at no clear benefit, which is why
 * {@link CqlBinderTypes#unset_aware} is the default instead.
 */
public class DirectArrayValuesBinder implements ValuesArrayBinder<PreparedStatement, Statement> {
public final static Logger logger = LoggerFactory.getLogger(DirectArrayValuesBinder.class);
@Override
public Statement bindValues(PreparedStatement preparedStatement, Object[] objects) {
try {
return preparedStatement.bind(objects);
} catch (Exception e) {
StringBuilder sb = new StringBuilder();
sb.append("Error binding objects to prepared statement directly, falling back to diagnostic binding layer:");
sb.append(Arrays.toString(objects));
logger.warn(sb.toString(),e);
DiagnosticPreparedBinder diag = new DiagnosticPreparedBinder();
return diag.bindValues(preparedStatement, objects);
}
}
}

View File

@ -0,0 +1,55 @@
package io.nosqlbench.activitytype.cqld4.statements.binders;
import com.datastax.driver.core.ConsistencyLevel;
import com.datastax.driver.core.SimpleStatement;
import com.datastax.oss.driver.api.core.cql.Statement;
import io.nosqlbench.virtdata.core.bindings.ValuesArrayBinder;
/**
 * This binder is not meant to be used with anything but DDL or statements
 * which should not be parameterizing values in general. Parameterized values
 * are still possible via the 'parametrized' constructor parameter. Prefer
 * binders which operate on a PreparedStatement wherever possible.
 */
public class SimpleStatementValuesBinder
implements ValuesArrayBinder<SimpleStatement, Statement> {
private final boolean parametrized;
public SimpleStatementValuesBinder(boolean parametrized){
this.parametrized = parametrized;
}
@Override
public Statement bindValues(SimpleStatement context, Object[] values) {
String query = context.getQueryString();
if(parametrized) {
String[] splits = query.split("\\?", -1);
assert splits.length == values.length + 1;
StringBuilder sb = new StringBuilder();
sb.append(splits[0]);
for(int i = 1; i < splits.length; i++) {
sb.append(values[i - 1]);
sb.append(splits[i]);
}
query = sb.toString();
System.out.println(query);
}
SimpleStatement simpleStatement = new SimpleStatement(query);
ConsistencyLevel cl = context.getConsistencyLevel();
if(cl != null){
simpleStatement.setConsistencyLevel(context.getConsistencyLevel());
}
// Does carrying the serial consistency level over really make sense here?
ConsistencyLevel serial_cl = context.getSerialConsistencyLevel();
if(serial_cl != null){
simpleStatement.setSerialConsistencyLevel(context.getSerialConsistencyLevel());
}
Boolean idempotent = context.isIdempotent();
if(idempotent != null){
simpleStatement.setIdempotent(idempotent);
}
return simpleStatement;
}
}
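To illustrate the parametrized path above, a worked sketch with a hypothetical query. Note that values are interpolated verbatim, without CQL quoting:

// "insert into ks.t (a,b) values (?,?)" with values {1, "x"}
// becomes "insert into ks.t (a,b) values (1,x)"
SimpleStatementValuesBinder binder = new SimpleStatementValuesBinder(true);
Statement interpolated = binder.bindValues(
    new SimpleStatement("insert into ks.t (a,b) values (?,?)"),
    new Object[]{1, "x"});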

View File

@ -0,0 +1,73 @@
package io.nosqlbench.activitytype.cqld4.statements.binders;
import com.datastax.driver.core.*;
import io.nosqlbench.virtdata.api.bindings.VALUE;
import io.nosqlbench.virtdata.core.bindings.ValuesArrayBinder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.nio.ByteBuffer;
import java.util.List;
public class UnsettableValuesBinder implements ValuesArrayBinder<PreparedStatement, Statement> {
private final static Logger logger = LoggerFactory.getLogger(UnsettableValuesBinder.class);
private final Session session;
private final CodecRegistry codecRegistry;
private final ProtocolVersion protocolVersion;
public UnsettableValuesBinder(Session session) {
this.session = session;
this.codecRegistry = session.getCluster().getConfiguration().getCodecRegistry();
this.protocolVersion = this.session.getCluster().getConfiguration().getProtocolOptions().getProtocolVersion();
}
// TODO: Allow for warning when nulls are passed and they aren't expected
@Override
public Statement bindValues(PreparedStatement preparedStatement, Object[] objects) {
int i=-1;
try {
BoundStatement boundStmt = preparedStatement.bind();
List<ColumnDefinitions.Definition> defs = preparedStatement.getVariables().asList();
for (i = 0; i < objects.length; i++) {
Object value = objects[i];
if (VALUE.unset != value) {
if (null==value) {
boundStmt.setToNull(i);
} else {
DataType cqlType = defs.get(i).getType();
TypeCodec<Object> codec = codecRegistry.codecFor(cqlType, value);
ByteBuffer serialized = codec.serialize(value, protocolVersion);
boundStmt.setBytesUnsafe(i,serialized);
}
}
}
return boundStmt;
} catch (Exception e) {
    if (i < 0 || i >= objects.length) {
        logger.error("Error binding values to prepared statement: " + e.getMessage(), e);
    } else {
        String typNam = (objects[i] == null ? "NULL" : objects[i].getClass().getCanonicalName());
        logger.error("Error binding column " + preparedStatement.getVariables().asList().get(i).getName() + " with class " + typNam + ": " + e.getMessage(), e);
    }
    throw e;
// StringBuilder sb = new StringBuilder();
// sb.append("Error binding objects to prepared statement directly, falling back to diagnostic binding layer:");
// sb.append(Arrays.toString(objects));
// logger.warn(sb.toString(),e);
// DiagnosticPreparedBinder diag = new DiagnosticPreparedBinder();
// return diag.bindValues(preparedStatement, objects);
}
}
// static void setObject(Session session, BoundStatement bs, int index, Object value) {
//
// DataType cqlType = bs.preparedStatement().getVariables().getType(index);
//
// CodecRegistry codecRegistry = session.getCluster().getConfiguration().getCodecRegistry();
// ProtocolVersion protocolVersion =
// session.getCluster().getConfiguration().getProtocolOptions().getProtocolVersion();
//
// TypeCodec<Object> codec = codecRegistry.codecFor(cqlType, value);
// bs.setBytesUnsafe(index, codec.serialize(value, protocolVersion));
// }
}

View File

@ -0,0 +1,50 @@
package io.nosqlbench.activitytype.cqld4.statements.core;
import io.nosqlbench.engine.api.util.TagFilter;
import java.util.*;
import java.util.stream.Collectors;
public class AvailableCQLStatements {
private List<TaggedCQLStatementDefs> availableDefs = new ArrayList<>();
public AvailableCQLStatements(List<TaggedCQLStatementDefs> allStatementDef) {
this.availableDefs = allStatementDef;
}
public List<TaggedCQLStatementDefs> getRawTagged() {
return availableDefs;
}
public Map<String, String> getFilteringDetails(String tagSpec) {
Map<String, String> details = new LinkedHashMap<>();
TagFilter ts = new TagFilter(tagSpec);
for (TaggedCQLStatementDefs availableDef : availableDefs) {
TagFilter.Result result = ts.matchesTaggedResult(availableDef);
String names = availableDef.getStatements().stream()
.map(CQLStatementDef::getName).collect(Collectors.joining(","));
details.put(names, result.getLog());
}
return details;
}
public List<CQLStatementDefParser> getMatching(String tagSpec) {
    TagFilter ts = new TagFilter(tagSpec);
    return availableDefs.stream()
        .filter(ts::matchesTagged)
        .map(TaggedCQLStatementDefs::getStatements)
        .flatMap(Collection::stream)
        .map(p -> new CQLStatementDefParser(p.getName(), p.getStatement()))
        .collect(Collectors.toList());
}
public List<CQLStatementDefParser> getAll() {
return getMatching("");
}
}

View File

@ -0,0 +1,279 @@
package io.nosqlbench.activitytype.cqld4.statements.core;
import com.datastax.driver.core.*;
import com.datastax.driver.core.policies.*;
import com.datastax.driver.dse.DseCluster;
import io.nosqlbench.activitytype.cqld4.core.CQLOptions;
import io.nosqlbench.activitytype.cqld4.core.ProxyTranslator;
import io.nosqlbench.engine.api.activityapi.core.Shutdownable;
import io.nosqlbench.engine.api.activityimpl.ActivityDef;
import io.nosqlbench.engine.api.metrics.ActivityMetrics;
import io.nosqlbench.engine.api.scripting.NashornEvaluator;
import io.nosqlbench.engine.api.util.SSLKsFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.*;
public class CQLSessionCache implements Shutdownable {
private final static Logger logger = LoggerFactory.getLogger(CQLSessionCache.class);
private final static String DEFAULT_SESSION_ID = "default";
private static CQLSessionCache instance = new CQLSessionCache();
private Map<String, Session> sessionCache = new HashMap<>();
private CQLSessionCache() {
}
public static CQLSessionCache get() {
return instance;
}
public void stopSession(ActivityDef activityDef) {
String key = activityDef.getParams().getOptionalString("clusterid").orElse(DEFAULT_SESSION_ID);
Session session = sessionCache.remove(key);
if (session != null) {
    session.close();
}
}
public Session getSession(ActivityDef activityDef) {
String key = activityDef.getParams().getOptionalString("clusterid").orElse(DEFAULT_SESSION_ID);
return sessionCache.computeIfAbsent(key, (cid) -> createSession(activityDef, key));
}
// cbopts=\".withLoadBalancingPolicy(LatencyAwarePolicy.builder(new TokenAwarePolicy(new DCAwareRoundRobinPolicy(\"dc1-us-east\", 0, false))).build()).withRetryPolicy(new LoggingRetryPolicy(DefaultRetryPolicy.INSTANCE))\"
private Session createSession(ActivityDef activityDef, String sessid) {
String host = activityDef.getParams().getOptionalString("host").orElse("localhost");
int port = activityDef.getParams().getOptionalInteger("port").orElse(9042);
String driverType = activityDef.getParams().getOptionalString("cqldriver").orElse("dse");
Cluster.Builder builder =
driverType.toLowerCase().equals("dse") ? DseCluster.builder() :
driverType.toLowerCase().equals("oss") ? Cluster.builder() : null;
if (builder==null) {
throw new RuntimeException("The driver type '" + driverType + "' is not recognized");
}
logger.info("Using driver type '" + driverType.toUpperCase() + "'");
Optional<String> scb = activityDef.getParams()
.getOptionalString("secureconnectbundle");
scb.map(File::new)
.ifPresent(builder::withCloudSecureConnectBundle);
activityDef.getParams()
.getOptionalString("insights").map(Boolean::parseBoolean)
.ifPresent(builder::withMonitorReporting);
String[] contactPoints = activityDef.getParams().getOptionalString("host")
.map(h -> h.split(",")).orElse(null);
if (contactPoints == null) {
contactPoints = activityDef.getParams().getOptionalString("hosts")
.map(h -> h.split(",")).orElse(null);
}
if (contactPoints == null && scb.isEmpty()) {
contactPoints = new String[]{"localhost"};
}
if (contactPoints != null) {
builder.addContactPoints(contactPoints);
}
activityDef.getParams().getOptionalInteger("port").ifPresent(builder::withPort);
builder.withCompression(ProtocolOptions.Compression.NONE);
Optional<String> usernameOpt = activityDef.getParams().getOptionalString("username");
Optional<String> passwordOpt = activityDef.getParams().getOptionalString("password");
Optional<String> passfileOpt = activityDef.getParams().getOptionalString("passfile");
if (usernameOpt.isPresent()) {
String username = usernameOpt.get();
String password;
if (passwordOpt.isPresent()) {
password = passwordOpt.get();
} else if (passfileOpt.isPresent()) {
Path path = Paths.get(passfileOpt.get());
try {
password = Files.readAllLines(path).get(0);
} catch (IOException e) {
String error = "Error while reading password from file:" + passfileOpt;
logger.error(error, e);
throw new RuntimeException(e);
}
} else {
String error = "username is present, but neither password nor passfile are defined.";
logger.error(error);
throw new RuntimeException(error);
}
builder.withCredentials(username, password);
}
Optional<String> clusteropts = activityDef.getParams().getOptionalString("cbopts");
if (clusteropts.isPresent()) {
try {
logger.info("applying cbopts:" + clusteropts.get());
NashornEvaluator<DseCluster.Builder> clusterEval = new NashornEvaluator<>(DseCluster.Builder.class);
clusterEval.put("builder", builder);
String importEnv =
"load(\"nashorn:mozilla_compat.js\");\n" +
" importPackage(com.google.common.collect.Lists);\n" +
" importPackage(com.google.common.collect.Maps);\n" +
" importPackage(com.datastax.driver);\n" +
" importPackage(com.datastax.driver.core);\n" +
" importPackage(com.datastax.driver.core.policies);\n" +
"builder" + clusteropts.get() + "\n";
clusterEval.script(importEnv);
builder = clusterEval.eval();
logger.info("successfully applied:" + clusteropts.get());
} catch (Exception e) {
logger.error("Unable to evaluate: " + clusteropts.get() + " in script context:" + e.getMessage());
throw e;
}
}
SpeculativeExecutionPolicy speculativePolicy = activityDef.getParams()
.getOptionalString("speculative")
.map(speculative -> {
logger.info("speculative=>" + speculative);
return speculative;
})
.map(CQLOptions::speculativeFor)
.orElse(CQLOptions.defaultSpeculativePolicy());
builder.withSpeculativeExecutionPolicy(speculativePolicy);
activityDef.getParams().getOptionalString("socketoptions")
.map(sockopts -> {
logger.info("socketoptions=>" + sockopts);
return sockopts;
})
.map(CQLOptions::socketOptionsFor)
.ifPresent(builder::withSocketOptions);
activityDef.getParams().getOptionalString("reconnectpolicy")
.map(reconnectpolicy-> {
logger.info("reconnectpolicy=>" + reconnectpolicy);
return reconnectpolicy;
})
.map(CQLOptions::reconnectPolicyFor)
.ifPresent(builder::withReconnectionPolicy);
activityDef.getParams().getOptionalString("pooling")
.map(pooling -> {
logger.info("pooling=>" + pooling);
return pooling;
})
.map(CQLOptions::poolingOptionsFor)
.ifPresent(builder::withPoolingOptions);
activityDef.getParams().getOptionalString("whitelist")
.map(whitelist -> {
logger.info("whitelist=>" + whitelist);
return whitelist;
})
.map(p -> CQLOptions.whitelistFor(p, null))
.ifPresent(builder::withLoadBalancingPolicy);
activityDef.getParams().getOptionalString("tickduration")
.map(tickduration -> {
logger.info("tickduration=>" + tickduration);
return tickduration;
})
.map(CQLOptions::withTickDuration)
.ifPresent(builder::withNettyOptions);
activityDef.getParams().getOptionalString("compression")
.map(compression -> {
logger.info("compression=>" + compression);
return compression;
})
.map(CQLOptions::withCompression)
.ifPresent(builder::withCompression);
if (activityDef.getParams().getOptionalString("ssl").isPresent()) {
logger.info("Cluster builder proceeding with SSL but no Client Auth");
Object context = SSLKsFactory.get().getContext(activityDef);
SSLOptions sslOptions;
if (context instanceof javax.net.ssl.SSLContext) {
    sslOptions = RemoteEndpointAwareJdkSSLOptions.builder()
        .withSSLContext((javax.net.ssl.SSLContext) context).build();
} else if (context instanceof io.netty.handler.ssl.SslContext) {
    sslOptions =
        new RemoteEndpointAwareNettySSLOptions((io.netty.handler.ssl.SslContext) context);
} else {
    throw new RuntimeException("Unrecognized ssl context object type: " + context.getClass().getCanonicalName());
}
builder.withSSL(sslOptions);
}
RetryPolicy retryPolicy = activityDef.getParams()
.getOptionalString("retrypolicy")
.map(CQLOptions::retryPolicyFor).orElse(DefaultRetryPolicy.INSTANCE);
if (retryPolicy instanceof LoggingRetryPolicy) {
logger.info("using LoggingRetryPolicy");
}
builder.withRetryPolicy(retryPolicy);
if (!activityDef.getParams().getOptionalBoolean("jmxreporting").orElse(false)) {
builder.withoutJMXReporting();
}
// Proxy Translator and Whitelist for use with DS Cloud on-demand single-endpoint setup
if (activityDef.getParams().getOptionalBoolean("single-endpoint").orElse(false)) {
InetSocketAddress inetHost = new InetSocketAddress(host, port);
final List<InetSocketAddress> whiteList = new ArrayList<>();
whiteList.add(inetHost);
LoadBalancingPolicy whitelistPolicy = new WhiteListPolicy(new RoundRobinPolicy(), whiteList);
builder.withAddressTranslator(new ProxyTranslator(inetHost)).withLoadBalancingPolicy(whitelistPolicy);
}
Cluster cl = builder.build();
// Apply default idempotence, if set
activityDef.getParams().getOptionalBoolean("defaultidempotence").map(
b -> cl.getConfiguration().getQueryOptions().setDefaultIdempotence(b)
);
Session session = cl.newSession();
// This also forces init of metadata
logger.info("cluster-metadata-allhosts:\n" + session.getCluster().getMetadata().getAllHosts());
if (activityDef.getParams().getOptionalBoolean("drivermetrics").orElse(false)) {
String driverPrefix = "driver." + sessid;
driverPrefix = activityDef.getParams().getOptionalString("driverprefix").orElse(driverPrefix) + ".";
ActivityMetrics.mountSubRegistry(driverPrefix, cl.getMetrics().getRegistry());
}
return session;
}
@Override
public void shutdown() {
for (Session session : sessionCache.values()) {
Cluster cluster = session.getCluster();
session.close();
cluster.close();
}
}
}
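A sketch of the caching behavior; activityDef is hypothetical, and the 'clusterid' param selects the cache entry:

Session one = CQLSessionCache.get().getSession(activityDef); // created on first use
Session two = CQLSessionCache.get().getSession(activityDef); // same cached instance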

View File

@ -0,0 +1,105 @@
package io.nosqlbench.activitytype.cqld4.statements.core;
import com.datastax.driver.core.ConsistencyLevel;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.*;
import java.util.stream.Collectors;
public class CQLStatementDef {
private final static Logger logger = LoggerFactory.getLogger(CQLStatementDef.class);
private Map<String,String> params = new HashMap<>();
private String name = "";
private String statement = "";
private boolean prepared = true;
private String cl = ConsistencyLevel.LOCAL_ONE.name();
private Map<String, String> bindings = new HashMap<>();
public CQLStatementDef() {
}
public String getGenSpec(String s) {
return bindings.get(s);
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public String getStatement() {
return statement;
}
public void setStatement(String statement) {
this.statement = statement;
}
public Map<String, String> getBindings() {
return bindings;
}
public void setBindings(Map<String, String> bindings) {
this.bindings = bindings;
}
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append(" name:").append(this.getName()).append("\n");
sb.append(" statement: |").append("\n");
String formattedStmt = Arrays.stream(getStatement().split("\\r*\n"))
    .map(s -> "  " + s)
    .collect(Collectors.joining("\n"));
sb.append(formattedStmt).append("\n");
if (bindings.size() > 0) {
sb.append(" bindings:\n");
Optional<Integer> maxLen = this.bindings.keySet().stream().map(String::length).reduce(Integer::max);
for (String bindName : this.bindings.keySet()) {
sb
.append(String.format(" %-" + (maxLen.orElse(20) + 2) + "s", bindName)).append(" : ")
.append(bindings.get(bindName))
.append("\n");
}
}
return sb.toString();
}
public boolean isPrepared() {
return prepared;
}
public void setPrepared(boolean prepared) {
this.prepared = prepared;
}
public String getConsistencyLevel() {
return this.cl;
}
public void setConsistencyLevel(String consistencyLevel) {
this.cl = consistencyLevel;
}
public void setCl(String consistencyLevel) {
setConsistencyLevel(consistencyLevel);
}
public Map<String, String> getParams() {
return params;
}
public void setParams(Map<String, String> params) {
this.params = params;
}
public long getRatio() {
return Long.parseLong(Optional.ofNullable(params.get("ratio")).orElse("1"));
}
}

View File

@ -0,0 +1,161 @@
package io.nosqlbench.activitytype.cqld4.statements.core;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
public class CQLStatementDefParser {
private final static Logger logger = LoggerFactory.getLogger(CQLStatementDefParser.class);
// private final static Pattern templateToken = Pattern.compile("<<(\\w+(:(.+?))?)>>");
private final static Pattern stmtToken = Pattern.compile("\\?(\\w+[-_\\d\\w]*)|\\{(\\w+[-_\\d\\w.]*)}");
private final static String UNSET_VALUE = "UNSET-VALUE";
private final String stmt;
private final String name;
private CQLStatementDef deprecatedDef; // deprecated, to be removed
public void setBindings(Map<String, String> bindings) {
this.bindings = bindings;
}
private Map<String, String> bindings;
public CQLStatementDef getDeprecatedDef() {
return deprecatedDef;
}
public void setDeprecatedDef(CQLStatementDef deprecatedDef) {
this.deprecatedDef = deprecatedDef;
}
public CQLStatementDefParser(String name, String stmt) {
    this.stmt = stmt;
    this.name = name;
}
public Map<String,String> getBindings() {
return bindings;
}
/**
* @return bindableNames in order as specified in the parameter placeholders
*/
public List<String> getBindableNames() {
Matcher m = stmtToken.matcher(stmt);
List<String> bindNames = new ArrayList<>();
while (m.find()) {
String form1 = m.group(1);
String form2 = m.group(2);
bindNames.add( (form1!=null && !form1.isEmpty()) ? form1 : form2 );
}
return bindNames;
}
public String getName() {
return name;
}
public String getParsedStatementOrError(Set<String> namedBindings) {
ParseResult result = getParseResult(namedBindings);
if (result.hasError()) {
throw new RuntimeException("Statement template has errors:\n" + result.toString());
}
return result.getStatement();
}
public ParseResult getParseResult(Set<String> namedBindings) {
HashSet<String> missingAnchors = new HashSet<>(namedBindings);
HashSet<String> missingBindings = new HashSet<>();
String statement = this.stmt;
StringBuilder cooked = new StringBuilder();
Matcher m = stmtToken.matcher(statement);
int lastMatch = 0;
while (m.find(lastMatch)) {
String pre = statement.substring(lastMatch, m.start());
String form1 = m.group(1);
String form2 = m.group(2);
String tokenName = (form1!=null && !form1.isEmpty()) ? form1 : form2;
lastMatch = m.end();
cooked.append(pre);
cooked.append("?");
if (!namedBindings.contains(tokenName)) {
    missingBindings.add(tokenName);
} else {
    missingAnchors.remove(tokenName);
}
}
// add remainder of unmatched statement text
cooked.append(statement.substring(lastMatch));
logger.info("Parsed statement as: " + cooked.toString().replaceAll("\\n","\\\\n"));
return new ParseResult(cooked.toString(),name,bindings,missingBindings,missingAnchors);
}
public static class ParseResult {
private Set<String> missingGenerators;
private Set<String> missingAnchors;
private String statement;
private Map<String,String> bindings;
private String name;
public ParseResult(String stmt, String name, Map<String,String> bindings, Set<String> missingGenerators, Set<String> missingAnchors) {
    this.missingGenerators = missingGenerators;
    this.missingAnchors = missingAnchors;
    this.statement = stmt;
    this.name = name;
    this.bindings = bindings;
}
public String toString() {
String generatorsSummary = (this.missingGenerators.size() > 0) ?
"\nundefined generators:" + this.missingGenerators.stream().collect(Collectors.joining(",", "[", "]")) : "";
return "STMT:" + statement + "\n" + generatorsSummary;
}
public String getName() {
return name;
}
public Map<String,String> getBindings() {
return bindings;
}
public boolean hasError() {
return missingGenerators.size() > 0;
}
public String getStatement() {
return statement;
}
public Set<String> getMissingAnchors() {
return missingAnchors;
}
public Set<String> getMissingGenerators() {
return missingGenerators;
}
}
}
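An illustrative parse with a hypothetical statement, showing how named anchors are rewritten to positional markers:

CQLStatementDefParser parser = new CQLStatementDefParser(
    "insert-example", "insert into ks.t (id,val) values ({id},{val})");
parser.getBindableNames();                          // [id, val]
parser.getParsedStatementOrError(Set.of("id", "val"));
// yields "insert into ks.t (id,val) values (?,?)"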

View File

@ -0,0 +1,37 @@
package io.nosqlbench.activitytype.cqld4.statements.core;
import java.util.*;
public class CQLStatementGroups {
private Map<String,List<CQLStatementDefParser>> statementGroups = new HashMap<>();
public CQLStatementGroups(Map<String,List<CQLStatementDefParser>> statementGroups) {
this.statementGroups = statementGroups;
}
public List<CQLStatementDefParser> getGroups(String... groupNames) {
List<CQLStatementDefParser> statements = new ArrayList<CQLStatementDefParser>();
for (String groupName : groupNames) {
List<CQLStatementDefParser> adding = statementGroups.getOrDefault(groupName, Collections.emptyList());
statements.addAll(adding);
}
return statements;
}
public String toString() {
StringBuilder sb = new StringBuilder();
List<String> groups = new ArrayList<String>(statementGroups.keySet());
Collections.sort(groups);
sb.append("groups:\n");
for (String group : groups) {
// sb.append("section:").append(section).append("\n");
for (CQLStatementDefParser statementDef : statementGroups.get(group)) {
sb.append(statementDef.toString());
}
sb.append("\n");
}
return sb.toString();
}
}

View File

@ -0,0 +1,182 @@
package io.nosqlbench.activitytype.cqld4.statements.core;
import com.codahale.metrics.Histogram;
import com.codahale.metrics.Timer;
import com.datastax.oss.driver.api.core.cql.PreparedStatement;
import com.datastax.oss.driver.api.core.cql.SimpleStatement;
import com.datastax.oss.driver.api.core.cql.Statement;
import io.nosqlbench.activitytype.cqld4.api.ResultSetCycleOperator;
import io.nosqlbench.activitytype.cqld4.api.RowCycleOperator;
import io.nosqlbench.virtdata.core.bindings.ContextualArrayBindings;
import java.io.IOException;
import java.io.Writer;
import java.util.concurrent.TimeUnit;
/**
* A ReadyCQLStatement instantiates new statements to be executed at some mix ratio.
* It optionally holds metrics objects for a named statement.
*/
public class ReadyCQLStatement {
private String name;
private ContextualArrayBindings<?, Statement> contextualBindings;
private long ratio;
private ResultSetCycleOperator[] resultSetOperators = null;
private RowCycleOperator[] rowCycleOperators = null;
private Timer successTimer;
private Timer errorTimer;
private Histogram rowsFetchedHisto;
private Writer resultCsvWriter;
public ReadyCQLStatement(ContextualArrayBindings<?, Statement> contextualBindings, long ratio, String name) {
this.contextualBindings = contextualBindings;
this.ratio = ratio;
this.name = name;
}
public ReadyCQLStatement withMetrics(Timer successTimer, Timer errorTimer, Histogram rowsFetchedHisto) {
this.successTimer = successTimer;
this.errorTimer = errorTimer;
this.rowsFetchedHisto = rowsFetchedHisto;
return this;
}
public Statement bind(long value) {
return contextualBindings.bind(value);
}
public ResultSetCycleOperator[] getResultSetOperators() {
return resultSetOperators;
}
public ContextualArrayBindings getContextualBindings() {
return this.contextualBindings;
}
public String getQueryString(long value) {
Object stmt = contextualBindings.getContext();
if (stmt instanceof PreparedStatement) {
String queryString = ((PreparedStatement)stmt).getQuery();
StringBuilder sb = new StringBuilder(queryString.length()*2);
sb.append("(prepared) ");
return getQueryStringValues(value, queryString, sb);
} else if (stmt instanceof SimpleStatement) {
String queryString = ((SimpleStatement) stmt).getQuery();
StringBuilder sb = new StringBuilder();
sb.append("(simple) ");
return getQueryStringValues(value, queryString, sb);
}
if (stmt instanceof String) {
return (String)stmt;
}
throw new RuntimeException("context object not recognized for query string:" + stmt.getClass().getCanonicalName());
}
private String getQueryStringValues(long value, String queryString, StringBuilder sb) {
if (!queryString.endsWith("\n")) {
sb.append("\n");
}
sb.append(queryString).append(" VALUES[");
Object[] all = contextualBindings.getBindings().getAll(value);
String delim="";
for (Object o : all) {
sb.append(delim);
delim=",";
sb.append(o.toString());
}
sb.append("]");
return sb.toString();
}
public long getRatio() {
return ratio;
}
public void setRatio(long ratio) {
this.ratio = ratio;
}
/**
* This method should be called when an associated statement is executed successfully.
* @param cycleValue The cycle associated with the execution.
* @param nanoTime The nanoTime duration of the execution.
* @param rowsFetched The number of rows fetched for this cycle
*/
public void onSuccess(long cycleValue, long nanoTime, long rowsFetched) {
if (successTimer!=null) {
successTimer.update(nanoTime, TimeUnit.NANOSECONDS);
}
if (rowsFetchedHisto!=null) {
rowsFetchedHisto.update(rowsFetched);
}
if (resultCsvWriter!=null) {
try {
synchronized(resultCsvWriter) {
// <cycle>,(SUCCESS|FAILURE),<nanos>,<rowsfetched>,<errorname>\n
resultCsvWriter
.append(String.valueOf(cycleValue)).append(",")
.append("SUCCESS,")
.append(String.valueOf(nanoTime)).append(",")
.append(String.valueOf(rowsFetched))
.append(",NONE")
.append("\n");
}
} catch (IOException e) {
throw new RuntimeException(e);
}
}
}
/**
* This method should be called when an associated statement is executed unsuccessfully.
* It should be called only once per cycle in the case of execution error.
* @param cycleValue The cycle associated with the erred execution.
* @param resultNanos The nanoTime duration of the execution.
* @param t The associated throwable
*/
public void onError(long cycleValue, long resultNanos, Throwable t) {
if (errorTimer!=null) {
errorTimer.update(resultNanos, TimeUnit.NANOSECONDS);
}
if (resultCsvWriter!=null) {
try {
synchronized(resultCsvWriter) {
// <cycle>,(SUCCESS|FAILURE),<nanos>,<rowsfetched>,<errorname>\n
resultCsvWriter
.append(String.valueOf(cycleValue)).append(",")
.append("FAILURE,")
.append(String.valueOf(resultNanos)).append(",")
.append("0,")
.append(t.getClass().getSimpleName()).append(",")
.append("\n");
}
} catch (IOException e) {
throw new RuntimeException(e);
}
}
}
public ReadyCQLStatement withResultSetCycleOperators(ResultSetCycleOperator[] resultSetCycleOperators) {
this.resultSetOperators = resultSetCycleOperators;
return this;
}
public ReadyCQLStatement withRowCycleOperators(RowCycleOperator[] rowCycleOperators) {
this.rowCycleOperators = rowCycleOperators;
return this;
}
public RowCycleOperator[] getRowCycleOperators() {
return this.rowCycleOperators;
}
public ReadyCQLStatement withResultCsvWriter(Writer resultCsvWriter) {
this.resultCsvWriter = resultCsvWriter;
return this;
}
}
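The intended per-cycle flow, sketched with hypothetical harness code:

ReadyCQLStatement ready = template.resolve();   // from a ReadyCQLStatementTemplate
Statement stmt = ready.bind(cycle);             // bind generated values for this cycle
long start = System.nanoTime();
// ... execute stmt with the session, counting fetched rows ...
ready.onSuccess(cycle, System.nanoTime() - start, rowsFetched);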

View File

@ -0,0 +1,111 @@
package io.nosqlbench.activitytype.cqld4.statements.core;
import com.codahale.metrics.Histogram;
import com.codahale.metrics.Timer;
import com.datastax.oss.driver.api.core.cql.PreparedStatement;
import com.datastax.oss.driver.api.core.cql.SimpleStatement;
import com.datastax.oss.driver.api.core.cql.Statement;
import com.datastax.oss.driver.api.core.session.Session;
import io.nosqlbench.activitytype.cqld4.api.ResultSetCycleOperator;
import io.nosqlbench.activitytype.cqld4.api.RowCycleOperator;
import io.nosqlbench.activitytype.cqld4.core.CqlActivity;
import io.nosqlbench.activitytype.cqld4.statements.binders.CqlBinderTypes;
import io.nosqlbench.activitytype.cqld4.statements.binders.SimpleStatementValuesBinder;
import io.nosqlbench.engine.api.metrics.ActivityMetrics;
import io.nosqlbench.virtdata.core.bindings.BindingsTemplate;
import io.nosqlbench.virtdata.core.bindings.ContextualBindingsArrayTemplate;
import io.nosqlbench.virtdata.core.bindings.ValuesArrayBinder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.Writer;
import java.util.Map;
public class ReadyCQLStatementTemplate {
private final static Logger logger = LoggerFactory.getLogger(ReadyCQLStatementTemplate.class);
private final Session session;
private ContextualBindingsArrayTemplate<?, Statement> template;
private long ratio;
private String name;
private ResultSetCycleOperator[] resultSetCycleOperators;
private RowCycleOperator[] rowCycleOperators;
private Timer successTimer;
private Timer errorTimer;
private Histogram rowsFetchedHisto;
private Writer resultCsvWriter;
public ReadyCQLStatementTemplate(Map<String,Object> fconfig, CqlBinderTypes binderType, Session session,
PreparedStatement preparedStmt, long ratio, String name) {
this.session = session;
this.name = name;
ValuesArrayBinder<PreparedStatement, Statement> binder = binderType.get(session);
logger.trace("Using binder_type=>" + binder.toString());
template = new ContextualBindingsArrayTemplate<>(
preparedStmt,
new BindingsTemplate(fconfig),
binder
);
this.ratio = ratio;
}
public ReadyCQLStatementTemplate(Map<String,Object> fconfig, Session session, SimpleStatement simpleStatement, long ratio, String name, boolean parametrized) {
this.session = session;
this.name = name;
template = new ContextualBindingsArrayTemplate<>(
simpleStatement,
new BindingsTemplate(fconfig),
new SimpleStatementValuesBinder(parametrized)
);
this.ratio = ratio;
}
public ReadyCQLStatement resolve() {
return new ReadyCQLStatement(template.resolveBindings(), ratio, name)
.withMetrics(this.successTimer, this.errorTimer, this.rowsFetchedHisto)
.withResultSetCycleOperators(resultSetCycleOperators)
.withRowCycleOperators(rowCycleOperators)
.withResultCsvWriter(resultCsvWriter);
}
public ContextualBindingsArrayTemplate<?, Statement> getContextualBindings() {
return template;
}
public String getName() {
return name;
}
public void instrument(CqlActivity activity) {
this.successTimer = ActivityMetrics.timer(activity.getActivityDef(), name + "--success");
this.errorTimer = ActivityMetrics.timer(activity.getActivityDef(), name + "--error");
this.rowsFetchedHisto = ActivityMetrics.histogram(activity.getActivityDef(), name + "--resultset-size");
}
public void logResultCsv(CqlActivity activity, String name) {
this.resultCsvWriter = activity.getNamedWriter(name);
}
public void addResultSetOperators(ResultSetCycleOperator... addingOperators) {
resultSetCycleOperators = (resultSetCycleOperators==null) ? new ResultSetCycleOperator[0]: resultSetCycleOperators;
ResultSetCycleOperator[] newOperators = new ResultSetCycleOperator[resultSetCycleOperators.length + addingOperators.length];
System.arraycopy(resultSetCycleOperators,0,newOperators,0,resultSetCycleOperators.length);
System.arraycopy(addingOperators,0,newOperators,resultSetCycleOperators.length,addingOperators.length);
this.resultSetCycleOperators=newOperators;
}
public void addRowCycleOperators(RowCycleOperator... addingOperators) {
rowCycleOperators = (rowCycleOperators==null) ? new RowCycleOperator[0]: rowCycleOperators;
RowCycleOperator[] newOperators = new RowCycleOperator[rowCycleOperators.length + addingOperators.length];
System.arraycopy(rowCycleOperators,0,newOperators,0,rowCycleOperators.length);
System.arraycopy(addingOperators, 0, newOperators,rowCycleOperators.length,addingOperators.length);
this.rowCycleOperators = newOperators;
}
}

View File

@ -0,0 +1,25 @@
package io.nosqlbench.activitytype.cqld4.statements.core;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
public class ReadyCQLStatementsTemplate {
private List<ReadyCQLStatementTemplate> readyStatementList = new ArrayList<>();
public void addTemplate(ReadyCQLStatementTemplate t) {
this.readyStatementList.add(t);
}
public List<ReadyCQLStatement> resolve() {
return readyStatementList.stream()
.map(ReadyCQLStatementTemplate::resolve)
.collect(Collectors.toList());
}
public int size() {
return readyStatementList.size();
}
}

View File

@ -0,0 +1,57 @@
package io.nosqlbench.activitytype.cqld4.statements.core;
import io.nosqlbench.engine.api.util.Tagged;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
public class TaggedCQLStatementDefs implements Tagged {
private List<CQLStatementDef> statements = new ArrayList<>();
private Map<String,String> tags = new HashMap<>();
private Map<String,String> params = new HashMap<>();
public TaggedCQLStatementDefs(Map<String,String> tags, Map<String,String> params, List<CQLStatementDef> statements) {
this.tags = tags;
this.params = params;
this.statements = statements;
}
public TaggedCQLStatementDefs(Map<String,String> tags, List<CQLStatementDef> statements) {
this.tags = tags;
this.statements = statements;
}
public TaggedCQLStatementDefs(List<CQLStatementDef> statements) {
this.statements = statements;
}
public TaggedCQLStatementDefs() {
}
public List<CQLStatementDef> getStatements() {
return statements;
}
public void setStatements(List<CQLStatementDef> statements) {
this.statements = statements;
}
public Map<String, String> getTags() {
return tags;
}
public void setTags(Map<String, String> tags) {
this.tags = tags;
}
public Map<String, String> getParams() {
return params;
}
public void setParams(Map<String, String> params) {
this.params = params;
}
}

View File

@ -0,0 +1,72 @@
package io.nosqlbench.activitytype.cqld4.statements.core;
import io.nosqlbench.engine.api.activityimpl.ActivityInitializationError;
import io.nosqlbench.nb.api.content.Content;
import io.nosqlbench.nb.api.content.NBIO;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.yaml.snakeyaml.TypeDescription;
import org.yaml.snakeyaml.Yaml;
import org.yaml.snakeyaml.constructor.Constructor;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.function.Function;
@SuppressWarnings("ALL")
public class YamlCQLStatementLoader {
private final static Logger logger = LoggerFactory.getLogger(YamlCQLStatementLoader.class);
List<Function<String, String>> transformers = new ArrayList<>();
public YamlCQLStatementLoader() {
}
public YamlCQLStatementLoader(Function<String, String>... transformers) {
this.transformers.addAll(Arrays.asList(transformers));
}
public AvailableCQLStatements load(String fromPath, String... searchPaths) {
Content<?> yamlContent = NBIO.all().prefix(searchPaths).name(fromPath).extension("yaml").one();
String data = yamlContent.asString();
for (Function<String, String> xform : transformers) {
try {
logger.debug("Applying string transformer to yaml data:" + xform);
data = xform.apply(data);
} catch (Exception e) {
RuntimeException t = new ActivityInitializationError("Error applying string transform to input", e);
logger.error(t.getMessage(), t);
throw t;
}
}
Yaml yaml = getCustomYaml();
try {
Iterable<Object> objects = yaml.loadAll(data);
List<TaggedCQLStatementDefs> stmtListList = new ArrayList<>();
for (Object object : objects) {
TaggedCQLStatementDefs tsd = (TaggedCQLStatementDefs) object;
stmtListList.add(tsd);
}
return new AvailableCQLStatements(stmtListList);
} catch (Exception e) {
logger.error("Error loading yaml from " + fromPath, e);
throw e;
}
}
private Yaml getCustomYaml() {
Constructor constructor = new Constructor(TaggedCQLStatementDefs.class);
TypeDescription tds = new TypeDescription(TaggedCQLStatementDefs.class);
tds.putListPropertyType("statements", CQLStatementDef.class);
constructor.addTypeDescription(tds);
return new Yaml(constructor);
}
}

View File

@ -0,0 +1,17 @@
package io.nosqlbench.activitytype.cqld4.statements.rowoperators;
import com.datastax.oss.driver.api.core.cql.Row;
import io.nosqlbench.activitytype.cqld4.api.RowCycleOperator;
/**
 * Print the current row to stdout.
 */
public class Print implements RowCycleOperator {
@Override
public int apply(Row row, long cycle) {
System.out.println("ROW:" + row);
return 0;
}
}

View File

@ -0,0 +1,35 @@
package io.nosqlbench.activitytype.cqld4.statements.rowoperators;
import io.nosqlbench.activitytype.cqld4.api.RowCycleOperator;
public enum RowCycleOperators {
saverows(SaveThreadRows.class),
savevars(SaveThreadVars.class),
saveglobalvars(SaveGlobalVars.class),
print(Print.class);
private final Class<? extends RowCycleOperator> implClass;
RowCycleOperators(Class<? extends RowCycleOperator> implClass) {
    this.implClass = implClass;
}
public Class<? extends RowCycleOperator> getImplementation() {
return implClass;
}
public RowCycleOperator getInstance() {
try {
return getImplementation().getConstructor().newInstance();
} catch (Exception e) {
throw new RuntimeException(e);
}
}
public static RowCycleOperator newOperator(String name) {
return RowCycleOperators.valueOf(name).getInstance();
}
}
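For example, a workload-configured operator name can be resolved like this (the name is illustrative):

RowCycleOperator op = RowCycleOperators.newOperator("saverows");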

View File

@ -0,0 +1,48 @@
package io.nosqlbench.activitytype.cqld4.statements.rowoperators;
import com.datastax.oss.driver.api.core.cql.ColumnDefinition;
import com.datastax.oss.driver.api.core.cql.ColumnDefinitions;
import com.datastax.oss.driver.api.core.cql.Row;
import io.nosqlbench.activitytype.cqld4.api.RowCycleOperator;
import io.nosqlbench.virtdata.library.basics.core.threadstate.SharedState;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Arrays;
import java.util.HashMap;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import java.util.stream.StreamSupport;
/**
* Save specific variables to the thread local object map
*/
public class Save implements RowCycleOperator {
private final static Logger logger = LoggerFactory.getLogger(Save.class);
ThreadLocal<HashMap<String, Object>> tl_objectMap = SharedState.tl_ObjectMap;
private String[] varnames;
public Save(String... varnames) {
this.varnames = varnames;
}
@Override
public int apply(Row row, long cycle) {
try {
HashMap<String, Object> tlvars = tl_objectMap.get();
for (String varname : varnames) {
Object object = row.getObject(varname);
tlvars.put(varname, object);
}
} catch (Exception e) {
Stream<ColumnDefinition> stream = StreamSupport.stream(row.getColumnDefinitions().spliterator(), false);
logger.error("Unable to save '" + Arrays.toString(varnames) + "' from " + stream.map(d -> d.getName().toString())
.collect(Collectors.joining(",", "[", "]")) + ": " + e.getMessage(), e);
throw e;
}
return 0;
}
}

View File

@ -0,0 +1,31 @@
package io.nosqlbench.activitytype.cqld4.statements.rowoperators;
import com.datastax.oss.driver.api.core.cql.ColumnDefinition;
import com.datastax.oss.driver.api.core.cql.Row;
import io.nosqlbench.activitytype.cqld4.api.RowCycleOperator;
import io.nosqlbench.virtdata.library.basics.core.threadstate.SharedState;
import java.util.concurrent.ConcurrentHashMap;
/**
* Stores the current row into the global object map. Key names are set from the field names. Null values are stored
* as empty strings.
*/
public class SaveGlobalVars implements RowCycleOperator {
ConcurrentHashMap<String, Object> gl_vars = SharedState.gl_ObjectMap;
@Override
public int apply(Row row, long cycle) {
for (ColumnDefinition definition : row.getColumnDefinitions()) {
String name = definition.getName().toString();
Object object = row.getObject(name);
if (object == null){
object = "";
}
gl_vars.put(name,object);
}
return 0;
}
}

View File

@ -0,0 +1,21 @@
package io.nosqlbench.activitytype.cqld4.statements.rowoperators;
import com.datastax.oss.driver.api.core.cql.Row;
import io.nosqlbench.activitytype.cqld4.api.RowCycleOperator;
import io.nosqlbench.activitytype.cqld4.statements.rsoperators.PerThreadCQLData;
import java.util.LinkedList;
/**
* Adds the current row to the per-thread row cache.
*/
public class SaveThreadRows implements RowCycleOperator {
@Override
public int apply(Row row, long cycle) {
LinkedList<Row>rows = PerThreadCQLData.rows.get();
rows.add(row);
return 0;
}
}

View File

@ -0,0 +1,30 @@
package io.nosqlbench.activitytype.cqld4.statements.rowoperators;
import com.datastax.oss.driver.api.core.cql.ColumnDefinition;
import com.datastax.oss.driver.api.core.cql.Row;
import io.nosqlbench.activitytype.cqld4.api.RowCycleOperator;
import io.nosqlbench.virtdata.library.basics.core.threadstate.SharedState;
import java.util.HashMap;
import java.util.List;
/**
* Saves all the values in this row to the thread-local object map,
* with the field names as keys.
*/
public class SaveThreadVars implements RowCycleOperator {
ThreadLocal<HashMap<String, Object>> tl_objectMap = SharedState.tl_ObjectMap;
@Override
public int apply(Row row, long cycle) {
HashMap<String, Object> tlvars= tl_objectMap.get();
for (ColumnDefinition cd : row.getColumnDefinitions()) {
String name = cd.getName().toString();
Object object = row.getObject(name);
tlvars.put(name,object);
}
return 0;
}
}

View File

@ -0,0 +1,25 @@
package io.nosqlbench.activitytype.cqld4.statements.rsoperators;
import com.datastax.oss.driver.api.core.cql.ResultSet;
import com.datastax.oss.driver.api.core.cql.Statement;
import io.nosqlbench.activitytype.cqld4.api.ResultSetCycleOperator;
import io.nosqlbench.activitytype.cqld4.errorhandling.exceptions.ResultSetVerificationException;
/**
* Throws a {@link ResultSetVerificationException} unless there is exactly one row in the result set.
*/
public class AssertSingleRowResultSet implements ResultSetCycleOperator {
@Override
public int apply(ResultSet resultSet, Statement statement, long cycle) {
int rowsIncoming = resultSet.getAvailableWithoutFetching();
if (rowsIncoming<1) {
throw new ResultSetVerificationException(cycle, resultSet, statement, "no row in result set, expected exactly 1");
}
if (rowsIncoming>1) {
throw new ResultSetVerificationException(cycle, resultSet, statement, "more than one row in result set, expected exactly 1");
}
return rowsIncoming;
}
}

View File

@ -0,0 +1,15 @@
package io.nosqlbench.activitytype.cqld4.statements.rsoperators;
import com.datastax.oss.driver.api.core.cql.ResultSet;
import com.datastax.oss.driver.api.core.cql.Statement;
import io.nosqlbench.activitytype.cqld4.api.ResultSetCycleOperator;
import io.nosqlbench.virtdata.library.basics.core.threadstate.SharedState;
public class ClearVars implements ResultSetCycleOperator {
@Override
public int apply(ResultSet resultSet, Statement statement, long cycle) {
SharedState.tl_ObjectMap.get().clear();
return 0;
}
}

View File

@ -0,0 +1,40 @@
package io.nosqlbench.activitytype.cqld4.statements.rsoperators;
import com.datastax.oss.driver.api.core.cql.*;
import io.nosqlbench.activitytype.cqld4.api.ResultSetCycleOperator;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Logs a debug-level summary for each result set, including
 * cycle, available rows, fetch status, and the statement,
 * and logs each row at trace level.
 */
public class CqlResultSetLogger implements ResultSetCycleOperator {
private final static Logger logger = LoggerFactory.getLogger(CqlResultSetLogger.class);
private static String getQueryString(Statement stmt) {
if (stmt instanceof PreparedStatement) {
return "(prepared) " + ((PreparedStatement) stmt).getQuery();
} else if (stmt instanceof SimpleStatement) {
return "(simple) " + ((SimpleStatement) stmt).getQuery();
} else if (stmt instanceof BoundStatement) {
return "(bound) " + ((BoundStatement) stmt).getPreparedStatement().getQuery();
} else {
return "(unknown) " + stmt.toString();
}
}
@Override
public int apply(ResultSet resultSet, Statement statement, long cycle) {
logger.debug("result-set-logger: "
+ " cycle=" + cycle
+ " rows=" + resultSet.getAvailableWithoutFetching()
+ " fetched=" + resultSet.isFullyFetched()
+ " statement=" + getQueryString(statement).stripTrailing()
);
for (Row row : resultSet) {
logger.trace(row.toString());
}
return 0;
}
}

View File

@ -0,0 +1,14 @@
package io.nosqlbench.activitytype.cqld4.statements.rsoperators;
import com.datastax.oss.driver.api.core.cql.Row;
import java.util.LinkedList;
/**
* This contains a linked list of {@link Row} objects. This is per-thread.
* You can use this list as a per-thread data cache for sharing data between
* cycles in the same thread.
*/
public class PerThreadCQLData {
public final static ThreadLocal<LinkedList<Row>> rows = ThreadLocal.withInitial(LinkedList::new);
}
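For example, a later cycle on the same thread can read back rows captured earlier:

LinkedList<Row> rows = PerThreadCQLData.rows.get(); // same-thread cache access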

View File

@ -0,0 +1,23 @@
package io.nosqlbench.activitytype.cqld4.statements.rsoperators;
import com.datastax.oss.driver.api.core.cql.ResultSet;
import com.datastax.oss.driver.api.core.cql.Statement;
import io.nosqlbench.activitytype.cqld4.api.ResultSetCycleOperator;
import io.nosqlbench.virtdata.library.basics.core.threadstate.SharedState;
import java.util.HashMap;
public class PopVars implements ResultSetCycleOperator {
@Override
public int apply(ResultSet resultSet, Statement statement, long cycle) {
Object o = SharedState.tl_ObjectStack.get().pollLast();
if (o instanceof HashMap) {
    SharedState.tl_ObjectMap.set((HashMap) o);
    return 0;
} else {
    throw new RuntimeException("Tried to pop thread local data from stack, but there was none.");
}
}
}

View File

@ -0,0 +1,14 @@
package io.nosqlbench.activitytype.cqld4.statements.rsoperators;
import com.datastax.oss.driver.api.core.cql.ResultSet;
import com.datastax.oss.driver.api.core.cql.Statement;
import io.nosqlbench.activitytype.cqld4.api.ResultSetCycleOperator;
public class Print implements ResultSetCycleOperator {
@Override
public int apply(ResultSet resultSet, Statement statement, long cycle) {
System.out.println("RS:"+ resultSet.toString());
return 0;
}
}

View File

@ -0,0 +1,20 @@
package io.nosqlbench.activitytype.cqld4.statements.rsoperators;
import com.datastax.oss.driver.api.core.cql.ResultSet;
import com.datastax.oss.driver.api.core.cql.Statement;
import io.nosqlbench.activitytype.cqld4.api.ResultSetCycleOperator;
import io.nosqlbench.virtdata.library.basics.core.threadstate.SharedState;
import java.util.HashMap;
public class PushVars implements ResultSetCycleOperator {
@Override
public int apply(ResultSet resultSet, Statement statement, long cycle) {
HashMap<String, Object> existingVars = SharedState.tl_ObjectMap.get();
HashMap<String, Object> topush = new HashMap<>(existingVars);
SharedState.tl_ObjectStack.get().addLast(topush);
return 0;
}
}

View File

@ -0,0 +1,40 @@
package io.nosqlbench.activitytype.cqld4.statements.rsoperators;
import io.nosqlbench.activitytype.cqld4.api.ResultSetCycleOperator;
public enum ResultSetCycleOperators {
pushvars(PushVars.class),
popvars(PopVars.class),
clearvars(ClearVars.class),
trace(TraceLogger.class),
log(CqlResultSetLogger.class),
assert_singlerow(AssertSingleRowResultSet.class),
print(Print.class);
private final Class<? extends ResultSetCycleOperator> implClass;
ResultSetCycleOperators(Class<? extends ResultSetCycleOperator> implClass) {
    this.implClass = implClass;
}
public Class<? extends ResultSetCycleOperator> getImplementation() {
return implClass;
}
public ResultSetCycleOperator getInstance() {
try {
return getImplementation().getConstructor().newInstance();
} catch (Exception e) {
throw new RuntimeException(e);
}
}
public static ResultSetCycleOperator newOperator(String name) {
return ResultSetCycleOperators.valueOf(name).getInstance();
}
}

View File

@ -0,0 +1,16 @@
package io.nosqlbench.activitytype.cqld4.statements.rsoperators;
import com.datastax.oss.driver.api.core.cql.ResultSet;
import com.datastax.oss.driver.api.core.cql.Row;
import com.datastax.oss.driver.api.core.cql.Statement;
import io.nosqlbench.activitytype.cqld4.api.ResultSetCycleOperator;
import java.util.LinkedList;
public class RowCapture implements ResultSetCycleOperator {
@Override
public int apply(ResultSet resultSet, Statement statement, long cycle) {
    // Capture every row of this result set into the per-thread row cache.
    LinkedList<Row> rows = PerThreadCQLData.rows.get();
    for (Row row : resultSet) {
        rows.add(row);
    }
    return 0;
}
}

View File

@ -0,0 +1,94 @@
package io.nosqlbench.activitytype.cqld4.statements.rsoperators;
import com.datastax.oss.driver.api.core.cql.*;
import io.nosqlbench.activitytype.cqld4.api.ResultSetCycleOperator;
import io.nosqlbench.activitytype.cqld4.core.StatementModifier;
import io.nosqlbench.engine.api.util.SimpleConfig;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.FileDescriptor;
import java.io.FileWriter;
import java.io.IOException;
import java.text.SimpleDateFormat;
import java.util.Date;
public class TraceLogger implements ResultSetCycleOperator, StatementModifier {
private final static Logger logger = LoggerFactory.getLogger(TraceLogger.class);
private static SimpleDateFormat sdf = new SimpleDateFormat("HH:mm:ss.SSS");
private final long modulo;
private final String filename;
private final FileWriter writer;
private final ThreadLocal<StringBuilder> tlsb = ThreadLocal.withInitial(StringBuilder::new);
public TraceLogger(SimpleConfig conf) {
this(
conf.getLong("modulo").orElse(1L),
conf.getString("filename").orElse("tracelog")
);
}
public TraceLogger(long modulo, String filename) {
this.modulo = modulo;
this.filename = filename;
try {
if (filename.equals("stdout")) {
writer = new FileWriter(FileDescriptor.out);
} else {
writer = new FileWriter(filename);
}
} catch (IOException e) {
throw new RuntimeException(e);
}
}
@Override
public int apply(ResultSet rs, Statement statement, long cycle) {
if ((cycle%modulo)!=0) {
return 0;
}
ExecutionInfo ei = rs.getExecutionInfo();
QueryTrace qt = ei.getQueryTrace();
StringBuilder sb = tlsb.get();
sb.setLength(0);
sb.append("\n---------------------------- QueryTrace Summary ---------------------------\n");
sb.append("\n Coordinator: ").append(qt.getCoordinator());
sb.append("\n Cycle: ").append(cycle);
sb.append("\nServer-side query duration (us): ").append(qt.getDurationMicros());
sb.append("\n Request type: ").append(qt.getRequestType());
sb.append("\n Start time: ").append(qt.getStartedAt());
sb.append("\n Trace UUID: ").append(qt.getTracingId());
sb.append("\n Params: ").append(qt.getParameters());
sb.append("\n--------------------------------------------------------------------------\n");
sb.append("\n---------------------------- QueryTrace Events ---------------------------\n");
for (TraceEvent event : qt.getEvents()) {
sb.append("\n Date: ").append(sdf.format(new Date(event.getTimestamp())));
sb.append("\n Source: ").append(event.getSource());
sb.append("\nSourceElapsedMicros: ").append(event.getSourceElapsedMicros());
sb.append("\n Thread: ").append(event.getThreadName());
sb.append("\n Activity: ").append(event.getActivity()).append("\n");
}
sb.append("\n--------------------------------------------------------------------------\n");
try {
writer.append(sb.toString());
writer.flush();
} catch (IOException e) {
throw new RuntimeException(e);
}
return 0;
}
@Override
public Statement modify(Statement statement, long cycle) {
if ((cycle%modulo)==0) {
return statement.setTracing(true);
}
return statement;
}
}

View File

@ -0,0 +1,2 @@
io.nosqlbench.virtdata.api.processors.FunctionDocInfoProcessor
io.nosqlbench.nb.annotations.ServiceProcessor

View File

@ -0,0 +1,106 @@
# nb -v run driver=cql yaml=cql-iot tags=phase:schema host=dsehost
description: An IoT workload with more optimal DSE settings
scenarios:
default:
- run driver=cql tags==phase:schema threads==1 cycles==UNDEF
- run driver=cql tags==phase:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
- run driver=cql tags==phase:main cycles===TEMPLATE(main-cycles,10000000) threads=auto
bindings:
machine_id: Mod(<<sources:10000>>); ToHashedUUID() -> java.util.UUID
sensor_name: HashedLineToString('data/variable_words.txt')
time: Mul(<<timespeed:100>>L); Div(<<sources:10000>>L); ToDate()
cell_timestamp: Mul(<<timespeed:100>>L); Div(<<sources:10000>>L); Mul(1000L)
sensor_value: Normal(0.0,5.0); Add(100.0) -> double
station_id: Div(<<sources:10000>>);Mod(<<stations:100>>); ToHashedUUID() -> java.util.UUID
data: HashedFileExtractToString('data/lorem_ipsum_full.txt',800,1200)
blocks:
- tags:
phase: schema
params:
prepared: false
statements:
- create-keyspace: |
create keyspace if not exists <<keyspace:baselines>>
WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '<<rf:1>>'}
AND durable_writes = true;
tags:
name: create-keyspace
- create-table : |
create table if not exists <<keyspace:baselines>>.<<table:iot>> (
machine_id UUID, // source machine
sensor_name text, // sensor name
time timestamp, // timestamp of collection
sensor_value double, //
station_id UUID, // source location
data text,
PRIMARY KEY ((machine_id, sensor_name), time)
) WITH CLUSTERING ORDER BY (time DESC)
AND compression = { 'sstable_compression' : '<<compression:LZ4Compressor>>' }
AND nodesync={'enabled': 'true'}
AND compaction = {
'class': 'TimeWindowCompactionStrategy',
'compaction_window_size': <<expiry_minutes:60>>,
'compaction_window_unit': 'MINUTES',
'split_during_flush': true
};
tags:
name: create-table
- truncate-table: |
truncate table <<keyspace:baselines>>.<<table:iot>>;
tags:
name: truncate-table
- tags:
phase: rampup
params:
cl: <<write_cl:LOCAL_QUORUM>>
statements:
- insert-rampup: |
insert into <<keyspace:baselines>>.<<table:iot>>
(machine_id, sensor_name, time, sensor_value, station_id, data)
values ({machine_id}, {sensor_name}, {time}, {sensor_value}, {station_id}, {data})
using timestamp {cell_timestamp}
idempotent: true
tags:
name: insert-rampup
- tags:
phase: verify
type: read
params:
ratio: 1
cl: <<read_cl:LOCAL_QUORUM>>
statements:
- select-verify: |
select * from <<keyspace:baselines>>.<<table:iot>>
where machine_id={machine_id} and sensor_name={sensor_name} and time={time};
verify-fields: "*, -cell_timestamp"
tags:
name: select-verify
- tags:
phase: main
type: read
params:
ratio: <<read_ratio:1>>
cl: <<read_cl:LOCAL_QUORUM>>
statements:
- select-read: |
select * from <<keyspace:baselines>>.<<table:iot>>
where machine_id={machine_id} and sensor_name={sensor_name}
limit <<limit:10>>
tags:
name: select-read
- tags:
phase: main
type: write
params:
ratio: <<write_ratio:9>>
cl: <<write_cl:LOCAL_QUORUM>>
statements:
- insert-main: |
insert into <<keyspace:baselines>>.<<table:iot>>
(machine_id, sensor_name, time, sensor_value, station_id, data)
values ({machine_id}, {sensor_name}, {time}, {sensor_value}, {station_id}, {data})
using timestamp {cell_timestamp}
idempotent: true
tags:
name: insert-main

View File

@ -0,0 +1,93 @@
---
title: CQL IoT
weight: 2
---
# CQL IoT
## Description
The CQL IoT workload demonstrates a time-series telemetry system as typically found in IoT applications. The bulk of the
traffic is telemetry ingest, which makes this workload useful for establishing steady-state capacity with an actively
managed data lifecycle. This is a steady-state workload, where inserts are 90% of the operations and queries are the remaining 10%.
## Named Scenarios
### default
The default scenario for cql-iot.yaml runs the conventional test phases: schema, rampup, main
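For example, the default scenario can be invoked by naming the workload, and an individual phase can still be run directly (the host name below is a placeholder):

    # run the default scenario end to end
    nb cql-iot host=dsehost
    # or run a single phase by tag
    nb -v run driver=cql yaml=cql-iot tags=phase:schema host=dsehost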
## Testing Considerations
For in-depth testing, this workload will take some time to build up data density where TTLs begin purging expired data.
At this point, the test should be considered steady-state.
## Data Set
### baselines.iot dataset (rampup,main)
- machine_id - 1000 unique values
- sensor_name - 100 symbolic names, from a seed file
- time - monotonically increasing timestamp
- station_id - 100 unique values
- sensor_value - normal distribution, median 100, stddev 5.0
## Operations
### insert (rampup, main)
insert into baselines.iot
(machine_id, sensor_name, time, sensor_value, station_id)
values (?,?,?,?,?)
### query (main)
select * from baselines.iot
where machine_id=? and sensor_name=?
limit 10
## Workload Parameters
This workload has no adjustable parameters when used in the baseline tests.
When used for additional testing, the following parameters should be supported:
- machines - the number of unique sources (default: 1000)
- stations - the number of unique stations (default: 100)
- limit - the limit for rows in reads (default: 10)
- expiry_minutes - the TTL for data in minutes.
- compression - enabled or disabled, to disable, set compression=''
- write_cl - the consistency level for writes (default: LOCAL_QUORUM)
- read_cl - the consistency level for reads (default: LOCAL_QUORUM)
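When supported, these are given as ordinary activity parameters, for example (values are illustrative):

    nb cql-iot expiry_minutes=30 write_cl=LOCAL_ONE read_cl=LOCAL_ONE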
## Key Performance Metrics
Client side metrics are a more accurate measure of the system behavior from a user's perspective. For microbench and
baseline tests, these are the only required metrics. When gathering metrics from multiple server nodes, they should be
kept in aggregate form, for min, max, and average for each time interval in monitoring. For example, the avg p99 latency
for reads should be kept, as well as the min p99 latency for reads. If possible, metrics should be kept in plot form,
with discrete histogram values per interval.
### Client-Side
- read ops/s
- write ops/s
- read latency histograms
- write latency histograms
- exception counts
### Server-Side
- bytes compacted over time
- pending compactions
- active data on disk
- total data on disk
## Notes on Interpretation
- In order for this test to show useful performance contrasts, it has to be ramped to steady-state.
- Ingest of 1G rows yields an on-disk data density of 20.8 GB using default compression settings.

View File

@ -0,0 +1,117 @@
# nb -v run driver=cql yaml=cql-iot tags=phase:schema host=dsehost
description: |
This workload emulates a time-series data model and access patterns.
scenarios:
default:
- run driver=cql tags==phase:schema threads==1 cycles==UNDEF
- run driver=cql tags==phase:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
- run driver=cql tags==phase:main cycles===TEMPLATE(main-cycles,10000000) threads=auto
params:
instrument: TEMPLATE(instrument,false)
bindings:
machine_id: Mod(<<sources:10000>>); ToHashedUUID() -> java.util.UUID
sensor_name: HashedLineToString('data/variable_words.txt')
time: Mul(<<timespeed:100>>L); Div(<<sources:10000>>L); ToDate()
cell_timestamp: Mul(<<timespeed:100>>L); Div(<<sources:10000>>L); Mul(1000L)
sensor_value: Normal(0.0,5.0); Add(100.0) -> double
station_id: Div(<<sources:10000>>);Mod(<<stations:100>>); ToHashedUUID() -> java.util.UUID
data: HashedFileExtractToString('data/lorem_ipsum_full.txt',800,1200)
blocks:
- tags:
phase: schema
params:
prepared: false
statements:
- create-keyspace: |
create keyspace if not exists <<keyspace:baselines>>
WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '<<rf:1>>'}
AND durable_writes = true;
tags:
name: create-keyspace
- create-table : |
create table if not exists <<keyspace:baselines>>.<<table:iot>> (
machine_id UUID, // source machine
sensor_name text, // sensor name
time timestamp, // timestamp of collection
sensor_value double, //
station_id UUID, // source location
data text,
PRIMARY KEY ((machine_id, sensor_name), time)
) WITH CLUSTERING ORDER BY (time DESC)
AND compression = { 'sstable_compression' : '<<compression:LZ4Compressor>>' }
AND compaction = {
'class': 'TimeWindowCompactionStrategy',
'compaction_window_size': <<expiry_minutes:60>>,
'compaction_window_unit': 'MINUTES'
};
tags:
name: create-table
- truncate-table: |
truncate table <<keyspace:baselines>>.<<table:iot>>;
tags:
name: truncate-table
- tags:
phase: rampup
params:
cl: <<write_cl:LOCAL_QUORUM>>
statements:
- insert-rampup: |
insert into <<keyspace:baselines>>.<<table:iot>>
(machine_id, sensor_name, time, sensor_value, station_id, data)
values ({machine_id}, {sensor_name}, {time}, {sensor_value}, {station_id}, {data})
using timestamp {cell_timestamp}
idempotent: true
tags:
name: insert-rampup
params:
instrument: TEMPLATE(instrument-writes,TEMPLATE(instrument,false))
- tags:
phase: verify
type: read
params:
ratio: 1
cl: <<read_cl:LOCAL_QUORUM>>
statements:
- select-verify: |
select * from <<keyspace:baselines>>.<<table:iot>>
where machine_id={machine_id} and sensor_name={sensor_name} and time={time};
verify-fields: "*, -cell_timestamp"
tags:
name: select-verify
params:
instrument: TEMPLATE(instrument-reads,TEMPLATE(instrument,false))
- tags:
phase: main
type: read
params:
ratio: <<read_ratio:1>>
cl: <<read_cl:LOCAL_QUORUM>>
statements:
- select-read: |
select * from <<keyspace:baselines>>.<<table:iot>>
where machine_id={machine_id} and sensor_name={sensor_name}
limit <<limit:10>>
tags:
name: select-read
params:
instrument: TEMPLATE(instrument-reads,TEMPLATE(instrument,false))
- tags:
phase: main
type: write
params:
ratio: <<write_ratio:9>>
cl: <<write_cl:LOCAL_QUORUM>>
statements:
- insert-main: |
insert into <<keyspace:baselines>>.<<table:iot>>
(machine_id, sensor_name, time, sensor_value, station_id, data)
values ({machine_id}, {sensor_name}, {time}, {sensor_value}, {station_id}, {data})
using timestamp {cell_timestamp}
idempotent: true
tags:
name: insert-main
params:
instrument: TEMPLATE(instrument-writes,TEMPLATE(instrument,false))

View File

@ -0,0 +1,77 @@
---
title: CQL Key-Value
weight: 1
---
## Description
The CQL Key-Value workload demonstrates the simplest possible schema with payload data. This is useful for measuring
system capacity most directly in terms of raw operations. As a reference point, it provides some insight into workloads
that are constrained by messaging, threading, and tasking rather than by bulk throughput.
During preload, all keys are set with a value. During the main phase of the workload, random keys from the known
population are upserted with new values which never repeat.
## Operations
### insert (rampup, main)
insert into baselines.keyvalue (key, value) values (?,?);
### read (main)
select * from baselines.keyvalue where key=?key;
## Data Set
### baselines.keyvalue insert (rampup)
- key - text, number as string, selected sequentially up to keycount
- value - text, number as string, selected sequentially up to valuecount
### baselines.keyvalue insert (main)
- key - text, number as string, selected uniformly within keycount
- value - text, number as string, selected uniformly within valuecount
### baselines.keyvalue read (main)
- key - text, number as string, selected uniformly within keycount
## Workload Parameters
This workload has no adjustable parameters when used in the baseline tests.
When used for additional testing, the following parameters should be supported:
- keycount - the number of unique keys
- valuecount - the number of unique values
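When supported, these would be passed as activity parameters, for example (values are illustrative):

    nb cql-keyvalue keycount=10000000 valuecount=10000000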
## Key Performance Metrics
Client side metrics are a more accurate measure of the system behavior from a user's perspective. For microbench and
baseline tests, these are the only required metrics. When gathering metrics from multiple server nodes, they should be
kept in aggregate form, for min, max, and average for each time interval in monitoring. For example, the avg p99 latency
for reads should be kept, as well as the min p99 latency for reads. If possible, metrics should be kept in plot form,
with discrete histogram values per interval.
### Client-Side
- read ops/s
- write ops/s
- read latency histograms
- write latency histograms
- exception counts
### Server-Side
- pending compactions
- bytes compacted
- active data on disk
- total data on disk
## Notes on Interpretation
Once the average ratio of overwrites starts to balance with the rate of compaction, a steady state should be achieved.
At this point, pending compactions and bytes compacted should be mostly flat over time.

View File

@ -0,0 +1,82 @@
# nb -v run driver=cql yaml=cql-keyvalue tags=phase:schema host=dsehost
scenarios:
default:
- run driver=cql tags==phase:schema threads==1 cycles==UNDEF
- run driver=cql tags==phase:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
- run driver=cql tags==phase:main cycles===TEMPLATE(main-cycles,10000000) threads=auto
bindings:
seq_key: Mod(<<keycount:1000000000>>); ToString() -> String
seq_value: Hash(); Mod(<<valuecount:1000000000>>); ToString() -> String
rw_key: <<keydist:Uniform(0,1000000000)->int>>; ToString() -> String
rw_value: Hash(); <<valdist:Uniform(0,1000000000)->int>>; ToString() -> String
blocks:
- name: schema
tags:
phase: schema
params:
prepared: false
statements:
- create-keyspace: |
create keyspace if not exists <<keyspace:baselines>>
WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '<<rf:1>>'}
AND durable_writes = true;
tags:
name: create-keyspace
- create-table: |
create table if not exists <<keyspace:baselines>>.<<table:keyvalue>> (
key text,
value text,
PRIMARY KEY (key)
);
tags:
name: create-table
- name: rampup
tags:
phase: rampup
params:
cl: <<write_cl:LOCAL_QUORUM>>
statements:
- rampup-insert: |
insert into <<keyspace:baselines>>.<<table:keyvalue>>
(key, value)
values ({seq_key},{seq_value});
tags:
name: rampup-insert
- name: verify
tags:
phase: verify
type: read
params:
cl: <<read_cl:LOCAL_QUORUM>>
statements:
- verify-select: |
select * from <<keyspace:baselines>>.<<table:keyvalue>> where key={seq_key};
verify-fields: key->seq_key, value->seq_value
tags:
name: verify
- name: main-read
tags:
phase: main
type: read
params:
ratio: 5
cl: <<read_cl:LOCAL_QUORUM>>
statements:
- main-select: |
select * from <<keyspace:baselines>>.<<table:keyvalue>> where key={rw_key};
tags:
name: main-select
- name: main-write
tags:
phase: main
type: write
params:
ratio: 5
cl: <<write_cl:LOCAL_QUORUM>>
statements:
- main-insert: |
insert into <<keyspace:baselines>>.<<table:keyvalue>>
(key, value) values ({rw_key}, {rw_value});
tags:
name: main-insert

View File

@ -0,0 +1,91 @@
# nb -v cql-tabular rampup-cycles=1E6 main-cycles=1E9
scenarios:
default:
- run driver=cql tags==phase:schema threads==1 cycles==UNDEF
- run driver=cql tags==phase:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
- run driver=cql tags==phase:main cycles===TEMPLATE(main-cycles,10000000) threads=auto
bindings:
# for ramp-up and verify
part_layout: Div(<<partsize:1000000>>); ToString() -> String
clust_layout: Mod(<<partsize:1000000>>); ToString() -> String
data: HashedFileExtractToString('data/lorem_ipsum_full.txt',50,150)
# for read
limit: Uniform(1,10) -> int
part_read: Uniform(0,<<partcount:100>>)->int; ToString() -> String
clust_read: Add(1); Uniform(0,<<partsize:1000000>>)->int; ToString() -> String
# for write
part_write: Hash(); Uniform(0,<<partcount:100>>)->int; ToString() -> String
clust_write: Hash(); Add(1); Uniform(0,<<partsize:1000000>>)->int; ToString() -> String
data_write: Hash(); HashedFileExtractToString('data/lorem_ipsum_full.txt',50,150) -> String
blocks:
- name: schema
tags:
phase: schema
params:
prepared: false
statements:
- create-keyspace: |
create keyspace if not exists <<keyspace:baselines>>
WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '<<rf:1>>'}
AND durable_writes = true;
tags:
name: create-keyspace
- create-table: |
create table if not exists <<keyspace:baselines>>.<<table:tabular>> (
part text,
clust text,
data text,
PRIMARY KEY (part,clust)
);
tags:
name: create-table
- name: rampup
tags:
phase: rampup
params:
cl: <<write_cl:LOCAL_QUORUM>>
statements:
- rampup-insert: |
insert into <<keyspace:baselines>>.<<table:tabular>>
(part,clust,data)
values ({part_layout},{clust_layout},{data})
tags:
name: rampup-insert
- name: verify
tags:
phase: verify
type: read
params:
cl: <<read_cl:LOCAL_QUORUM>>
statements:
- verify-select: |
select * from <<keyspace:baselines>>.<<table:tabular>> where part={part_layout} and clust={clust_layout}
tags:
name: verify-select
- name: main-read
tags:
phase: main
type: read
params:
ratio: 5
cl: <<read_cl:LOCAL_QUORUM>>
statements:
- main-select: |
select * from <<keyspace:baselines>>.<<table:tabular>> where part={part_read} limit {limit};
tags:
name: main-select
- name: main-write
tags:
phase: main
type: write
params:
ratio: 5
cl: <<write_cl:LOCAL_QUORUM>>
statements:
- main-write: |
insert into <<keyspace:baselines>>.<<table:tabular>>
(part, clust, data)
values ({part_write},{clust_write},{data_write})
tags:
name: main-write

View File

@ -0,0 +1,86 @@
---
title: CQL Wide Rows
weight: 3
---
## Description
The CQL Wide Rows workload provides a way to tax a system with wide rows of a given size. This is useful to help
understand underlying performance differences between version and configuration options when using data models that have
wide rows.
For in-depth testing, this workload needs significant density of partitions in combination with fully populated wide
rows. For exploratory or parameter contrasting tests, ensure that the rampup phase is configured correctly to establish
this initial state.
## Data Set
### baselines.widerows dataset (rampup)
- part - text, number in string form, sequentially from 1..1E9
- clust - text, number in string form, sequentially from 1..1E9
- data - text, extract from lorem ipsum between 50 and 150 characters
### baselines.widerows dataset (main)
- part - text, number in string form, sequentially from 1..1E9
- clust - text, number in string form, sequentially from 1..<partsize>
- data - text, extract from lorem ipsum between 50 and 150 characters
## Operations
### insert (rampup, main)
insert into baselines.widerows
(part, clust, data)
values (?,?,?)
### query (main)
select * from baselines.widerows
where part=?
limit 10
## Workload Parameters
This workload has no adjustable parameters when used in the baseline tests.
When used for additional testing, the following parameters should be supported:
- partcount - the number of unique partitions
- partsize - the number of logical rows within a CQL partition
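When supported, these would be supplied as activity parameters against the backing workload file, for example (values are illustrative):

    nb cql-tabular partcount=100 partsize=1000000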
## Key Performance Metrics
Client side metrics are a more accurate measure of the system behavior from a user's perspective. For microbench and
baseline tests, these are the only required metrics. When gathering metrics from multiple server nodes, they should be
kept in aggregate form, for min, max, and average for each time interval in monitoring. For example, the avg p99 latency
for reads should be kept, as well as the min p99 latency for reads. If possible, metrics should be kept in plot form,
with discrete histogram values per interval.
### Client-Side
- read ops/s
- write ops/s
- read latency histograms
- write latency histograms
- exception counts
### Server-Side
- bytes compacted over time
- pending compactions
- active data on disk
- total data on disk
## Notes on Interpretation

View File

@ -0,0 +1,97 @@
# cql driver - advanced features
This is an addendum to the standard CQL Activity Type docs; for those, see
"cql". Use the features in this guide carefully. They are less thoroughly
documented, since they are less commonly used than the main CQL features.
### ResultSet and Row operators
Within the CQL Activity type, in synchronous mode (activities without the
async= parameter), you have the ability to attach operators to a given
statement such that it will get per-statement handling. These operators
are ways of interrogating the result of an operation, saving values, or
managing other side-effects for specific types of testing.
When enabled for a statement, operators are applied in this order:
1. Activity-level ResultSet operators are applied in specified order.
2. Statement-level ResultSet operators are applied in specified order.
3. Activity-level Row operators are applied in specified order.
4. Statement-level Row operators are applied in specified order.
The result set handling does not go to any extra steps of making
a copy of the data. When a row is read from the result set,
it is consumed from it. Thus, if you want to do anything with
row data, you must apply a row operator as explained below.
### CQL Statement Parameters
- **rsoperators** - If provided as a CQL statement param, then the
list of operator names that follow, separated by a comma, will
be used to attach ResultSet operators to the given statement.
Such operators act on the whole result set of a statement.
- **rowoperators** - If provided as a CQL statement param, then the
list of operator names that follow, separated by a comma, will
be used to attach Row operators to the given statement.
## Available ResultSet Operators
- pushvars - Push a copy of the current thread local variables onto
the thread-local stack. This does nothing with the ResultSet data,
but is meant to be used for stateful management of these in
conjunction with the row operators below.
- popvars - Pop the last thread local variable set from the thread-local
stack into vars, replacing the previous content. This does nothing
with the ResultSet data.
- clearvars - Clears the contents of the thread local variables. This
does nothing with the ResultSet data.
- trace - Flags a statement to be traced on the server-side and then
logs the details of the trace to the trace log file.
- log - Logs basic data to the main log. This is useful to verify that
operators are loading and triggering as expected.
- assert_singlerow - Throws an exception (ResultSetVerificationException)
if the ResultSet has more or less than one row.
Examples:
```
statements:
- s1: |
a statement
rsoperators: pushvars, clearvars
```
## Available Row Operators:
- savevars - Copies the values of the row into the thread-local variables.
- saverows - Copies the rows into a special CQL-only thread local row state.
Examples:
```
statements:
- s2: |
a statement
rowoperators: saverows
```
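Statement-level ResultSet and Row operators can also be combined on one statement, in which case they are applied in the order documented above (a sketch, reusing the placeholder statement form from the examples):
```
statements:
 - s3: |
    a statement
   rsoperators: pushvars
   rowoperators: savevars
```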
## Injecting additional Queries (Future)
It is possible to inject new operations to an activity. However, such operations are _indirect_ to cycles, since they
must be based on the results of other operations. As such, they will not be represented in cycle output or other
advanced features. This is a specific feature for the CQL activity -- implemented internal to the way a CQL cycle is
processed. A future version of NoSQLBench will provide a more uniform way to achieve this result across activity types.
For now, remember that this is a CQL-only capability.
- subquery-statement - Adds additional operations to the current cycle, based
  on the contents of the CQL-only thread-local row state. The value of this
  parameter is the name of a statement in the current YAML. Each row is
  consumed from the saved row list, and a new operation is added to the
  current cycle.
- subquery-concurrency - Allow subqueries to execute with concurrency, up to
the level specified.
default: 1

View File

@ -0,0 +1,198 @@
# cql error handling
The error handling facility utilizes a type-aware error handler
provided by nosqlbench. However, it is much more modular and configurable
than most error handlers found in other testing tools. The trade-off here
is that so many options may bewilder newer users. If you agree, then
simply use one of these basic recipes in your activity parameters:
# error and stop on any exception
# incidentally, this is the same as the deprecated diagnose=true option
errors=stop
# error and stop for (usually) unrecoverable errors
# warn and retry everything else (this is actually the default)
errors=stop,retryable->retry
# record histograms for WriteTimeoutException, error and stop
# for everything else.
errors=stop,WriteTimeoutException:histogram
As you can see, the error handling format is pretty basic. Behind this basic
format is a modular and flexible configuration scheme that should allow for
either simple or advanced testing setups. The errors value is simply a list of
error-to-handler-verb mappings, but it also allows a single verb to be
specified to cover all error types. Going from left to right, each mapping is
applied in order. You can use any of ':', '->', or '=' as the error-to-verb
assignment operator.
Anytime you assign a value to the *errors* parameter for a cql activity, you are
replacing the default 'stop,retryable->retry,unverified->stop' configuration.
That is, each time this value is assigned, a new error handler is configured and
installed according to the new value.
### errors= parameter format
The errors parameter contains a comma-separated list of one or more
handler assignments where the error can be in any of these forms:
- group name ( "unapplied" | "retryable" | "unverified" )
- a single exception name like 'WriteTimeoutException', or a substring of
that which is long enough to avoid ambiguity (only one match allowed)
- A regex, like '.*WriteTimeout.*' (multiple matches allowed)
The verb can be any of the named starting points in the error handler
stack, as explained below.
As a special case, if the handler assignment consists of only a single word,
then it is assumed to be the default handler verb. This gets applied
as a last resort to any errors which do not match another handler by class
type or parent class type. This allows for simple hard wiring of a
handler default for all non-specific errors in the form:
# force the test to stop with any error, even retryable ones
errors=stop
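Combining these forms, one errors value can mix a default verb, group names, and regex matches (the regex below is illustrative):

    # retry retryable errors, count timeout-like errors, stop on everything else
    errors=stop,retryable->retry,.*Timeout.*->count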
### Error Handler Verbs
When an error occurs, you can control how it is handled for the most part.
This is the error handler stack:
- **stop** - logs an error, and then rethrows the causing exception,
causing nosqlbench to shutdown the current scenario.
- **warn** - log a warning in the log, with details about the error
and associated statement.
- **retry** - Retry the operation if the number of retries hasn't been
used up *and* the causing exception falls in the set of
*retryable* errors.
- **histogram** - keep a histogram of the exception counts, under the
name errorhistos.classname, using the simple class name.
The magnitude of these histos is how long the operation was pending
before the related error occurred.
- **count** - keep a count in metrics for the exception, under the name
errorcounts.classname, using the simple class name.
- **ignore** - do nothing, do not even retry or count
Each handling verb above is ordered from the most invasive to least
invasive starting at the top. With the exception of the **stop**
handler, the rest of them will be applied to an error all the way
to the bottom. For now, the error handling stack is exactly as above.
You can't modify it, although it may be made configurable in the future.
One way to choose the right handler is to ask "How serious is this type
of error to the test results if it happens?" In general, it is best to be
conservative and choose a more aggressive handler setting, unless you
specifically want to measure how often a given error happens, for example.
Each exception type will have one and only one error handler at all times.
No matter how you set an error handler for a class, only the most
recently assigned handler stack will be active for it. This might be
important to keep in mind when you make multiple assignments to potentially
overlapping sets of error types. In any case, the default 'stop' handler
will always catch an error that does not otherwise have a more specific
handler assigned to it.
##### Error Types
The errors that can be handled are simply all the exception types that
can be thrown by either the DataStax Java Driver for DSE, *or* the
nosqlbench client itself. This includes errors that indicate a potentially
intermittent failure condition. It also includes errors that are more
permanent in nature, like WriteFailure, which would continue to occur
on subsequent retries without some form of intervention. The nosqlbench
application will also generate some additional exceptions that capture
common error cases that the Java driver doesn't or shouldn't have a
special case for, but which may be important for nosqlbench testing purposes.
In nosqlbench, all error handlers are specific to a particular kind of
exception that you would catch in a typical application that uses DSE,
although you can tell a handler to take care of a whole category
of problems as long as you know the right name to use.
##### Assigned by Java Exception Type
Error handlers can be assigned to a common parent type in order to also handle
all known subtypes, hence the default on the top line applies to all of the
driver exceptions that do not have a more specific handler assigned, either
by a closer parent or directly.
##### Assigning by Error Group Name
Error types for which you would commonly assign the same handling behavior
are also grouped in predefined names. If a handler is assigned to one
of the group names, then the handler is assigned all of the exceptions
in the group individually. For example, 'errors=retryable=stop'
### Recognized Exceptions
The whole hierarchy of exceptions as of DSE Driver 3.2.0 is as follows,
with the default configuration shown.
DriverException -> stop
FrameTooLongException
CodecNotFoundException
AuthenticationException
TraceRetrievalException
UnsupportedProtocolVersionException
NoHostAvailableException -> retry (group: retryable)
QueryValidationException (abstract)
InvalidQueryException
InvalidConfigurationInQueryException
UnauthorizedException
SyntaxError
AlreadyExistsException
UnpreparedException
InvalidTypeException
QueryExecutionException (abstract)
UnavailableException
BootstrappingException -> retry (group: retryable)
OverloadedException -> retry (group: retryable)
TruncateException
QueryConsistencyException (abstract)
WriteTimeoutException -> retry (group: retryable)
WriteFailureException -> retry (group: retryable)
ReadFailureException
ReadTimeoutException
FunctionExecutionException
DriverInternalError
ProtocolError
ServerError
BusyPoolException
ConnectionException
TransportException
OperationTimedOutException -> retry (group: retryable)
PagingStateException
UnresolvedUserTypeException
UnsupportedFeatureException
BusyConnectionException
EbdseException (abstract) -> stop
CQLResultSetException (abstract)
UnexpectedPagingException
ResultSetVerificationException
RowVerificationException
ChangeUnappliedCycleException (group:unapplied)
RetriesExhaustedCycleException -> count
##### Additional Exceptions
The following exceptions are synthesized directly by nosqlbench, but get
handled alongside the normal exceptions as explained above.
1. ChangeUnappliedException - The change unapplied condition is important to
detect when it is not expected, although some testing may intentionally send
changes that can't be applied. For this reason, it is kept as a separately
controllable error group "unapplied".
2. UnexpectedPaging - The UnexpectedPaging exception is meant to keep users from
being surprised when there is paging activity in the workload, as this can have
other implications for tuning and performance. See the details on the
**maxpages** parameter, and the *fetch size* parameter in the java driver, for
details; a usage sketch follows this list.
3. Unverified\* Exceptions - For data set verification; These exceptions
indicate when a cqlverify activity has found rows that differ from what
was expected.
4. RetriesExhaustedException - Indicates that all retries were exhausted before
a given operation could complete successfully.
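As referenced above for UnexpectedPaging, a sketch of raising the paging headroom, assuming the **maxpages** and **fetchsize** activity parameters from the cqld4 driver docs ('myworkload' is a placeholder):

    ... driver=cqld4 workload=myworkload fetchsize=50K maxpages=10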

View File

@ -0,0 +1,42 @@
DriverException -> stop
1 FrameTooLongException
2 CodecNotFoundException
3 AuthenticationException
4 TraceRetrievalException
5 UnsupportedProtocolVersionException
6 NoHostAvailableException
7 QueryValidationException (abstract)
8 InvalidQueryException
9 InvalidConfigurationInQueryException
10 UnauthorizedException
11 SyntaxError
12 AlreadyExistsException
13 UnpreparedException
14 InvalidTypeException
15 QueryExecutionException (abstract) -> retry
16 UnavailableException
17 BootstrappingException
18 OverloadedException
19 TruncateException
20 QueryConsistencyException (abstract)
21 WriteTimeoutException
22 WriteFailureException
23 ReadFailureException
24 ReadTimeoutException
25 FunctionExecutionException
26 DriverInternalError
27 ProtocolError
28 ServerError
29 BusyPoolException
30 ConnectionException
31 TransportException
32 OperationTimedOutException
33 PagingStateException
34 UnresolvedUserTypeException
35 UnsupportedFeatureException
36 BusyConnectionException
41 EbdseCycleException (abstract) -> stop
37 ChangeUnappliedCycleException
38 ResultSetVerificationException
39 RowVerificationException (abstract)
40 UnexpectedPagingException

View File

@ -0,0 +1,342 @@
# cql driver
This is the CQL version 4 driver for NoSQLBench. As it gets more use, we will make it the primary driver under the 'cql'
name. For now, 'cql' refers to the version 1.9 driver, while 'cqld4' refers to this one. The drivers will have
identical features where possible, but new enhancements will be targeted at this one first.
This is a driver which allows for the execution of CQL statements. It supports both sync and async modes, with
detailed metrics provided for both.
### Example activity definitions
Run a cql activity named 'cql1', with definitions from activities/cqldefs.yaml
~~~
... driver=cql alias=cql1 workload=cqldefs
~~~
Run a cql activity defined by cqldefs.yaml, but with shortcut naming
~~~
... driver=cql workload=cqldefs
~~~
Only run statement groups which match a tag regex
~~~
... driver=cql workload=cqldefs tags=group:'ddl.*'
~~~
Run the matching 'dml' statements, with 100 cycles, from [1000..1100)
~~~
... driver=cql workload=cqldefs tags=group:'dml.*' cycles=1000..1100
~~~
This last example shows that the cycle range is [inclusive..exclusive),
to allow for stacking test intervals. This is standard across all
activity types.
### CQL ActivityType Parameters
- **cqldriver** - default: dse - The type of driver to use, either dse, or oss. If you need DSE-specific features, use
the dse driver. If you are connecting to an OSS Apache Cassandra cluster, you must use the oss driver. The oss driver
option is only available in nosqlbench.
- **host** - The host or hosts to use for connection points to
the cluster. If you specify multiple values here, use commas
with no spaces.
Examples:
- `host=192.168.1.25`
- `host=192.168.1.25,testhost42`
- **workload** - The workload definition which holds the schema and statement defs.
see workload yaml location for additional details
(no default, required)
- **port** - The port to connect with
- **cl** - An override to consistency levels for the activity. If
this option is used, then all consistency levels will be replaced
by this one for the current activity, and a log line explaining
the difference with respect to the yaml will be emitted.
This is not a dynamic parameter. It will only be applied at
activity start.
- **cbopts** - default: none - this is how you customize the cluster
settings for the client, including policies, compression, etc. This
is a string of *Java*-like method calls just as you would use them
in the Cluster.Builder fluent API. They are evaluated inline
with the default Cluster.Builder options not covered below.
Example: cbopts=".withCompression(ProtocolOptions.Compression.NONE)"
- **whitelist** default: none - Applies a whitelist policy to the load balancing
policy in the driver. If used, a WhitelistPolicy(RoundRobinPolicy())
will be created and added to the cluster builder on startup.
Examples:
- whitelist=127.0.0.1
- whitelist=127.0.0.1:9042,127.0.0.2:1234
- **retrypolicy** default: none - Applies a retry policy in the driver
The only option supported for this version is `retrypolicy=logging`,
which uses the default retry policy, but with logging added.
- **reconnectpolicy** default: none - Applies a reconnection policy in the driver
Supports either `reconnectpolicy=exponential(minDelayInMs,maxDelayInMs)` or `reconnectpolicy=constant(delayInMs)`.
The driver reconnects using this policy when the entire cluster becomes unavailable.
- **pooling** default: none - Applies the connection pooling options
to the policy.
Examples:
- `pooling=4:10`
keep between 4 and 10 connections to LOCAL hosts
- `pooling=4:10,2:5`
keep 4-10 connections to LOCAL hosts and 2-5 to REMOTE
- `pooling=4:10:2000`
keep between 4-10 connections to LOCAL hosts with
up to 2000 requests per connection
- `pooling=5:10:2000,2:4:1000` keep between 5-10 connections to
LOCAL hosts with up to 2000 requests per connection, and 2-4
connection to REMOTE hosts with up to 1000 requests per connection
Additionally, you may provide the following options on pooling. Any
of these that are provided must appear in this order:
`,heartbeat_interval_s:n,idle_timeout_s:n,pool_timeout_ms:n`, so a
full example with all options set would appear as:
`pooling=5:10:2000,2:4:1000,heartbeat_interval_s:30,idle_timeout_s:120,pool_timeout_ms:5`
- **socketoptions** default: none - Applies any of the valid socket
options to the client when the session is built. Each of the options
uses the long form of the name, with either a numeric or boolean
value. Individual sub-parameters should be separated by a comma, and
the parameter names and values can be separated by either equals or a
colon. All of these values may be changed:
- read_timeout_ms
- connect_timeout_ms
- keep_alive
- reuse_address
- so_linger
- tcp_no_delay
- receive_buffer_size
- send_buffer_size
Examples:
- `socketoptions=read_timeout_ms=23423,connect_timeout_ms=4444`
- `socketoptions=tcp_no_delay=true`
- **tokens** default: unset - Only executes statements that fall within
any of the specified token ranges. Others are counted in metrics
as skipped-tokens, with a histogram value of the cycle number.
Examples:
- tokens=1:10000,100000:1000000
- tokens=1:123456
- **maxtries** - default: 10 - how many times an operation may be
attempted before it is disregarded
- **maxpages** - default: 1 - how many pages can be read from a query which
is larger than the fetchsize. If more than this number of pages
is required for such a query, then an UnexpectedPaging exception
is passed to the error handler as explained below.
- **fetchsize** - controls the driver parameter of the same name.
Suffixed units can be used here, such as "50K". If this parameter
is not present, then the driver option is not set.
- **cycles** - standard, however the cql activity type will default
this to however many statements are included in the current
activity, after tag filtering, etc.
- **username** - the user to authenticate as. This option requires
that one of **password** or **passfile** also be defined.
- **password** - the password to authenticate with. This will be
ignored if passfile is also present.
- **passfile** - the file to read the password from. The first
line of this file is used as the password.
- **ssl** - specifies the type of the SSL implementation.
Disabled by default, possible values are `jdk`, and `openssl`.
Depending on type, additional parameters need to be provided.
- **tlsversion** - specify the TLS version to use for SSL.
Examples:
- `tlsversion=TLSv1.2` (the default)
- **truststore** (`jdk`, `openssl`) - specify the path to the SSL truststore.
Examples:
- `truststore=file.truststore`
- **tspass** (`jdk`, `openssl`) - specify the password for the SSL truststore.
Examples:
- `tspass=mypass`
- **keystore** (`jdk`) - specify the path to the SSL keystore.
Examples:
- `keystore=file.keystore`
- **kspass** (`jdk`) - specify the password for the SSL keystore.
Examples:
- `kspass=mypass`
- **keyFilePath** (`openssl`) - path to the OpenSSL key file.
Examples:
- `keyFilePath=file.key`
- **keyPassword** (`openssl`) - key password;
Examples:
- `keyPassword=password`
- **caCertFilePath** (`openssl`) - path to the X509 CA certificate file.
Examples:
- `caCertFilePath=cacert.pem`
- **certFilePath** (`openssl`) - path to the X509 certificate file.
Examples:
- `certFilePath=ca.pem`
- **jmxreporting** - enable JMX reporting if needed.
Examples:
- `jmxreporting=true`
- `jmxreporting=false` (the default)
- **alias** - this is a standard nosqlbench parameter, however the cql type will use the workload value also as the
alias value when not specified.
- **errors** - error handler configuration.
(default errors=stop,retryable->retry,unverified->stop)
Examples:
- errors=stop,WriteTimeoutException=histogram
- errors=count
- errors=warn,retryable=count
See the separate help on 'cqlerrors' for detailed
configuration options.
- **defaultidempotence** - sets default idempotence on the
driver options, but only if it has a value.
(default unset, valid values: true or false)
- **speculative** - sets the speculative retry policy on the cluster.
(default unset)
This can be in one of the following forms:
- pT:E:L - where :L is optional and
T is a floating point threshold between 0.0 and 100.0 and
E is an allowed number of concurrent speculative executions and
L is the maximum latency tracked in the tracker instance
(L defaults to 15000 when left out)
Examples:
- p99.8:5:15000ms - 99.8 percentile, 5 executions, 15000ms max tracked
- p98:2:10000ms - 98.0 percentile, 2 executions allowed, 10s max tracked
- Tms:E - where :E is optional and
T is a constant threshold latency and
E is the allowed number of concurrent speculative retries
(E defaults to 5 when left out)
Examples:
- 100ms:5 - constant threshold of 100ms and 5 allowed executions
- **seq** - selects the statement sequencer used with statement ratios.
(default: bucket)
(options: concat | bucket | interval)
The concat sequencer repeats each statement in order until the ratio
is achieved.
The bucket sequencer uses simple round-robin distribution to plan
statement ratios, a simple but unbalanced form of interleaving.
The interval sequencer apportions statements over time and then by
order of appearance for ties. This has the effect of interleaving
statements from an activity more evenly, but is less obvious in how
it works.
All of the sequencers create deterministic schedules which use an internal
lookup table for indexing into a list of possible statements.
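For example, with two statements s1 (ratio 2) and s2 (ratio 1), the concat sequencer plans s1,s1,s2, while the bucket sequencer interleaves them as s1,s2,s1. A minimal sketch with placeholder statement bodies:

    statements:
     - s1: |
        a read statement
       ratio: 2
     - s2: |
        a write statement
       ratio: 1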
- **trace** - enables a trace on a subset of operations. This is disabled
by default.
Examples:
`trace=modulo:100,filename:trace.log`
The above traces every 100th cycle to a file named trace.log.
`trace=modulo:1000,filename:stdout`
The above traces every 1000th cycle to stdout.
If the trace log is not specified, then 'tracelog' is assumed.
If the filename is specified as stdout, then traces are dumped to stdout.
- **clusterid** - names the configuration to be used for this activity. Within
a given scenario, any activities that use the same name for clusterid will
share a session and cluster.
default: 'default'
- **drivermetrics** - enable reporting of driver metrics.
default: false
- **driverprefix** - set the metrics name that will prefix all CQL driver metrics.
default: 'driver.*clusterid*.'
The clusterid specified is included so that separate cluster and session
contexts can be reported independently for advanced tests.
- **usercodecs** - enable the loading of user codec libraries
for more details see: com.datastax.codecs.framework.UDTCodecInjector in the nosqlbench
code base. This is for dynamic codec loading with user-provided codecs mapped
via the internal UDT APIs.
default: false
- **secureconnectbundle** - used to connect to CaaS, accepts a path to the secure connect bundle
that is downloaded from the CaaS UI.
Examples:
- `secureconnectbundle=/tmp/secure-connect-my_db.zip`
- `secureconnectbundle="/home/automaton/secure-connect-my_db.zip"`
- **insights** - Set to false to disable the driver from sending insights monitoring information
- `insights=false`
- **tickduration** - sets the tickDuration (milliseconds) of HashedWheelTimer of the
java driver. This timer is used to schedule speculative requests.
Examples:
- `tickduration=10`
- `tickduration=100` (driver default value)
- **compression** - sets the transport compression to use for this
activity. Valid values are 'LZ4' and 'SNAPPY'. Both types are bundled
with EBDSE.
- **showcql** - logs cql statements as INFO (to see INFO messages in stdout use -v or greater) Note: this is expensive
and should only be done to troubleshoot workloads. Do not use `showcql` for your tests.
### CQL YAML Parameters
A uniform YAML configuration format was introduced with engineblock 2.0.
As part of this format, statement parameters were added for the CQL Activity Type.
These parameters will be consolidated with the above parameters in time, but for
now **they are limited to a YAML params block**:
params:
ratio: 1
# Sets the statement ratio within the operation sequencer
# scheme. Integers only.
  # When preparing the operation order (AKA sequencing), this ratio
  # determines the relative frequency of the associated statement.
cl: ONE
# Sets the consistency level, using any of the standard
# identifiers from com.datastax.driver.core.ConsistencyLevel,
# any one of:
# LOCAL_QUORUM, ANY, ONE, TWO, THREE, QUORUM, ALL,
# EACH_QUORUM, SERIAL, LOCAL_SERIAL, LOCAL_ONE
prepared: true
# By default, all statements are prepared. If you are
# creating schema, set this to false.
idempotent: false
# For statements that are known to be idempotent, set this
# to true
instrument: false
# If a statement has instrument set to true, then
# individual Timer metrics will be tracked for
# that statement for both successes and errors,
# using the given statement name.
logresultcsv: true
OR
logresultcsv: myfilename.csv
# If a statement has logresultcsv set to true,
# then individual operations will be logged to a CSV file.
# In this case the CSV file will be named as
# <statement-name>--results.csv.
# If the value is present and not "true", then the value will
# be used as the name of the file.
#
# The format of the file is:
# <cycle>,(SUCCESS|FAILURE),<nanos>,<rows-fetched>,(<error-class,NONE)
# NOTES:
# 1) BE CAREFUL with this setting. A single logged line per
# result is not useful for high-speed testing as it will
# impose IO loads on the client to slow it down.
# 2) BE CAREFUL with the name. It is best to just pick good
# names for your statement defs so that everything remains
# coherent and nothing gets accidentally overwritten.
# 3) If logresultcsv is provided at the activity level, it
  # applies to all statements, and the only valid values
# there are true and false.
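Putting a few of these together, a single statement might enable instrumentation and per-op result logging at the statement level (statement name and body are placeholders):

    statements:
     - read-one: |
        a select statement
       instrument: true
       logresultcsv: true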
### Metrics
- alias.result - A timer which tracks the performance of an op result only.
This is the async get on the future, broken out as a separate step.
- alias.result-success - A timer that records rate and histograms of the time
it takes from submitting a query to completely reading the result
set that it returns, across all pages. This metric is only counted
for non-exceptional results, while the result metric above includes
all operations.
- alias.bind - A timer which tracks the performance of the statement
binding logic, including the generation of data immediately prior to binding
- alias.execute - A timer which tracks the performance of op submission
only. This is the async execution call, broken out as a separate step.
- alias.tries - A histogram of how many tries were required to get a
completed operation
- alias.pages - A timer which tracks the performance of paging, specific
to more than 1-page query results. i.e., if all reads return within 1
page, this metric will not have any data.
- alias.strides - A timer around each stride of operations within a thread
- alias.skipped-tokens - A histogram that records the count and cycle values
of skipped tokens.
## YAML Examples
Please see the bundled activities with nosqlbench for examples.

View File

@ -0,0 +1,39 @@
# You can run this file with this command line to see the values printed to stdout:
# ./ebdse run driver=stdout yaml=bindings/date.yaml cycles=10
# This file demonstrates different types of timestamp recipes
# that you can use with virtdata. (The bindings used in ebdse)
# If you want to control the output, uncomment and edit the statement template below
# and modify the named anchors to suit your output requirements.
#statements:
# example1: "{fullname}\n"
bindings:
# All uncommented lines under this are indented, so they become named bindings below
# the entry above
# Normally, the value that you get with a cycle starts at 0.
cycleNum: Identity();
# here we convert the cycle number to a Date by casting.
id: Identity(); ToDate();
# Date during 2017 (number of milliseconds in a year: 31,536,000,000)
date: StartingEpochMillis('2017-01-01 23:59:59'); AddHashRange(0L,31536000000L); StringDateWrapper("YYYY-MM-dd")
# Example output:
# date : 2017-09-17
# date : 2017-08-01
# date : 2017-04-22
# date : 2017-04-09
# date : 2017-05-28
# date : 2017-08-06
# date : 2017-07-05
# date : 2017-02-07
# date : 2017-05-25
# date : 2017-12-02

View File

@ -0,0 +1,28 @@
# You can run this file with this command line to see the values printed to stdout:
# ./ebdse run driver=stdout yaml=bindings/expr.yaml cycles=10
# This file demonstrates different types of timestamp recipes
# that you can use with virtdata. (The bindings used in ebdse)
# If you want to control the output, uncomment and edit the statement template below
# and modify the named anchors to suit your output requirements.
#statements:
# example1: "{fullname}\n"
bindings:
# flight times based on hour / minute / second computation
hour: HashRange(0,2); ToInt()
minute: Shuffle(0,2); ToInt()
second: HashRange(0,60); ToInt()
flightDate: HashRange(0,2); Mul(3600000); Save('hour'); Shuffle(0,2); Mul(60000); Save('minute'); HashRange(0,60); Mul(1000); Save('second'); Expr('hour + minute + second'); StartingEpochMillis('2018-10-02 04:00:00'); ToDate(); ToString()
flightDateFixed: Save('cycle'); HashRange(0,2); Mul(3600000); Load('cycle'); Save('hour'); Shuffle(0,2); Mul(60000); Save('minute'); Load('cycle'); HashRange(0,60); Mul(1000); Save('second'); Expr('hour + minute + second'); StartingEpochMillis('2018-10-02 04:00:00'); ToDate(); ToString()
flightDateLong: Save('cycle'); HashRange(0,2); Mul(3600000); Load('cycle'); Save('hour'); Shuffle(0,2); Mul(60000); Save('minute'); Load('cycle'); HashRange(0,60); Mul(1000); Save('second'); Expr('hour + minute + second'); ToString()
# status that depends on score
riskScore: Normal(0.0,5.0); Clamp(1, 100); Save('riskScore') -> int
status: |
Expr('riskScore > 90 ? 0 : 1') -> long; ToBoolean(); ToString()
status_2: |
ToInt(); Expr('riskScore >90 ? 0 : 1') -> int; WeightedStrings('accepted:1;rejected:1')

View File

@ -0,0 +1,172 @@
# You can run this file with this command line to see the values printed to stdout:
# ./ebdse run driver=stdout yaml=bindings/text.yaml cycles=10
# This file demonstrates different types of timestamp recipes
# that you can use with virtdata. (The bindings used in ebdse)
# If you want to control the output, uncomment and edit the statement template below
# and modify the named anchors to suit your output requirements.
#statements:
# example1: "{fullname}\n"
bindings:
# All uncommented lines under this are indented, so they become named bindings below
# the entry above
# Normally, the value that you get with a cycle starts at 0.
cycleNum: Identity();
# here we convert the cycle number to a text by casting.
id: Identity(); ToString()
## Names
# See http://docs.virtdata.io/functions/funcref_premade/
# Full name
fullname: FullNames()
# Example output:
# fullname : Norman Wolf
# fullname : Lisa Harris
# fullname : John Williams
# fullname : Freda Gaytan
# fullname : Violet Ferguson
# fullname : Larry Roberts
# fullname : Andrew Daniels
# fullname : Jean Keys
# fullname : Mark Cole
# fullname : Roberta Bounds
# Name with last name first
fullname_lastname_first: Template('{}, {}', LastNames(), FirstNames())
# Example output:
# fullname_lastname_first : Miracle, Lisa
# fullname_lastname_first : Wolf, John
# fullname_lastname_first : Harris, Freda
# fullname_lastname_first : Williams, Violet
# fullname_lastname_first : Gaytan, Larry
# fullname_lastname_first : Ferguson, Andrew
# fullname_lastname_first : Roberts, Jean
# fullname_lastname_first : Daniels, Mark
# fullname_lastname_first : Keys, Roberta
# fullname_lastname_first : Cole, Timothy
# Phone
phone: compose HashRange(10000000000L,99999999999L); Combinations('0-9;0-9;0-9;-;0-9;0-9;0-9;-;0-9;0-9;0-9;0-9')
# Example output:
# $ ebdse run driver=stdout yaml=example-bindings format=readout cycles=10
# phone : 241-478-6787
# phone : 784-482-7668
# phone : 804-068-5502
# phone : 044-195-5579
# phone : 237-202-5601
# phone : 916-390-8911
# phone : 550-943-7851
# phone : 762-031-1362
# phone : 234-050-2563
# phone : 312-672-0039
## Career
career: HashedLineToString('data/careers.txt')
# Example output:
# career : Paper Goods Machine Setters, Operators, and Tenders
# career : Training and Development Specialists
# career : Embossing Machine Set-Up Operators
# career : Airframe-and-Power-Plant Mechanics
# career : Sales Representatives, Agricultural
# career : Automotive Body and Related Repairers
# career : Community Health Workers
# career : Billing, Posting, and Calculating Machine Operators
# career : Data Processing Equipment Repairers
# career : Sawing Machine Setters and Set-Up Operators
## Job Description
jobdescription: Add(0); HashedLineToString('data/jobdescription.txt')
# Example output:
# jobdescription: Add(0); HashedLineToString('data/jobdescription.txt')
## Weighted enumerated values
# Sorting hat (even distribution)
house: WeightedStrings('Gryffindor:0.2;Hufflepuff:0.2;Ravenclaw:0.2;Slytherin:0.2')
# Example output:
# house : Hufflepuff
# house : Ravenclaw
# house : Slytherin
# house : Slytherin
# house : Gryffindor
# house : Hufflepuff
# house : Ravenclaw
# house : Ravenclaw
# house : Hufflepuff
# house : Hufflepuff
## Weighted prefixes
prefix: WeightedStrings('Mr:0.45;Mrs:0.25;Ms:0.1;Miss:0.1;Dr:0.05')
# Example output:
# prefix : Mr
# prefix : Mrs
# prefix : Miss
# prefix : Miss
# prefix : Mr
# prefix : Mrs
# prefix : Mrs
# prefix : Mrs
# prefix : Mr
# prefix : Mr
# prefix : Mr
# prefix : Mr
# prefix : Mrs
# prefix : Mrs
# prefix : Mr
# prefix : Mr
# prefix : Mrs
# prefix : Miss
# prefix : Ms
# prefix : Dr
## Current Employer
current_employer: HashedLineToString('data/companies.txt')
# Example output:
# current_employer : Monsanto Company
# current_employer : International Flavors & Fragrances
# current_employer : Carpenter Technology Corporation
# current_employer : Union Pacific Corporation
# current_employer : Rush Enterprises
# current_employer : Peabody Energy Corporation
# current_employer : Rockwell Automation
# current_employer : Auto-Owners Insurance Group
# current_employer : ArcBest Corporation
# current_employer : WGL Holdings
## Sensor
sensor_name: HashedLineToString('data/variable_words.txt')
# Example output:
# sensor_name : rotational_latency
# sensor_name : half_life
# sensor_name : clarity
# sensor_name : fairness
# sensor_name : diversity
# sensor_name : turbulence
# sensor_name : mode
# sensor_name : current
# sensor_name : rating
# sensor_name : stall_speed

View File

@ -0,0 +1,72 @@
# You can run this file with this command line to see the values printed to stdout:
# ./ebdse run driver=stdout yaml=bindings/timestamp.yaml cycles=10
# This file demonstrates different types of timestamp recipes
# that you can use with virtdata. (The bindings used in ebdse)
# If you want to control the output, uncomment and edit the statement template below
# and modify the named anchors to suit your output requirements.
#statements:
# example1: "{epochMillis}\n"
bindings:
# All uncommented lines under this are indented, so they become named bindings below
# the entry above
# Normally, the value that you get with a cycle starts at 0.
cycleNum: Identity();
# So far, we've only been dealing in milliseconds. This is important to get working
# before adding the next step, converting to a more specific type.
# You can take any millisecond output and add conversion functions as shown below.
# this one converts to a java.util.Time
randomDateWithinFeb2018: AddHashRange(0,2419200000L); StartingEpochMillis('2018-02-01 05:00:00'); ToDate();
# ToDate(...) supports a few argument forms that you can experiment with.
# ToDate(int) will space the dates apart by this many milliseconds.
# ToDate(int,int) will space the dates apart by some millis and also repeat the value for some number of cycles.
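# For example (illustrative binding names):
# datesOneDayApart: ToDate(86400000)
# datesOneDayApartRepeated: ToDate(86400000,10)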
# Alternately, you might want to use a org.joda.time.DateTime instead of a java.util.Time:
randomJodaDateWithinFeb2018: AddHashRange(0,2419200000L); StartingEpochMillis('2018-02-01 05:00:00'); ToJodaDateTime();
# ToJodaDateTime(...) also supports the space and repeat forms as shown above for ToDate(...)
# You can also have the dates in order, but with some limited out-of-order perturbation.
# In this case, we are swizzling the offset by some pseudo-random amount, up to an hour (in millis)
randomDateWithinFeb2018Jittery: AddHashRange(0,3600000L); StartingEpochMillis('2018-02-01 05:00:00'); ToDate();
# If you want to have the result be a string-formatted date representation for testing, try this:
# You can use any formatter from here: http://joda-time.sourceforge.net/apidocs/org/joda/time/format/DateTimeFormat.html
timeuuid_string: AddHashRange(0,2419200000L); StartingEpochMillis('2018-02-01 05:00:00'); StringDateWrapper("yyyy-MM-dd HH:mm:ss.SSS");
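# For example, a date-only rendering of the same recipe (a sketch; any pattern
# from the Joda DateTimeFormat page above should work):
date_string: AddHashRange(0,2419200000L); StartingEpochMillis('2018-02-01 05:00:00'); StringDateWrapper("yyyy-MM-dd");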
# In addition to the functions explained above, which come with eb, ebdse bundles some
# specialized mapping functions. These are shown below.
# You can create a com.datastax.driver.core.LocalDate for use with the java driver.
# This takes as its input the number of days since the unix epoch.
localdate: LongToLocalDateDays()
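# As a sketch, assuming the basic Div(long) function is available, you can derive
# the day count from epoch millis before converting (86400000 millis per day):
localdate_from_millis: Div(86400000L); LongToLocalDateDays()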
# You can also take the millis from any of the examples above which provide epoch millis,
# and convert the output to a millisecond-stable value, analogous to the CQL functions
# that do the same.
minUUID: AddHashRange(0,3600000); StartingEpochMillis('2018-02-01 05:00:00'); ToTimeUUIDMin();
maxUUID: AddHashRange(0,3600000); StartingEpochMillis('2018-02-01 05:00:00'); ToTimeUUIDMax();
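# These are analogous to the CQL minTimeuuid() and maxTimeuuid() functions, and are
# useful for building inclusive range predicates over timeuuid columns.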
# If you find useful recipes which are needed by others, please contribute them back to our examples!


@@ -0,0 +1,62 @@
# You can run this file with this command line to see the values printed to stdout:
# ./ebdse run driver=stdout yaml=bindings/timeuuid.yaml cycles=10
# This file demonstrates different types of timeuuid recipes
# that you can use with virtdata. (The bindings used in ebdse)
# If you want to control the output, uncomment and edit the statement template below
# and modify the named anchors to suit your output requirements.
#statements:
# example1: "{fullname}\n"
bindings:
# All uncommented lines below this point are indented, so they become named bindings
# under the 'bindings' entry above.
# Normally, the value that you get with a cycle starts at 0.
cycleNum: Identity();
# Here we convert the cycle number to a TIMEUUID with a conversion function.
id: Identity(); ToEpochTimeUUID()
## Client ID
client_id: AddHashRange(0L, 2000000000000L); ToEpochTimeUUID()
# Example output:
# client_id : 4eb369b0-91de-11bd-8000-000000000000
# client_id : 0b9edab0-5401-11e7-8000-000000000000
# client_id : 58f21c30-0eec-11f3-8000-000000000000
# client_id : 4f547e60-a48a-11ca-8000-000000000000
# client_id : 42db8510-cad8-11bb-8000-000000000000
# client_id : 78cc7790-529c-11d6-8000-000000000000
# client_id : 55382200-9cfd-11d7-8000-000000000000
# client_id : 1ebdbef0-b6dc-11b7-8000-000000000000
# client_id : 8bc58ba0-57fe-11da-8000-000000000000
# client_id : 03d1b690-ba64-11f5-8000-000000000000
# If you want a java.util.UUID instead of a java.util.Date type, you can use something like the following.
# This form avoids setting the non-time fields in the timeuuid value. This makes deterministic
# testing possible, even though the basic data type, as used in practice, is designed to avoid repeatability.
timeuuid1: AddHashRange(0,2419200000L); StartingEpochMillis('2018-02-01 05:00:00'); ToEpochTimeUUID();
# There is a shortcut for this version supported directly by ToEpochTimeUUID(..) as seen here:
timeuuid2: AddHashRange(0,2419200000L); ToEpochTimeUUID('2018-02-01 05:00:00');
# You can also access the finest level of resolution of the timeuuid type, where each cycle value
# represents the smallest possible change for a timeuuid. Bear in mind that this yields many
# sub-millisecond timestamp values which may not be easy to distinguish in normal timestamp
# formats. In this case, millisecond semantics do not apply, so make sure you adjust the input values accordingly.
timeuuid_finest1: ToFinestTimeUUID();
# However, since starting at some reference time is a popular option, ToFinestTimeUUID(...) also supports
# the shortcut version just like ToEpochTimeUUID(). This is provided because converting between epoch
# millis and timeuuid ticks is not fun.
timeuuid_finest_relative: ToFinestTimeUUID('2018-02-01 05:00:00');
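# As a worked sketch of that conversion (assuming the finest-resolution input is in
# timeuuid ticks of 100ns): an hour of jitter is 3600000 millis x 10000 ticks per
# milli = 36000000000 ticks:
timeuuid_finest_jittery: AddHashRange(0,36000000000L); ToFinestTimeUUID('2018-02-01 05:00:00');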


@@ -0,0 +1,39 @@
# You can run this file with this command line to see the values printed to stdout:
# ./ebdse run driver=stdout yaml=bindings/uuid.yaml cycles=10
# This file demonstrates different types of UUID recipes
# that you can use with virtdata. (The bindings used in ebdse)
# If you want to control the output, uncomment and edit the statement template below
# and modify the named anchors to suit your output requirements.
#statements:
# example1: "{fullname}\n"
bindings:
# All uncommented lines below this point are indented, so they become named bindings
# under the 'bindings' entry above.
# Normally, the value that you get with a cycle starts at 0.
cycleNum: Identity();
# Here we convert the cycle number to a UUID with a conversion function.
id: Identity(); ToHashedUUID()
## Station ID (100 unique UUID values, can override stations on the command-line)
station_id: Mod(<<stations:100>>); ToHashedUUID()
# Example output:
# station_id : 28df63b7-cc57-43cb-9752-fae69d1653da
# station_id : 5752fae6-9d16-43da-b20f-557a1dd5c571
# station_id : 720f557a-1dd5-4571-afb2-0dd47d657943
# station_id : 6fb20dd4-7d65-4943-9967-459343efafdd
# station_id : 19674593-43ef-4fdd-bdf4-98b19568b584
# station_id : 3df498b1-9568-4584-96fd-76f6081da01a
# station_id : 56fd76f6-081d-401a-85eb-b1d9e5bba058
# station_id : 45ebb1d9-e5bb-4058-b75d-d51547d31952
# station_id : 375dd515-47d3-4952-a49d-236be9a5c070
# station_id : 249d236b-e9a5-4070-9afa-8fae9060d959
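# To change the population, override the template variable on the command line,
# as in this sketch (the parameter name matches the <<stations:100>> anchor above):
# ./ebdse run driver=stdout yaml=bindings/uuid.yaml cycles=10 stations=500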


@@ -0,0 +1,54 @@
scenarios:
default:
schema: run driver=cql tags==phase:schema cycles==UNDEF threads==1
rampup: run driver=cql tags==phase:rampup cycles=TEMPLATE(rampup-cycles,100K) threads=auto
bindings:
userid: Template('user-{}',ToString()); SaveString('userid');
interest: Template('interest-{}',ToString());
blocks:
- name: schema
tags:
phase: schema
statements:
- create-keyspace: |
create KEYSPACE if not exists TEMPLATE(keyspace,examples)
WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'}
AND durable_writes = true;
- create-users-table: |
create table if not exists TEMPLATE(keyspace,examples).users (
userid text PRIMARY KEY
);
- create-interests-table: |
create table if not exists TEMPLATE(keyspace,examples).interests (
userid text,
interest text,
primary key (interest, userid)
);
- name: rampup
tags:
phase: rampup
statements:
- insert-users: |
insert into TEMPLATE(keyspace,examples).users (userid) VALUES ({userid});
tags:
entity: users
- insert-interests: |
insert into TEMPLATE(keyspace,examples).interests(
interest, userid
) VALUES (
{interest}, {userid}
);
tags:
entity: interests
- name: main
tags:
phase: main
statements:
- read-user: |
select * from TEMPLATE(keyspace,examples).users
where userid={userid};
- read-interests: |
select * from TEMPLATE(keyspace,examples).interests
where interest={interest};
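# A sketch of invoking the named scenario above, assuming this file is saved as
# example-workload.yaml (the main block is not part of the default scenario and
# would be run separately, for example by tag filtering):
# ./nb example-workload default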


@@ -0,0 +1,4 @@
# cql help topics
- cql
- cql-errors
- cql-exception-list


@@ -0,0 +1,22 @@
package com.datastax.ebdrivers.cql;
import io.nosqlbench.activitytype.cqld4.core.CqlAction;
import io.nosqlbench.activitytype.cqld4.core.CqlActivity;
import io.nosqlbench.engine.api.activityimpl.ActivityDef;
import org.junit.Ignore;
import org.junit.Test;
public class CqlActionTest {
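// Ignored by default: this drives a full activity lifecycle, which presumably
// requires a reachable cluster and the referenced yaml to be resolvable.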
@Test
@Ignore
public void testCqlAction() {
ActivityDef ad = ActivityDef.parseActivityDef("driver=ebdrivers;alias=foo;yaml=write-telemetry.yaml;");
CqlActivity cac = new CqlActivity(ad);
CqlAction cq = new CqlAction(ad, 0, cac);
cq.init();
cq.runCycle(5);
}
}


@@ -0,0 +1,61 @@
package com.datastax.ebdrivers.cql.statements;
import io.nosqlbench.activitytype.cqld4.statements.core.CQLStatementDefParser;
import org.junit.Test;
import java.util.HashMap;
import java.util.List;
import static org.assertj.core.api.Assertions.assertThat;
public class CQLCQLStatementDefParserTest {
// TODO: Implement support for default values in yaml
@Test
public void testBasicParsing() {
HashMap<String, String> bindings = new HashMap<String, String>() {{
put("not", "even");
}};
CQLStatementDefParser sdp = new CQLStatementDefParser("test-name","This is ?not an error.");
CQLStatementDefParser.ParseResult r = sdp.getParseResult(bindings.keySet());
assertThat(r.hasError()).isFalse();
assertThat(r.getStatement()).isEqualTo("This is ? an error.");
assertThat(r.getMissingAnchors().size()).isEqualTo(0);
assertThat(r.getMissingGenerators().size()).isEqualTo(0);
}
@Test
public void testParsingDiagnostics() {
HashMap<String, String> bindings = new HashMap<String, String>() {{
put("BINDABLE", "two");
put("EXTRABINDING", "5");
}};
CQLStatementDefParser sdp = new CQLStatementDefParser("test-name","This is a test of ?BINDABLE interpolation and ?MISSINGBINDING.");
List<String> bindableNames = sdp.getBindableNames();
CQLStatementDefParser.ParseResult result = sdp.getParseResult(bindings.keySet());
assertThat(result.hasError()).isTrue();
assertThat(result.getStatement()).isEqualTo("This is a test of ? interpolation and ?.");
assertThat(result.getMissingAnchors().size()).isEqualTo(1);
assertThat(result.getMissingGenerators().size()).isEqualTo(1);
assertThat(result.getMissingAnchors()).contains("EXTRABINDING");
assertThat(result.getMissingGenerators()).contains("MISSINGBINDING");
}
@Test
public void testParsingPatterns() {
HashMap<String, String> bindings = new HashMap<String, String>() {{
put("B-1", "one");
put("B_-1.2", "two");
}};
CQLStatementDefParser sdp = new CQLStatementDefParser("test-name","This is a test of ?B-1 and {B_-1.2}");
List<String> bindableNames = sdp.getBindableNames();
assertThat(bindableNames).containsExactly("B-1","B_-1.2");
CQLStatementDefParser.ParseResult parseResult = sdp.getParseResult(bindings.keySet());
assertThat(parseResult.hasError()).isFalse();
assertThat(parseResult.getStatement()).isEqualTo("This is a test of ? and ?");
}
}


@@ -0,0 +1,78 @@
package com.datastax.ebdrivers.cql.statements;
import com.datastax.driver.core.HostDistance;
import com.datastax.driver.core.PoolingOptions;
import com.datastax.driver.core.SocketOptions;
import com.datastax.driver.core.policies.LoadBalancingPolicy;
import com.datastax.driver.core.policies.ReconnectionPolicy;
import com.datastax.driver.core.policies.SpeculativeExecutionPolicy;
import io.nosqlbench.activitytype.cqld4.core.CQLOptions;
import org.junit.Test;
import static org.assertj.core.api.Assertions.assertThat;
public class CQLOptionsTest {
@Test
public void testSpeculative() {
SpeculativeExecutionPolicy p1 = CQLOptions.speculativeFor("p99:5");
assertThat(p1).isNotNull();
SpeculativeExecutionPolicy p2 = CQLOptions.speculativeFor("p99:5:5000ms");
assertThat(p2).isNotNull();
}
@Test
public void testConstant() {
SpeculativeExecutionPolicy p1 = CQLOptions.speculativeFor("5000ms:5");
assertThat(p1).isNotNull();
}
@Test
public void testWhitelist() {
LoadBalancingPolicy lbp = CQLOptions.whitelistFor("127.0.0.1,127.0.0.2:123", null);
assertThat(lbp).isNotNull();
}
@Test
public void testReconnectPolicyPatterns() {
ReconnectionPolicy rp = CQLOptions.reconnectPolicyFor("exponential(123,321)");
assertThat(rp).isNotNull();
rp = CQLOptions.reconnectPolicyFor("constant(123)");
assertThat(rp).isNotNull();
}
@Test
public void testSocketOptionPatterns() {
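// The option string below mixes '=' and ':' as key-value separators, so this
// also verifies that the parser accepts both forms.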
SocketOptions so = CQLOptions.socketOptionsFor("read_timeout_ms=23423,connect_timeout_ms=2344;keep_alive:true,reuse_address:true;so_linger:323;tcp_no_delay=true;receive_buffer_size:100,send_buffer_size=1000");
assertThat(so.getConnectTimeoutMillis()).isEqualTo(2344);
assertThat(so.getKeepAlive()).isEqualTo(true);
assertThat(so.getReadTimeoutMillis()).isEqualTo(23423);
assertThat(so.getReceiveBufferSize()).isEqualTo(100);
assertThat(so.getReuseAddress()).isEqualTo(true);
assertThat(so.getSendBufferSize()).isEqualTo(1000);
assertThat(so.getSoLinger()).isEqualTo(323);
assertThat(so.getTcpNoDelay()).isEqualTo(true);
}
@Test
public void testConnectionsPatterns() {
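// A single number sets only the core connections for LOCAL hosts; the unset
// values are reported as Integer.MIN_VALUE.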
PoolingOptions po = CQLOptions.poolingOptionsFor("2345");
assertThat(po.getCoreConnectionsPerHost(HostDistance.LOCAL)).isEqualTo(2345);
assertThat(po.getMaxConnectionsPerHost(HostDistance.LOCAL)).isEqualTo(Integer.MIN_VALUE);
assertThat(po.getMaxRequestsPerConnection(HostDistance.LOCAL)).isEqualTo(Integer.MIN_VALUE);
PoolingOptions po2 = CQLOptions.poolingOptionsFor("1:2:3,4:5:6");
assertThat(po2.getCoreConnectionsPerHost(HostDistance.LOCAL)).isEqualTo(1);
assertThat(po2.getMaxConnectionsPerHost(HostDistance.LOCAL)).isEqualTo(2);
assertThat(po2.getMaxRequestsPerConnection(HostDistance.LOCAL)).isEqualTo(3);
assertThat(po2.getCoreConnectionsPerHost(HostDistance.REMOTE)).isEqualTo(4);
assertThat(po2.getMaxConnectionsPerHost(HostDistance.REMOTE)).isEqualTo(5);
assertThat(po2.getMaxRequestsPerConnection(HostDistance.REMOTE)).isEqualTo(6);
PoolingOptions po3 = CQLOptions.poolingOptionsFor("1:2:3,4:5:6,heartbeat_interval_s:100,idle_timeout_s:123,pool_timeout_ms:234");
assertThat(po3.getIdleTimeoutSeconds()).isEqualTo(123);
assertThat(po3.getPoolTimeoutMillis()).isEqualTo(234);
assertThat(po3.getHeartbeatIntervalSeconds()).isEqualTo(100);
}
}


@@ -0,0 +1,14 @@
tags:
group: read
statements:
- name: read-telemetry
statement: |
select * from <<KEYSPACE:testks>>.<<TABLE:testtable>>_telemetry
where source={source}
and epoch_hour={epoch_hour}
and param={param}
limit 10
bindings:
source: ThreadNumGenerator
epoch_hour: DateSequenceFieldGenerator(1000,'YYYY-MM-dd-HH')
param: LineExtractGenerator('data/variable_words.txt')
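# The <<KEYSPACE:testks>> and <<TABLE:testtable>> anchors are template variables
# with defaults; presumably they can be overridden by adding KEYSPACE=... and
# TABLE=... to the activity parameters, as with other template anchors.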


@@ -87,6 +87,12 @@
<version>3.12.100-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>io.nosqlbench</groupId>
<artifactId>driver-cqld4</artifactId>
<version>3.12.100-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>io.nosqlbench</groupId>
<artifactId>driver-cqlverify</artifactId>