create cqld3 module

This commit is contained in:
Jonathan Shook 2021-03-30 10:38:07 -05:00
parent cfa0bd5a23
commit e52abbf2d6
138 changed files with 9369 additions and 0 deletions

223
driver-cqld3-shaded/pom.xml Normal file
View File

@ -0,0 +1,223 @@
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>io.nosqlbench</groupId>
<artifactId>mvn-defaults</artifactId>
<version>4.15.30-SNAPSHOT</version>
<relativePath>../mvn-defaults</relativePath>
</parent>
<artifactId>driver-cqld3-shaded</artifactId>
<packaging>jar</packaging>
<name>${project.artifactId}</name>
<description>
A Shaded CQL ActivityType driver for http://nosqlbench.io/,
using version 3.* of the DataStax Driver for Apache Cassandra
</description>
<dependencies>
<!-- core dependencies -->
<dependency>
<groupId>io.nosqlbench</groupId>
<artifactId>engine-api</artifactId>
<version>4.15.30-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>io.nosqlbench</groupId>
<artifactId>drivers-api</artifactId>
<version>4.15.30-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>com.datastax.cassandra</groupId>
<artifactId>cassandra-driver-core</artifactId>
<version>3.9.0</version>
</dependency>
<dependency>
<groupId>com.datastax.cassandra</groupId>
<artifactId>cassandra-driver-mapping</artifactId>
<version>3.9.0</version>
</dependency>
<dependency>
<groupId>com.datastax.cassandra</groupId>
<artifactId>cassandra-driver-extras</artifactId>
<version>3.9.0</version>
</dependency>
<!-- For CQL compression option -->
<dependency>
<groupId>org.lz4</groupId>
<artifactId>lz4-java</artifactId>
</dependency>
<!-- For CQL compression option -->
<dependency>
<groupId>org.xerial.snappy</groupId>
<artifactId>snappy-java</artifactId>
</dependency>
<dependency>
<groupId>org.antlr</groupId>
<artifactId>antlr4-runtime</artifactId>
<version>4.8</version>
</dependency>
<dependency>
<groupId>io.netty</groupId>
<artifactId>netty-codec-haproxy</artifactId>
<version>4.1.54.Final</version>
</dependency>
<!-- <dependency>-->
<!-- <groupId>io.netty</groupId>-->
<!-- <artifactId>netty-transport-native-epoll</artifactId>-->
<!-- <version>4.1.47.Final</version>-->
<!-- <classifier>linux-x86_64</classifier>-->
<!-- </dependency>-->
<!-- test only scope -->
</dependencies>
<build>
<plugins>
<!--
If this plugin is re-enabled, the local CQL grammar will
be overwritten. The grammar has some syntax issues, so
fixes will be made to it before it is submitted back.
(lack of composite key syntax, nested type syntax, etc)
-->
<!-- <plugin>-->
<!-- <groupId>com.googlecode.maven-download-plugin</groupId>-->
<!-- <artifactId>download-maven-plugin</artifactId>-->
<!-- <version>1.4.0</version>-->
<!-- <executions>-->
<!-- <execution>-->
<!-- <id>get-cql-lexer</id>-->
<!-- <phase>generate-sources</phase>-->
<!-- <goals>-->
<!-- <goal>wget</goal>-->
<!-- </goals>-->
<!-- <configuration>-->
<!-- <url>-->
<!-- https://raw.githubusercontent.com/antlr/grammars-v4/master/cql3/CqlLexer.g4-->
<!-- </url>-->
<!-- <outputFileName>CqlLexer.g4</outputFileName>-->
<!-- <outputDirectory>src/main/grammars/cql3/-->
<!-- </outputDirectory>-->
<!-- </configuration>-->
<!-- </execution>-->
<!-- <execution>-->
<!-- <id>get-cql-parser</id>-->
<!-- <phase>generate-sources</phase>-->
<!-- <goals>-->
<!-- <goal>wget</goal>-->
<!-- </goals>-->
<!-- <configuration>-->
<!-- <url>-->
<!-- https://raw.githubusercontent.com/antlr/grammars-v4/master/cql3/CqlParser.g4-->
<!-- </url>-->
<!-- <outputFileName>CqlParser.g4</outputFileName>-->
<!-- <outputDirectory>src/main/grammars/cql3/-->
<!-- </outputDirectory>-->
<!-- </configuration>-->
<!-- </execution>-->
<!-- </executions>-->
<!-- </plugin>-->
<plugin>
<groupId>org.antlr</groupId>
<artifactId>antlr4-maven-plugin</artifactId>
<version>4.8</version>
<configuration>
<sourceDirectory>src/main/grammars/cql3
</sourceDirectory>
<arguments>
<argument>-package</argument>
<argument>io.nosqlbench.generators.cql.generated
</argument>
</arguments>
<outputDirectory>
src/main/java/io/nosqlbench/generators/cql/generated
</outputDirectory>
</configuration>
<executions>
<execution>
<id>antlr</id>
<goals>
<goal>antlr4</goal>
</goals>
<phase>generate-sources</phase>
</execution>
</executions>
</plugin>
<plugin>
<artifactId>maven-shade-plugin</artifactId>
<version>3.2.3</version>
<executions>
<execution>
<phase>package</phase>
<goals>
<goal>shade</goal>
</goals>
</execution>
</executions>
<configuration>
<createDependencyReducedPom>false</createDependencyReducedPom>
<promoteTransitiveDependencies>true</promoteTransitiveDependencies>
<createSourcesJar>true</createSourcesJar>
<!-- <shadedArtifactAttached>true</shadedArtifactAttached>-->
<!-- <shadedClassifierName>shaded</shadedClassifierName>-->
<relocations>
<relocation>
<pattern>com.google.common</pattern>
<shadedPattern>com.datastax.internal.com_google_common</shadedPattern>
</relocation>
<!-- <relocation>-->
<!-- <pattern>com.datastax</pattern>-->
<!-- <shadedPattern>dse19.com.datastax</shadedPattern>-->
<!-- </relocation>-->
<!-- <relocation>-->
<!-- <pattern>io.netty</pattern>-->
<!-- <shadedPattern>dse19.io.netty</shadedPattern>-->
<!-- </relocation>-->
</relocations>
<artifactSet>
<includes>
<include>*:*</include>
</includes>
</artifactSet>
<transformers combine.children="append">
<transformer implementation="org.apache.maven.plugins.shade.resource.ManifestResourceTransformer">
<mainClass>io.nosqlbench.engine.cli.NBCLI</mainClass>
</transformer>
</transformers>
<!-- <finalName>${project.artifactId}</finalName>-->
<filters>
<filter>
<artifact>*:*</artifact>
<excludes>
<exclude>META-INF/*.SF</exclude>
<exclude>META-INF/*.DSA</exclude>
<exclude>META-INF/*.RSA</exclude>
</excludes>
</filter>
</filters>
</configuration>
</plugin>
</plugins>
</build>
</project>

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,194 @@
LR_BRACKET=1
RR_BRACKET=2
LC_BRACKET=3
RC_BRACKET=4
LS_BRACKET=5
RS_BRACKET=6
COMMA=7
SEMI=8
COLON=9
SPACE=10
SPEC_MYSQL_COMMENT=11
COMMENT_INPUT=12
LINE_COMMENT=13
DOT=14
STAR=15
DIVIDE=16
MODULE=17
PLUS=18
MINUSMINUS=19
MINUS=20
DQUOTE=21
SQUOTE=22
K_ADD=23
K_AGGREGATE=24
K_ALL=25
K_ALLOW=26
K_ALTER=27
K_AND=28
K_ANY=29
K_APPLY=30
K_AS=31
K_ASC=32
K_AUTHORIZE=33
K_BATCH=34
K_BEGIN=35
K_BY=36
K_CALLED=37
K_CLUSTERING=38
K_COLUMNFAMILY=39
K_COMPACT=40
K_CONSISTENCY=41
K_CONTAINS=42
K_CREATE=43
K_CUSTOM=44
K_DELETE=45
K_DESC=46
K_DESCRIBE=47
K_DISTINCT=48
K_DROP=49
K_DURABLE_WRITES=50
K_EACH_QUORUM=51
K_ENTRIES=52
K_EXECUTE=53
K_EXISTS=54
K_FALSE=55
K_FILTERING=56
K_FINALFUNC=57
K_FROM=58
K_FULL=59
K_FUNCTION=60
K_FUNCTIONS=61
K_GRANT=62
K_IF=63
K_IN=64
K_INDEX=65
K_INFINITY=66
K_INITCOND=67
K_INPUT=68
K_INSERT=69
K_INTO=70
K_IS=71
K_JSON=72
K_KEY=73
K_KEYS=74
K_KEYSPACE=75
K_KEYSPACES=76
K_LANGUAGE=77
K_LEVEL=78
K_LIMIT=79
K_LOCAL_ONE=80
K_LOCAL_QUORUM=81
K_LOGGED=82
K_LOGIN=83
K_MATERIALIZED=84
K_MODIFY=85
K_NAN=86
K_NORECURSIVE=87
K_NOSUPERUSER=88
K_NOT=89
K_NULL=90
K_OF=91
K_ON=92
K_ONE=93
K_OPTIONS=94
K_OR=95
K_ORDER=96
K_PARTITION=97
K_PASSWORD=98
K_PER=99
K_PERMISSION=100
K_PERMISSIONS=101
K_PRIMARY=102
K_QUORUM=103
K_RENAME=104
K_REPLACE=105
K_REPLICATION=106
K_RETURNS=107
K_REVOKE=108
K_ROLE=109
K_ROLES=110
K_SCHEMA=111
K_SELECT=112
K_SET=113
K_SFUNC=114
K_STATIC=115
K_STORAGE=116
K_STYPE=117
K_SUPERUSER=118
K_TABLE=119
K_THREE=120
K_TIMESTAMP=121
K_TO=122
K_TOKEN=123
K_TRIGGER=124
K_TRUE=125
K_TRUNCATE=126
K_TTL=127
K_TWO=128
K_TYPE=129
K_UNLOGGED=130
K_UPDATE=131
K_USE=132
K_USER=133
K_USING=134
K_UUID=135
K_VALUES=136
K_VIEW=137
K_WHERE=138
K_WITH=139
K_WRITETIME=140
K_ASCII=141
K_BIGINT=142
K_BLOB=143
K_BOOLEAN=144
K_COUNTER=145
K_DATE=146
K_DECIMAL=147
K_DOUBLE=148
K_FLOAT=149
K_FROZEN=150
K_INET=151
K_INT=152
K_LIST=153
K_MAP=154
K_SMALLINT=155
K_TEXT=156
K_TIMEUUID=157
K_TIME=158
K_TINYINT=159
K_TUPLE=160
K_VARCHAR=161
K_VARINT=162
CODE_BLOCK=163
STRING_LITERAL=164
DECIMAL_LITERAL=165
FLOAT_LITERAL=166
HEXADECIMAL_LITERAL=167
REAL_LITERAL=168
OBJECT_NAME=169
UUID=170
OPERATOR_EQ=171
OPERATOR_LT=172
OPERATOR_GT=173
OPERATOR_LTE=174
OPERATOR_GTE=175
K_USERS=176
'('=1
')'=2
'{'=3
'}'=4
'['=5
']'=6
','=7
';'=8
':'=9
'.'=14
'*'=15
'/'=16
'%'=17
'+'=18
'--'=19
'-'=20
'"'=21
'\''=22

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,47 @@
package com.datastax.driver.core;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;
import java.util.OptionalLong;
import java.util.Set;
/**
 * Filters statements by hashing their routing key and checking whether the
 * resulting token falls inside a fixed set of token ranges. Only bigint-valued
 * token types are accepted.
 */
public class M3PTokenFilter {
    private final TokenRange[] includedRanges;
    private final ProtocolVersion protocolVersion;
    private final CodecRegistry codecRegistry;
    private final Metadata clusterMetadata;
    private final Token.Factory tokenFactory;

    public M3PTokenFilter(Set<TokenRange> ranges, Cluster cluster) {
        Configuration config = cluster.getConfiguration();
        protocolVersion = config.getProtocolOptions().getProtocolVersion();
        codecRegistry = config.getCodecRegistry();
        clusterMetadata = cluster.getMetadata();
        tokenFactory = Token.getFactory(clusterMetadata.partitioner);

        // Validate every supplied range before accepting it.
        List<TokenRange> accepted = new ArrayList<>();
        for (TokenRange candidate : ranges) {
            if (!candidate.getStart().getType().equals(DataType.bigint())) {
                throw new RuntimeException("This filter only works with bigint valued token types");
            }
            accepted.add(candidate);
        }
        this.includedRanges = accepted.toArray(new TokenRange[0]);
        if (this.includedRanges.length < 1) {
            throw new RuntimeException("There were no tokens found. Please check your keyspace and cluster settings.");
        }
    }

    /**
     * Hash the statement's routing key and report the matching token value.
     *
     * @param statement the statement whose routing key is hashed
     * @return the token value when it lies in one of the ranges, else empty
     */
    public OptionalLong matches(Statement statement) {
        ByteBuffer routingKey = statement.getRoutingKey(protocolVersion, codecRegistry);
        Token hashed = tokenFactory.hash(routingKey);
        for (TokenRange candidate : includedRanges) {
            if (candidate.contains(hashed)) {
                return OptionalLong.of((long) hashed.getValue());
            }
        }
        return OptionalLong.empty();
    }
}

View File

@ -0,0 +1,60 @@
package com.datastax.driver.core;
import io.nosqlbench.activitytype.cql.api.StatementFilter;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;
/**
 * A StatementFilter which accepts only statements whose routing key hashes
 * into one of a configured set of token ranges. Ranges are supplied as a
 * comma-separated list of "start:end" token pairs.
 */
public class TokenRangeStmtFilter implements StatementFilter {

    private final Metadata clusterMetadata;
    private final ProtocolVersion protocolVersion;
    private final CodecRegistry codecRegistry;
    private final Token.Factory factory;
    private final TokenRange[] ranges;

    public TokenRangeStmtFilter(Cluster cluster, String rangesSpec) {
        clusterMetadata = cluster.getMetadata();
        protocolVersion = cluster.getConfiguration().getProtocolOptions().getProtocolVersion();
        codecRegistry = cluster.getConfiguration().getCodecRegistry();
        factory = Token.getFactory(clusterMetadata.partitioner);
        ranges = parseRanges(factory, rangesSpec);
    }

    // Parses "start1:end1,start2:end2,..." into TokenRange instances.
    private TokenRange[] parseRanges(Token.Factory factory, String rangesStr) {
        return Arrays.stream(rangesStr.split(","))
            .map(spec -> {
                String[] bounds = spec.split(":");
                Token lower = factory.fromString(bounds[0]);
                Token upper = factory.fromString(bounds[1]);
                return new TokenRange(lower, upper, factory);
            })
            .toArray(TokenRange[]::new);
    }

    @Override
    public boolean matches(Statement statement) {
        ByteBuffer routingKey = statement.getRoutingKey(protocolVersion, codecRegistry);
        Token hashed = factory.hash(routingKey);
        return Arrays.stream(ranges).anyMatch(range -> range.contains(hashed));
    }

    @Override
    public String toString() {
        String joined = Arrays.stream(ranges)
            .map(String::valueOf)
            .collect(Collectors.joining(","));
        return "including token ranges: " + joined;
    }
}

View File

@ -0,0 +1,71 @@
package com.datastax.driver.core;
import java.io.BufferedWriter;
import java.io.FileWriter;
import java.io.IOException;
import java.util.Comparator;
import java.util.Set;
/**
 * Utilities for looking up the token ranges owned by hosts of a cluster and
 * exporting them to per-host files.
 */
public class TokenRangeUtil {

    private final Metadata clusterMetadata;
    private final ProtocolVersion protocolVersion;
    private final CodecRegistry codecRegistry;
    private final Token.Factory factory;
    private final Cluster cluster;

    public TokenRangeUtil(Cluster cluster) {
        this.cluster = cluster;
        clusterMetadata = cluster.getMetadata();
        protocolVersion = cluster.getConfiguration().getProtocolOptions().getProtocolVersion();
        codecRegistry = cluster.getConfiguration().getCodecRegistry();
        factory = Token.getFactory(clusterMetadata.partitioner);
    }

    /**
     * Find the token ranges owned by one host for one keyspace.
     *
     * @param keyspace the keyspace whose replication determines the ranges
     * @param hostaddress either a purely-numeric index into the address-sorted
     *        host list, or a literal host address (without the leading "/")
     * @return the token ranges for that host and keyspace
     * @throws RuntimeException if the host spec is empty
     */
    public Set<TokenRange> getTokenRangesFor(String keyspace, String hostaddress) {
        Host host;
        if (hostaddress.matches("\\d+")) {
            // A pure number selects the nth host, ordered by address string.
            int hostenum = Integer.parseInt(hostaddress);
            host = clusterMetadata.getAllHosts().stream()
                .sorted(Comparator.comparing(h -> h.getAddress().toString()))
                .skip(hostenum)
                .findFirst()
                .orElseThrow();
        } else if (!hostaddress.isEmpty()) {
            host = clusterMetadata.getAllHosts().stream()
                .filter(h -> h.getAddress().toString().replaceAll("/", "").equals(hostaddress))
                .findFirst()
                .orElseThrow();
        } else {
            throw new RuntimeException("You must specify a host enum in order or a host address.");
        }
        return clusterMetadata.getTokenRanges(keyspace, host);
    }

    /**
     * Write each host's token ranges for the given keyspace to a file named
     * "ranges-&lt;address&gt;" in the working directory.
     *
     * @param tokensks the keyspace to compute ranges for
     * @throws RuntimeException wrapping any IOException from the file write
     */
    public void printRanges(String tokensks) {
        Set<Host> hosts = clusterMetadata.getAllHosts();
        for (Host host : hosts) {
            String address = host.getAddress().toString().substring(1);
            // Bug fix: try-with-resources closes the writer even when the range
            // lookup or write throws (the original leaked it on error), and the
            // rethrown exception now preserves its cause.
            try (BufferedWriter writer = new BufferedWriter(new FileWriter("ranges-" + address))) {
                String ranges = getTokenRangesFor(tokensks, address).toString();
                writer.write(ranges);
            } catch (IOException e) {
                throw new RuntimeException("Can't write token range files", e);
            }
        }
    }

    /** @return a token filter restricted to the given ranges on this cluster */
    public M3PTokenFilter getFilterFor(Set<TokenRange> ranges) {
        return new M3PTokenFilter(ranges, this.cluster);
    }
}

View File

@ -0,0 +1,24 @@
package io.nosqlbench.activitytype.cql.api;
/**
 * When an error filter allows us to see and handle an error in a specific way,
 * the ErrorResponse determines exactly how we handle it. Each level represents
 * a starting point in handling, including everything after the starting point.
 * The first enum is the most severe response.
 */
public enum ErrorResponse {
    stop("S"),      // Rethrow this error to the runtime, forcing it to handle the error or stop
    warn("W"),      // log a warning with some details about this error
    retry("R"),     // resubmit this operation up to the available tries
    histogram("H"), // record this metric in a histogram
    count("C"),     // count this metric separately
    counter("C"),   // NOTE(review): shares symbol "C" with count — appears to be an alias; confirm
    ignore("I");    // do nothing

    private final String symbol;

    ErrorResponse(String symbol) {
        this.symbol = symbol;
    }

    /**
     * Improvement: the symbol field was previously write-only (no accessor).
     *
     * @return the single-letter shorthand associated with this response
     */
    public String getSymbol() {
        return symbol;
    }
}

View File

@ -0,0 +1,18 @@
package io.nosqlbench.activitytype.cql.api;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Statement;
/**
 * An operator interface for performing a modular action on CQL ResultSets per-cycle.
 * Implementations receive the result set together with the statement that
 * produced it and the cycle number it was produced for.
 */
public interface ResultSetCycleOperator {
    /**
     * Perform an action on a result set for a specific cycle.
     * @param resultSet The ResultSet for the given cycle
     * @param statement The statement for the given cycle
     * @param cycle The cycle for which the statement was submitted
     * @return A value, only meaningful when used with aggregated operators
     */
    int apply(ResultSet resultSet, Statement statement, long cycle);
}

View File

@ -0,0 +1,11 @@
package io.nosqlbench.activitytype.cql.api;
import com.datastax.driver.core.Row;
/**
 * An operator interface for consuming ResultSets and producing some
 * int that can be used as a status code in activities.
 */
public interface RowCycleOperator {
    /**
     * Act on a single row for one cycle.
     * @param row the row to operate on
     * @param cycle the cycle for which the row was produced
     * @return an int status code
     */
    int apply(Row row, long cycle);
}

View File

@ -0,0 +1,7 @@
package io.nosqlbench.activitytype.cql.api;
import com.datastax.driver.core.Statement;
/**
 * A predicate over CQL statements. Implementations decide whether a given
 * statement matches some criterion (see TokenRangeStmtFilter for an example
 * that matches on token ranges).
 */
public interface StatementFilter {
    /** @return true if the statement matches this filter's criterion */
    boolean matches(Statement statement);
}

View File

@ -0,0 +1,7 @@
package io.nosqlbench.activitytype.cql.api;
/**
 * Ways to respond when a conditional update reports it was not applied.
 * NOTE(review): semantics inferred from constant names — confirm at usage sites.
 */
public enum VerifyApplied {
    ignore, // take no action
    error,  // treat the unapplied update as an error
    retry   // try the operation again
}

View File

@ -0,0 +1,12 @@
package io.nosqlbench.activitytype.cql.codecsupport;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
 * Declares the CQL user-defined type names that an annotated codec class
 * applies to. Read reflectively by UserCodecProvider when matching codec
 * classes against the user types of a keyspace.
 */
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.TYPE)
public @interface CQLUserTypeNames {
    /** The CQL UDT names, with or without a keyspace prefix. */
    String[] value();
}

View File

@ -0,0 +1,12 @@
package io.nosqlbench.activitytype.cql.codecsupport;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
 * Declares the UDTTransformCodec classes supplied by an annotated
 * UserCodecProvider implementation. Read reflectively by
 * UserCodecProvider when registering codecs.
 */
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.TYPE)
public @interface UDTCodecClasses {
    /** The codec classes this provider supplies. */
    Class<? extends UDTTransformCodec>[] value();
}

View File

@ -0,0 +1,33 @@
package io.nosqlbench.activitytype.cql.codecsupport;
import com.datastax.driver.core.CodecRegistry;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.UserType;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;
import java.util.ArrayList;
import java.util.List;
import java.util.ServiceLoader;
/**
 * Discovers {@link UserCodecProvider} implementations via {@link ServiceLoader}
 * and asks each one to register its codecs with the session's cluster.
 */
public class UDTCodecInjector {

    private final static Logger logger = LogManager.getLogger(UDTCodecInjector.class);

    private final List<UserCodecProvider> codecProviders = new ArrayList<>();

    /**
     * Load all service-declared codec providers and register their codecs.
     *
     * @param session the session whose cluster receives the codecs
     * @param allowAcrossKeyspaces whether a single codec may be registered in
     *        multiple keyspaces
     */
    public void injectUserProvidedCodecs(Session session, boolean allowAcrossKeyspaces) {
        ServiceLoader<UserCodecProvider> codecLoader = ServiceLoader.load(UserCodecProvider.class);
        for (UserCodecProvider userCodecProvider : codecLoader) {
            codecProviders.add(userCodecProvider);
        }
        for (UserCodecProvider codecProvider : codecProviders) {
            // Bug fix: the caller's flag was previously ignored (hard-coded true).
            codecProvider.registerCodecsForCluster(session, allowAcrossKeyspaces);
        }
        // Removed: an unused CodecRegistry local and an unused userTypes field.
    }
}

View File

@ -0,0 +1,12 @@
package io.nosqlbench.activitytype.cql.codecsupport;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
 * Declares the Java POJO type that an annotated UDTTransformCodec maps to and
 * from. Read reflectively by UserCodecProvider when instantiating the codec.
 */
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.TYPE)
public @interface UDTJavaType {
    /** The Java class the codec maps the UDT to and from. */
    Class<?> value();
}

View File

@ -0,0 +1,22 @@
package io.nosqlbench.activitytype.cql.codecsupport;
import com.datastax.driver.core.TypeCodec;
import com.datastax.driver.core.UDTValue;
import com.datastax.driver.core.UserType;
import com.datastax.driver.extras.codecs.MappingCodec;
/**
 * Base class for codecs that map a CQL user-defined type (UDT) to a Java
 * type, building on the driver's MappingCodec over UDTValue.
 *
 * @param <T> the Java type this codec maps to and from
 */
public abstract class UDTTransformCodec<T> extends MappingCodec<T,UDTValue> {
    // The CQL user type handled by this codec, captured at construction.
    protected UserType userType;
    public UDTTransformCodec(UserType userType, Class<T> javaType) {
        super(TypeCodec.userType(userType), javaType);
        this.userType = userType;
    }
    /** @return the CQL user type this codec handles */
    public UserType getUserType() {
        return userType;
    }
}

View File

@ -0,0 +1,138 @@
package io.nosqlbench.activitytype.cql.codecsupport;
import com.datastax.driver.core.*;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;
import java.lang.reflect.Constructor;
import java.util.*;
import java.util.stream.Collectors;
/**
 * Base class for user-supplied codec bundles. Implementations are discovered
 * via ServiceLoader (see UDTCodecInjector) and annotated with
 * {@code @UDTCodecClasses} to declare the codec classes they provide; each
 * codec class is annotated with {@code @CQLUserTypeNames} and
 * {@code @UDTJavaType} to describe what it maps.
 */
public abstract class UserCodecProvider {

    private final static Logger logger = LogManager.getLogger(UserCodecProvider.class);

    /**
     * Register this provider's codecs for every keyspace in the cluster.
     *
     * @param session the session whose cluster metadata is inspected
     * @param allowAcrossKeyspaces if false, a codec that matches in more than
     *        one keyspace is an error
     * @return all codecs that were registered
     */
    public List<UDTTransformCodec> registerCodecsForCluster(
        Session session,
        boolean allowAcrossKeyspaces
    ) {
        List<UDTTransformCodec> typeCodecs = new ArrayList<>();
        List<KeyspaceMetadata> ksMetas = new ArrayList<>(session.getCluster().getMetadata().getKeyspaces());
        for (KeyspaceMetadata keyspace : ksMetas) {
            List<UDTTransformCodec> keyspaceCodecs = registerCodecsForKeyspace(session, keyspace.getName());
            for (UDTTransformCodec typeCodec : keyspaceCodecs) {
                if (typeCodecs.contains(typeCodec) && !allowAcrossKeyspaces) {
                    // Bug fix: message previously read "registeredin multiple".
                    throw new RuntimeException("codec " + typeCodec + " could be registered " +
                        "in multiple keyspaces, but this is not allowed.");
                }
                typeCodecs.add(typeCodec);
                logger.debug("Found user-provided codec for ks:" + keyspace + ", udt:" + typeCodec);
            }
        }
        return typeCodecs;
    }

    /**
     * Register codecs for one keyspace by matching each provided codec class's
     * declared UDT names against the keyspace's user types.
     *
     * @param session the session whose cluster registry receives the codecs
     * @param keyspace the keyspace to match against
     * @return the codecs registered for this keyspace (empty if no metadata)
     */
    public List<UDTTransformCodec> registerCodecsForKeyspace(Session session, String keyspace) {
        CodecRegistry registry = session.getCluster().getConfiguration().getCodecRegistry();
        List<UDTTransformCodec> codecsForKeyspace = new ArrayList<>();
        KeyspaceMetadata ksMeta = session.getCluster().getMetadata().getKeyspace(keyspace);
        if (ksMeta == null) {
            logger.warn("No metadata for " + keyspace);
            return Collections.emptyList();
        }
        Collection<UserType> typesInKeyspace = ksMeta.getUserTypes();
        List<Class<? extends UDTTransformCodec>> providedCodecClasses = getUDTCodecClasses();
        Map<UserType, Class<? extends UDTTransformCodec>> codecMap = new HashMap<>();
        // Fixed: iterate with the precise element type; the original widened the
        // loop variable to TypeCodec and immediately performed an unchecked cast.
        for (Class<? extends UDTTransformCodec> udtCodecClass : providedCodecClasses) {
            List<String> targetUDTTypes = getUDTTypeNames(udtCodecClass);
            for (UserType keyspaceUserType : typesInKeyspace) {
                String ksTypeName = keyspaceUserType.getTypeName();
                // Accept either a "keyspace.type" qualified name or a bare type name.
                String globalTypeName = (ksTypeName.contains(".") ? ksTypeName.split("\\.", 2)[1] : ksTypeName);
                if (targetUDTTypes.contains(ksTypeName) || targetUDTTypes.contains(globalTypeName)) {
                    codecMap.put(keyspaceUserType, udtCodecClass);
                }
            }
        }
        for (UserType userType : codecMap.keySet()) {
            Class<? extends UDTTransformCodec> codecClass = codecMap.get(userType);
            Class<?> udtJavaType = getUDTJavaType(codecClass);
            UDTTransformCodec udtCodec = instantiate(userType, codecClass, udtJavaType);
            codecsForKeyspace.add(udtCodec);
            registry.register(udtCodec);
            logger.info("registered codec:" + udtCodec);
        }
        return codecsForKeyspace;
    }

    /** Instantiate a codec via its (UserType, Class) constructor. */
    private UDTTransformCodec instantiate(UserType key, Class<? extends UDTTransformCodec> codecClass, Class<?> javaType) {
        try {
            Constructor<? extends UDTTransformCodec> ctor = codecClass.getConstructor(UserType.class, Class.class);
            return ctor.newInstance(key, javaType);
        } catch (Exception e) {
            // Fixed: removed the redundant printStackTrace; the cause is preserved.
            throw new RuntimeException(e);
        }
    }

    /** @return codec classes declared via {@code @UDTCodecClasses} on this provider. */
    private List<Class<? extends UDTTransformCodec>> getUDTCodecClasses() {
        UDTCodecClasses[] annotationsByType = this.getClass().getAnnotationsByType(UDTCodecClasses.class);
        return Arrays.stream(annotationsByType)
            .map(UDTCodecClasses::value)
            .flatMap(Arrays::stream)
            .collect(Collectors.toList());
    }

    /**
     * Allows simple annotation of implementations of this class to use
     * {@code @CQLUserTypeNames({"type1","type2",...})}
     *
     * @param codecClass the UDTTransformCodec class which is to be inspected
     * @return the list of target UDT type names, as defined in CQL
     */
    private List<String> getUDTTypeNames(Class<? extends UDTTransformCodec> codecClass) {
        CQLUserTypeNames[] annotationsByType = codecClass.getAnnotationsByType(CQLUserTypeNames.class);
        List<String> cqlTypeNames = new ArrayList<>();
        for (CQLUserTypeNames cqlUserTypeNames : annotationsByType) {
            cqlTypeNames.addAll(Arrays.asList(cqlUserTypeNames.value()));
        }
        return cqlTypeNames;
    }

    /**
     * Allows simple annotation of implementations of this class to use
     * {@code @UDTJavaType(POJOType.class)}
     *
     * @param codecClass the UDTTransformCodec class which is to be inspected
     * @return the class type of the POJO which this codec maps to and from
     */
    private Class<?> getUDTJavaType(Class<? extends UDTTransformCodec> codecClass) {
        UDTJavaType[] annotationsByType = codecClass.getAnnotationsByType(UDTJavaType.class);
        return Arrays.stream(annotationsByType)
            .map(UDTJavaType::value)
            .findFirst()
            .orElseThrow(
                () -> new RuntimeException("Unable to find UDTJavaType annotation for " + codecClass.getCanonicalName())
            );
    }
}

View File

@ -0,0 +1,164 @@
package io.nosqlbench.activitytype.cql.core;
import com.datastax.driver.core.*;
import io.nosqlbench.engine.api.activityconfig.ParsedStmt;
import io.nosqlbench.engine.api.activityconfig.yaml.OpDef;
import java.math.BigDecimal;
import java.net.InetAddress;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
 * Static helpers for binding values into CQL {@link BoundStatement}s and for
 * computing which bindings a statement template actually references.
 */
public class CQLBindHelper {

    // Matches binding anchors in a statement template: either ?name or {name}.
    private final static Pattern stmtToken = Pattern.compile("\\?(\\w+[-_\\d\\w]*)|\\{(\\w+[-_\\d\\w.]*)}");

    /**
     * Re-bind the values of a row into a statement, skipping the synthetic
     * "[applied]" column of LWT results.
     *
     * <p>Bug fix: the switch below previously had no {@code break} statements,
     * so every invocation fell through all cases and always ended in a throw
     * (CUSTOM or the default "Unrecognized type"); the method could never
     * complete successfully.</p>
     *
     * <p>NOTE(review): each case calls {@code ((BoundStatement) statement).bind()},
     * which creates a NEW bound statement whose value is then discarded; the
     * original statement object is returned unchanged. Confirm the intended
     * semantics with the callers.</p>
     *
     * @param statement the statement to rebind (must be a BoundStatement)
     * @param defs the column definitions of the row
     * @param row the row providing replacement values
     * @return the input statement
     */
    public static Statement rebindUnappliedStatement(Statement statement, ColumnDefinitions defs, Row row) {
        for (ColumnDefinitions.Definition def : defs) {
            String name = def.getName();
            if (!name.equals("[applied]")) {
                DataType.Name typeName = def.getType().getName();
                switch (typeName) {
                    case ASCII: // ASCII(1, String.class)
                        ((BoundStatement) statement).bind().setString(name, row.getString(name));
                        break;
                    case VARCHAR: // VARCHAR(13, String.class)
                        ((BoundStatement) statement).bind().setString(name, row.getString(name));
                        break;
                    case TEXT: // TEXT(10, String.class)
                        ((BoundStatement) statement).bind().setString(name, row.getString(name));
                        break;
                    case BIGINT: // BIGINT(2, Long.class)
                        ((BoundStatement) statement).bind().setLong(name, row.getLong(name));
                        break;
                    case COUNTER: // COUNTER(5, Long.class)
                        ((BoundStatement) statement).bind().setLong(name, row.getLong(name));
                        break;
                    case BLOB: // BLOB(3, ByteBuffer.class)
                        ((BoundStatement) statement).bind().setBytes(name, row.getBytes(name));
                        break;
                    case CUSTOM: // CUSTOM(0, ByteBuffer.class)
                        throw new RuntimeException("The diagnostic binder does not understand custom types yet.");
                    case BOOLEAN: // BOOLEAN(4, Boolean.class)
                        ((BoundStatement) statement).bind().setBool(name, row.getBool(name));
                        break;
                    case DECIMAL: // DECIMAL(6, BigDecimal.class)
                        ((BoundStatement) statement).bind().setDecimal(name, row.getDecimal(name));
                        break;
                    case DOUBLE: // DOUBLE(7, Double.class)
                        ((BoundStatement) statement).bind().setDouble(name, row.getDouble(name));
                        break;
                    case FLOAT: // FLOAT(8, Float.class)
                        ((BoundStatement) statement).bind().setFloat(name, row.getFloat(name));
                        break;
                    case INET: // INET(16, InetAddress.class)
                        ((BoundStatement) statement).bind().setInet(name, row.getInet(name));
                        break;
                    case INT: // INT(9, Integer.class)
                        ((BoundStatement) statement).bind().setInt(name, row.getInt(name));
                        break;
                    case TIMESTAMP: // TIMESTAMP(11, Date.class)
                        ((BoundStatement) statement).bind().setTimestamp(name, row.getTimestamp(name));
                        break;
                    case UUID: // UUID(12, UUID.class)
                        ((BoundStatement) statement).bind().setUUID(name, row.getUUID(name));
                        break;
                    case TIMEUUID: // TIMEUUID(15, UUID.class)
                        ((BoundStatement) statement).bind().setUUID(name, row.getUUID(name));
                        break;
                    case VARINT: // VARINT(14, BigInteger.class)
                        // NOTE(review): VARINT is narrowed through int here — confirm.
                        ((BoundStatement) statement).bind().setInt(name, row.getInt(name));
                        break;
                    case UDT: // UDT(48, UDTValue.class)
                        ((BoundStatement) statement).bind().setUDTValue(name, row.getUDTValue(name));
                        break;
                    case TUPLE: // TUPLE(49, TupleValue.class)
                        ((BoundStatement) statement).bind().setTupleValue(name, row.getTupleValue(name));
                        break;
                    case SMALLINT:
                        ((BoundStatement) statement).bind().setInt(name, row.getInt(name));
                        break;
                    case TINYINT:
                        ((BoundStatement) statement).bind().setInt(name, row.getInt(name));
                        break;
                    case DATE:
                        ((BoundStatement) statement).bind().setDate(name, row.getDate(name));
                        break;
                    case TIME:
                        ((BoundStatement) statement).bind().setTime(name, row.getTime(name));
                        break;
                    default:
                        throw new RuntimeException("Unrecognized type:" + typeName);
                }
            }
        }
        return statement;
    }

    /**
     * Bind a single named value of the given CQL type into a statement.
     *
     * @param statement the statement to bind into (must be a BoundStatement)
     * @param name the bind variable name
     * @param value the value, cast according to typeName
     * @param typeName the CQL data type of the value
     * @return the resulting BoundStatement
     * @throws RuntimeException for CUSTOM or unrecognized types
     */
    public static BoundStatement bindStatement(Statement statement, String name, Object value, DataType.Name typeName) {
        switch (typeName) {
            case ASCII: // ASCII(1, String.class)
                return ((BoundStatement) statement).bind().setString(name, (String) value);
            case VARCHAR: // VARCHAR(13, String.class)
                return ((BoundStatement) statement).bind().setString(name, (String) value);
            case TEXT: // TEXT(10, String.class)
                return ((BoundStatement) statement).bind().setString(name, (String) value);
            case BIGINT: // BIGINT(2, Long.class)
                return ((BoundStatement) statement).bind().setLong(name, (long) value);
            case COUNTER: // COUNTER(5, Long.class)
                return ((BoundStatement) statement).bind().setLong(name, (long) value);
            case BLOB: // BLOB(3, ByteBuffer.class)
                return ((BoundStatement) statement).bind().setBytes(name, (ByteBuffer) value);
            case CUSTOM: // CUSTOM(0, ByteBuffer.class)
                throw new RuntimeException("The diagnostic binder does not understand custom types yet.");
            case BOOLEAN: // BOOLEAN(4, Boolean.class)
                return ((BoundStatement) statement).bind().setBool(name, (boolean) value);
            case DECIMAL: // DECIMAL(6, BigDecimal.class)
                return ((BoundStatement) statement).bind().setDecimal(name, (BigDecimal) value);
            case DOUBLE: // DOUBLE(7, Double.class)
                return ((BoundStatement) statement).bind().setDouble(name, (double) value);
            case FLOAT: // FLOAT(8, Float.class)
                return ((BoundStatement) statement).bind().setFloat(name, (float) value);
            case INET: // INET(16, InetAddress.class)
                return ((BoundStatement) statement).bind().setInet(name, (InetAddress) value);
            case INT: // INT(9, Integer.class)
                return ((BoundStatement) statement).bind().setInt(name, (int) value);
            case TIMESTAMP: // TIMESTAMP(11, Date.class)
                return ((BoundStatement) statement).bind().setTimestamp(name, (Date) value);
            case UUID: // UUID(12, UUID.class)
                return ((BoundStatement) statement).bind().setUUID(name, (UUID) value);
            case TIMEUUID: // TIMEUUID(15, UUID.class)
                return ((BoundStatement) statement).bind().setUUID(name, (UUID) value);
            case VARINT: // VARINT(14, BigInteger.class)
                return ((BoundStatement) statement).bind().setInt(name, (int) value);
            case UDT: // UDT(48, UDTValue.class)
                return ((BoundStatement) statement).bind().setUDTValue(name, (UDTValue) value);
            case TUPLE: // TUPLE(49, TupleValue.class
                return ((BoundStatement) statement).bind().setTupleValue(name, (TupleValue) value);
            case SMALLINT:
                return ((BoundStatement) statement).bind().setInt(name, (int) value);
            case TINYINT:
                return ((BoundStatement) statement).bind().setInt(name, (int) value);
            case DATE:
                return ((BoundStatement) statement).bind().setDate(name, (LocalDate) value);
            case TIME:
                return ((BoundStatement) statement).bind().setTime(name, (long) value);
            default:
                throw new RuntimeException("Unrecognized type:" + typeName);
        }
    }

    /**
     * Scan a statement template for ?name / {name} anchors and return, in
     * encounter order, the binding definitions that are actually referenced.
     * A duplicate anchor is stored under a uniquified key (name + random UUID)
     * so both occurrences are retained.
     *
     * @param opDef the op definition providing the statement text and bindings
     * @param parsed the parsed statement (currently unused; kept for interface
     *        stability — NOTE(review): confirm whether it can be dropped)
     * @return the referenced bindings, keyed by anchor name
     */
    public static Map<String, String> parseAndGetSpecificBindings(OpDef opDef, ParsedStmt parsed) {
        String statement = opDef.getStmt();
        Set<String> extraBindings = new HashSet<>(opDef.getBindings().keySet());
        Map<String, String> specificBindings = new LinkedHashMap<>();

        // Removed: unused "spans"/"remainder" locals from the original.
        Matcher m = stmtToken.matcher(statement);
        int lastMatch = 0;
        while (m.find(lastMatch)) {
            String form1 = m.group(1); // ?name form
            String form2 = m.group(2); // {name} form
            String tokenName = (form1 != null && !form1.isEmpty()) ? form1 : form2;
            lastMatch = m.end();
            if (extraBindings.contains(tokenName)) {
                if (specificBindings.get(tokenName) != null) {
                    String postfix = UUID.randomUUID().toString();
                    specificBindings.put(tokenName + postfix, opDef.getBindings().get(tokenName));
                } else {
                    specificBindings.put(tokenName, opDef.getBindings().get(tokenName));
                }
            }
        }
        return specificBindings;
    }
}

View File

@ -0,0 +1,361 @@
package io.nosqlbench.activitytype.cql.core;
import com.datastax.driver.core.*;
import com.datastax.driver.core.policies.*;
import io.netty.util.HashedWheelTimer;
import io.nosqlbench.nb.api.errors.BasicError;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;
import java.net.InetSocketAddress;
import java.util.*;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
public class CQLOptions {
private final static Logger logger = LogManager.getLogger(CQLOptions.class);
private final static Pattern CORE_AND_MAX_RQ_PATTERN = Pattern.compile("(?<core>\\d+)(:(?<max>\\d+)(:(?<rq>\\d+))?)?(,(?<rcore>\\d+)(:(?<rmax>\\d+)(:(?<rrq>\\d+))?)?)?(,?heartbeat_interval_s:(?<heartbeatinterval>\\d+))?(,?idle_timeout_s:(?<idletimeout>\\d+))?(,?pool_timeout_ms:(?<pooltimeout>\\d+))?");
private final static Pattern PERCENTILE_EAGER_PATTERN = Pattern.compile("^p(?<pctile>[^:]+)(:(?<executions>\\d+))?(:(?<tracked>\\d+)ms)?$");
private final static Pattern CONSTANT_EAGER_PATTERN = Pattern.compile("^((?<msThreshold>\\d++)ms)(:(?<executions>\\d+))?$");
// Builds a constant-delay speculative execution policy.
// NOTE(review): threshold is presumably a delay in ms — confirm against the
// driver's ConstantSpeculativeExecutionPolicy constructor docs.
private static ConstantSpeculativeExecutionPolicy constantPolicy(int threshold, int executions) {
    return new ConstantSpeculativeExecutionPolicy(threshold, executions);
}
private static SpeculativeExecutionPolicy percentilePolicy(long tracked, double threshold, int executions) {
PerHostPercentileTracker tracker = newTracker(tracked);
return new PercentileSpeculativeExecutionPolicy(tracker, threshold, executions);
}
private static PerHostPercentileTracker newTracker(long millis) {
return PerHostPercentileTracker.builder(millis).build();
}
public static PoolingOptions poolingOptionsFor(String spec) {
Matcher matcher = CORE_AND_MAX_RQ_PATTERN.matcher(spec);
if (matcher.matches()) {
PoolingOptions poolingOptions = new PoolingOptions();
Optional.ofNullable(matcher.group("core")).map(Integer::valueOf)
.ifPresent(core -> poolingOptions.setCoreConnectionsPerHost(HostDistance.LOCAL, core));
Optional.ofNullable(matcher.group("max")).map(Integer::valueOf)
.ifPresent(max -> poolingOptions.setMaxConnectionsPerHost(HostDistance.LOCAL, max));
Optional.ofNullable(matcher.group("rq")).map(Integer::valueOf)
.ifPresent(rq -> poolingOptions.setMaxRequestsPerConnection(HostDistance.LOCAL, rq));
Optional.ofNullable(matcher.group("rcore")).map(Integer::valueOf)
.ifPresent(rcore -> poolingOptions.setCoreConnectionsPerHost(HostDistance.REMOTE, rcore));
Optional.ofNullable(matcher.group("rmax")).map(Integer::valueOf)
.ifPresent(rmax -> poolingOptions.setMaxConnectionsPerHost(HostDistance.REMOTE, rmax));
Optional.ofNullable(matcher.group("rrq")).map(Integer::valueOf)
.ifPresent(rrq -> poolingOptions.setMaxRequestsPerConnection(HostDistance.REMOTE, rrq));
Optional.ofNullable(matcher.group("heartbeatinterval")).map(Integer::valueOf)
.ifPresent(poolingOptions::setHeartbeatIntervalSeconds);
Optional.ofNullable(matcher.group("idletimeout")).map(Integer::valueOf)
.ifPresent(poolingOptions::setIdleTimeoutSeconds);
Optional.ofNullable(matcher.group("pooltimeout")).map(Integer::valueOf)
.ifPresent(poolingOptions::setPoolTimeoutMillis);
return poolingOptions;
}
throw new RuntimeException("No pooling options could be parsed from spec: " + spec);
}
public static RetryPolicy retryPolicyFor(String spec) {
Set<String> retryBehaviors = Arrays.stream(spec.split(",")).map(String::toLowerCase).collect(Collectors.toSet());
RetryPolicy retryPolicy = DefaultRetryPolicy.INSTANCE;
if (retryBehaviors.contains("default")) {
return retryPolicy;
} // add other mutually-exclusive behaviors here with checks, if we want to extend beyond "default"
if (retryBehaviors.contains("logging")) {
retryPolicy = new LoggingRetryPolicy(retryPolicy);
}
return retryPolicy;
}
public static ReconnectionPolicy reconnectPolicyFor(String spec) {
if (spec.startsWith("exponential(")) {
String argsString = spec.substring(12);
String[] args = argsString.substring(0, argsString.length() - 1).split("[,;]");
if (args.length != 2) {
throw new BasicError("Invalid reconnectionpolicy, try reconnectionpolicy=exponential(<baseDelay>, <maxDelay>)");
}
long baseDelay = Long.parseLong(args[0]);
long maxDelay = Long.parseLong(args[1]);
return new ExponentialReconnectionPolicy(baseDelay, maxDelay);
} else if (spec.startsWith("constant(")) {
String argsString = spec.substring(9);
long constantDelayMs = Long.parseLong(argsString.substring(0, argsString.length() - 1));
return new ConstantReconnectionPolicy(constantDelayMs);
}
throw new BasicError("Invalid reconnectionpolicy, try reconnectionpolicy=exponential(<baseDelay>, <maxDelay>) or constant(<constantDelayMs>)");
}
public static SocketOptions socketOptionsFor(String spec) {
String[] assignments = spec.split("[,;]");
Map<String, String> values = new HashMap<>();
for (String assignment : assignments) {
String[] namevalue = assignment.split("[:=]", 2);
String name = namevalue[0];
String value = namevalue[1];
values.put(name, value);
}
SocketOptions options = new SocketOptions();
Optional.ofNullable(values.get("read_timeout_ms")).map(Integer::parseInt).ifPresent(
options::setReadTimeoutMillis
);
Optional.ofNullable(values.get("connect_timeout_ms")).map(Integer::parseInt).ifPresent(
options::setConnectTimeoutMillis
);
Optional.ofNullable(values.get("keep_alive")).map(Boolean::parseBoolean).ifPresent(
options::setKeepAlive
);
Optional.ofNullable(values.get("reuse_address")).map(Boolean::parseBoolean).ifPresent(
options::setReuseAddress
);
Optional.ofNullable(values.get("so_linger")).map(Integer::parseInt).ifPresent(
options::setSoLinger
);
Optional.ofNullable(values.get("tcp_no_delay")).map(Boolean::parseBoolean).ifPresent(
options::setTcpNoDelay
);
Optional.ofNullable(values.get("receive_buffer_size")).map(Integer::parseInt).ifPresent(
options::setReceiveBufferSize
);
Optional.ofNullable(values.get("send_buffer_size")).map(Integer::parseInt).ifPresent(
options::setSendBufferSize
);
return options;
}
public static SpeculativeExecutionPolicy defaultSpeculativePolicy() {
PerHostPercentileTracker tracker = PerHostPercentileTracker
.builder(15000)
.build();
PercentileSpeculativeExecutionPolicy defaultSpecPolicy =
new PercentileSpeculativeExecutionPolicy(tracker, 99.0, 5);
return defaultSpecPolicy;
}
public static SpeculativeExecutionPolicy speculativeFor(String spec) {
Matcher pctileMatcher = PERCENTILE_EAGER_PATTERN.matcher(spec);
Matcher constantMatcher = CONSTANT_EAGER_PATTERN.matcher(spec);
if (spec.toLowerCase().trim().matches("disabled|none")) {
return null;
} else if (spec.toLowerCase().trim().equals("default")) {
return defaultSpeculativePolicy();
} else if (pctileMatcher.matches()) {
double pctile = Double.valueOf(pctileMatcher.group("pctile"));
if (pctile > 100.0 || pctile < 0.0) {
throw new RuntimeException("pctile must be between 0.0 and 100.0");
}
String executionsSpec = pctileMatcher.group("executions");
String trackedSpec = pctileMatcher.group("tracked");
int executions = (executionsSpec != null && !executionsSpec.isEmpty()) ? Integer.valueOf(executionsSpec) : 5;
int tracked = (trackedSpec != null && !trackedSpec.isEmpty()) ? Integer.valueOf(trackedSpec) : 15000;
logger.debug("speculative: Creating new percentile tracker policy from spec '" + spec + "'");
return percentilePolicy(tracked, pctile, executions);
} else if (constantMatcher.matches()) {
int threshold = Integer.valueOf(constantMatcher.group("msThreshold"));
String executionsSpec = constantMatcher.group("executions");
int executions = (executionsSpec != null && !executionsSpec.isEmpty()) ? Integer.valueOf(executionsSpec) : 5;
logger.debug("speculative: Creating new constant policy from spec '" + spec + "'");
return constantPolicy(threshold, executions);
} else {
throw new RuntimeException("Unable to parse pattern for speculative option: " + spec + ", it must be in " +
"an accepted form, like p99.0:5:15000, or p99.0:5, or 5000ms:5");
}
}
public static LoadBalancingPolicy whitelistFor(String s, LoadBalancingPolicy innerPolicy) {
String[] addrSpecs = s.split(",");
List<InetSocketAddress> sockAddrs = Arrays.stream(addrSpecs)
.map(CQLOptions::toSocketAddr)
.collect(Collectors.toList());
if (innerPolicy == null) {
innerPolicy = new RoundRobinPolicy();
}
return new WhiteListPolicy(innerPolicy, sockAddrs);
}
public static LoadBalancingPolicy lbpolicyFor(String polspec, LoadBalancingPolicy policy) {
Pattern polcall = Pattern.compile(",?(?<policyname>\\w+)\\((?<args>[^)]+)?\\)");
Matcher matcher = polcall.matcher(polspec);
Deque<List<String>> policies = new ArrayDeque<>();
while (matcher.find()) {
String policyname = matcher.group("policyname");
String argsgroup = matcher.group("args");
String args = argsgroup==null ? "" : argsgroup;
logger.debug("policyname=" + policyname);
logger.debug("args=" + args);
policies.push(List.of(policyname,args));
}
// reverse order for proper nesting
while (policies.size()>0) {
List<String> nextpolicy = policies.pop();
String policyname = nextpolicy.get(0)
.replaceAll("_", "")
.replaceAll("policy", "");
String argslist = nextpolicy.get(1);
String[] args= argslist.isBlank() ? new String[0] : argslist.split(",");
switch (policyname) {
case "WLP":
case "whitelist":
List<InetSocketAddress> sockAddrs = Arrays.stream(args)
.map(CQLOptions::toSocketAddr)
.collect(Collectors.toList());
policy = new WhiteListPolicy(policy, sockAddrs);
break;
case "TAP":
case "tokenaware":
TokenAwarePolicy.ReplicaOrdering ordering = TokenAwarePolicy.ReplicaOrdering.NEUTRAL;
if (args.length==1) {
if (args[0].startsWith("ordering=") || args[0].startsWith("ordering:")) {
String orderingSpec = args[0].substring("ordering=".length()).toUpperCase();
ordering=TokenAwarePolicy.ReplicaOrdering.valueOf(orderingSpec);
} else {
throw new BasicError("Unrecognized option for " + TokenAwarePolicy.class.getCanonicalName());
}
}
policy = new TokenAwarePolicy(policy, ordering);
break;
case "LAP":
case "latencyaware":
policy = latencyAwarePolicyFor(args,policy);
break;
case "DCARRP":
case "dcawareroundrobin":
case "datacenterawareroundrobin":
if (policy!=null) {
throw new BasicError(DCAwareRoundRobinPolicy.class.getCanonicalName() + " can not wrap another policy.");
}
policy = dcAwareRoundRobinPolicyFor(args);
break;
default:
throw new BasicError("Unrecognized policy selector '" + policyname + "', please select one of WLP,TAP,LAP,DCARRP, or " +
"one of whitelist, tokenaware, latencyaware, dcawareroundrobin.");
}
}
return policy;
}
private static LoadBalancingPolicy dcAwareRoundRobinPolicyFor(String[] args) {
if (args.length==0){
throw new BasicError(DCAwareRoundRobinPolicy.class.getCanonicalName() + " requires a local DC name.");
}
DCAwareRoundRobinPolicy.Builder builder = DCAwareRoundRobinPolicy.builder();
for (String arg : args) {
String[] kv = arg.split("[:=]", 2);
if (kv.length != 2) {
throw new BasicError("LatencyAwarePolicy specifier requires named parameters like `exclusion_threshold=23.0`");
}
switch(kv[0]) {
case "local":
case "localdc":
builder.withLocalDc(kv[1]);
break;
default:
throw new BasicError("Unknown option for " + DCAwareRoundRobinPolicy.class.getSimpleName() + ": '" + kv[0] + "'");
}
}
return builder.build();
}
private static LoadBalancingPolicy latencyAwarePolicyFor(String[] args, LoadBalancingPolicy childPolicy) {
LatencyAwarePolicy.Builder builder = LatencyAwarePolicy.builder(childPolicy);
for (String arg : args) {
String[] kv = arg.split("[:=]", 2);
if (kv.length != 2) {
throw new BasicError("LatencyAwarePolicy specifier requires named parameters like `exclusion_threshold=23.0`");
}
switch (kv[0]) {
case "exclusion_threshold":
case "et":
builder = builder.withExclusionThreshold(Double.parseDouble(kv[1]));
break;
case "minimum_measurements":
case "mm":
builder = builder.withMininumMeasurements(Integer.parseInt(kv[1]));
break;
case "retry_period_ms":
case "rp_ms":
builder = builder.withRetryPeriod(Long.parseLong(kv[1]), TimeUnit.MILLISECONDS);
break;
case "retry_period":
case "rp":
builder = builder.withRetryPeriod(Long.parseLong(kv[1]), TimeUnit.SECONDS);
break;
case "scale":
case "s":
builder = builder.withScale(Long.parseLong(kv[1]), TimeUnit.SECONDS);
break;
case "scale_ms":
case "s_ms":
builder = builder.withScale(Long.parseLong(kv[1]), TimeUnit.MILLISECONDS);
break;
case "update_rate":
case "ur":
builder.withUpdateRate(Long.parseLong(kv[1]), TimeUnit.SECONDS);
break;
case "update_rate_ms":
case "ur_ms":
builder.withUpdateRate(Long.parseLong(kv[1]), TimeUnit.MILLISECONDS);
break;
}
}
return builder.build();
}
public static NettyOptions withTickDuration(String tick) {
logger.info("Cluster builder using custom tick duration value for HashedWheelTimer: " + tick + " milliseconds");
int tickDuration = Integer.valueOf(tick);
return new NettyOptions() {
public io.netty.util.Timer timer(ThreadFactory threadFactory) {
return new HashedWheelTimer(
threadFactory, tickDuration, TimeUnit.MILLISECONDS);
}
};
}
private static InetSocketAddress toSocketAddr(String addr) {
String[] addrs = addr.split(":", 2);
String inetHost = addrs[0];
String inetPort = (addrs.length == 2) ? addrs[1] : "9042";
return new InetSocketAddress(inetHost, Integer.valueOf(inetPort));
}
public static ProtocolOptions.Compression withCompression(String compspec) {
try {
return ProtocolOptions.Compression.valueOf(compspec);
} catch (IllegalArgumentException iae) {
throw new RuntimeException("Compression option '" + compspec + "' was specified, but only " +
Arrays.toString(ProtocolOptions.Compression.values()) + " are available.");
}
}
}

View File

@ -0,0 +1,363 @@
package io.nosqlbench.activitytype.cql.core;
import com.codahale.metrics.Timer;
import com.datastax.driver.core.*;
import io.nosqlbench.activitytype.cql.api.ResultSetCycleOperator;
import io.nosqlbench.activitytype.cql.api.RowCycleOperator;
import io.nosqlbench.activitytype.cql.api.StatementFilter;
import io.nosqlbench.activitytype.cql.errorhandling.ErrorStatus;
import io.nosqlbench.activitytype.cql.errorhandling.HashedCQLErrorHandler;
import io.nosqlbench.activitytype.cql.errorhandling.exceptions.CQLCycleWithStatementException;
import io.nosqlbench.activitytype.cql.errorhandling.exceptions.ChangeUnappliedCycleException;
import io.nosqlbench.activitytype.cql.errorhandling.exceptions.MaxTriesExhaustedException;
import io.nosqlbench.activitytype.cql.errorhandling.exceptions.UnexpectedPagingException;
import io.nosqlbench.activitytype.cql.statements.core.ReadyCQLStatement;
import com.google.common.util.concurrent.ListenableFuture;
import io.nosqlbench.activitytype.cql.statements.modifiers.StatementModifier;
import io.nosqlbench.engine.api.activityapi.core.ActivityDefObserver;
import io.nosqlbench.engine.api.activityapi.core.MultiPhaseAction;
import io.nosqlbench.engine.api.activityapi.core.SyncAction;
import io.nosqlbench.engine.api.activityapi.planning.OpSequence;
import io.nosqlbench.engine.api.activityimpl.ActivityDef;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;
import java.util.List;
import java.util.concurrent.TimeUnit;
@SuppressWarnings("Duplicates")
/**
 * Synchronous per-thread action for the cql driver. Each invocation of
 * {@link #runCycle(long)} binds one statement from the op sequence, executes it,
 * and applies any configured result-set / row operators and statement modifiers.
 * When a result set is not fully fetched and maxpages allows it, the paging
 * state is parked in fields and subsequent phases (via {@link MultiPhaseAction})
 * fetch the remaining pages.
 */
public class CqlAction implements SyncAction, MultiPhaseAction, ActivityDefObserver {

    private final static Logger logger = LogManager.getLogger(CqlAction.class);
    private final int slot;
    private final CqlActivity cqlActivity;
    private final ActivityDef activityDef;
    // operators and filters are refreshed from the activity in onActivityDefUpdate
    private List<RowCycleOperator> rowOps;
    private List<ResultSetCycleOperator> cycleOps;
    private List<StatementModifier> modifiers;
    private StatementFilter statementFilter;
    private OpSequence<ReadyCQLStatement> sequencer;
    private int maxTries = 10; // how many cycles a statement will be attempted for before giving up
    private HashedCQLErrorHandler ebdseErrorHandler;

    // paging state carried across runPhase invocations while a query is incomplete
    private int pagesFetched = 0;
    private long totalRowsFetchedForQuery = 0L;
    private ResultSet pagingResultSet;
    private Statement pagingStatement;
    private ReadyCQLStatement pagingReadyStatement;
    private boolean showcql;
    private long nanoStartTime;
    private long retryDelay;
    private long maxRetryDelay;
    private boolean retryReplace;

    public CqlAction(ActivityDef activityDef, int slot, CqlActivity cqlActivity) {
        this.activityDef = activityDef;
        this.cqlActivity = cqlActivity;
        this.slot = slot;
        onActivityDefUpdate(activityDef);
    }

    @Override
    public void init() {
        onActivityDefUpdate(activityDef);
        this.sequencer = cqlActivity.getOpSequencer();
    }

    @Override
    public int runCycle(long cycle) {
        // In this activity type, we use the same phase
        // logic for the initial phase (runCycle(...))
        // as well as subsequent phases.
        return runPhase(cycle);
    }

    /**
     * Execute one phase for the given cycle: either bind-and-execute a new
     * statement (when no paging is in progress) or fetch the next page of the
     * in-flight query. Returns 0 on success, or the error handler's result code
     * for a non-retryable error.
     */
    public int runPhase(long cycleValue) {
        HashedCQLErrorHandler.resetThreadStatusCode();
        if (pagingResultSet == null) {
            // fresh statement path: bind, optionally filter/modify, then execute with retries
            totalRowsFetchedForQuery = 0L;

            Statement statement;
            ResultSetFuture resultSetFuture;
            ReadyCQLStatement readyCQLStatement;

            int tries = 0;

            try (Timer.Context bindTime = cqlActivity.bindTimer.time()) {
                readyCQLStatement = sequencer.get(cycleValue);
                readyCQLStatement.onStart();

                statement = readyCQLStatement.bind(cycleValue);

                if (statementFilter != null) {
                    if (!statementFilter.matches(statement)) {
                        // filtered-out statements are counted but not executed
                        cqlActivity.skippedTokensHisto.update(cycleValue);
                        return 0;
                    }
                }

                if (modifiers != null) {
                    for (StatementModifier modifier : modifiers) {
                        statement = modifier.modify(statement, cycleValue);
                    }
                }

                if (showcql) {
                    logger.info("CQL(cycle=" + cycleValue + "):\n" + readyCQLStatement.getQueryString(cycleValue));
                }
            }
            nanoStartTime = System.nanoTime();

            // NOTE(review): if every attempt fails with a retryable error, this loop
            // exits with tries == maxTries and the method falls through to return 0;
            // the (tries > maxTries) check below can never fire given the loop
            // condition plus single increment — confirm whether a
            // MaxTriesExhaustedException was intended on exhaustion.
            while (tries < maxTries) {
                tries++;
                if (tries > maxTries) {
                    throw new MaxTriesExhaustedException(cycleValue, maxTries);
                }

                if (tries > 1) {
                    // exponential backoff between retries, capped at maxRetryDelay
                    try (Timer.Context retryTime = cqlActivity.retryDelayTimer.time()) {
                        Thread.sleep(Math.min((retryDelay << tries) / 1000, maxRetryDelay / 1000));
                    } catch (InterruptedException ignored) {
                    }
                }

                try (Timer.Context executeTime = cqlActivity.executeTimer.time()) {
                    resultSetFuture = cqlActivity.getSession().executeAsync(statement);
                }

                // resultTime is stopped exactly once: either in the catch (and nulled)
                // or in the finally below
                Timer.Context resultTime = cqlActivity.resultTimer.time();
                try {
                    ResultSet resultSet = resultSetFuture.getUninterruptibly();

                    if (cycleOps != null) {
                        for (ResultSetCycleOperator cycleOp : cycleOps) {
                            cycleOp.apply(resultSet, statement, cycleValue);
                        }
                    }

                    ResultSetCycleOperator[] perStmtRSOperators = readyCQLStatement.getResultSetOperators();
                    if (perStmtRSOperators != null) {
                        for (ResultSetCycleOperator perStmtRSOperator : perStmtRSOperators) {
                            perStmtRSOperator.apply(resultSet, statement, cycleValue);
                        }
                    }

                    if (!resultSet.wasApplied()) {
                        // LWT was not applied; optionally rebind from the returned row before
                        // throwing so a retry can use the current values
                        //resultSet.b
                        Row row = resultSet.one();
                        ColumnDefinitions defs = row.getColumnDefinitions();
                        if (retryReplace) {
                            statement = CQLBindHelper.rebindUnappliedStatement(statement, defs, row);
                        }

                        logger.trace(readyCQLStatement.getQueryString(cycleValue));
                        // To make exception handling logic flow more uniformly
                        throw new ChangeUnappliedCycleException(
                            cycleValue, resultSet, readyCQLStatement.getQueryString(cycleValue)
                        );
                    }
                    int pageRows = resultSet.getAvailableWithoutFetching();
                    int remaining = pageRows;
                    RowCycleOperator[] perStmtRowOperators = readyCQLStatement.getRowCycleOperators();
                    if (rowOps == null && perStmtRowOperators==null) {
                        // no row operators: still consume the page so paging state advances
                        while (remaining-- > 0) {
                            Row row = resultSet.one();
                            // NOTE: This has been replaced by:
                            // params:
                            //  rowops: savevars
                            // You must add this to the YAML for statements that are meant to capture vars
                            // HashMap<String, Object> bindings = SharedState.tl_ObjectMap.get();
                            // for (ColumnDefinitions.Definition cdef : row.getColumnDefinitions()) {
                            //     bindings.put(cdef.getName(), row.getObject(cdef.getName()));
                            // }
                            //
                        }
                    } else {
                        while (remaining-- > 0) {
                            Row onerow = resultSet.one();
                            if (rowOps!=null) {
                                for (RowCycleOperator rowOp : rowOps) {
                                    rowOp.apply(onerow, cycleValue);
                                }
                            }
                            if (perStmtRowOperators!=null) {
                                for (RowCycleOperator rowOp : perStmtRowOperators) {
                                    rowOp.apply(onerow, cycleValue);
                                }
                            }
                        }
                    }
                    cqlActivity.rowsCounter.mark(pageRows);
                    totalRowsFetchedForQuery += pageRows;

                    if (resultSet.isFullyFetched()) {
                        long resultNanos = System.nanoTime() - nanoStartTime;
                        cqlActivity.resultSuccessTimer.update(resultNanos, TimeUnit.NANOSECONDS);
                        cqlActivity.resultSetSizeHisto.update(totalRowsFetchedForQuery);
                        readyCQLStatement.onSuccess(cycleValue, resultNanos, totalRowsFetchedForQuery);
                    } else {
                        if (cqlActivity.maxpages > 1) {
                            // park paging state; later phases will fetch the remaining pages
                            pagingResultSet = resultSet;
                            pagingStatement = statement;
                            pagingReadyStatement = readyCQLStatement;
                            pagesFetched = 1;
                        } else {
                            throw new UnexpectedPagingException(
                                cycleValue,
                                resultSet,
                                readyCQLStatement.getQueryString(cycleValue),
                                1,
                                cqlActivity.maxpages,
                                cqlActivity.getSession().getCluster().getConfiguration().getQueryOptions().getFetchSize()
                            );
                        }
                    }
                    break; // This is normal termination of this loop, when retries aren't needed
                } catch (Exception e) {
                    long resultNanos = resultTime.stop();
                    resultTime = null;
                    readyCQLStatement.onError(cycleValue, resultNanos, e);
                    CQLCycleWithStatementException cqlCycleException = new CQLCycleWithStatementException(cycleValue, resultNanos, e, readyCQLStatement);
                    ErrorStatus errorStatus = ebdseErrorHandler.handleError(cycleValue, cqlCycleException);
                    if (!errorStatus.isRetryable()) {
                        cqlActivity.triesHisto.update(tries);
                        return errorStatus.getResultCode();
                    }
                } finally {
                    if (resultTime != null) {
                        resultTime.stop();
                    }
                }
            }
            cqlActivity.triesHisto.update(tries);
        } else {
            // paging path: fetch the next page of the previously parked result set
            int tries = 0;

            while (tries < maxTries) {
                tries++;
                // NOTE(review): unreachable for the same reason as above — the loop
                // condition guarantees tries <= maxTries after the increment
                if (tries > maxTries) {
                    throw new MaxTriesExhaustedException(cycleValue, maxTries);
                }

                ListenableFuture<ResultSet> pagingFuture;

                try (Timer.Context pagingTime = cqlActivity.pagesTimer.time()) {
                    try (Timer.Context executeTime = cqlActivity.executeTimer.time()) {
                        pagingFuture = pagingResultSet.fetchMoreResults();
                    }

                    Timer.Context resultTime = cqlActivity.resultTimer.time();
                    try {
                        ResultSet resultSet = pagingFuture.get();

                        if (cycleOps != null) {
                            for (ResultSetCycleOperator cycleOp : cycleOps) {
                                cycleOp.apply(resultSet, pagingStatement, cycleValue);
                            }
                        }
                        ResultSetCycleOperator[] perStmtRSOperators = pagingReadyStatement.getResultSetOperators();
                        if (perStmtRSOperators != null) {
                            for (ResultSetCycleOperator perStmtRSOperator : perStmtRSOperators) {
                                perStmtRSOperator.apply(resultSet, pagingStatement, cycleValue);
                            }
                        }

                        pagesFetched++;

                        int pageRows = resultSet.getAvailableWithoutFetching();
                        int remaining = pageRows;
                        if (rowOps == null) {
                            while (remaining-- > 0) {
                                resultSet.one();
                            }
                        } else {
                            while (remaining-- > 0) {
                                for (RowCycleOperator rowOp : rowOps) {
                                    rowOp.apply(resultSet.one(), cycleValue);
                                }
                            }
                        }
                        cqlActivity.rowsCounter.mark(pageRows);
                        totalRowsFetchedForQuery += pageRows;

                        if (resultSet.isFullyFetched()) {
                            // query complete: record totals and clear the paging state
                            long nanoTime = System.nanoTime() - nanoStartTime;
                            cqlActivity.resultSuccessTimer.update(nanoTime, TimeUnit.NANOSECONDS);
                            cqlActivity.resultSetSizeHisto.update(totalRowsFetchedForQuery);
                            pagingReadyStatement.onSuccess(cycleValue, nanoTime, totalRowsFetchedForQuery);
                            pagingResultSet = null;
                        } else {
                            if (pagesFetched > cqlActivity.maxpages) {
                                throw new UnexpectedPagingException(
                                    cycleValue,
                                    pagingResultSet,
                                    pagingReadyStatement.getQueryString(cycleValue),
                                    pagesFetched,
                                    cqlActivity.maxpages,
                                    cqlActivity.getSession().getCluster().getConfiguration().getQueryOptions().getFetchSize()
                                );
                            }
                            pagingResultSet = resultSet;
                        }
                        break; // This is normal termination of this loop, when retries aren't needed
                    } catch (Exception e) {
                        long resultNanos = resultTime.stop();
                        resultTime = null;
                        pagingReadyStatement.onError(cycleValue, resultNanos, e);
                        CQLCycleWithStatementException cqlCycleException = new CQLCycleWithStatementException(cycleValue, resultNanos, e, pagingReadyStatement);
                        ErrorStatus errorStatus = ebdseErrorHandler.handleError(cycleValue, cqlCycleException);
                        if (!errorStatus.isRetryable()) {
                            cqlActivity.triesHisto.update(tries);
                            return errorStatus.getResultCode();
                        }
                    } finally {
                        if (resultTime != null) {
                            resultTime.stop();
                        }
                    }
                }
            }
            cqlActivity.triesHisto.update(tries);
        }
        return 0;
    }

    @Override
    public boolean incomplete() {
        // true while a parked result set still has pages to fetch
        return pagingResultSet != null;
    }

    @Override
    public void onActivityDefUpdate(ActivityDef activityDef) {
        // re-read all tunables from the owning activity so runtime param changes take effect
        this.maxTries = cqlActivity.getMaxTries();
        this.retryDelay = cqlActivity.getRetryDelay();
        this.maxRetryDelay = cqlActivity.getMaxRetryDelay();
        this.retryReplace = cqlActivity.isRetryReplace();
        this.showcql = cqlActivity.isShowCql();
        this.ebdseErrorHandler = cqlActivity.getCqlErrorHandler();
        this.statementFilter = cqlActivity.getStatementFilter();
        this.rowOps = cqlActivity.getRowCycleOperators();
        this.cycleOps = cqlActivity.getResultSetCycleOperators();
        this.modifiers = cqlActivity.getStatementModifiers();
    }

    protected CqlActivity getCqlActivity() {
        return cqlActivity;
    }
}

View File

@ -0,0 +1,27 @@
package io.nosqlbench.activitytype.cql.core;
import io.nosqlbench.engine.api.activityapi.core.Action;
import io.nosqlbench.engine.api.activityapi.core.ActionDispenser;
/**
 * Dispenses per-slot actions for a {@link CqlActivity}. When the activity's
 * {@code async} parameter is set above zero, async actions are produced;
 * otherwise synchronous ones are.
 */
public class CqlActionDispenser implements ActionDispenser {

    private final CqlActivity cqlActivity;

    public CqlActionDispenser(CqlActivity activityContext) {
        this.cqlActivity = activityContext;
    }

    /** @return the activity this dispenser was created for */
    public CqlActivity getCqlActivity() {
        return cqlActivity;
    }

    /**
     * Create the action for one thread slot, choosing async or sync mode
     * based on the activity's {@code async} parameter (default 0 = sync).
     */
    public Action getAction(int slot) {
        long asyncOps = cqlActivity.getActivityDef().getParams()
            .getOptionalLong("async").orElse(0L);
        if (asyncOps > 0) {
            return new CqlAsyncAction(cqlActivity, slot);
        }
        return new CqlAction(cqlActivity.getActivityDef(), slot, cqlActivity);
    }
}

View File

@ -0,0 +1,805 @@
package io.nosqlbench.activitytype.cql.core;
import com.codahale.metrics.Histogram;
import com.codahale.metrics.Meter;
import com.codahale.metrics.Timer;
import com.datastax.driver.core.*;
import io.nosqlbench.activitytype.cql.api.ErrorResponse;
import io.nosqlbench.activitytype.cql.api.ResultSetCycleOperator;
import io.nosqlbench.activitytype.cql.api.RowCycleOperator;
import io.nosqlbench.activitytype.cql.api.StatementFilter;
import io.nosqlbench.activitytype.cql.codecsupport.UDTCodecInjector;
import io.nosqlbench.activitytype.cql.errorhandling.HashedCQLErrorHandler;
import io.nosqlbench.activitytype.cql.errorhandling.NBCycleErrorHandler;
import io.nosqlbench.activitytype.cql.statements.binders.CqlBinderTypes;
import io.nosqlbench.activitytype.cql.statements.core.*;
import io.nosqlbench.activitytype.cql.statements.modifiers.StatementModifier;
import io.nosqlbench.activitytype.cql.statements.rowoperators.RowCycleOperators;
import io.nosqlbench.activitytype.cql.statements.rowoperators.Save;
import io.nosqlbench.activitytype.cql.statements.rowoperators.verification.DiffType;
import io.nosqlbench.activitytype.cql.statements.rowoperators.verification.RowDifferencer;
import io.nosqlbench.activitytype.cql.statements.rowoperators.verification.VerificationMetrics;
import io.nosqlbench.activitytype.cql.statements.rowoperators.verification.VerifierBuilder;
import io.nosqlbench.activitytype.cql.statements.rsoperators.AssertSingleRowResultSet;
import io.nosqlbench.activitytype.cql.statements.rsoperators.ResultSetCycleOperators;
import io.nosqlbench.activitytype.cql.statements.rsoperators.TraceLogger;
import io.nosqlbench.engine.api.activityapi.core.Activity;
import io.nosqlbench.engine.api.activityapi.core.ActivityDefObserver;
import io.nosqlbench.engine.api.activityapi.planning.OpSequence;
import io.nosqlbench.engine.api.activityapi.planning.SequencePlanner;
import io.nosqlbench.engine.api.activityapi.planning.SequencerType;
import io.nosqlbench.engine.api.activityconfig.ParsedStmt;
import io.nosqlbench.engine.api.activityconfig.StatementsLoader;
import io.nosqlbench.engine.api.activityconfig.rawyaml.RawStmtDef;
import io.nosqlbench.engine.api.activityconfig.rawyaml.RawStmtsBlock;
import io.nosqlbench.engine.api.activityconfig.rawyaml.RawStmtsDoc;
import io.nosqlbench.engine.api.activityconfig.rawyaml.RawStmtsDocList;
import io.nosqlbench.engine.api.activityconfig.yaml.OpTemplate;
import io.nosqlbench.engine.api.activityconfig.yaml.StmtsDocList;
import io.nosqlbench.engine.api.activityimpl.ActivityDef;
import io.nosqlbench.engine.api.activityimpl.ParameterMap;
import io.nosqlbench.engine.api.activityimpl.SimpleActivity;
import io.nosqlbench.engine.api.metrics.ActivityMetrics;
import io.nosqlbench.engine.api.metrics.ExceptionCountMetrics;
import io.nosqlbench.engine.api.metrics.ExceptionHistoMetrics;
import io.nosqlbench.engine.api.metrics.ThreadLocalNamedTimers;
import io.nosqlbench.engine.api.templating.StrInterpolator;
import io.nosqlbench.engine.api.util.SimpleConfig;
import io.nosqlbench.engine.api.util.TagFilter;
import io.nosqlbench.engine.api.util.Unit;
import io.nosqlbench.nb.api.config.params.Element;
import io.nosqlbench.nb.api.errors.BasicError;
import io.nosqlbench.virtdata.core.bindings.Bindings;
import org.apache.commons.lang3.StringUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import java.io.FileWriter;
import java.io.IOException;
import java.io.Writer;
import java.nio.charset.StandardCharsets;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
@SuppressWarnings("Duplicates")
public class CqlActivity extends SimpleActivity implements Activity, ActivityDefObserver {
private final static Logger logger = LogManager.getLogger(CqlActivity.class);
private final ExceptionCountMetrics exceptionCountMetrics;
private final ExceptionHistoMetrics exceptionHistoMetrics;
private final ActivityDef activityDef;
private final Map<String, Writer> namedWriters = new HashMap<>();
protected List<OpTemplate> stmts;
Timer retryDelayTimer;
Timer bindTimer;
Timer executeTimer;
Timer resultTimer;
Timer resultSuccessTimer;
Timer pagesTimer;
Histogram triesHisto;
Histogram skippedTokensHisto;
Histogram resultSetSizeHisto;
int maxpages;
Meter rowsCounter;
private HashedCQLErrorHandler errorHandler;
private OpSequence<ReadyCQLStatement> opsequence;
private Session session;
private int maxTries;
private StatementFilter statementFilter;
private Boolean showcql;
private List<RowCycleOperator> rowCycleOperators;
private List<ResultSetCycleOperator> resultSetCycleOperators;
private List<StatementModifier> statementModifiers;
private Long maxTotalOpsInFlight;
private long retryDelay;
private long maxRetryDelay;
private boolean retryReplace;
private String pooling;
private VerificationMetrics verificationMetrics;
public CqlActivity(ActivityDef activityDef) {
super(activityDef);
this.activityDef = activityDef;
exceptionCountMetrics = new ExceptionCountMetrics(activityDef);
exceptionHistoMetrics = new ExceptionHistoMetrics(activityDef);
}
private void registerCodecs(Session session) {
UDTCodecInjector injector = new UDTCodecInjector();
injector.injectUserProvidedCodecs(session, true);
}
    /**
     * One-time activity setup: acquire the session, optionally register user
     * codecs (param {@code usercodecs}), build the op sequencer, then register
     * all per-activity metrics. Statement order matters: the session must exist
     * before the sequencer is built, and defaults are derived from the op
     * sequence before metrics registration.
     */
    @Override
    public synchronized void initActivity() {
        logger.debug("initializing activity: " + this.activityDef.getAlias());
        session = getSession();

        if (getParams().getOptionalBoolean("usercodecs").orElse(false)) {
            registerCodecs(session);
        }
        initSequencer();
        setDefaultsFromOpSequence(this.opsequence);

        // metric registration; names here are the metric labels users see
        retryDelayTimer = ActivityMetrics.timer(activityDef, "retry-delay");
        bindTimer = ActivityMetrics.timer(activityDef, "bind");
        executeTimer = ActivityMetrics.timer(activityDef, "execute");
        resultTimer = ActivityMetrics.timer(activityDef, "result");
        triesHisto = ActivityMetrics.histogram(activityDef, "tries");
        pagesTimer = ActivityMetrics.timer(activityDef, "pages");
        rowsCounter = ActivityMetrics.meter(activityDef, "rows");
        skippedTokensHisto = ActivityMetrics.histogram(activityDef, "skipped-tokens");
        resultSuccessTimer = ActivityMetrics.timer(activityDef, "result-success");
        resultSetSizeHisto = ActivityMetrics.histogram(activityDef, "resultset-size");
        onActivityDefUpdate(activityDef);
        logger.debug("activity fully initialized: " + this.activityDef.getAlias());
    }
public synchronized Session getSession() {
if (session == null) {
session = CQLSessionCache.get().getSession(this.getActivityDef());
}
return session;
}
// for testing
public static String canonicalizeBindings(String input) {
StringBuilder sb = new StringBuilder();
Pattern questionPattern = Pattern.compile("\\?(?<arg>\\w+)");
Matcher matcher = questionPattern.matcher(input);
int count = 0;
while (matcher.find()) {
matcher.appendReplacement(sb, "{" + matcher.group("arg") + "}");
count++;
}
matcher.appendTail(sb);
if (count > 0) {
logger.warn("You are using deprecated data binding syntax in '" + input + "'. This is supported in the classic CQL driver," +
" but it is not recognized by other workloads. Please change to the {standard} binding syntax. The canonical" +
" syntax for CQL is rendered automatically.");
}
return sb.toString();
}
/**
 * Builds the executable op sequence for this activity.
 * <p>
 * Loads the statement YAML, applies tag filtering, and for each statement template:
 * prepares it (or builds a simple statement), applies consistency/idempotency options,
 * wires up named timers, save/result-set/row operators, optional verification,
 * instrumentation, and CSV result logging, then adds it to the sequence planner at its
 * configured ratio. The resolved sequence is stored in {@code opsequence}.
 */
private void initSequencer() {
    Session session = getSession();
    Map<String, Object> fconfig = Map.of("session", session);

    SequencerType sequencerType = SequencerType.valueOf(
        getParams().getOptionalString("seq").orElse("bucket")
    );
    SequencePlanner<ReadyCQLStatement> planner = new SequencePlanner<>(sequencerType);

    StmtsDocList unfiltered = loadStmtsYaml();

    // log tag filtering results
    String tagfilter = activityDef.getParams().getOptionalString("tags").orElse("");
    TagFilter tagFilter = new TagFilter(tagfilter);
    unfiltered.getStmts().stream().map(tagFilter::matchesTaggedResult).forEach(r -> logger.debug(r.getLog()));

    stmts = unfiltered.getStmts(tagfilter);
    if (stmts.size() == 0) {
        throw new RuntimeException("There were no unfiltered statements found for this activity.");
    }

    // start/stop timer names are collected so they can be cross-checked at the end
    Set<String> timerStarts = new HashSet<>();
    Set<String> timerStops = new HashSet<>();

    for (OpTemplate stmtDef : stmts) {

        ParsedStmt parsed = stmtDef.getParsed(CqlActivity::canonicalizeBindings).orError();

        boolean prepared = stmtDef.getParamOrDefault("prepared", true);
        boolean parameterized = stmtDef.getParamOrDefault("parameterized", false);
        long ratio = stmtDef.getParamOrDefault("ratio", 1);

        Optional<ConsistencyLevel> cl = stmtDef.getOptionalStringParam("cl", String.class).map(ConsistencyLevel::valueOf);
        Optional<ConsistencyLevel> serial_cl = stmtDef.getOptionalStringParam("serial_cl").map(ConsistencyLevel::valueOf);
        Optional<Boolean> idempotent = stmtDef.getOptionalStringParam("idempotent", Boolean.class);

        StringBuilder psummary = new StringBuilder();

        // instrument may be set per-statement, or inherited from the activity params
        boolean instrument = stmtDef.getOptionalStringParam("instrument", Boolean.class)
            .or(() -> getParams().getOptionalBoolean("instrument"))
            .orElse(false);

        String logresultcsv = stmtDef.getParamOrDefault("logresultcsv", "");
        String logresultcsv_act = getParams().getOptionalString("logresultcsv").orElse("");

        if (!logresultcsv_act.isEmpty() && !logresultcsv_act.equalsIgnoreCase("true")) {
            throw new RuntimeException("At the activity level, only logresultcsv=true is allowed, no other values.");
        }
        // statement-level setting wins; a bare "true" resolves to a per-statement file name
        logresultcsv = !logresultcsv.isEmpty() ? logresultcsv : logresultcsv_act;
        logresultcsv = !logresultcsv.equalsIgnoreCase("true") ? logresultcsv : stmtDef.getName() + "--results.csv";

        logger.debug("readying statement[" + (prepared ? "" : "un") + "prepared]:" + parsed.getStmt());

        ReadyCQLStatementTemplate template;
        String stmtForDriver = parsed.getPositionalStatement(s -> "?");

        if (prepared) {
            psummary.append(" prepared=>true");
            PreparedStatement prepare = getSession().prepare(stmtForDriver);
            cl.ifPresent((conlvl) -> {
                psummary.append(" consistency_level=>").append(conlvl);
                prepare.setConsistencyLevel(conlvl);
            });
            serial_cl.ifPresent((scl) -> {
                // FIX: previously appended the Optional wrapper (serial_cl) rather than the value
                psummary.append(" serial_consistency_level=>").append(scl);
                prepare.setSerialConsistencyLevel(scl);
            });
            idempotent.ifPresent((i) -> {
                // FIX: previously appended the Optional wrapper (idempotent) rather than the value,
                // and used " idempotent=" here while the unprepared branch used " idempotent=>"
                psummary.append(" idempotent=>").append(i);
                prepare.setIdempotent(i);
            });
            CqlBinderTypes binderType = stmtDef.getOptionalStringParam("binder")
                .map(CqlBinderTypes::valueOf)
                .orElse(CqlBinderTypes.DEFAULT);
            template = new ReadyCQLStatementTemplate(fconfig, binderType, getSession(), prepare, ratio,
                parsed.getName());
        } else {
            SimpleStatement simpleStatement = new SimpleStatement(stmtForDriver);
            cl.ifPresent((conlvl) -> {
                psummary.append(" consistency_level=>").append(conlvl);
                simpleStatement.setConsistencyLevel(conlvl);
            });
            serial_cl.ifPresent((scl) -> {
                psummary.append(" serial_consistency_level=>").append(scl);
                simpleStatement.setSerialConsistencyLevel(scl);
            });
            idempotent.ifPresent((i) -> {
                psummary.append(" idempotent=>").append(i);
                simpleStatement.setIdempotent(i);
            });
            template = new ReadyCQLStatementTemplate(fconfig, getSession(), simpleStatement, ratio,
                parsed.getName(), parameterized, null, null);
        }

        Element params = parsed.getParamReader();

        params.get("start-timers", String.class)
            .map(s -> s.split(", *"))
            .map(Arrays::asList)
            .orElse(List.of())
            .stream()
            .forEach(name -> {
                ThreadLocalNamedTimers.addTimer(activityDef, name);
                template.addTimerStart(name);
                timerStarts.add(name);
            });

        params.get("stop-timers", String.class)
            .map(s -> s.split(", *"))
            .map(Arrays::asList)
            .orElse(List.of())
            .stream()
            .forEach(name -> {
                template.addTimerStop(name);
                timerStops.add(name);
            });

        stmtDef.getOptionalStringParam("save")
            .map(s -> s.split("[,: ]"))
            .map(Save::new)
            .ifPresent(save_op -> {
                psummary.append(" save=>").append(save_op.toString());
                template.addRowCycleOperators(save_op);
            });

        stmtDef.getOptionalStringParam("rsoperators")
            .map(s -> s.split(","))
            .stream().flatMap(Arrays::stream)
            .map(ResultSetCycleOperators::newOperator)
            .forEach(rso -> {
                psummary.append(" rsop=>").append(rso.toString());
                template.addResultSetOperators(rso);
            });

        stmtDef.getOptionalStringParam("rowoperators")
            .map(s -> s.split(","))
            .stream().flatMap(Arrays::stream)
            .map(RowCycleOperators::newOperator)
            .forEach(ro -> {
                psummary.append(" rowop=>").append(ro.toString());
                template.addRowCycleOperators(ro);
            });

        // If verify is set on activity, assume all fields should be verified for every
        // statement, otherwise, do per-statement verification for ops which have
        // a verify param
        if (activityDef.getParams().containsKey("verify") ||
            stmtDef.getParams().containsKey("verify") ||
            stmtDef.getParams().containsKey("verify-fields")) {

            // NOTE(review): this resolved verify spec is never passed to the verifier
            // builder below — confirm whether getExpectedValuesTemplate should use it
            String verify = stmtDef.getOptionalStringParam("verify")
                .or(() -> stmtDef.getOptionalStringParam("verify-fields"))
                .or(() -> activityDef.getParams().getOptionalString("verify"))
                .orElse("*");

            DiffType diffType = stmtDef.getOptionalStringParam("compare")
                .or(() -> activityDef.getParams().getOptionalString("compare"))
                .map(DiffType::valueOf).orElse(DiffType.reffields);

            Bindings expected = VerifierBuilder.getExpectedValuesTemplate(stmtDef).resolveBindings();
            VerificationMetrics vmetrics = getVerificationMetrics();
            RowDifferencer.ThreadLocalWrapper differencer = new RowDifferencer.ThreadLocalWrapper(vmetrics, expected, diffType);
            psummary.append(" rowop=>verify-fields:").append(differencer.toString());

            template.addResultSetOperators(new AssertSingleRowResultSet());
            template.addRowCycleOperators(differencer);
        }

        if (instrument) {
            logger.info("Adding per-statement success and error and resultset-size timers to statement '" + parsed.getName() + "'");
            template.instrument(this);
            psummary.append(" instrument=>true");
        }

        if (!logresultcsv.isEmpty()) {
            logger.info("Adding per-statement result CSV logging to statement '" + parsed.getName() + "'");
            template.logResultCsv(this, logresultcsv);
            psummary.append(" logresultcsv=>").append(logresultcsv);
        }

        template.getContextualBindings().getBindingsTemplate().addFieldBindings(stmtDef.getParsed().getBindPoints());

        if (psummary.length() > 0) {
            logger.info("statement named '" + stmtDef.getName() + "' has custom settings:" + psummary.toString());
        }

        planner.addOp(template.resolve(), ratio);
    }

    // timers must be started and stopped by matching names across the workload
    if (!timerStarts.equals(timerStops)) {
        throw new BasicError("The names for timer-starts and timer-stops must be matched up. " +
            "timer-starts:" + timerStarts + ", timer-stops:" + timerStops);
    }

    opsequence = planner.resolve();
}
/**
 * Lazily creates the verification metrics holder used by verify-enabled statements.
 */
private synchronized VerificationMetrics getVerificationMetrics() {
    if (verificationMetrics != null) {
        return verificationMetrics;
    }
    verificationMetrics = new VerificationMetrics(this.activityDef);
    return verificationMetrics;
}
/**
 * Loads the statements yaml named by the "yaml" (or "workload") parameter.
 * <p>
 * A ":1" or ":2" suffix on the yaml name selects the legacy (version 1) or modern
 * (version 2) format explicitly. With no suffix, the modern format is tried first,
 * with the legacy loader as a silent fallback.
 *
 * @return the loaded statements doc list
 */
private StmtsDocList loadStmtsYaml() {
    StmtsDocList doclist = null;

    String yaml_loc = activityDef.getParams().getOptionalString("yaml", "workload").orElse("default");

    StrInterpolator interp = new StrInterpolator(activityDef);

    String yamlVersion = "unset";
    if (yaml_loc.endsWith(":1") || yaml_loc.endsWith(":2")) {
        yamlVersion = yaml_loc.substring(yaml_loc.length() - 1);
        yaml_loc = yaml_loc.substring(0, yaml_loc.length() - 2);
    }

    switch (yamlVersion) {
        case "1":
            doclist = getVersion1StmtsDoc(interp, yaml_loc);
            if (activityDef.getParams().getOptionalBoolean("ignore_important_warnings").orElse(false)) {
                logger.warn("DEPRECATED-FORMAT: Loaded yaml " + yaml_loc + " with compatibility mode. " +
                    "This will be deprecated in a future release.");
                logger.warn("DEPRECATED-FORMAT: Please refer to " +
                    "http://docs.engineblock.io/user-guide/standard_yaml/ for more details.");
            } else {
                // FIX: added the missing space between "easy" and "to convert" in the message
                throw new BasicError("DEPRECATED-FORMAT: Loaded yaml " + yaml_loc + " with compatibility mode. " +
                    "This has been deprecated for a long time now. You should use the modern yaml format, which is easy " +
                    "to convert to. If you want to ignore this and kick the issue" +
                    " down the road to someone else, then you can add ignore_important_warnings=true. " +
                    "Please refer to " +
                    "http://docs.engineblock.io/user-guide/standard_yaml/ for more details.");
            }
            break;
        case "2":
            doclist = StatementsLoader.loadPath(logger, yaml_loc, interp, "activities");
            break;
        case "unset":
            try {
                logger.debug("You can suffix your yaml filename or url with the " +
                    "format version, such as :1 or :2. Assuming version 2.");
                // logger is withheld here so that a failed modern-format parse does not
                // emit errors before the legacy fallback has had a chance to run
                doclist = StatementsLoader.loadPath(null, yaml_loc, interp, "activities");
            } catch (Exception ignored) {
                try {
                    doclist = getVersion1StmtsDoc(interp, yaml_loc);
                    logger.warn("DEPRECATED-FORMAT: Loaded yaml " + yaml_loc +
                        " with compatibility mode. This will be deprecated in a future release.");
                    logger.warn("DEPRECATED-FORMAT: Please refer to " +
                        "http://docs.engineblock.io/user-guide/standard_yaml/ for more details.");
                } catch (Exception compatError) {
                    logger.warn("Tried to load yaml in compatibility mode, " +
                        "since it failed to load with the standard format, " +
                        "but found an error:" + compatError);
                    logger.warn("The following detailed errors are provided only " +
                        "for the standard format. To force loading version 1 with detailed logging, add" +
                        " a version qualifier to your yaml filename or url like ':1'");
                    // retrigger the error again, this time with logging enabled.
                    doclist = StatementsLoader.loadPath(logger, yaml_loc, interp, "activities");
                }
            }
            break;
        default:
            throw new RuntimeException("Unrecognized yaml format version, expected :1 or :2 " +
                "at end of yaml file, but got " + yamlVersion + " instead.");
    }
    return doclist;
}
/**
 * Loads a version-1 (legacy engineblock) CQL yaml and adapts it into the modern
 * StmtsDocList structure, translating per-statement consistency level, prepared flag,
 * and ratio into block params.
 *
 * @param interp   interpolator for template variables in the yaml
 * @param yaml_loc the yaml path or URL to load
 * @return the adapted, unfiltered statements doc list
 * @deprecated the version-1 format is retained for compatibility only
 */
@Deprecated
private StmtsDocList getVersion1StmtsDoc(StrInterpolator interp, String yaml_loc) {
    StmtsDocList unfiltered;
    List<RawStmtsBlock> blocks = new ArrayList<>();

    YamlCQLStatementLoader deprecatedLoader = new YamlCQLStatementLoader(interp);
    AvailableCQLStatements rawDocs = deprecatedLoader.load(yaml_loc, "activities");

    List<TaggedCQLStatementDefs> rawTagged = rawDocs.getRawTagged();

    // each legacy statement becomes its own block in the adapted form
    for (TaggedCQLStatementDefs rawdef : rawTagged) {
        for (CQLStatementDef rawstmt : rawdef.getStatements()) {
            RawStmtsBlock rawblock = new RawStmtsBlock();

            // tags
            rawblock.setTags(rawdef.getTags());

            // params: map legacy per-statement options onto block params
            Map<String, Object> params = new HashMap<>(rawdef.getParams());
            if (rawstmt.getConsistencyLevel() != null && !rawstmt.getConsistencyLevel().isEmpty())
                params.put("cl", rawstmt.getConsistencyLevel());
            if (!rawstmt.isPrepared()) params.put("prepared", "false");
            if (rawstmt.getRatio() != 1L)
                params.put("ratio", String.valueOf(rawstmt.getRatio()));
            rawblock.setParams(params);

            // stmts
            List<RawStmtDef> stmtslist = new ArrayList<>();
            stmtslist.add(new RawStmtDef(rawstmt.getName(), rawstmt.getStatement()));
            rawblock.setRawStmtDefs(stmtslist);

            // bindings
            rawblock.setBindings(rawstmt.getBindings());

            blocks.add(rawblock);
        }
    }

    // wrap the adapted blocks back into the modern doc-list structure
    RawStmtsDoc rawStmtsDoc = new RawStmtsDoc();
    rawStmtsDoc.setBlocks(blocks);
    List<RawStmtsDoc> rawStmtsDocs = new ArrayList<>();
    rawStmtsDocs.add(rawStmtsDoc);
    RawStmtsDocList rawStmtsDocList = new RawStmtsDocList(rawStmtsDocs);
    unfiltered = new StmtsDocList(rawStmtsDocList);

    return unfiltered;
}
/** @return the counter metrics for exceptions observed by this activity's error handlers */
public ExceptionCountMetrics getExceptionCountMetrics() {
    return exceptionCountMetrics;
}
/**
 * Shuts down the activity. If verification metrics were ever initialized, enforces
 * that all rows and values verified cleanly, throwing when any did not.
 */
@Override
public void shutdownActivity() {
    super.shutdownActivity();
    // only consulted when verification was actually enabled at some point
    if (verificationMetrics != null) {
        VerificationMetrics metrics = getVerificationMetrics();
        long unverifiedValues = metrics.unverifiedValuesCounter.getCount();
        long unverifiedRows = metrics.unverifiedRowsCounter.getCount();
        if (unverifiedRows > 0 || unverifiedValues > 0) {
            throw new RuntimeException(
                "There were " + unverifiedValues + " unverified values across " + unverifiedRows + " unverified rows."
            );
        }
        logger.info("verified " + metrics.verifiedValuesCounter.getCount() + " values across " + metrics.verifiedRowsCounter.getCount() + " verified rows");
    }
}
/**
 * Renders a diagnostic summary of this activity's definition, session, and op sequence.
 */
@Override
public String toString() {
    StringBuilder summary = new StringBuilder("CQLActivity {");
    summary.append("activityDef=").append(activityDef);
    summary.append(", session=").append(session);
    summary.append(", opSequence=").append(this.opsequence);
    summary.append('}');
    return summary.toString();
}
/**
 * Applies dynamic activity parameter updates to this activity and the underlying
 * driver configuration: cycle operators, statement modifiers, fetch size, retry
 * settings, statement filtering, error handling, tracing, async limits, and
 * connection pooling.
 *
 * @param activityDef the updated activity definition
 */
@Override
public void onActivityDefUpdate(ActivityDef activityDef) {
    super.onActivityDefUpdate(activityDef);

    clearResultSetCycleOperators();
    clearRowCycleOperators();
    clearStatementModifiers();

    ParameterMap params = activityDef.getParams();
    Optional<String> fetchSizeOption = params.getOptionalString("fetchsize");
    Cluster cluster = getSession().getCluster();
    if (fetchSizeOption.isPresent()) {
        int fetchSize = fetchSizeOption.flatMap(Unit::bytesFor).map(Double::intValue).orElseThrow(() -> new RuntimeException(
            "Unable to parse fetch size from " + fetchSizeOption.get()
        ));
        // sanity ranges: warn above ~10MB, refuse above ~1GB
        if (fetchSize > 10000000 && fetchSize < 1000000000) {
            logger.warn("Setting the fetchsize to " + fetchSize + " is unlikely to give good performance.");
        } else if (fetchSize > 1000000000) {
            throw new RuntimeException("Setting the fetch size to " + fetchSize + " is likely to cause instability.");
        }
        logger.trace("setting fetchSize to " + fetchSize);
        cluster.getConfiguration().getQueryOptions().setFetchSize(fetchSize);
    }

    this.retryDelay = params.getOptionalLong("retrydelay").orElse(0L);
    this.maxRetryDelay = params.getOptionalLong("maxretrydelay").orElse(500L);
    this.retryReplace = params.getOptionalBoolean("retryreplace").orElse(false);
    this.maxTries = params.getOptionalInteger("maxtries").orElse(10);
    this.showcql = params.getOptionalBoolean("showcql").orElse(false);
    this.maxpages = params.getOptionalInteger("maxpages").orElse(1);

    this.statementFilter = params.getOptionalString("tokens")
        .map(s -> new TokenRangeStmtFilter(cluster, s))
        .orElse(null);
    if (statementFilter != null) {
        logger.info("filtering statements" + statementFilter);
    }

    errorHandler = configureErrorHandler();

    params.getOptionalString("trace")
        .map(SimpleConfig::new)
        .map(TraceLogger::new)
        .ifPresent(
            tl -> {
                addResultSetCycleOperator(tl);
                addStatementModifier(tl);
            });

    this.maxTotalOpsInFlight = params.getOptionalLong("async").orElse(1L);

    Optional<String> dynpooling = params.getOptionalString("pooling");
    if (dynpooling.isPresent()) {
        logger.info("dynamically updating pooling");
        if (!dynpooling.get().equals(this.pooling)) {
            // opts holds the *desired* settings; cfg is the *live* driver configuration
            PoolingOptions opts = CQLOptions.poolingOptionsFor(dynpooling.get());
            logger.info("pooling=>" + dynpooling.get());

            PoolingOptions cfg = getSession().getCluster().getConfiguration().getPoolingOptions();

            // This looks funny, because we have to set max conns per host
            // in an order that will appease the driver, as there is no "apply settings"
            // to do that for us, so we raise max first if it goes higher, and we lower
            // it last, if it goes lower
            int prior_mcph_l = cfg.getMaxConnectionsPerHost(HostDistance.LOCAL);
            int mcph_l = opts.getMaxConnectionsPerHost(HostDistance.LOCAL);
            int ccph_l = opts.getCoreConnectionsPerHost(HostDistance.LOCAL);

            if (prior_mcph_l < mcph_l) {
                logger.info("setting mcph_l to " + mcph_l);
                cfg.setMaxConnectionsPerHost(HostDistance.LOCAL, mcph_l);
            }
            logger.info("setting ccph_l to " + ccph_l);
            cfg.setCoreConnectionsPerHost(HostDistance.LOCAL, ccph_l);
            if (mcph_l < prior_mcph_l) {
                logger.info("setting mcph_l to " + mcph_l);
                // FIX: was setMaxRequestsPerConnection, which contradicted the log line
                // and the raise-first/lower-last intent for max connections per host
                cfg.setMaxConnectionsPerHost(HostDistance.LOCAL, mcph_l);
            }
            cfg.setMaxRequestsPerConnection(HostDistance.LOCAL, opts.getMaxRequestsPerConnection(HostDistance.LOCAL));

            int prior_mcph_r = cfg.getMaxConnectionsPerHost(HostDistance.REMOTE);
            int mcph_r = opts.getMaxConnectionsPerHost(HostDistance.REMOTE);
            int ccph_r = opts.getCoreConnectionsPerHost(HostDistance.REMOTE);

            if (mcph_r > 0) {
                // FIX: remote settings were previously applied to the freshly-parsed opts
                // object instead of the live configuration (cfg), making them no-ops
                if (mcph_r > prior_mcph_r) cfg.setMaxConnectionsPerHost(HostDistance.REMOTE, mcph_r);
                cfg.setCoreConnectionsPerHost(HostDistance.REMOTE, ccph_r);
                if (prior_mcph_r > mcph_r) cfg.setMaxConnectionsPerHost(HostDistance.REMOTE, mcph_r);
                if (opts.getMaxConnectionsPerHost(HostDistance.REMOTE) > 0) {
                    cfg.setMaxRequestsPerConnection(HostDistance.REMOTE, opts.getMaxRequestsPerConnection(HostDistance.REMOTE));
                }
            }
            this.pooling = dynpooling.get();
        }
    }
}
// TODO: make error handler updates consistent under concurrent updates
/**
 * Builds the CQL error handler from the "errors" activity parameter.
 * <p>
 * The parameter is a comma-separated list of handler specs. A bare verb sets the
 * default handler; a "pattern->verb" (also "pattern=verb" or "pattern:verb") entry
 * sets a handler for a known exception group, or else for a name pattern.
 *
 * @return a freshly configured error handler
 */
private HashedCQLErrorHandler configureErrorHandler() {
    HashedCQLErrorHandler newerrorHandler = new HashedCQLErrorHandler(exceptionCountMetrics);

    String errors = activityDef.getParams()
        .getOptionalString("errors")
        .orElse("stop,retryable->retry,unverified->stop");

    String[] handlerSpecs = errors.split(",");
    for (String spec : handlerSpecs) {
        String[] keyval = spec.split("=|->|:", 2);
        if (keyval.length == 1) {
            String verb = keyval[0];
            ErrorResponse errorResponse = getErrorResponseOrBasicError(verb);
            newerrorHandler.setDefaultHandler(
                new NBCycleErrorHandler(
                    errorResponse,
                    exceptionCountMetrics,
                    exceptionHistoMetrics,
                    !getParams().getOptionalLong("async").isPresent()
                )
            );
        } else {
            String pattern = keyval[0];
            String verb = keyval[1];
            if (newerrorHandler.getGroupNames().contains(pattern)) {
                ErrorResponse errorResponse = getErrorResponseOrBasicError(verb);
                NBCycleErrorHandler handler =
                    new NBCycleErrorHandler(
                        errorResponse,
                        exceptionCountMetrics,
                        exceptionHistoMetrics,
                        !getParams().getOptionalLong("async").isPresent()
                    );
                logger.info("Handling error group '" + pattern + "' with handler:" + handler);
                newerrorHandler.setHandlerForGroup(pattern, handler);
            } else {
                // FIX: use the same validating lookup as the other branches so that an
                // invalid verb yields a BasicError listing the valid options, instead of
                // a raw IllegalArgumentException from ErrorResponse.valueOf
                ErrorResponse errorResponse = getErrorResponseOrBasicError(verb);
                NBCycleErrorHandler handler = new NBCycleErrorHandler(
                    errorResponse,
                    exceptionCountMetrics,
                    exceptionHistoMetrics,
                    !getParams().getOptionalLong("async").isPresent()
                );
                logger.info("Handling error pattern '" + pattern + "' with handler:" + handler);
                newerrorHandler.setHandlerForPattern(keyval[0], handler);
            }
        }
    }
    return newerrorHandler;
}
/**
 * Parses an error-response verb, converting an invalid value into a user-facing
 * BasicError that lists the allowed options.
 *
 * @param verb an error response name from the "errors" parameter
 * @return the matching ErrorResponse
 * @throws BasicError if the verb does not name a known ErrorResponse
 */
private ErrorResponse getErrorResponseOrBasicError(String verb) {
    try {
        return ErrorResponse.valueOf(verb);
    } catch (IllegalArgumentException e) {
        throw new BasicError("Invalid parameter for errors: '" + verb + "' should be one of: " + StringUtils.join(ErrorResponse.values(), ", "));
    }
}
/** @return the maximum number of tries per cycle before giving up */
public int getMaxTries() {
    return maxTries;
}

/** @return the configured CQL error handler for this activity */
public HashedCQLErrorHandler getCqlErrorHandler() {
    return this.errorHandler;
}

/** @return the token-range statement filter, or null when none is configured */
public StatementFilter getStatementFilter() {
    return statementFilter;
}

public void setStatementFilter(StatementFilter statementFilter) {
    this.statementFilter = statementFilter;
}

/** @return whether statements should be echoed to the log for diagnostics */
public Boolean isShowCql() {
    return showcql;
}

/** @return the op sequence resolved by initSequencer */
public OpSequence<ReadyCQLStatement> getOpSequencer() {
    return opsequence;
}

/** @return row cycle operators, or null when none are registered */
public List<RowCycleOperator> getRowCycleOperators() {
    return rowCycleOperators;
}

// operator lists are lazily created so that null can signal "none registered"
protected synchronized void addRowCycleOperator(RowCycleOperator rsOperator) {
    if (rowCycleOperators == null) {
        rowCycleOperators = new ArrayList<>();
    }
    rowCycleOperators.add(rsOperator);
}

private void clearRowCycleOperators() {
    this.rowCycleOperators = null;
}

/** @return result-set cycle operators, or null when none are registered */
public List<ResultSetCycleOperator> getResultSetCycleOperators() {
    return resultSetCycleOperators;
}

protected synchronized void addResultSetCycleOperator(ResultSetCycleOperator resultSetCycleOperator) {
    if (this.resultSetCycleOperators == null) {
        this.resultSetCycleOperators = new ArrayList<>();
    }
    this.resultSetCycleOperators.add(resultSetCycleOperator);
}

private void clearResultSetCycleOperators() {
    this.resultSetCycleOperators = null;
}

/** @return statement modifiers, or null when none are registered */
public List<StatementModifier> getStatementModifiers() {
    return this.statementModifiers;
}

protected synchronized void addStatementModifier(StatementModifier modifier) {
    if (this.statementModifiers == null) {
        this.statementModifiers = new ArrayList<>();
    }
    this.statementModifiers.add(modifier);
}

private void clearStatementModifiers() {
    statementModifiers = null;
}

/**
 * Divides the total async op budget across threads, distributing the remainder to
 * the lower-numbered slots so the per-slot totals sum exactly to maxTotalOpsInFlight.
 *
 * @param slot the thread slot index
 * @return the number of in-flight ops allotted to this slot
 */
public long getMaxOpsInFlight(int slot) {
    int threads = this.getActivityDef().getThreads();
    return maxTotalOpsInFlight / threads + (slot < (maxTotalOpsInFlight % threads) ? 1 : 0);
}

public long getRetryDelay() {
    return retryDelay;
}

public void setRetryDelay(long retryDelay) {
    this.retryDelay = retryDelay;
}

public long getMaxRetryDelay() {
    return maxRetryDelay;
}

public void setMaxRetryDelay(long maxRetryDelay) {
    this.maxRetryDelay = maxRetryDelay;
}

public boolean isRetryReplace() {
    return retryReplace;
}

public void setRetryReplace(boolean retryReplace) {
    this.retryReplace = retryReplace;
}
/**
 * Returns a shared UTF-8 file writer for the given name, creating and caching it on
 * first use.
 *
 * @param name the file name, also used as the cache key
 * @return the cached or newly created writer
 * @throws RuntimeException wrapping any IOException from opening the file
 */
public synchronized Writer getNamedWriter(String name) {
    return namedWriters.computeIfAbsent(name, s -> {
        try {
            Writer w = new FileWriter(name, StandardCharsets.UTF_8);
            // FIX: register for auto-close once at creation time; previously every
            // lookup re-registered the cached writer, accumulating duplicate entries
            this.registerAutoCloseable(w);
            return w;
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    });
}
}

View File

@ -0,0 +1,78 @@
package io.nosqlbench.activitytype.cql.core;
import com.datastax.driver.core.LocalDate;
import com.datastax.driver.core.TupleValue;
import com.datastax.driver.core.UDTValue;
import io.nosqlbench.engine.api.activityapi.core.ActionDispenser;
import io.nosqlbench.engine.api.activityapi.core.ActivityType;
import io.nosqlbench.engine.api.activityimpl.ActivityDef;
import io.nosqlbench.nb.annotations.Service;
import java.math.BigDecimal;
import java.math.BigInteger;
import java.net.InetAddress;
import java.nio.ByteBuffer;
import java.time.Instant;
import java.time.LocalTime;
import java.util.*;
/**
 * ActivityType plugin for the shaded CQL (DataStax 3.x) driver, registered under the
 * "cqld3" selector. Creates {@link CqlActivity} instances and describes the CQL-to-Java
 * type mapping used for data binding.
 */
@Service(value = ActivityType.class, selector = "cqld3")
public class CqlActivityType implements ActivityType<CqlActivity> {

    @Override
    public CqlActivity getActivity(ActivityDef activityDef) {
        // sanity check that we have a yaml parameter, which contains our statements and bindings
        activityDef.getParams()
            .getOptionalString("yaml", "workload")
            .orElseThrow(() -> new RuntimeException(
                "Currently, the cql activity type requires yaml/workload activity parameter."));
        return new CqlActivity(activityDef);
    }

    /**
     * Returns the per-activity level dispenser. The ActionDispenser can then dispense
     * per-thread actions within the activity instance.
     * @param activity The activity instance which will parameterize this action
     */
    @Override
    public ActionDispenser getActionDispenser(CqlActivity activity) {
        return new CqlActionDispenser(activity);
    }

    /**
     * Maps CQL type names to the Java types used for data binding. Insertion order is
     * preserved by the returned LinkedHashMap.
     */
    @Override
    public Map<String, Class<?>> getTypeMap() {
        var typemap = new LinkedHashMap<String, Class<?>>();
        typemap.put("ascii", String.class);
        typemap.put("bigint", long.class);
        typemap.put("blob", ByteBuffer.class);
        typemap.put("boolean", boolean.class);
        typemap.put("counter", long.class);
        typemap.put("date", LocalDate.class);
        typemap.put("decimal", BigDecimal.class);
        typemap.put("double", double.class);
        // duration is intentionally unmapped (no CqlDuration in this driver version)
        typemap.put("float", float.class);
        typemap.put("inet", InetAddress.class);
        typemap.put("int", int.class);
        typemap.put("list", List.class);
        typemap.put("map", Map.class);
        typemap.put("set", Set.class);
        typemap.put("smallint", short.class);
        typemap.put("text", String.class);
        typemap.put("time", LocalTime.class);
        typemap.put("timestamp", Instant.class);
        typemap.put("tinyint", byte.class);
        typemap.put("tuple", TupleValue.class);
        typemap.put("<udt>", UDTValue.class);
        typemap.put("uuid", UUID.class);
        typemap.put("timeuuid", UUID.class);
        typemap.put("varchar", String.class);
        typemap.put("varint", BigInteger.class);
        return typemap;
    }
}

View File

@ -0,0 +1,266 @@
package io.nosqlbench.activitytype.cql.core;
import com.codahale.metrics.Timer;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.ResultSetFuture;
import io.nosqlbench.activitytype.cql.api.ErrorResponse;
import io.nosqlbench.activitytype.cql.api.ResultSetCycleOperator;
import io.nosqlbench.activitytype.cql.api.RowCycleOperator;
import io.nosqlbench.activitytype.cql.api.StatementFilter;
import io.nosqlbench.activitytype.cql.errorhandling.ErrorStatus;
import io.nosqlbench.activitytype.cql.errorhandling.HashedCQLErrorHandler;
import io.nosqlbench.activitytype.cql.errorhandling.exceptions.CQLCycleWithStatementException;
import io.nosqlbench.activitytype.cql.errorhandling.exceptions.ChangeUnappliedCycleException;
import io.nosqlbench.activitytype.cql.errorhandling.exceptions.UnexpectedPagingException;
import io.nosqlbench.activitytype.cql.statements.core.ReadyCQLStatement;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import io.nosqlbench.activitytype.cql.statements.modifiers.StatementModifier;
import io.nosqlbench.engine.api.activityapi.core.BaseAsyncAction;
import io.nosqlbench.engine.api.activityapi.core.ops.fluent.opfacets.FailedOp;
import io.nosqlbench.engine.api.activityapi.core.ops.fluent.opfacets.StartedOp;
import io.nosqlbench.engine.api.activityapi.core.ops.fluent.opfacets.SucceededOp;
import io.nosqlbench.engine.api.activityapi.core.ops.fluent.opfacets.TrackedOp;
import io.nosqlbench.engine.api.activityapi.planning.OpSequence;
import io.nosqlbench.engine.api.activityimpl.ActivityDef;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.function.LongFunction;
/**
 * Asynchronous action for the CQL activity type. Each cycle selects and binds a ready
 * statement, submits it via the driver's async API, and completes the op from the
 * driver callback, handling paging, retry, and error-policy dispatch along the way.
 * The callback methods run on driver threads, not the activity's motor threads.
 */
@SuppressWarnings("Duplicates")
public class CqlAsyncAction extends BaseAsyncAction<CqlOpData, CqlActivity> {

    private final static Logger logger = LogManager.getLogger(CqlAsyncAction.class);
    private final ActivityDef activityDef;

    // per-activity operator/modifier state, refreshed by onActivityDefUpdate
    private List<RowCycleOperator> rowOps;
    private List<ResultSetCycleOperator> cycleOps;
    private List<StatementModifier> modifiers;
    private StatementFilter statementFilter;
    private OpSequence<ReadyCQLStatement> sequencer;

    // how many cycles a statement will be attempted for before giving up
    private int maxTries = 10;

    private HashedCQLErrorHandler cqlActivityErrorHandler;

    //    private int pagesFetched = 0;
    //    private long totalRowsFetchedForQuery = 0L;
    //    private ResultSet pagingResultSet;
    //    private Statement pagingStatement;
    //    private ReadyCQLStatement pagingReadyStatement;
    private boolean showcql;
    //    private long opsInFlight = 0L;
    //    private long maxOpsInFlight = 1L;
    //    private long pendingResults = 0;
    //    private LinkedBlockingQueue<CqlOpContext> resultQueue = new LinkedBlockingQueue<>();

    public CqlAsyncAction(CqlActivity activity, int slot) {
        super(activity, slot);
        onActivityDefUpdate(activity.getActivityDef());
        this.activityDef = activity.getActivityDef();
    }

    @Override
    public void init() {
        onActivityDefUpdate(activityDef);
        this.sequencer = activity.getOpSequencer();
    }

    // Supplies per-cycle op state objects, which also act as the driver callbacks.
    @Override
    public LongFunction<CqlOpData> getOpInitFunction() {
        return (l) -> {
            return new CqlOpData(l, this);
        };
    }

    /**
     * Binds and submits the statement for one cycle, applying filters and modifiers
     * before handing the op to the driver asynchronously.
     */
    @Override
    public void startOpCycle(TrackedOp<CqlOpData> opc) {
        CqlOpData cqlop = opc.getOpData();
        long cycle = opc.getCycle();

        // bind timer covers all statement selection and binding, skipping, transforming logic
        try (Timer.Context bindTime = activity.bindTimer.time()) {
            cqlop.readyCQLStatement = sequencer.get(cycle);
            cqlop.statement = cqlop.readyCQLStatement.bind(cycle);

            // If a filter is defined, skip and count any statements that do not match it
            if (statementFilter != null) {
                if (!statementFilter.matches(cqlop.statement)) {
                    activity.skippedTokensHisto.update(cycle);
                    //opc.start().stop(-2);
                    cqlop.skipped = true;
                    opc.skip(0);
                    return;
                }
            }

            // Transform the statement if there are any statement transformers defined for this CQL activity
            if (modifiers != null) {
                for (StatementModifier modifier : modifiers) {
                    cqlop.statement = modifier.modify(cqlop.statement, cycle);
                }
            }

            // Maybe show the CQl in log/console - only for diagnostic use
            if (showcql) {
                logger.info("CQL(cycle=" + cycle + "):\n" + cqlop.readyCQLStatement.getQueryString(cycle));
            }
        }

        StartedOp<CqlOpData> startedOp = opc.start();
        cqlop.startedOp = startedOp;

        // The execute timer covers only the point at which EB hands the op to the driver to be executed
        try (Timer.Context executeTime = activity.executeTimer.time()) {
            cqlop.future = activity.getSession().executeAsync(cqlop.statement);
            Futures.addCallback(cqlop.future, cqlop);
        }
    }

    /**
     * Driver-callback path for a completed page of results. Re-entered once per page
     * until the result set is fully fetched; applies result-set and row operators,
     * enforces paging limits, and records success metrics or dispatches errors.
     */
    public void onSuccess(StartedOp<CqlOpData> sop) {
        CqlOpData cqlop = sop.getOpData();

        HashedCQLErrorHandler.resetThreadStatusCode();
        if (cqlop.skipped) {
            return;
        }

        try {
            ResultSet resultSet = cqlop.resultSet;
            cqlop.totalPagesFetchedForQuery++;

            // Apply any defined ResultSetCycleOperators
            if (cycleOps != null) {
                for (ResultSetCycleOperator cycleOp : cycleOps) {
                    cycleOp.apply(resultSet, cqlop.statement, cqlop.cycle);
                }
            }

            int pageRows = resultSet.getAvailableWithoutFetching();
            int remaining = pageRows;
            if (rowOps == null) {
                // no row operators: just drain the current page
                while (remaining-- > 0) {
                    resultSet.one();
                }
            } else {
                while (remaining-- > 0) {
                    for (RowCycleOperator rowOp : rowOps) {
                        rowOp.apply(resultSet.one(), cqlop.cycle);
                    }
                }
            }
            cqlop.totalRowsFetchedForQuery += pageRows;

            // NOTE(review): the page counter is incremented both above and in this check,
            // so each callback appears to count two pages against maxpages; the
            // fetched-pages argument to the exception is also hardcoded to 1 — confirm
            // the intended semantics before changing.
            if (cqlop.totalPagesFetchedForQuery++ > activity.maxpages) {
                throw new UnexpectedPagingException(
                    cqlop.cycle,
                    resultSet,
                    cqlop.readyCQLStatement.getQueryString(cqlop.cycle),
                    1,
                    activity.maxpages,
                    activity.getSession().getCluster().getConfiguration().getQueryOptions().getFetchSize()
                );
            }

            if (!resultSet.wasApplied()) {
                // To make exception handling logic flow more uniformly
                throw new ChangeUnappliedCycleException(
                    cqlop.cycle, resultSet, cqlop.readyCQLStatement.getQueryString(cqlop.cycle)
                );
            }

            if (!resultSet.isFullyFetched()) {
                // more pages remain: request the next page and re-enter this callback
                logger.trace("async paging request " + cqlop.totalPagesFetchedForQuery + " for cycle " + cqlop.cycle);
                ListenableFuture<ResultSet> resultSetListenableFuture = resultSet.fetchMoreResults();
                Futures.addCallback(resultSetListenableFuture, cqlop);
                return;
            }

            SucceededOp<CqlOpData> success = sop.succeed(0);
            cqlop.readyCQLStatement.onSuccess(cqlop.cycle, success.getServiceTimeNanos(), cqlop.totalRowsFetchedForQuery);

            activity.triesHisto.update(cqlop.triesAttempted);
            activity.rowsCounter.mark(cqlop.totalRowsFetchedForQuery);
            activity.resultSuccessTimer.update(success.getServiceTimeNanos(), TimeUnit.NANOSECONDS);
            activity.resultSetSizeHisto.update(cqlop.totalRowsFetchedForQuery);
            activity.resultTimer.update(success.getServiceTimeNanos(), TimeUnit.NANOSECONDS);

        } catch (Exception e) {
            long currentServiceTime = sop.getCurrentServiceTimeNanos();

            cqlop.readyCQLStatement.onError(cqlop.cycle, currentServiceTime, e);

            CQLCycleWithStatementException cqlCycleException = new CQLCycleWithStatementException(cqlop.cycle, currentServiceTime, e, cqlop.readyCQLStatement);
            ErrorStatus errorStatus = cqlActivityErrorHandler.handleError(cqlop.cycle, cqlCycleException);

            if (errorStatus.isRetryable() && ++cqlop.triesAttempted < maxTries) {
                // resubmit the same bound statement and let this callback run again
                ResultSetFuture resultSetFuture = activity.getSession().executeAsync(cqlop.statement);
                sop.retry();
                Futures.addCallback(resultSetFuture, cqlop);
                return;
            } else {
                sop.fail(errorStatus.getResultCode());
                if (errorStatus.getResponse() == ErrorResponse.stop) {
                    cqlop.throwable = cqlCycleException;
                    activity.getActivityController().stopActivityWithErrorAsync(cqlCycleException);
                }
            }
        }
    }

    /**
     * Driver-callback path for a failed async execution. Dispatches the throwable
     * through the error handler, retrying or failing the op per the error policy.
     */
    public void onFailure(StartedOp<CqlOpData> startedOp) {

        CqlOpData cqlop = startedOp.getOpData();
        long serviceTime = startedOp.getCurrentServiceTimeNanos();

        // Even if this is retryable, we expose error events
        cqlop.readyCQLStatement.onError(startedOp.getCycle(),serviceTime,cqlop.throwable);

        long cycle = startedOp.getCycle();
        CQLCycleWithStatementException cqlCycleException1 = new CQLCycleWithStatementException(cqlop.cycle, serviceTime, cqlop.throwable, cqlop.readyCQLStatement);
        ErrorStatus errorStatus = cqlActivityErrorHandler.handleError(startedOp.getCycle(), cqlCycleException1);

        if (errorStatus.getResponse() == ErrorResponse.stop) {
            activity.getActivityController().stopActivityWithErrorAsync(cqlop.throwable);
            return;
        }

        if (errorStatus.isRetryable() && cqlop.triesAttempted < maxTries) {
            startedOp.retry();
            try (Timer.Context executeTime = activity.executeTimer.time()) {
                cqlop.future = activity.getSession().executeAsync(cqlop.statement);
                Futures.addCallback(cqlop.future, cqlop);
                return;
            }
        }

        FailedOp<CqlOpData> failed = startedOp.fail(errorStatus.getResultCode());
        activity.resultTimer.update(failed.getServiceTimeNanos(), TimeUnit.NANOSECONDS);
        activity.triesHisto.update(cqlop.triesAttempted);

    }

    // Re-reads per-activity settings; called at construction, init, and on live updates.
    @Override
    public void onActivityDefUpdate(ActivityDef activityDef) {
        this.maxTries = activity.getMaxTries();
        this.showcql = activity.isShowCql();
        this.cqlActivityErrorHandler = activity.getCqlErrorHandler();
        this.statementFilter = activity.getStatementFilter();
        this.rowOps = activity.getRowCycleOperators();
        this.cycleOps = activity.getResultSetCycleOperators();
        this.modifiers = activity.getStatementModifiers();
    }

    public String toString() {
        return "CqlAsyncAction["+this.slot+"]";
    }
}

View File

@ -0,0 +1,52 @@
package io.nosqlbench.activitytype.cql.core;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.ResultSetFuture;
import com.datastax.driver.core.Statement;
import io.nosqlbench.activitytype.cql.statements.core.ReadyCQLStatement;
import com.google.common.util.concurrent.FutureCallback;
import io.nosqlbench.engine.api.activityapi.core.ops.fluent.opfacets.StartedOp;
/**
 * Per-cycle state for an asynchronous CQL operation. Doubles as the Guava
 * {@link FutureCallback} that receives the driver's async result and forwards
 * completion to the owning {@link CqlAsyncAction}.
 */
public class CqlOpData implements FutureCallback<ResultSet> {
    final long cycle;

    // op state is managed via callbacks, we keep a ref here
    StartedOp<CqlOpData> startedOp;

    // set when a statement filter excluded this cycle's statement
    boolean skipped=false;

    private final CqlAsyncAction action;

    // number of retries consumed so far for this cycle
    int triesAttempted =0;

    ReadyCQLStatement readyCQLStatement;
    Statement statement;
    ResultSetFuture future;
    ResultSet resultSet;

    // accumulated across pages of a multi-page result
    long totalRowsFetchedForQuery;
    long totalPagesFetchedForQuery;

    public Throwable throwable;
    public long resultAt;
    private long errorAt;

    /**
     * @param cycle  the cycle number this op represents
     * @param action the owning action, notified on success or failure
     */
    public CqlOpData(long cycle, CqlAsyncAction action) {
        this.cycle = cycle;
        this.action = action;
    }

    // Invoked by the driver on a completed result set; delegates to the action.
    @Override
    public void onSuccess(ResultSet result) {
        this.resultSet = result;
        this.resultAt = System.nanoTime();
        action.onSuccess(startedOp);
    }

    // Invoked by the driver on a failed execution; delegates to the action.
    @Override
    public void onFailure(Throwable throwable) {
        this.throwable=throwable;
        this.errorAt = System.nanoTime();
        action.onFailure(startedOp);
    }
}

View File

@ -0,0 +1,32 @@
package io.nosqlbench.activitytype.cql.core;
import com.datastax.driver.core.policies.AddressTranslator;
import com.datastax.driver.core.Cluster;
import java.net.InetSocketAddress;
/**
 * An {@link AddressTranslator} constructed with a single proxy address.
 *
 * NOTE(review): translate(...) currently returns the incoming address
 * unchanged, which means the configured proxy address held in this class is
 * never consulted. Confirm whether translate should return this.address
 * (routing all nodes through the proxy) instead of the identity mapping.
 */
public class ProxyTranslator implements AddressTranslator {

    // the configured proxy endpoint; see class note about its (non-)use
    private final InetSocketAddress address;

    public ProxyTranslator(InetSocketAddress host){
        this.address= host;
    }

    @Override
    public void init(Cluster cluster) {
        // Nothing to do
    }

    /** Identity translation: the contact address is returned as-is. */
    @Override
    public InetSocketAddress translate(InetSocketAddress address) {
        return address;
    }

    @Override
    public void close() {
        // No resources held.
    }
}

View File

@ -0,0 +1,113 @@
package io.nosqlbench.activitytype.cql.errorhandling;
import com.datastax.driver.core.exceptions.*;
import io.nosqlbench.activitytype.cql.errorhandling.exceptions.*;
import io.nosqlbench.engine.api.activityapi.cyclelog.buffers.results.ResultReadable;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
* This enumerates all known exception classes, including supertypes,
* for the purposes of stable naming in error handling.
* This is current as of com.datastax.cassandra:cassandra-driver-core:3.2.0
*/
public enum CQLExceptionEnum implements ResultReadable {
    // Each constant pairs a known exception class with a stable numeric
    // result code. Codes must remain stable across releases, since they are
    // recorded in cycle logs and used by result filters.
    FrameTooLongException(FrameTooLongException.class, 1),
    CodecNotFoundException(CodecNotFoundException.class, 2),
    DriverException(DriverException.class, 3),

    AuthenticationException(AuthenticationException.class, 4),
    TraceRetrievalException(TraceRetrievalException.class, 5),
    UnsupportedProtocolVersionException(UnsupportedProtocolVersionException.class, 6),
    NoHostAvailableException(NoHostAvailableException.class, 7),
    QueryValidationException(QueryValidationException.class, 8),
    InvalidQueryException(InvalidQueryException.class, 9),
    InvalidConfigurationInQueryException(InvalidConfigurationInQueryException.class, 10),
    UnauthorizedException(UnauthorizedException.class, 11),
    SyntaxError(SyntaxError.class, 12),
    AlreadyExistsException(AlreadyExistsException.class, 13),
    UnpreparedException(UnpreparedException.class, 14),
    InvalidTypeException(InvalidTypeException.class, 15),
    QueryExecutionException(QueryExecutionException.class, 16),
    UnavailableException(UnavailableException.class, 17),
    BootstrappingException(BootstrappingException.class, 18),
    OverloadedException(OverloadedException.class, 19),
    TruncateException(TruncateException.class, 20),
    QueryConsistencyException(QueryConsistencyException.class, 21),
    WriteTimeoutException(WriteTimeoutException.class, 22),
    WriteFailureException(WriteFailureException.class, 23),
    ReadFailureException(ReadFailureException.class, 24),
    ReadTimeoutException(ReadTimeoutException.class, 25),
    FunctionExecutionException(FunctionExecutionException.class, 26),
    DriverInternalError(DriverInternalError.class, 27),
    ProtocolError(ProtocolError.class, 28),
    ServerError(ServerError.class, 29),
    BusyPoolException(BusyPoolException.class, 30),
    ConnectionException(ConnectionException.class, 31),
    TransportException(TransportException.class, 32),
    OperationTimedOutException(OperationTimedOutException.class, 33),
    PagingStateException(PagingStateException.class, 34),
    UnresolvedUserTypeException(UnresolvedUserTypeException.class, 35),
    UnsupportedFeatureException(UnsupportedFeatureException.class, 36),
    BusyConnectionException(BusyConnectionException.class, 37),

    // nosqlbench-specific exception types
    ChangeUnappliedCycleException(ChangeUnappliedCycleException.class, 38),
    ResultSetVerificationException(io.nosqlbench.activitytype.cql.errorhandling.exceptions.ResultSetVerificationException.class, 39),
    RowVerificationException(io.nosqlbench.activitytype.cql.errorhandling.exceptions.RowVerificationException.class, 40),
    UnexpectedPagingException(io.nosqlbench.activitytype.cql.errorhandling.exceptions.UnexpectedPagingException.class, 41),
    EbdseCycleException(CqlGenericCycleException.class, 42),
    MaxTriesExhaustedException(io.nosqlbench.activitytype.cql.errorhandling.exceptions.MaxTriesExhaustedException.class,43);

    private final static Logger logger = LogManager.getLogger(CQLExceptionEnum.class);

    // name -> code lookup; note that getCodesByName() both populates and
    // reassigns this field during class initialization
    private static Map<String, Integer> codesByName = getCodesByName();
    // code -> name lookup, indexable directly by result code (index 0 = "NONE")
    private static final String[] namesByCode = getNamesByCode();

    private final Class<? extends Exception> exceptionClass;
    private final int resultCode;

    CQLExceptionEnum(Class<? extends Exception> clazz, int resultCode) {
        this.exceptionClass = clazz;
        this.resultCode = resultCode;
    }

    public Class<? extends Exception> getExceptionClass() {
        return exceptionClass;
    }

    public int getResultCode() {
        return resultCode;
    }

    /** ResultReadable implementation: the stable numeric result code. */
    public int getResult() {
        return this.resultCode;
    }

    // Builds the name->code map, with the sentinel "NONE" mapped to 0.
    private static Map<String,Integer> getCodesByName() {
        codesByName = new HashMap<>();
        for (CQLExceptionEnum cqlExceptionEnum : CQLExceptionEnum.values()) {
            codesByName.put(cqlExceptionEnum.toString(), cqlExceptionEnum.resultCode);
        }
        codesByName.put("NONE",0);
        return codesByName;
    }

    // Builds a dense code->name array; gaps in the code sequence are padded
    // with "UNKNOWN" so the array can be indexed directly by result code.
    private static String[] getNamesByCode() {
        List<String> namesByCode = new ArrayList<>();
        namesByCode.add("NONE");
        for (CQLExceptionEnum cqlExceptionEnum : CQLExceptionEnum.values()) {
            int code = cqlExceptionEnum.resultCode;
            for (int i = namesByCode.size(); i <= code ; i++) {
                namesByCode.add("UNKNOWN");
            }
            namesByCode.set(code, cqlExceptionEnum.toString());
        }
        return namesByCode.toArray(new String[0]);
    }
}

View File

@ -0,0 +1,31 @@
package io.nosqlbench.activitytype.cql.errorhandling;
import io.nosqlbench.activitytype.cql.api.ErrorResponse;
/**
 * The outcome of handling one cycle error: which {@link ErrorResponse} verb
 * was applied, whether the operation may be retried, and the numeric result
 * code to record for the cycle. The result code is mutable so that an outer
 * handler can refine it after classification.
 */
public class ErrorStatus {

    private final ErrorResponse response;
    private final boolean retryable;
    private int resultCode;

    public ErrorStatus(ErrorResponse response, boolean retryable, int resultCode) {
        this.resultCode = resultCode;
        this.retryable = retryable;
        this.response = response;
    }

    public ErrorResponse getResponse() {
        return response;
    }

    public boolean isRetryable() {
        return retryable;
    }

    public int getResultCode() {
        return resultCode;
    }

    public void setResultCode(int resultCode) {
        this.resultCode = resultCode;
    }
}

View File

@ -0,0 +1,80 @@
package io.nosqlbench.activitytype.cql.errorhandling;
import com.datastax.driver.core.exceptions.*;
import io.nosqlbench.activitytype.cql.errorhandling.exceptions.*;
import java.util.LinkedHashMap;
import java.util.Map;
/**
 * Maps each known exception class to its direct superclass, for the purposes
 * of stable naming and hierarchy-aware error handling.
 * This is current as of com.datastax.cassandra:cassandra-driver-core:3.2.0
 */
public class ExceptionMap {

    // insertion-ordered child -> direct parent mapping
    private final static Map<Class<? extends Exception>, Class<? extends Exception>> map
            = new LinkedHashMap<Class<? extends Exception>, Class<? extends Exception>>() {
        {
            put(FrameTooLongException.class, DriverException.class);
            put(CodecNotFoundException.class, DriverException.class);
            put(AuthenticationException.class, DriverException.class);
            put(TraceRetrievalException.class, DriverException.class);
            put(UnsupportedProtocolVersionException.class, DriverException.class);
            put(NoHostAvailableException.class, DriverException.class);
            put(QueryValidationException.class, DriverException.class);
            put(InvalidQueryException.class, QueryValidationException.class);
            put(InvalidConfigurationInQueryException.class, InvalidQueryException.class);
            put(UnauthorizedException.class, QueryValidationException.class);
            put(SyntaxError.class, QueryValidationException.class);
            put(AlreadyExistsException.class, QueryValidationException.class);
            put(UnpreparedException.class, QueryValidationException.class);
            put(InvalidTypeException.class, DriverException.class);
            put(QueryExecutionException.class, DriverException.class);
            put(UnavailableException.class, QueryValidationException.class);
            put(BootstrappingException.class, QueryValidationException.class);
            put(OverloadedException.class, QueryValidationException.class);
            put(TruncateException.class, QueryValidationException.class);
            put(QueryConsistencyException.class, QueryValidationException.class);
            put(WriteTimeoutException.class, QueryConsistencyException.class);
            put(WriteFailureException.class, QueryConsistencyException.class);
            put(ReadFailureException.class, QueryConsistencyException.class);
            put(ReadTimeoutException.class, QueryConsistencyException.class);
            put(FunctionExecutionException.class, QueryValidationException.class);
            put(DriverInternalError.class, DriverException.class);
            put(ProtocolError.class, DriverInternalError.class);
            put(ServerError.class, DriverInternalError.class);
            put(BusyPoolException.class, DriverException.class);
            put(ConnectionException.class, DriverException.class);
            put(TransportException.class, ConnectionException.class);
            put(OperationTimedOutException.class, ConnectionException.class);
            put(PagingStateException.class, DriverException.class);
            put(UnresolvedUserTypeException.class, DriverException.class);
            put(UnsupportedFeatureException.class, DriverException.class);
            put(BusyConnectionException.class, DriverException.class);
            put(ChangeUnappliedCycleException.class, CqlGenericCycleException.class);
            put(ResultSetVerificationException.class, CqlGenericCycleException.class);
            put(RowVerificationException.class, CqlGenericCycleException.class);
            put(UnexpectedPagingException.class, CqlGenericCycleException.class);
            put(CqlGenericCycleException.class, RuntimeException.class);
        }
    };

    /**
     * Registers a child/parent pair after verifying via reflection that
     * parentClass really is the direct superclass of exceptionClass.
     *
     * @throws RuntimeException if the declared relationship does not hold
     */
    public Class<? extends Exception> put(
            Class<? extends Exception> exceptionClass,
            Class<? extends Exception> parentClass) {
        // Fixed message: the original wording stated the relationship backwards.
        if (exceptionClass.getSuperclass() != parentClass) {
            throw new RuntimeException("Sanity check failed: " + parentClass +
                    " is not the direct superclass of " + exceptionClass);
        }
        return map.put(exceptionClass, parentClass);
    }

    public static Map<Class<? extends Exception>, Class<? extends Exception>> getMap() {
        return map;
    }
}

View File

@ -0,0 +1,82 @@
package io.nosqlbench.activitytype.cql.errorhandling;
import com.datastax.driver.core.exceptions.*;
import io.nosqlbench.activitytype.cql.errorhandling.exceptions.CQLCycleWithStatementException;
import io.nosqlbench.activitytype.cql.errorhandling.exceptions.ChangeUnappliedCycleException;
import io.nosqlbench.activitytype.cql.errorhandling.exceptions.ResultSetVerificationException;
import io.nosqlbench.activitytype.cql.errorhandling.exceptions.RowVerificationException;
import io.nosqlbench.engine.api.activityapi.errorhandling.CycleErrorHandler;
import io.nosqlbench.engine.api.activityapi.errorhandling.HashedErrorHandler;
import io.nosqlbench.engine.api.metrics.ExceptionCountMetrics;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;
/**
 * A CQL-specific error handler which groups known exception types into
 * named handler groups ("retryable", "unapplied", "unverified") and maps
 * handled exceptions to stable numeric result codes via {@link CQLExceptionEnum}.
 */
public class HashedCQLErrorHandler extends HashedErrorHandler<Throwable, ErrorStatus> {
    private static final Logger logger = LogManager.getLogger(HashedCQLErrorHandler.class);

//    private static Set<Class<? extends Throwable>> UNVERIFIED_ERRORS = new HashSet<Class<? extends Throwable>>() {{
//        add(RowVerificationException.class);
//        add(ResultSetVerificationException.class);
//    }};
    private final ExceptionCountMetrics exceptionCountMetrics;
    // Per-thread result code of the last handled error.
    // NOTE(review): read and reset here but never set in this class —
    // presumably assigned by a collaborator; confirm before relying on it.
    private static final ThreadLocal<Integer> tlResultCode = ThreadLocal.withInitial(() -> (0));

    public HashedCQLErrorHandler(ExceptionCountMetrics exceptionCountMetrics) {
        this.exceptionCountMetrics = exceptionCountMetrics;
        // transient conditions which may succeed on a subsequent attempt
        this.setGroup("retryable",
                NoHostAvailableException.class,
                UnavailableException.class,
                OperationTimedOutException.class,
                OverloadedException.class,
                WriteTimeoutException.class,
                ReadTimeoutException.class
        );
        // LWT conditional updates that were not applied
        this.setGroup(
                "unapplied",
                ChangeUnappliedCycleException.class
        );
        // verification mismatches between expected and actual results
        this.setGroup("unverified",
                RowVerificationException.class,
                ResultSetVerificationException.class
        );
        // realerrors is everything else but the above
    }

    private static class UncaughtErrorHandler implements CycleErrorHandler<Throwable, ErrorStatus> {
        // Fallback handler: escalates any error that reached it unhandled.
        @Override
        public ErrorStatus handleError(long cycle, Throwable error, String errMsg) {
            throw new RuntimeException(
                    "An exception was thrown in cycle " + cycle + " that has no error: " + errMsg + ", error:" + error
            );
        }
    }

    /**
     * Delegates to the hashed handler lookup, then stamps the resulting
     * status with a numeric code resolved from the exception's cause.
     * Unrecognized exception types fall back to code 127.
     */
    @Override
    public ErrorStatus handleError(long cycle, Throwable throwable, String errMsg) {
        int resultCode = 127;
        if (throwable instanceof CQLCycleWithStatementException) {
            CQLCycleWithStatementException cce = (CQLCycleWithStatementException) throwable;
            Throwable cause = cce.getCause();
            try {
                // map the cause's simple class name to its stable result code
                String simpleName = cause.getClass().getSimpleName();
                CQLExceptionEnum cqlExceptionEnum = CQLExceptionEnum.valueOf(simpleName);
                resultCode = cqlExceptionEnum.getResult();
            } catch (Throwable t) {
                logger.warn("unrecognized exception while mapping status code via Enum: " + throwable.getClass());
            }
        } else {
            logger.warn("un-marshaled exception while mapping status code: " + throwable.getClass());
        }
        ErrorStatus errorStatus = super.handleError(cycle, throwable, errMsg);
        errorStatus.setResultCode(resultCode);
        return errorStatus;
    }

    public static int getThreadStatusCode() {
        return tlResultCode.get();
    }

    public static void resetThreadStatusCode() {
        tlResultCode.set(0);
    }
}

View File

@ -0,0 +1,102 @@
package io.nosqlbench.activitytype.cql.errorhandling;
import io.nosqlbench.activitytype.cql.api.ErrorResponse;
import io.nosqlbench.activitytype.cql.errorhandling.exceptions.CQLCycleWithStatementException;
import io.nosqlbench.activitytype.cql.errorhandling.exceptions.CQLExceptionDetailer;
import io.nosqlbench.engine.api.activityapi.errorhandling.CycleErrorHandler;
import io.nosqlbench.engine.api.metrics.ExceptionCountMetrics;
import io.nosqlbench.engine.api.metrics.ExceptionHistoMetrics;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;
/**
* A contextualized error handler that can catch a cycle-specific error.
* In this class, the error handlers return a boolean, which indicates
* to the call whether or not to retry the operation. This handler implements
* the error handling stack approach, which allows the user to select an
* entry point in the stack, with all lesser impacting handler rules
* applied from most impacting to least impacting order.
*
* For simplicity, the handler stack is fixed as described below. It is not
* possible to rearrange the verbs. Some care has been given to making sure
* that the selected handlers are complete and intuitive.
*
* The standard handler stack looks like this:
*
* <ol>
* <li>stop - log and throw an exception, which should escape to the
* next level of exception handling, the level which causes ebdse
* to stop running. In this case, and only in this case, the remaining
* handlers in the stack are not used.
* are not reached.</li>
* <li>warn - log an exception without stopping execution.</li>
* <li>retry - retry an operation up to a limit, IFF it is retryable</li>
* <li>count - count, in metrics, the number of this particular error type</li>
* <li>ignore - do nothing</li>
* </ol>
*
* As indicated above, if you specify "warn" for a particular error type, this means
* that also retry, count, will apply, as well as ignore, in that order. "ignore" is
* simply a no-op that allows you to specify it as the minimum case.
*/
@SuppressWarnings("Duplicates")
public class NBCycleErrorHandler implements CycleErrorHandler<Throwable, ErrorStatus> {

    private static final Logger logger = LogManager.getLogger(NBCycleErrorHandler.class);

    private final ErrorResponse errorResponse;
    private final ExceptionCountMetrics exceptionCountMetrics;
    private final ExceptionHistoMetrics exceptionHistoMetrics;
    // when true, 'stop' throws instead of merely logging before cascading
    private boolean throwExceptionOnStop=false;

    public NBCycleErrorHandler(
            ErrorResponse errorResponse,
            ExceptionCountMetrics exceptionCountMetrics,
            ExceptionHistoMetrics exceptionHistoMetrics,
            boolean throwExceptionOnStop) {
        this.errorResponse = errorResponse;
        this.exceptionCountMetrics = exceptionCountMetrics;
        this.exceptionHistoMetrics = exceptionHistoMetrics;
        this.throwExceptionOnStop = throwExceptionOnStop;
    }

    /**
     * Applies the configured response verb to this cycle error.
     *
     * IMPORTANT: the switch below intentionally falls through from each case
     * to the next (stop -> warn -> retry -> histogram -> count -> ignore),
     * implementing the handler-stack semantics described in the class javadoc.
     * Do not add break statements between the verbs.
     */
    @Override
    public ErrorStatus handleError(long cycle, Throwable contextError) {
        CQLCycleWithStatementException cce = (CQLCycleWithStatementException) contextError;
        Throwable error = cce.getCause();

        boolean retry = false;
        switch (errorResponse) {
            case stop:
                logger.error("error with cycle " + cycle + ": statement: " + cce.getStatement() + " errmsg: " +
                        CQLExceptionDetailer.messageFor(cycle, error));
                if (throwExceptionOnStop) {
                    throw new RuntimeException(error);
                }
                // falls through to warn when not throwing

            case warn:
                logger.warn("error with cycle " + cycle + ": statement: " + cce.getStatement() + " errmsg: " +
                        CQLExceptionDetailer.messageFor(cycle, error));
                // falls through
            case retry:
                retry = true;
                // falls through
            case histogram:
                exceptionHistoMetrics.update(error,cce.getDurationNanos());
                // falls through
            case count:
            case counter:
                exceptionCountMetrics.count(error);
                // falls through
            case ignore:
            default:
                break;
        }
        return new ErrorStatus(errorResponse, retry,-1);
    }

    @Override
    public ErrorStatus handleError(long cycle, Throwable contextError, String errMsg) {
        return handleError(cycle,contextError);
    }

    public String toString() {
        return this.errorResponse.toString();
    }
}

View File

@ -0,0 +1,38 @@
package io.nosqlbench.activitytype.cql.errorhandling.exceptions;
import io.nosqlbench.activitytype.cql.statements.core.ReadyCQLStatement;
/**
 * An internal exception type used to carry per-cycle error context — the
 * cycle number, the elapsed time, and the statement that failed — out of a
 * CQL activity cycle so that error handlers can report on it.
 */
public class CQLCycleWithStatementException extends Exception {

    private final ReadyCQLStatement readyCQLStatement;
    private final long durationNanos;
    private final long cycleValue;

    public CQLCycleWithStatementException(long cycleValue, long durationNanos, Throwable e, ReadyCQLStatement readyCQLStatement) {
        super(e);
        this.readyCQLStatement = readyCQLStatement;
        this.durationNanos = durationNanos;
        this.cycleValue = cycleValue;
    }

    /** @return the rendered query text for the cycle that failed. */
    public String getStatement() {
        return readyCQLStatement.getQueryString(cycleValue);
    }

    public ReadyCQLStatement getReadyCQLStatement() {
        return readyCQLStatement;
    }

    /** @return how long the failed operation ran, in nanoseconds. */
    public long getDurationNanos() {
        return durationNanos;
    }

    public long getCycleValue() {
        return cycleValue;
    }
}

View File

@ -0,0 +1,25 @@
package io.nosqlbench.activitytype.cql.errorhandling.exceptions;
import com.datastax.driver.core.exceptions.ReadTimeoutException;
import com.datastax.driver.core.exceptions.WriteTimeoutException;
/**
 * Renders driver exceptions into detailed, human-readable messages,
 * enriching timeout exceptions with coordinator details.
 */
public class CQLExceptionDetailer {

    public static String messageFor(long cycle, Throwable e) {
        // Write timeouts: include the coordinating host.
        if (e instanceof WriteTimeoutException) {
            WriteTimeoutException wte = (WriteTimeoutException) e;
            StringBuilder detail = new StringBuilder(wte.getMessage());
            detail.append(", coordinator: ").append(wte.getHost());
            return detail.toString();
        }

        // Read timeouts: include the coordinator and whether data came back.
        if (e instanceof ReadTimeoutException) {
            ReadTimeoutException rte = (ReadTimeoutException) e;
            StringBuilder detail = new StringBuilder(rte.getMessage());
            detail.append(", coordinator: ").append(rte.getHost());
            detail.append(", wasDataRetrieved: ").append(rte.wasDataRetrieved());
            return detail.toString();
        }

        // Anything else: pass the message through unchanged.
        return e.getMessage();
    }
}

View File

@ -0,0 +1,56 @@
package io.nosqlbench.activitytype.cql.errorhandling.exceptions;
import com.datastax.driver.core.BoundStatement;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.SimpleStatement;
import com.datastax.driver.core.Statement;
/**
 * Base type for cycle exceptions which carry the statement that was executed
 * and the result set it produced, for detailed error reporting.
 */
public abstract class CQLResultSetException extends CqlGenericCycleException {

    private final ResultSet resultSet;
    private final Statement statement;

    public CQLResultSetException(long cycle, ResultSet resultSet, Statement statement) {
        super(cycle);
        this.statement = statement;
        this.resultSet = resultSet;
    }

    public CQLResultSetException(long cycle, ResultSet resultSet, Statement statement, String message) {
        super(cycle,message);
        this.statement = statement;
        this.resultSet = resultSet;
    }

    public CQLResultSetException(long cycle, ResultSet resultSet, Statement statement, Throwable cause) {
        super(cycle,cause);
        this.statement = statement;
        this.resultSet = resultSet;
    }

    public CQLResultSetException(long cycle, ResultSet resultSet, Statement statement, String message, Throwable cause) {
        super(cycle,message,cause);
        this.statement = statement;
        this.resultSet = resultSet;
    }

    public ResultSet getResultSet() {
        return resultSet;
    }

    public Statement getStatement() {
        return statement;
    }

    /** Best-effort extraction of the query text from a driver statement. */
    protected static String getQueryString(Statement stmt) {
        if (stmt instanceof SimpleStatement) {
            return ((SimpleStatement) stmt).getQueryString();
        }
        if (stmt instanceof BoundStatement) {
            return ((BoundStatement)stmt).preparedStatement().getQueryString();
        }
        return "UNKNOWN Statement type:" + stmt.getClass().getSimpleName();
    }
}

View File

@ -0,0 +1,26 @@
package io.nosqlbench.activitytype.cql.errorhandling.exceptions;
import com.datastax.driver.core.ResultSet;
/**
 * Thrown when a conditional (LWT) operation reports that it was not applied.
 * This exists so that unapplied changes flow through the same single
 * error-handling path as every other cycle outcome, rather than requiring
 * special-case result checking at the call site.
 */
public class ChangeUnappliedCycleException extends CqlGenericCycleException {

    private final String queryString;
    private final ResultSet resultSet;

    public ChangeUnappliedCycleException(long cycle, ResultSet resultSet, String queryString) {
        super(cycle, "Operation was not applied:" + queryString);
        this.queryString = queryString;
        this.resultSet = resultSet;
    }

    public String getQueryString() {
        return queryString;
    }

    public ResultSet getResultSet() {
        return resultSet;
    }
}

View File

@ -0,0 +1,38 @@
package io.nosqlbench.activitytype.cql.errorhandling.exceptions;
/**
 * Base class for cycle-scoped exceptions in the CQL activity type. It carries
 * the cycle number at which the error occurred and prefixes every message
 * with it for context.
 */
public abstract class CqlGenericCycleException extends RuntimeException {

    private final long cycle;

    public CqlGenericCycleException(long cycle) {
        super();
        this.cycle = cycle;
    }

    public CqlGenericCycleException(long cycle, String message) {
        super(message);
        this.cycle = cycle;
    }

    public CqlGenericCycleException(long cycle, Throwable cause) {
        super(cause);
        this.cycle = cycle;
    }

    public CqlGenericCycleException(long cycle, String message, Throwable cause) {
        super(message, cause);
        this.cycle = cycle;
    }

    /** @return the cycle at which this error occurred. */
    public long getCycle() {
        return cycle;
    }

    /** Prefixes the underlying message with the cycle number. */
    @Override
    public String getMessage() {
        return "cycle:" + cycle + " caused by:" + super.getMessage();
    }
}

View File

@ -0,0 +1,20 @@
package io.nosqlbench.activitytype.cql.errorhandling.exceptions;
/**
 * Thrown when a retryable operation has been attempted up to its configured
 * maximum number of tries without success.
 */
public class MaxTriesExhaustedException extends CqlGenericCycleException {

    private final int maxtries;

    public MaxTriesExhaustedException(long cycle, int maxtries) {
        super(cycle);
        this.maxtries = maxtries;
    }

    /** @return the configured retry limit that was exhausted. */
    public int getMaxTries() {
        return maxtries;
    }

    @Override
    public String getMessage() {
        return String.format("Exhausted max tries (%d) on cycle %d.", getMaxTries(), getCycle());
    }
}

View File

@ -0,0 +1,17 @@
package io.nosqlbench.activitytype.cql.errorhandling.exceptions;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Statement;
/**
 * Thrown when result-set level verification fails for a cycle, carrying the
 * statement and the result set for diagnosis.
 */
public class ResultSetVerificationException extends CQLResultSetException {

    /** Verification failure with a descriptive detail; the query text is appended. */
    public ResultSetVerificationException(
        long cycle, ResultSet resultSet, Statement statement, String s) {
        super(cycle, resultSet, statement, s + ", \nquery string:\n" + getQueryString(statement));
    }

    /** Verification failure caused by an underlying error. */
    public ResultSetVerificationException(
        long cycle, ResultSet resultSet, Statement statement, Throwable cause) {
        super(cycle, resultSet, statement, cause);
    }
}

View File

@ -0,0 +1,33 @@
package io.nosqlbench.activitytype.cql.errorhandling.exceptions;
import com.datastax.driver.core.Row;
import java.util.Map;
/**
* This exception is thrown when read verification fails.
*/
/**
 * Thrown when row-level read verification fails, carrying the row that was
 * read and the values that were expected.
 */
public class RowVerificationException extends CqlGenericCycleException {

    private final Map<String, Object> expected;
    private final Row row;

    /**
     * @param cycle    the cycle at which verification failed
     * @param row      the row actually read
     * @param expected the field values that were expected
     * @param detail   a description of the mismatch
     */
    public RowVerificationException(long cycle, Row row, Map<String, Object> expected, String detail) {
        super(cycle, detail);
        this.expected = expected;
        this.row = row;
    }

    // NOTE: no getMessage() override here. The previous override prefixed
    // "cycle:<n>: " onto super.getMessage(), but CqlGenericCycleException
    // already prefixes "cycle:<n> caused by:", which produced messages with
    // the cycle number duplicated.

    public Map<String,Object> getExpectedValues() {
        return expected;
    }

    public Row getRow() {
        return row;
    }
}

View File

@ -0,0 +1,55 @@
package io.nosqlbench.activitytype.cql.errorhandling.exceptions;
import com.datastax.driver.core.ResultSet;
/**
* <p>This is not a core exception. It was added to the CQL activity type
* driver for nosqlbench specifically to catch the following unexpected
* condition:
* Paging would be needed to read all the results from a read query, but the user
* is not expecting to intentionally check and iterate the result sets for paging.
* <p>
* This should only be thrown if a result set would need paging, but configuration
* options specific that it should not expect to. Rather than assume paging is completely
* expected or unexpected, we simply assume that only 1 page is allowed, being the
* first page, or what is thought of as "not paging".
* <p>If this error is thrown, and paging is expected, then the user can adjust
* fetchsize or maxpages in order to open up paging to the degree that is allowable or
* expected.
*/
public class UnexpectedPagingException extends CqlGenericCycleException {

    // diagnostic context for the paging violation
    private final ResultSet resultSet;
    private final String queryString;
    private final int fetchSize;
    private final int fetchedPages;
    private final int maxpages;

    public UnexpectedPagingException(
        long cycle,
        ResultSet resultSet,
        String queryString,
        int fetchedPages,
        int maxpages,
        int fetchSize) {
        super(cycle);
        this.fetchSize = fetchSize;
        this.maxpages = maxpages;
        this.fetchedPages = fetchedPages;
        this.queryString = queryString;
        this.resultSet = resultSet;
    }

    public ResultSet getResultSet() {
        return resultSet;
    }

    /** Explains the violation, including the fetched/allowed page counts. */
    public String getMessage() {
        return "Additional paging would be required to read the results from this query fully" +
            ", but the user has not explicitly indicated that paging was expected." +
            " fetched/allowed: " + fetchedPages + "/" + maxpages +
            " fetchSize(" + fetchSize + "): " + queryString;
    }
}

View File

@ -0,0 +1,60 @@
package io.nosqlbench.activitytype.cql.filtering;
import io.nosqlbench.activitytype.cql.errorhandling.CQLExceptionEnum;
import io.nosqlbench.engine.api.activityapi.cyclelog.buffers.results.ResultReadable;
import io.nosqlbench.engine.api.activityapi.cyclelog.filters.ResultFilterDispenser;
import io.nosqlbench.engine.api.activityapi.cyclelog.filters.ResultValueFilterType;
import io.nosqlbench.engine.api.activityapi.cyclelog.filters.tristate.EnumReadableMappingFilter;
import io.nosqlbench.engine.api.activityapi.cyclelog.filters.tristate.TristateFilter;
import io.nosqlbench.engine.api.util.ConfigTuples;
import io.nosqlbench.nb.annotations.Service;
import java.util.function.Predicate;
@Service(value = ResultValueFilterType.class, selector = "cql")
public class CQLResultFilterType implements ResultValueFilterType {

    @Override
    public ResultFilterDispenser getDispenser(String config) {
        return new Dispenser(config);
    }

    /**
     * Parses an include/exclude filter config into a predicate over cycle
     * results, mapping CQL exception names to keep/discard policies.
     */
    private class Dispenser implements ResultFilterDispenser {
        private final ConfigTuples conf;
        private final EnumReadableMappingFilter<CQLExceptionEnum> enumFilter;
        private final Predicate<ResultReadable> filter;

        public Dispenser(String config) {
            this.conf = new ConfigTuples(config);
            // only include.../exclude... sections participate in filtering
            ConfigTuples inout = conf.getAllMatching("in.*", "ex.*");

            // Default policy is opposite of leading rule: if the first rule
            // excludes, anything unmatched is kept, and vice versa.
            TristateFilter.Policy defaultPolicy = TristateFilter.Policy.Discard;
            if (conf.get(0).get(0).startsWith("ex")) {
                defaultPolicy = TristateFilter.Policy.Keep;
            }

            this.enumFilter =
                new EnumReadableMappingFilter<>(CQLExceptionEnum.values(), TristateFilter.Policy.Ignore);

            // apply each rule: in... => Keep, ex... => Discard
            for (ConfigTuples.Section section : inout) {
                if (section.get(0).startsWith("in")) {
                    this.enumFilter.addPolicy(section.get(1), TristateFilter.Policy.Keep);
                } else if (section.get(0).startsWith("ex")) {
                    this.enumFilter.addPolicy(section.get(1), TristateFilter.Policy.Discard);
                } else {
                    throw new RuntimeException("Section must start with in(clude) or ex(clude), but instead it is " + section);
                }

            }

            this.filter = this.enumFilter.toDefaultingPredicate(defaultPolicy);
        }

        @Override
        public Predicate<ResultReadable> getResultFilter() {
            return filter;
        }
    }
}

View File

@ -0,0 +1,27 @@
package io.nosqlbench.activitytype.cql.statements.binders;
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.Statement;
import io.nosqlbench.virtdata.core.bindings.ValuesArrayBinder;
/**
 * The selectable strategies for binding generated values onto CQL statements.
 * Each constant dispenses a matching {@link ValuesArrayBinder} via {@link #get(Session)}.
 */
public enum CqlBinderTypes {
    direct_array,
    unset_aware,
    diagnostic;

    // unset_aware allows unset values to be modeled, so it is the default
    public final static CqlBinderTypes DEFAULT = unset_aware;

    /** @return a new binder instance implementing this binding strategy. */
    public ValuesArrayBinder<PreparedStatement, Statement> get(Session session) {
        switch (this) {
            case direct_array:
                return new DirectArrayValuesBinder();
            case unset_aware:
                return new UnsettableValuesBinder(session);
            case diagnostic:
                return new DiagnosticPreparedBinder();
            default:
                throw new RuntimeException("Impossible-ish statement branch");
        }
    }
}

View File

@ -0,0 +1,48 @@
package io.nosqlbench.activitytype.cql.statements.binders;
import com.datastax.driver.core.*;
import io.nosqlbench.activitytype.cql.core.CQLBindHelper;
import io.nosqlbench.virtdata.core.bindings.ValuesArrayBinder;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;
import java.util.List;
/**
* This binder is not meant to be used primarily by default. It gives detailed
* diagnostics, but in order to do so by default it does lots of processing.
* Other binders will call to this one in an exception handler when needed in
* order to explain in more detail what is happening for users.
*/
public class DiagnosticPreparedBinder implements ValuesArrayBinder<PreparedStatement, Statement> {
    public static final Logger logger = LogManager.getLogger(DiagnosticPreparedBinder.class);

    /**
     * Binds each value to its column by name and type, one at a time, so that
     * a type mismatch can be reported against the specific column involved.
     *
     * @param prepared the prepared statement whose variables define the columns
     * @param values   the generated values, one per bound variable, in order
     * @return the bound statement
     * @throws RuntimeException if the value count does not match the variable count
     * @throws ClassCastException if a value cannot be bound to its column type
     */
    @Override
    public Statement bindValues(PreparedStatement prepared, Object[] values) {
        ColumnDefinitions columnDefinitions = prepared.getVariables();
        BoundStatement bound = prepared.bind();
        List<ColumnDefinitions.Definition> columnDefList;
        if (columnDefinitions.asList().size() == values.length) {
            columnDefList = columnDefinitions.asList();
        } else {
            throw new RuntimeException("The number of named anchors in your statement does not match the number of bindings provided.");
        }

        // The size check above guarantees one definition per value, so the
        // loop can index columnDefList safely. (A dead guard that logged
        // "what gives?" on an impossible index overrun was removed here.)
        int i = 0;
        for (Object value : values) {
            ColumnDefinitions.Definition columnDef = columnDefList.get(i);
            String colName = columnDef.getName();
            DataType.Name type = columnDef.getType().getName();
            try {
                bound = CQLBindHelper.bindStatement(bound, colName, value, type);
            } catch (ClassCastException e) {
                // identify the exact column/type/value before rethrowing
                logger.error(String.format("Unable to bind column %s to cql type %s with value %s", colName, type, value));
                throw e;
            }
            i++;
        }
        return bound;
    }
}

View File

@ -0,0 +1,37 @@
package io.nosqlbench.activitytype.cql.statements.binders;
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.Statement;
import io.nosqlbench.virtdata.core.bindings.ValuesArrayBinder;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;
import java.util.Arrays;
/**
* This is now the main binder again, but if there are any exceptions, it delegates to the diagnostic
* one in order to explain what happened. This is to allow for higher performance in the general
* case, but with better user support when something goes wrong.
*
* If you want to force the client to use the array passing method of initializing a statement,
* use this one, known as 'directarray'. This does give up the benefit of allowing unset values
* to be modeled, and at no clear benefit. Thus the {@link CqlBinderTypes#unset_aware} one
* will become the default.
*/
public class DirectArrayValuesBinder implements ValuesArrayBinder<PreparedStatement, Statement> {
    public final static Logger logger = LogManager.getLogger(DirectArrayValuesBinder.class);

    /**
     * Binds the whole values array onto the prepared statement in one call.
     * If that fails for any reason, logs the values and delegates to the
     * slower {@link DiagnosticPreparedBinder} for a column-by-column
     * explanation of the problem.
     */
    @Override
    public Statement bindValues(PreparedStatement preparedStatement, Object[] objects) {
        try {
            return preparedStatement.bind(objects);
        } catch (Exception e) {
            logger.warn(
                "Error binding objects to prepared statement directly, falling back to diagnostic binding layer:"
                    + Arrays.toString(objects),
                e);
            return new DiagnosticPreparedBinder().bindValues(preparedStatement, objects);
        }
    }
}

View File

@ -0,0 +1,55 @@
package io.nosqlbench.activitytype.cql.statements.binders;
import com.datastax.driver.core.ConsistencyLevel;
import com.datastax.driver.core.SimpleStatement;
import com.datastax.driver.core.Statement;
import io.nosqlbench.virtdata.core.bindings.ValuesArrayBinder;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
/**
 * This binder is not meant to be used with anything but DDL or statements
 * which should not be trying to parameterize values in general.
 * Parametrized values are still possible through parameterized constructor parameter.
 * This binder should be avoided in favor of binders returning PreparedStatement
 */
public class SimpleStatementValuesBinder
    implements ValuesArrayBinder<SimpleStatement, Statement> {

    private final static Logger logger = LogManager.getLogger(SimpleStatementValuesBinder.class);

    // When true, '?' anchors in the query text are replaced inline with the
    // provided values before the statement is built.
    private final boolean parameterized;

    public SimpleStatementValuesBinder(boolean parameterized) {
        this.parameterized = parameterized;
    }

    @Override
    public Statement bindValues(SimpleStatement context, Object[] values) {
        String query = context.getQueryString();
        if (parameterized) {
            // Use limit -1 so a trailing '?' still produces a trailing (empty)
            // segment. With the default limit, String.split drops trailing empty
            // strings, which silently discarded the last value for any query
            // ending in '?'.
            String[] splits = query.split("\\?", -1);
            assert splits.length == values.length + 1;
            StringBuilder sb = new StringBuilder(splits[0]);
            for (int i = 1; i < splits.length; i++) {
                sb.append(values[i - 1]);
                sb.append(splits[i]);
            }
            query = sb.toString();
            logger.debug("parameterized simple statement: " + query);
        }
        SimpleStatement simpleStatement = new SimpleStatement(query);
        // Carry over per-statement execution options from the template statement.
        ConsistencyLevel cl = context.getConsistencyLevel();
        if (cl != null) {
            simpleStatement.setConsistencyLevel(context.getConsistencyLevel());
        }
        // NOTE(review): serial CL only matters for LWT statements; carried over
        // for completeness, as in the original.
        ConsistencyLevel serial_cl = context.getSerialConsistencyLevel();
        if (serial_cl != null) {
            simpleStatement.setSerialConsistencyLevel(context.getSerialConsistencyLevel());
        }
        Boolean idempotent = context.isIdempotent();
        if (idempotent != null) {
            simpleStatement.setIdempotent(idempotent);
        }
        return simpleStatement;
    }
}

View File

@ -0,0 +1,73 @@
package io.nosqlbench.activitytype.cql.statements.binders;
import com.datastax.driver.core.*;
import io.nosqlbench.virtdata.api.bindings.VALUE;
import io.nosqlbench.virtdata.core.bindings.ValuesArrayBinder;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;
import java.nio.ByteBuffer;
import java.util.List;
public class UnsettableValuesBinder implements ValuesArrayBinder<PreparedStatement, Statement> {
    private final static Logger logger = LogManager.getLogger(UnsettableValuesBinder.class);

    private final Session session;
    private final CodecRegistry codecRegistry;
    private final ProtocolVersion protocolVersion;

    public UnsettableValuesBinder(Session session) {
        this.session = session;
        this.codecRegistry = session.getCluster().getConfiguration().getCodecRegistry();
        this.protocolVersion = this.session.getCluster().getConfiguration().getProtocolOptions().getProtocolVersion();
    }

    // TODO: Allow for warning when nulls are passed and they aren't expected

    /**
     * Bind values positionally, honoring {@link VALUE#unset} sentinels: positions
     * holding the unset marker are simply left unbound, nulls are bound explicitly,
     * and everything else is serialized through the cluster's codec registry.
     */
    @Override
    public Statement bindValues(PreparedStatement preparedStatement, Object[] objects) {
        int i = -1;
        try {
            BoundStatement boundStmt = preparedStatement.bind();
            List<ColumnDefinitions.Definition> defs = preparedStatement.getVariables().asList();
            for (i = 0; i < objects.length; i++) {
                Object value = objects[i];
                if (VALUE.unset != value) {
                    if (null == value) {
                        boundStmt.setToNull(i);
                    } else {
                        DataType cqlType = defs.get(i).getType();
                        TypeCodec<Object> codec = codecRegistry.codecFor(cqlType, value);
                        ByteBuffer serialized = codec.serialize(value, protocolVersion);
                        boundStmt.setBytesUnsafe(i, serialized);
                    }
                }
            }
            return boundStmt;
        } catch (Exception e) {
            // Guard the index: if bind() itself threw, i is still -1, and the
            // original objects[i] access would raise ArrayIndexOutOfBoundsException,
            // masking the real cause.
            if (i >= 0 && i < objects.length) {
                String typNam = (objects[i] == null ? "NULL" : objects[i].getClass().getCanonicalName());
                logger.error("Error binding column " + preparedStatement.getVariables().asList().get(i).getName() + " with class " + typNam, e);
            } else {
                logger.error("Error binding values to prepared statement before any column was bound", e);
            }
            throw e;
        }
    }
}

View File

@ -0,0 +1,50 @@
package io.nosqlbench.activitytype.cql.statements.core;
import io.nosqlbench.engine.api.util.TagFilter;
import java.util.*;
import java.util.stream.Collectors;
/**
 * Holds the full set of tagged statement definitions for an activity and
 * answers tag-filtered views over them.
 */
public class AvailableCQLStatements {

    private final List<TaggedCQLStatementDefs> availableDefs;

    public AvailableCQLStatements(List<TaggedCQLStatementDefs> allStatementDef) {
        this.availableDefs = allStatementDef;
    }

    /** @return the unfiltered tagged statement definitions, as provided. */
    public List<TaggedCQLStatementDefs> getRawTagged() {
        return availableDefs;
    }

    /**
     * Explain, per statement group, how the given tag spec matched or did not
     * match. Keys are the comma-joined statement names of each group, values
     * are the tag filter's match log.
     */
    public Map<String, String> getFilteringDetails(String tagSpec) {
        Map<String, String> details = new LinkedHashMap<>();
        TagFilter ts = new TagFilter(tagSpec);
        for (TaggedCQLStatementDefs availableDef : availableDefs) {
            TagFilter.Result result = ts.matchesTaggedResult(availableDef);
            String names = availableDef.getStatements().stream()
                .map(CQLStatementDef::getName).collect(Collectors.joining(","));
            details.put(names, result.getLog());
        }
        return details;
    }

    /**
     * @return parsers for every statement whose tags match the given spec.
     */
    public List<CQLStatementDefParser> getMatching(String tagSpec) {
        TagFilter ts = new TagFilter(tagSpec);
        return availableDefs.stream()
            .filter(ts::matchesTagged)
            .map(TaggedCQLStatementDefs::getStatements)
            .flatMap(Collection::stream)
            .map(p -> new CQLStatementDefParser(p.getName(), p.getStatement()))
            .collect(Collectors.toList());
    }

    /** @return parsers for all statements (an empty tag spec matches everything). */
    public List<CQLStatementDefParser> getAll() {
        return getMatching("");
    }
}

View File

@ -0,0 +1,334 @@
package io.nosqlbench.activitytype.cql.statements.core;
import com.datastax.driver.core.*;
import com.datastax.driver.core.policies.*;
import io.netty.channel.socket.SocketChannel;
import io.netty.handler.codec.haproxy.HAProxyCommand;
import io.netty.handler.codec.haproxy.HAProxyMessage;
import io.netty.handler.codec.haproxy.HAProxyProtocolVersion;
import io.netty.handler.codec.haproxy.HAProxyProxiedProtocol;
import io.nosqlbench.activitytype.cql.core.CQLOptions;
import io.nosqlbench.activitytype.cql.core.ProxyTranslator;
import io.nosqlbench.engine.api.activityapi.core.Shutdownable;
import io.nosqlbench.engine.api.activityimpl.ActivityDef;
import io.nosqlbench.engine.api.metrics.ActivityMetrics;
import io.nosqlbench.engine.api.scripting.ExprEvaluator;
import io.nosqlbench.engine.api.scripting.GraalJsEvaluator;
import io.nosqlbench.engine.api.util.SSLKsFactory;
import io.nosqlbench.nb.api.errors.BasicError;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import javax.net.ssl.SSLContext;
import java.io.File;
import java.io.IOException;
import java.net.Inet6Address;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.UnknownHostException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.*;
/**
 * Caches one CQL {@link Session} per 'clusterid' key so that multiple activities
 * can share a session. Sessions are created lazily from activity parameters and
 * closed via {@link #stopSession(ActivityDef)} or {@link #shutdown()}.
 */
public class CQLSessionCache implements Shutdownable {

    private final static Logger logger = LogManager.getLogger(CQLSessionCache.class);
    private final static String DEFAULT_SESSION_ID = "default";
    private static final CQLSessionCache instance = new CQLSessionCache();
    private final Map<String, Session> sessionCache = new HashMap<>();

    private CQLSessionCache() {
    }

    public static CQLSessionCache get() {
        return instance;
    }

    /**
     * Close and evict the session for the given activity, keyed by 'clusterid'.
     * Evicting first prevents {@link #shutdown()} from closing it a second time,
     * and a missing session is tolerated instead of raising an NPE.
     */
    public void stopSession(ActivityDef activityDef) {
        String key = activityDef.getParams().getOptionalString("clusterid").orElse(DEFAULT_SESSION_ID);
        Session session = sessionCache.remove(key);
        if (session == null) {
            logger.warn("No cached session found for key '" + key + "', nothing to stop.");
            return;
        }
        session.getCluster().close();
        session.close();
    }

    /** Get or lazily create the session for the activity's 'clusterid' key. */
    public Session getSession(ActivityDef activityDef) {
        String key = activityDef.getParams().getOptionalString("clusterid").orElse(DEFAULT_SESSION_ID);
        return sessionCache.computeIfAbsent(key, (cid) -> createSession(activityDef, key));
    }

    // cbopts=\".withLoadBalancingPolicy(LatencyAwarePolicy.builder(new TokenAwarePolicy(new DCAwareRoundRobinPolicy(\"dc1-us-east\", 0, false))).build()).withRetryPolicy(new LoggingRetryPolicy(DefaultRetryPolicy.INSTANCE))\"

    /**
     * Build a Cluster and Session from activity parameters. Supports contact
     * points, credentials, SSL, compression, policies (retry/reconnect/lbp/
     * speculative), cloud secure-connect bundles, HAProxy source spoofing,
     * and optional driver metrics mounting.
     */
    private Session createSession(ActivityDef activityDef, String sessid) {

        String host = activityDef.getParams().getOptionalString("host").orElse("localhost");
        int port = activityDef.getParams().getOptionalInteger("port").orElse(9042);

        String driverType = activityDef.getParams().getOptionalString("cqldriver").orElse("oss");
        if (!driverType.equals("oss")) {
            throw new BasicError("This driver (cqlv3) does not use the cqldriver option. It only initializes sessions through the OSS API path via the Cluster.builder() chain. Thus, the setting of '" + driverType + "' is not possible.");
        }

        Cluster.Builder builder = Cluster.builder();
        logger.info("Using driver type '" + driverType.toUpperCase() + "'");

        Optional<String> scb = activityDef.getParams()
            .getOptionalString("secureconnectbundle");
        scb.map(File::new)
            .ifPresent(builder::withCloudSecureConnectBundle);

        activityDef.getParams()
            .getOptionalString("insights")
            .map(Boolean::parseBoolean)
            .ifPresent(b -> {
                throw new BasicError("This driver (cqlv3) does not support the insights reporting feature.");
            });

        // 'host' and 'hosts' are synonyms; default to localhost only when no
        // secure connect bundle is in play (the bundle carries its own endpoints).
        String[] contactPoints = activityDef.getParams().getOptionalString("host")
            .map(h -> h.split(",")).orElse(null);

        if (contactPoints == null) {
            contactPoints = activityDef.getParams().getOptionalString("hosts")
                .map(h -> h.split(",")).orElse(null);
        }
        if (contactPoints == null && scb.isEmpty()) {
            contactPoints = new String[]{"localhost"};
        }

        if (contactPoints != null) {
            builder.addContactPoints(contactPoints);
        }

        activityDef.getParams().getOptionalInteger("port").ifPresent(builder::withPort);

        builder.withCompression(ProtocolOptions.Compression.NONE);

        // Credentials: password may come inline or from the first line of a file.
        Optional<String> usernameOpt = activityDef.getParams().getOptionalString("username");
        Optional<String> passwordOpt = activityDef.getParams().getOptionalString("password");
        Optional<String> passfileOpt = activityDef.getParams().getOptionalString("passfile");

        if (usernameOpt.isPresent()) {
            String username = usernameOpt.get();
            String password;
            if (passwordOpt.isPresent()) {
                password = passwordOpt.get();
            } else if (passfileOpt.isPresent()) {
                Path path = Paths.get(passfileOpt.get());
                try {
                    password = Files.readAllLines(path).get(0);
                } catch (IOException e) {
                    String error = "Error while reading password from file:" + passfileOpt;
                    logger.error(error, e);
                    throw new RuntimeException(e);
                }
            } else {
                String error = "username is present, but neither password nor passfile are defined.";
                logger.error(error);
                throw new RuntimeException(error);
            }
            builder.withCredentials(username, password);
        }

        // cbopts: a raw builder-chain expression evaluated against the builder
        // via a JS engine, for options not otherwise exposed as parameters.
        Optional<String> clusteropts = activityDef.getParams().getOptionalString("cbopts");
        if (clusteropts.isPresent()) {
            try {
                logger.info("applying cbopts:" + clusteropts.get());
                ExprEvaluator<Cluster.Builder> clusterEval = new GraalJsEvaluator<>(Cluster.Builder.class);
                clusterEval.put("builder", builder);
                String importEnv =
                    "load(\"nashorn:mozilla_compat.js\");\n" +
                        " importPackage(com.google.common.collect.Lists);\n" +
                        " importPackage(com.google.common.collect.Maps);\n" +
                        " importPackage(com.datastax.driver);\n" +
                        " importPackage(com.datastax.driver.core);\n" +
                        " importPackage(com.datastax.driver.core.policies);\n" +
                        "builder" + clusteropts.get() + "\n";
                clusterEval.script(importEnv);
                builder = clusterEval.eval();
                logger.info("successfully applied:" + clusteropts.get());
            } catch (Exception e) {
                throw new RuntimeException("Unable to evaluate: " + clusteropts.get() + " in script context:", e);
            }
        }

        // whitelist and lbp both install a load balancing policy; the second
        // would silently override the first, so make the conflict explicit.
        if (activityDef.getParams().getOptionalString("whitelist").isPresent() &&
            activityDef.getParams().getOptionalString("lbp", "loadbalancingpolicy").isPresent()) {
            throw new BasicError("You specified both whitelist=.. and lbp=..., if you need whitelist and other policies together," +
                " be sure to use the lbp option only with a whitelist policy included.");
        }

        Optional<String> specSpec = activityDef.getParams()
            .getOptionalString("speculative");

        if (specSpec.isPresent()) {
            specSpec
                .map(speculative -> {
                    logger.info("speculative=>" + speculative);
                    return speculative;
                })
                .map(CQLOptions::speculativeFor)
                .ifPresent(builder::withSpeculativeExecutionPolicy);
        }

        activityDef.getParams().getOptionalString("protocol_version")
            .map(String::toUpperCase)
            .map(ProtocolVersion::valueOf)
            .map(pv -> {
                logger.info("protocol_version=>" + pv);
                return pv;
            })
            .ifPresent(builder::withProtocolVersion);

        activityDef.getParams().getOptionalString("socketoptions")
            .map(sockopts -> {
                logger.info("socketoptions=>" + sockopts);
                return sockopts;
            })
            .map(CQLOptions::socketOptionsFor)
            .ifPresent(builder::withSocketOptions);

        activityDef.getParams().getOptionalString("reconnectpolicy")
            .map(reconnectpolicy -> {
                logger.info("reconnectpolicy=>" + reconnectpolicy);
                return reconnectpolicy;
            })
            .map(CQLOptions::reconnectPolicyFor)
            .ifPresent(builder::withReconnectionPolicy);

        activityDef.getParams().getOptionalString("pooling")
            .map(pooling -> {
                logger.info("pooling=>" + pooling);
                return pooling;
            })
            .map(CQLOptions::poolingOptionsFor)
            .ifPresent(builder::withPoolingOptions);

        activityDef.getParams().getOptionalString("whitelist")
            .map(whitelist -> {
                logger.info("whitelist=>" + whitelist);
                return whitelist;
            })
            .map(p -> CQLOptions.whitelistFor(p, null))
            .ifPresent(builder::withLoadBalancingPolicy);

        activityDef.getParams().getOptionalString("lbp")
            .map(lbp -> {
                logger.info("lbp=>" + lbp);
                return lbp;
            })
            .map(p -> CQLOptions.lbpolicyFor(p, null))
            .ifPresent(builder::withLoadBalancingPolicy);

        activityDef.getParams().getOptionalString("tickduration")
            .map(tickduration -> {
                logger.info("tickduration=>" + tickduration);
                return tickduration;
            })
            .map(CQLOptions::withTickDuration)
            .ifPresent(builder::withNettyOptions);

        activityDef.getParams().getOptionalString("compression")
            .map(compression -> {
                logger.info("compression=>" + compression);
                return compression;
            })
            .map(CQLOptions::withCompression)
            .ifPresent(builder::withCompression);

        SSLContext context = SSLKsFactory.get().getContext(activityDef);
        if (context != null) {
            builder.withSSL(RemoteEndpointAwareJdkSSLOptions.builder().withSSLContext(context).build());
        }

        RetryPolicy retryPolicy = activityDef.getParams()
            .getOptionalString("retrypolicy")
            .map(CQLOptions::retryPolicyFor).orElse(DefaultRetryPolicy.INSTANCE);

        if (retryPolicy instanceof LoggingRetryPolicy) {
            logger.info("using LoggingRetryPolicy");
        }

        builder.withRetryPolicy(retryPolicy);

        if (!activityDef.getParams().getOptionalBoolean("jmxreporting").orElse(false)) {
            builder.withoutJMXReporting();
        }

        // Proxy Translator and Whitelist for use with DS Cloud on-demand single-endpoint setup
        if (activityDef.getParams().getOptionalBoolean("single-endpoint").orElse(false)) {
            InetSocketAddress inetHost = new InetSocketAddress(host, port);
            final List<InetSocketAddress> whiteList = new ArrayList<>();
            whiteList.add(inetHost);
            LoadBalancingPolicy whitelistPolicy = new WhiteListPolicy(new RoundRobinPolicy(), whiteList);
            builder.withAddressTranslator(new ProxyTranslator(inetHost)).withLoadBalancingPolicy(whitelistPolicy);
        }

        // Optionally prepend an HAProxy PROXY-protocol header on each new channel,
        // spoofing the given source (and optionally destination) address.
        activityDef.getParams().getOptionalString("haproxy_source_ip").map(
            ip -> {
                return new NettyOptions() {
                    @Override
                    public void afterChannelInitialized(SocketChannel channel) throws Exception {
                        try {
                            InetAddress sourceIp = InetAddress.getByName(ip);
                            InetAddress destIp = activityDef.getParams().getOptionalString("haproxy_dest_ip").map(destip -> {
                                    try {
                                        return InetAddress.getByName(destip);
                                    } catch (UnknownHostException e) {
                                        logger.warn("Invalid haproxy_dest_ip {}", destip);
                                        return sourceIp;
                                    }
                                }
                            ).orElse(sourceIp);
                            channel.pipeline().addFirst("proxyProtocol", new ProxyProtocolHander(
                                new HAProxyMessage(
                                    HAProxyProtocolVersion.V1,
                                    HAProxyCommand.PROXY,
                                    sourceIp instanceof Inet6Address ? HAProxyProxiedProtocol.TCP6 : HAProxyProxiedProtocol.TCP4,
                                    sourceIp.getHostAddress(),
                                    destIp.getHostAddress(),
                                    8000,
                                    8000)));
                        } catch (UnknownHostException e) {
                            logger.warn("Invalid haproxy_source_ip {}", ip);
                        }
                    }
                };
            }
        ).ifPresent(builder::withNettyOptions);

        Cluster cl = builder.build();

        // Apply default idempotence, if set. ifPresent is used because this is a
        // side effect; the prior Optional.map discarded its result.
        activityDef.getParams().getOptionalBoolean("defaultidempotence").ifPresent(
            b -> cl.getConfiguration().getQueryOptions().setDefaultIdempotence(b)
        );

        Session session = cl.newSession();

        // This also forces init of metadata
        logger.info("cluster-metadata-allhosts:\n" + session.getCluster().getMetadata().getAllHosts());

        if (activityDef.getParams().getOptionalBoolean("drivermetrics").orElse(false)) {
            String driverPrefix = "driver." + sessid;
            driverPrefix = activityDef.getParams().getOptionalString("driverprefix").orElse(driverPrefix) + ".";
            ActivityMetrics.mountSubRegistry(driverPrefix, cl.getMetrics().getRegistry());
        }

        return session;
    }

    /** Close every cached session (and its cluster), then empty the cache. */
    @Override
    public void shutdown() {
        for (Session session : sessionCache.values()) {
            Cluster cluster = session.getCluster();
            session.close();
            cluster.close();
        }
        sessionCache.clear();
    }
}

View File

@ -0,0 +1,105 @@
package io.nosqlbench.activitytype.cql.statements.core;
import com.datastax.driver.core.ConsistencyLevel;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;
import java.util.*;
import java.util.stream.Collectors;
/**
 * A plain data holder for one named CQL statement template as loaded from a
 * workload definition: the statement text, its named bindings, per-statement
 * params, prepared flag, and consistency level.
 */
public class CQLStatementDef {
    private final static Logger logger = LogManager.getLogger(CQLStatementDef.class);

    private Map<String, String> params = new HashMap<>();
    private String name = "";
    private String statement = "";
    private boolean prepared = true;
    private String cl = ConsistencyLevel.LOCAL_ONE.name();
    private Map<String, String> bindings = new HashMap<>();

    public CQLStatementDef() {
    }

    /** @return the binding recipe for the given anchor name, or null if undefined. */
    public String getGenSpec(String s) {
        return bindings.get(s);
    }

    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }

    public String getStatement() {
        return statement;
    }

    public void setStatement(String statement) {
        this.statement = statement;
    }

    public Map<String, String> getBindings() {
        return bindings;
    }

    public void setBindings(Map<String, String> bindings) {
        this.bindings = bindings;
    }

    public String toString() {
        StringBuilder sb = new StringBuilder();
        sb.append(" name:").append(this.getName()).append("\n");
        sb.append(" statement: |").append("\n");
        String formattedStmt = Arrays.stream(getStatement().split("\\r*\n"))
            .map(s -> " " + s)
            .collect(Collectors.joining("\n"));
        sb.append(formattedStmt);
        if (!bindings.isEmpty()) {
            // newline was missing here, fusing the bindings header onto the
            // last statement line
            sb.append("\n");
            sb.append(" bindings:\n");
            // pad binding names to the longest one so values line up
            Optional<Integer> maxLen = this.bindings.keySet().stream().map(String::length).reduce(Integer::max);
            for (String bindName : this.bindings.keySet()) {
                sb
                    .append(String.format(" %-" + (maxLen.orElse(20) + 2) + "s", bindName)).append(" : ")
                    .append(bindings.get(bindName))
                    .append("\n");
            }
        }
        return sb.toString();
    }

    public boolean isPrepared() {
        return prepared;
    }

    public void setPrepared(boolean prepared) {
        this.prepared = prepared;
    }

    public String getConsistencyLevel() {
        return this.cl;
    }

    public void setConsistencyLevel(String consistencyLevel) {
        this.cl = consistencyLevel;
    }

    /** Alias for {@link #setConsistencyLevel(String)}, used by YAML property mapping. */
    public void setCl(String consistencyLevel) {
        setConsistencyLevel(consistencyLevel);
    }

    public Map<String, String> getParams() {
        return params;
    }

    public void setParams(Map<String, String> params) {
        this.params = params;
    }

    /** @return the statement's mix ratio from params, defaulting to 1. */
    public long getRatio() {
        return Long.parseLong(params.getOrDefault("ratio", "1"));
    }
}

View File

@ -0,0 +1,159 @@
package io.nosqlbench.activitytype.cql.statements.core;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
/**
 * Parses a CQL statement template, replacing named anchors of the form
 * {@code ?name} or {@code {name}} with positional '?' markers, and reporting
 * which anchors are missing bindings and which bindings are unused.
 */
public class CQLStatementDefParser {
    private final static Logger logger = LogManager.getLogger(CQLStatementDefParser.class);
    //    private final static Pattern templateToken = Pattern.compile("<<(\\w+(:(.+?))?)>>");
    // Matches either '?name' (group 1) or '{name}' (group 2) anchors.
    private final static Pattern stmtToken = Pattern.compile("\\?(\\w+[-_\\d\\w]*)|\\{(\\w+[-_\\d\\w.]*)}");
    private final String stmt;
    private final String name;

    private CQLStatementDef deprecatedDef; // deprecated, to be removed

    private Map<String, String> bindings;

    public void setBindings(Map<String, String> bindings) {
        this.bindings = bindings;
    }

    public CQLStatementDef getDeprecatedDef() {
        return deprecatedDef;
    }

    public void setDeprecatedDef(CQLStatementDef deprecatedDef) {
        this.deprecatedDef = deprecatedDef;
    }

    public CQLStatementDefParser(String name, String stmt) {
        this.stmt = stmt;
        this.name = name;
        // Fixed: was 'this.bindings = bindings', a self-assignment that left
        // the field null until setBindings was called.
        this.bindings = new HashMap<>();
    }

    public Map<String, String> getBindings() {
        return bindings;
    }

    /**
     * @return bindableNames in order as specified in the parameter placeholders
     */
    public List<String> getBindableNames() {
        Matcher m = stmtToken.matcher(stmt);
        List<String> bindNames = new ArrayList<>();
        while (m.find()) {
            String form1 = m.group(1);
            String form2 = m.group(2);
            bindNames.add((form1 != null && !form1.isEmpty()) ? form1 : form2);
        }
        return bindNames;
    }

    public String getName() {
        return name;
    }

    /**
     * Parse the statement against the given binding names, throwing if any
     * anchor lacks a binding.
     */
    public String getParsedStatementOrError(Set<String> namedBindings) {
        ParseResult result = getParseResult(namedBindings);
        if (result.hasError()) {
            throw new RuntimeException("Statement template has errors:\n" + result.toString());
        }
        return result.getStatement();
    }

    /**
     * Rewrite named anchors to positional '?' markers, tracking which bindings
     * were never referenced (missingAnchors) and which anchors have no binding
     * (missingBindings).
     */
    public ParseResult getParseResult(Set<String> namedBindings) {

        HashSet<String> missingAnchors = new HashSet<String>() {{
            addAll(namedBindings);
        }};
        HashSet<String> missingBindings = new HashSet<String>();

        String statement = this.stmt;
        StringBuilder cooked = new StringBuilder();

        Matcher m = stmtToken.matcher(statement);
        int lastMatch = 0;
        while (m.find(lastMatch)) {
            String pre = statement.substring(lastMatch, m.start());
            String form1 = m.group(1);
            String form2 = m.group(2);
            String tokenName = (form1 != null && !form1.isEmpty()) ? form1 : form2;
            lastMatch = m.end();
            cooked.append(pre);
            cooked.append("?");

            if (!namedBindings.contains(tokenName)) {
                missingBindings.add(tokenName);
            } else {
                missingAnchors.remove(tokenName);
            }
        }

        // append the remainder after the last anchor (lastMatch is always >= 0,
        // so the prior dead else-branch was removed)
        cooked.append(statement.substring(lastMatch));

        logger.info("Parsed statement as: " + cooked.toString().replaceAll("\\n", "\\\\n"));

        return new ParseResult(cooked.toString(), name, bindings, missingBindings, missingAnchors);
    }

    /** The outcome of anchor rewriting, including any missing bindings/anchors. */
    public static class ParseResult {
        private final Set<String> missingGenerators;
        private final Set<String> missingAnchors;
        private final String statement;
        private final Map<String, String> bindings;
        private final String name;

        public ParseResult(String stmt, String name, Map<String, String> bindings, Set<String> missingGenerators, Set<String> missingAnchors) {
            this.missingGenerators = missingGenerators;
            this.missingAnchors = missingAnchors;
            this.statement = stmt;
            this.name = name;
            // Fixed: the bindings parameter was previously dropped, so
            // getBindings() always returned null.
            this.bindings = bindings;
        }

        public String toString() {
            String generatorsSummary = (this.missingGenerators.size() > 0) ?
                "\nundefined generators:" + this.missingGenerators.stream().collect(Collectors.joining(",", "[", "]")) : "";
            return "STMT:" + statement + "\n" + generatorsSummary;
        }

        public String getName() {
            return name;
        }

        public Map<String, String> getBindings() {
            return bindings;
        }

        public boolean hasError() {
            return missingGenerators.size() > 0;
        }

        public String getStatement() {
            return statement;
        }

        public Set<String> getMissingAnchors() {
            return missingAnchors;
        }

        public Set<String> getMissingGenerators() {
            return missingGenerators;
        }
    }
}

View File

@ -0,0 +1,37 @@
package io.nosqlbench.activitytype.cql.statements.core;
import java.util.*;
/**
 * A named grouping of parsed statement definitions, allowing one or more
 * groups to be selected by name.
 */
public class CQLStatementGroups {

    private final Map<String, List<CQLStatementDefParser>> statementGroups;

    public CQLStatementGroups(Map<String, List<CQLStatementDefParser>> statementGroups) {
        this.statementGroups = statementGroups;
    }

    /**
     * @return the concatenated statements of the named groups, in the order
     * given; unknown group names contribute nothing.
     */
    public List<CQLStatementDefParser> getGroups(String... groupNames) {
        List<CQLStatementDefParser> statements = new ArrayList<CQLStatementDefParser>();
        for (String groupName : groupNames) {
            List<CQLStatementDefParser> adding = statementGroups.getOrDefault(groupName, Collections.emptyList());
            statements.addAll(adding);
        }
        return statements;
    }

    public String toString() {
        StringBuilder sb = new StringBuilder();
        List<String> groups = new ArrayList<String>(statementGroups.keySet());
        Collections.sort(groups);
        sb.append("groups:\n");
        for (String group : groups) {
            for (CQLStatementDefParser statementDef : statementGroups.get(group)) {
                sb.append(statementDef.toString());
            }
            sb.append("\n");
        }
        return sb.toString();
    }
}

View File

@ -0,0 +1,27 @@
package io.nosqlbench.activitytype.cql.statements.core;
import java.util.concurrent.atomic.AtomicBoolean;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelOutboundHandlerAdapter;
import io.netty.channel.ChannelPromise;
import io.netty.handler.codec.haproxy.HAProxyMessage;
import io.netty.handler.codec.haproxy.HAProxyMessageEncoder;
// Netty outbound handler that prepends a single HAProxy PROXY-protocol header
// on the first write of a channel, then passes all traffic through unchanged.
// NOTE(review): class name carries a typo ("Hander"); kept as-is because it is
// referenced by name from CQLSessionCache.
class ProxyProtocolHander extends ChannelOutboundHandlerAdapter
{
    // guards one-time emission of the header across concurrent writes
    private final AtomicBoolean sent = new AtomicBoolean(false);
    private final HAProxyMessage message;

    ProxyProtocolHander(HAProxyMessage message) {
        this.message = message;
    }

    @Override
    public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception {
        // emit the PROXY header exactly once, before the first real payload;
        // a void promise is used since there is no caller waiting on it
        if (sent.compareAndSet(false, true))
            HAProxyMessageEncoder.INSTANCE.write(ctx, message, ctx.voidPromise());
        super.write(ctx, msg, promise);
    }
}

View File

@ -0,0 +1,213 @@
package io.nosqlbench.activitytype.cql.statements.core;
import com.codahale.metrics.Histogram;
import com.codahale.metrics.Timer;
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.SimpleStatement;
import com.datastax.driver.core.Statement;
import io.nosqlbench.activitytype.cql.api.ResultSetCycleOperator;
import io.nosqlbench.activitytype.cql.api.RowCycleOperator;
import io.nosqlbench.engine.api.metrics.ThreadLocalNamedTimers;
import io.nosqlbench.virtdata.core.bindings.ContextualArrayBindings;
import java.io.IOException;
import java.io.Writer;
import java.util.List;
import java.util.concurrent.TimeUnit;
/**
* A ReadyCQLStatement instantiates new statements to be executed at some mix ratio.
* It optionally holds metrics objects for a named statement.
*/
public class ReadyCQLStatement {
private final String name;
private final ContextualArrayBindings<?, Statement> contextualBindings;
private long ratio;
private ResultSetCycleOperator[] resultSetOperators = null;
private RowCycleOperator[] rowCycleOperators = null;
private Timer successTimer;
private Timer errorTimer;
private Histogram rowsFetchedHisto;
private Writer resultCsvWriter;
private List<String> startTimers;
private List<String> stopTimers;
public ReadyCQLStatement(ContextualArrayBindings<?, Statement> contextualBindings, long ratio, String name) {
this.contextualBindings = contextualBindings;
this.ratio = ratio;
this.name = name;
}
public ReadyCQLStatement withMetrics(Timer successTimer, Timer errorTimer, Histogram rowsFetchedHisto) {
this.successTimer = successTimer;
this.errorTimer = errorTimer;
this.rowsFetchedHisto = rowsFetchedHisto;
return this;
}
public Statement bind(long value) {
return contextualBindings.bind(value);
}
public ResultSetCycleOperator[] getResultSetOperators() {
return resultSetOperators;
}
public ContextualArrayBindings getContextualBindings() {
return this.contextualBindings;
}
public String getQueryString(long value) {
Object stmt = contextualBindings.getContext();
if (stmt instanceof PreparedStatement) {
String queryString = ((PreparedStatement)stmt).getQueryString();
StringBuilder sb = new StringBuilder(queryString.length()*2);
sb.append("(prepared) ");
return getQueryStringValues(value, queryString, sb);
} else if (stmt instanceof SimpleStatement) {
String queryString = ((SimpleStatement) stmt).getQueryString();
StringBuilder sb = new StringBuilder();
sb.append("(simple) ");
return getQueryStringValues(value, queryString, sb);
}
if (stmt instanceof String) {
return (String)stmt;
}
throw new RuntimeException("context object not recognized for query string:" + stmt.getClass().getCanonicalName());
}
private String getQueryStringValues(long value, String queryString, StringBuilder sb) {
if (!queryString.endsWith("\n")) {
sb.append("\n");
}
sb.append(queryString).append(" VALUES[");
Object[] all = contextualBindings.getBindings().getAll(value);
String delim="";
for (Object o : all) {
sb.append(delim);
delim=",";
sb.append(o.toString());
}
sb.append("]");
return sb.toString();
}
public long getRatio() {
return ratio;
}
public void setRatio(long ratio) {
this.ratio = ratio;
}
public void onStart() {
if (startTimers != null) {
ThreadLocalNamedTimers.TL_INSTANCE.get().start(startTimers);
}
}
/**
* This method should be called when an associated statement is executed successfully.
*
* @param cycleValue The cycle associated with the execution.
* @param nanoTime The nanoTime duration of the execution.
* @param rowsFetched The number of rows fetched for this cycle
*/
public void onSuccess(long cycleValue, long nanoTime, long rowsFetched) {
if (successTimer != null) {
successTimer.update(nanoTime, TimeUnit.NANOSECONDS);
}
if (stopTimers != null) {
ThreadLocalNamedTimers.TL_INSTANCE.get().stop(stopTimers);
}
if (rowsFetchedHisto != null) {
rowsFetchedHisto.update(rowsFetched);
}
if (resultCsvWriter != null) {
try {
synchronized (resultCsvWriter) {
// <cycle>,(SUCCESS|FAILURE),<nanos>,<rowsfetched>,<errorname>\n
resultCsvWriter
.append(String.valueOf(cycleValue)).append(",")
.append("SUCCESS,")
.append(String.valueOf(nanoTime)).append(",")
.append(String.valueOf(rowsFetched))
.append(",NONE")
.append("\n");
}
} catch (IOException e) {
throw new RuntimeException(e);
}
}
}
/**
* This method should be called when an associated statement is executed unsuccessfully.
* It should be called only once per cycle in the case of execution error.
* @param cycleValue The cycle associated with the erred execution.
* @param resultNanos The nanoTime duration of the execution.
* @param t The associated throwable
*/
public void onError(long cycleValue, long resultNanos, Throwable t) {
if (errorTimer != null) {
errorTimer.update(resultNanos, TimeUnit.NANOSECONDS);
}
if (stopTimers != null) {
ThreadLocalNamedTimers.TL_INSTANCE.get().stop(stopTimers);
}
if (resultCsvWriter != null) {
try {
synchronized (resultCsvWriter) {
// <cycle>,(SUCCESS|FAILURE),<nanos>,<rowsfetched>,<errorname>\n
resultCsvWriter
.append(String.valueOf(cycleValue)).append(",")
.append("FAILURE,")
.append(String.valueOf(resultNanos)).append(",")
.append("0,")
.append(t.getClass().getSimpleName()).append(",")
.append("\n");
}
} catch (IOException e) {
throw new RuntimeException(e);
}
}
}
public ReadyCQLStatement withResultSetCycleOperators(ResultSetCycleOperator[] resultSetCycleOperators) {
this.resultSetOperators = resultSetCycleOperators;
return this;
}
public ReadyCQLStatement withRowCycleOperators(RowCycleOperator[] rowCycleOperators) {
this.rowCycleOperators = rowCycleOperators;
return this;
}
public RowCycleOperator[] getRowCycleOperators() {
return this.rowCycleOperators;
}
    /**
     * Attach a writer which receives one CSV line per execution outcome
     * (see onSuccess/onError for the line format). May be null to disable.
     * @param resultCsvWriter the shared writer; access is synchronized on this instance
     * @return this ReadyCQLStatement, for method chaining
     */
    public ReadyCQLStatement withResultCsvWriter(Writer resultCsvWriter) {
        this.resultCsvWriter = resultCsvWriter;
        return this;
    }
    /**
     * Set the names of the thread-local timers to start for this statement.
     * @param startTimers timer names, or null for none
     * @return this ReadyCQLStatement, for method chaining
     */
    public ReadyCQLStatement withStartTimers(List<String> startTimers) {
        this.startTimers = startTimers;
        return this;
    }
    /**
     * Set the names of the thread-local timers to stop for this statement.
     * @param stopTimers timer names, or null for none
     * @return this ReadyCQLStatement, for method chaining
     */
    public ReadyCQLStatement withStopTimers(List<String> stopTimers) {
        this.stopTimers = stopTimers;
        return this;
    }
public String toString() {
return "ReadyCQLStatement: " + contextualBindings.toString();
}
}

View File

@ -0,0 +1,138 @@
package io.nosqlbench.activitytype.cql.statements.core;
import com.codahale.metrics.Histogram;
import com.codahale.metrics.Timer;
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.SimpleStatement;
import com.datastax.driver.core.Statement;
import io.nosqlbench.activitytype.cql.api.ResultSetCycleOperator;
import io.nosqlbench.activitytype.cql.api.RowCycleOperator;
import io.nosqlbench.activitytype.cql.core.CqlActivity;
import io.nosqlbench.activitytype.cql.statements.binders.CqlBinderTypes;
import io.nosqlbench.activitytype.cql.statements.binders.SimpleStatementValuesBinder;
import io.nosqlbench.engine.api.metrics.ActivityMetrics;
import io.nosqlbench.virtdata.core.bindings.BindingsTemplate;
import io.nosqlbench.virtdata.core.bindings.ContextualBindingsArrayTemplate;
import io.nosqlbench.virtdata.core.bindings.ValuesArrayBinder;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import java.io.Writer;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
/**
 * A reusable template from which per-thread {@link ReadyCQLStatement} instances
 * are resolved. Holds the statement (prepared or simple), its bindings template,
 * its selection ratio, and optional instrumentation/operator configuration.
 */
public class ReadyCQLStatementTemplate {

    private final static Logger logger = LogManager.getLogger(ReadyCQLStatementTemplate.class);
    private final Session session;
    private final ContextualBindingsArrayTemplate<?, Statement> template;
    private final long ratio;
    private final String name;

    private ResultSetCycleOperator[] resultSetCycleOperators;
    private RowCycleOperator[] rowCycleOperators;

    private Timer successTimer;
    private Timer errorTimer;
    private Histogram rowsFetchedHisto;
    private Writer resultCsvWriter;
    private List<String> startTimers;
    private List<String> stopTimers;

    /**
     * Create a template for a prepared statement.
     * @param fconfig field configuration for the bindings template
     * @param binderType which values-array binder implementation to use
     * @param session the driver session (kept for binder construction context)
     * @param preparedStmt the prepared statement to bind against
     * @param ratio the selection ratio for this statement
     * @param name the statement name, used for metrics naming
     */
    public ReadyCQLStatementTemplate(Map<String, Object> fconfig, CqlBinderTypes binderType, Session session,
                                     PreparedStatement preparedStmt, long ratio, String name) {
        this.session = session;
        this.name = name;
        ValuesArrayBinder<PreparedStatement, Statement> binder = binderType.get(session);
        logger.trace("Using binder_type=>" + binder.toString());

        template = new ContextualBindingsArrayTemplate<>(
            preparedStmt,
            new BindingsTemplate(fconfig),
            binder
        );
        this.ratio = ratio;
    }

    /** Register a named thread-local timer to start when this statement executes. */
    public void addTimerStart(String name) {
        if (startTimers == null) {
            startTimers = new ArrayList<>();
        }
        startTimers.add(name);
    }

    /** Register a named thread-local timer to stop when this statement completes. */
    public void addTimerStop(String name) {
        if (stopTimers == null) {
            stopTimers = new ArrayList<>();
        }
        stopTimers.add(name);
    }

    /**
     * Create a template for a simple (non-prepared) statement.
     * @param fconfig field configuration for the bindings template
     * @param session the driver session
     * @param simpleStatement the simple statement to bind against
     * @param ratio the selection ratio for this statement
     * @param name the statement name, used for metrics naming
     * @param parameterized whether the simple statement uses positional parameters
     * @param startTimers named timers to start per execution, or null
     * @param stopTimers named timers to stop per execution, or null
     */
    public ReadyCQLStatementTemplate(
        Map<String, Object> fconfig,
        Session session,
        SimpleStatement simpleStatement,
        long ratio, String name,
        boolean parameterized,
        List<String> startTimers,
        List<String> stopTimers) {
        this.session = session;
        this.name = name;
        template = new ContextualBindingsArrayTemplate<>(
            simpleStatement,
            new BindingsTemplate(fconfig),
            new SimpleStatementValuesBinder(parameterized)
        );
        this.ratio = ratio;
        // BUGFIX: these parameters were previously accepted but never assigned,
        // so timers configured for simple statements were silently dropped.
        this.startTimers = startTimers;
        this.stopTimers = stopTimers;
    }

    /**
     * Resolve a ready-to-execute statement instance carrying this template's
     * metrics, operators, writers, and timer configuration.
     */
    public ReadyCQLStatement resolve() {
        return new ReadyCQLStatement(template.resolveBindings(), ratio, name)
            .withMetrics(this.successTimer, this.errorTimer, this.rowsFetchedHisto)
            .withResultSetCycleOperators(resultSetCycleOperators)
            .withRowCycleOperators(rowCycleOperators)
            .withResultCsvWriter(resultCsvWriter)
            .withStartTimers(startTimers)
            .withStopTimers(stopTimers);
    }

    /** @return the contextual bindings template backing this statement template */
    public ContextualBindingsArrayTemplate<?, Statement> getContextualBindings() {
        return template;
    }

    /** @return the statement name used for metrics naming */
    public String getName() {
        return name;
    }

    /**
     * Attach per-statement success/error timers and result-set size histogram,
     * named after this statement.
     */
    public void instrument(CqlActivity activity) {
        this.successTimer = ActivityMetrics.timer(activity.getActivityDef(), name + "--success");
        this.errorTimer = ActivityMetrics.timer(activity.getActivityDef(), name + "--error");
        this.rowsFetchedHisto = ActivityMetrics.histogram(activity.getActivityDef(), name + "--resultset-size");
    }

    /** Route per-execution CSV result lines to the named writer of the activity. */
    public void logResultCsv(CqlActivity activity, String name) {
        this.resultCsvWriter = activity.getNamedWriter(name);
    }

    /** Append result-set cycle operators to any already configured. */
    public void addResultSetOperators(ResultSetCycleOperator... addingOperators) {
        resultSetCycleOperators = (resultSetCycleOperators == null) ? new ResultSetCycleOperator[0] : resultSetCycleOperators;

        ResultSetCycleOperator[] newOperators = new ResultSetCycleOperator[resultSetCycleOperators.length + addingOperators.length];
        System.arraycopy(resultSetCycleOperators, 0, newOperators, 0, resultSetCycleOperators.length);
        System.arraycopy(addingOperators, 0, newOperators, resultSetCycleOperators.length, addingOperators.length);
        this.resultSetCycleOperators = newOperators;
    }

    /** Append row cycle operators to any already configured. */
    public void addRowCycleOperators(RowCycleOperator... addingOperators) {
        rowCycleOperators = (rowCycleOperators == null) ? new RowCycleOperator[0] : rowCycleOperators;
        RowCycleOperator[] newOperators = new RowCycleOperator[rowCycleOperators.length + addingOperators.length];
        System.arraycopy(rowCycleOperators, 0, newOperators, 0, rowCycleOperators.length);
        System.arraycopy(addingOperators, 0, newOperators, rowCycleOperators.length, addingOperators.length);
        this.rowCycleOperators = newOperators;
    }
}

View File

@ -0,0 +1,25 @@
package io.nosqlbench.activitytype.cql.statements.core;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
/**
 * An ordered collection of {@link ReadyCQLStatementTemplate}s which can be
 * resolved together into a list of per-thread {@link ReadyCQLStatement}s.
 */
public class ReadyCQLStatementsTemplate {

    private final List<ReadyCQLStatementTemplate> readyStatementList = new ArrayList<>();

    /** Add a statement template to this collection. */
    public void addTemplate(ReadyCQLStatementTemplate t) {
        this.readyStatementList.add(t);
    }

    /**
     * Resolve each contained template, preserving order.
     * @return a new mutable list of resolved statements
     */
    public List<ReadyCQLStatement> resolve() {
        List<ReadyCQLStatement> resolved = new ArrayList<>(readyStatementList.size());
        for (ReadyCQLStatementTemplate template : readyStatementList) {
            resolved.add(template.resolve());
        }
        return resolved;
    }

    /** @return the number of templates held */
    public int size() {
        return readyStatementList.size();
    }
}

View File

@ -0,0 +1,57 @@
package io.nosqlbench.activitytype.cql.statements.core;
import io.nosqlbench.engine.api.util.Tagged;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
public class TaggedCQLStatementDefs implements Tagged {
private List<CQLStatementDef> statements = new ArrayList<>();
private Map<String,String> tags = new HashMap<>();
private Map<String,String> params = new HashMap<>();
public TaggedCQLStatementDefs(Map<String,String> tags, Map<String,String> params, List<CQLStatementDef> statements) {
this.tags = tags;
this.params = params;
this.statements = statements;
}
public TaggedCQLStatementDefs(Map<String,String> tags, List<CQLStatementDef> statements) {
this.tags = tags;
this.statements = statements;
}
public TaggedCQLStatementDefs(List<CQLStatementDef> statements) {
this.statements = statements;
}
public TaggedCQLStatementDefs() {
}
public List<CQLStatementDef> getStatements() {
return statements;
}
public void setStatements(List<CQLStatementDef> statements) {
this.statements = statements;
}
public Map<String, String> getTags() {
return tags;
}
public void setTags(Map<String, String> tags) {
this.tags = tags;
}
public Map<String, String> getParams() {
return params;
}
public void setParams(Map<String, String> params) {
this.params = params;
}
}

View File

@ -0,0 +1,71 @@
package io.nosqlbench.activitytype.cql.statements.core;
import io.nosqlbench.engine.api.activityimpl.ActivityInitializationError;
import io.nosqlbench.nb.api.content.Content;
import io.nosqlbench.nb.api.content.NBIO;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.yaml.snakeyaml.TypeDescription;
import org.yaml.snakeyaml.Yaml;
import org.yaml.snakeyaml.constructor.Constructor;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.function.Function;
@SuppressWarnings("ALL")
public class YamlCQLStatementLoader {
private final static Logger logger = LogManager.getLogger(YamlCQLStatementLoader.class);
List<Function<String, String>> transformers = new ArrayList<>();
public YamlCQLStatementLoader() {
}
public YamlCQLStatementLoader(Function<String, String>... transformers) {
this.transformers.addAll(Arrays.asList(transformers));
}
public AvailableCQLStatements load(String fromPath, String... searchPaths) {
Content<?> yamlContent = NBIO.all().prefix(searchPaths).name(fromPath).extension("yaml").one();
String data = yamlContent.asString();
for (Function<String, String> xform : transformers) {
try {
logger.debug("Applying string transformer to yaml data:" + xform);
data = xform.apply(data);
} catch (Exception e) {
RuntimeException t = new ActivityInitializationError("Error applying string transform to input", e);
throw t;
}
}
Yaml yaml = getCustomYaml();
try {
Iterable<Object> objects = yaml.loadAll(data);
List<TaggedCQLStatementDefs> stmtListList = new ArrayList<>();
for (Object object : objects) {
TaggedCQLStatementDefs tsd = (TaggedCQLStatementDefs) object;
stmtListList.add(tsd);
}
return new AvailableCQLStatements(stmtListList);
} catch (Exception e) {
logger.error("Error loading yaml from " + fromPath, e);
throw e;
}
}
private Yaml getCustomYaml() {
Constructor constructor = new Constructor(TaggedCQLStatementDefs.class);
TypeDescription tds = new TypeDescription(TaggedCQLStatementDefs.class);
tds.putListPropertyType("statements", CQLStatementDef.class);
constructor.addTypeDescription(tds);
return new Yaml(constructor);
}
}

View File

@ -0,0 +1,12 @@
package io.nosqlbench.activitytype.cql.statements.modifiers;
import com.datastax.driver.core.Statement;
public class StartTimerOp implements StatementModifier {

    /**
     * Pass the statement through unchanged.
     * NOTE(review): despite the class name, this modifier currently starts no
     * timer — it is a pure no-op. Confirm whether timer-start behavior was
     * intended here or whether this class exists as a placeholder hook.
     */
    @Override
    public Statement modify(Statement unmodified, long cycleNum) {
        return unmodified;
    }
}

View File

@ -0,0 +1,11 @@
package io.nosqlbench.activitytype.cql.statements.modifiers;
import com.datastax.driver.core.Statement;
/**
 * Provides a modular way for any CQL activities to modify statements before execution.
 * Each active modifier returns a statement in turn.
 */
@FunctionalInterface
public interface StatementModifier {
    /**
     * @param unmodified the statement as produced by the previous stage
     * @param cycleNum the cycle for which the statement is being prepared
     * @return the statement to execute, which may be the same instance
     */
    Statement modify(Statement unmodified, long cycleNum);
}

View File

@ -0,0 +1,17 @@
package io.nosqlbench.activitytype.cql.statements.rowoperators;
import com.datastax.driver.core.Row;
import io.nosqlbench.activitytype.cql.api.RowCycleOperator;
/**
 * Print each row to stdout as it is processed.
 * (The previous javadoc — "Save specific variables to the thread local object
 * map" — was copied from a save operator and did not describe this class.)
 */
public class Print implements RowCycleOperator {

    @Override
    public int apply(Row row, long cycle) {
        System.out.println("ROW:" + row);
        return 0;
    }
}

View File

@ -0,0 +1,34 @@
package io.nosqlbench.activitytype.cql.statements.rowoperators;
import io.nosqlbench.activitytype.cql.api.RowCycleOperator;
/**
 * Registry of named {@link RowCycleOperator} implementations, allowing
 * operators to be selected by their lowercase enum name.
 */
public enum RowCycleOperators {

    saverows(SaveThreadRows.class),
    savevars(SaveThreadVars.class),
    saveglobalvars(SaveGlobalVars.class),
    print(Print.class);

    private final Class<? extends RowCycleOperator> implClass;

    RowCycleOperators(Class<? extends RowCycleOperator> implClass) {
        this.implClass = implClass;
    }

    /** @return the implementation class registered for this name */
    public Class<? extends RowCycleOperator> getImplementation() {
        return implClass;
    }

    /**
     * Instantiate the registered implementation via its no-arg constructor.
     * @throws RuntimeException wrapping any reflective instantiation failure
     */
    public RowCycleOperator getInstance() {
        try {
            return getImplementation().getConstructor().newInstance();
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }

    /**
     * Look up an operator by name and instantiate it.
     * @throws IllegalArgumentException if the name is not a registered operator
     */
    public static RowCycleOperator newOperator(String name) {
        return valueOf(name).getInstance();
    }
}

View File

@ -0,0 +1,47 @@
package io.nosqlbench.activitytype.cql.statements.rowoperators;
import com.datastax.driver.core.ColumnDefinitions;
import com.datastax.driver.core.Row;
import io.nosqlbench.activitytype.cql.api.RowCycleOperator;
import io.nosqlbench.virtdata.library.basics.core.threadstate.SharedState;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.stream.Collectors;
/**
 * Save specific named fields from each row into the thread-local object map.
 */
public class Save implements RowCycleOperator {
    private final static Logger logger = LogManager.getLogger(Save.class);

    ThreadLocal<HashMap<String, Object>> tl_objectMap = SharedState.tl_ObjectMap;

    private final String[] varnames;

    /** @param varnames the row field names to copy into the thread-local map */
    public Save(String... varnames) {
        this.varnames = varnames;
    }

    @Override
    public int apply(Row row, long cycle) {
        try {
            HashMap<String, Object> targetMap = tl_objectMap.get();
            for (String varname : varnames) {
                targetMap.put(varname, row.getObject(varname));
            }
        } catch (Exception e) {
            // Log the available column names to aid diagnosis, then rethrow.
            List<ColumnDefinitions.Definition> colDefs = row.getColumnDefinitions().asList();
            logger.error("Unable to save '" + Arrays.toString(varnames) + "' from " +
                colDefs.stream().map(ColumnDefinitions.Definition::getName)
                    .collect(Collectors.joining(",", "[", "]")) + ": ", e);
            throw e;
        }
        return 0;
    }
}

View File

@ -0,0 +1,33 @@
package io.nosqlbench.activitytype.cql.statements.rowoperators;
import com.datastax.driver.core.ColumnDefinitions;
import com.datastax.driver.core.Row;
import io.nosqlbench.activitytype.cql.api.RowCycleOperator;
import io.nosqlbench.virtdata.library.basics.core.threadstate.SharedState;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
/**
 * Stores the current row into the global object map. Key names are set from the field names. Null values are stored
 * as empty strings.
 */
public class SaveGlobalVars implements RowCycleOperator {

    ConcurrentHashMap<String, Object> gl_vars = SharedState.gl_ObjectMap;

    @Override
    public int apply(Row row, long cycle) {
        for (ColumnDefinitions.Definition definition : row.getColumnDefinitions().asList()) {
            String fieldName = definition.getName();
            Object value = row.getObject(fieldName);
            // ConcurrentHashMap rejects null values, so nulls become empty strings.
            gl_vars.put(fieldName, value == null ? "" : value);
        }
        return 0;
    }
}

View File

@ -0,0 +1,21 @@
package io.nosqlbench.activitytype.cql.statements.rowoperators;
import com.datastax.driver.core.Row;
import io.nosqlbench.activitytype.cql.api.RowCycleOperator;
import io.nosqlbench.activitytype.cql.statements.rsoperators.PerThreadCQLData;
import java.util.LinkedList;
/**
 * Adds the current row to the per-thread row cache.
 */
public class SaveThreadRows implements RowCycleOperator {

    @Override
    public int apply(Row row, long cycle) {
        PerThreadCQLData.rows.get().add(row);
        return 0;
    }
}

View File

@ -0,0 +1,31 @@
package io.nosqlbench.activitytype.cql.statements.rowoperators;
import com.datastax.driver.core.ColumnDefinitions;
import com.datastax.driver.core.Row;
import io.nosqlbench.activitytype.cql.api.RowCycleOperator;
import io.nosqlbench.virtdata.library.basics.core.threadstate.SharedState;
import java.util.HashMap;
import java.util.List;
/**
 * Saves all the values in this row to the thread-local object map,
 * with the field names as keys.
 */
public class SaveThreadVars implements RowCycleOperator {

    ThreadLocal<HashMap<String, Object>> tl_objectMap = SharedState.tl_ObjectMap;

    @Override
    public int apply(Row row, long cycle) {
        HashMap<String, Object> targetMap = tl_objectMap.get();
        for (ColumnDefinitions.Definition definition : row.getColumnDefinitions().asList()) {
            String fieldName = definition.getName();
            targetMap.put(fieldName, row.getObject(fieldName));
        }
        return 0;
    }
}

View File

@ -0,0 +1,36 @@
package io.nosqlbench.activitytype.cql.statements.rowoperators.verification;
/**
 * DiffType encodes which aspects of a row should be verified against reference
 * data, as a bitmask of field-presence and value-equality checks.
 */
public enum DiffType {

    /** Verify nothing for this statement. */
    none(0),

    /** Verify that fields named in the row are present in the reference map. */
    rowfields(0x1),

    /** Verify that fields in the reference map are present in the row data. */
    reffields(0x1 << 1),

    /**
     * Verify that all fields present in either the row or the reference data
     * are also present in the other.
     */
    fields(0x1 | 0x1 << 1),

    /**
     * Verify that all values of the same named field are equal, according to
     * {@link Object#equals(Object)}.
     */
    values(0x1 << 2),

    /**
     * Cross-verify all fields and field values between the reference data and
     * the actual data.
     */
    all(0x1 | 0x1 << 1 | 0x1 << 2);

    // Made final: the bitmask is assigned once at construction and only ever
    // read; leaving it mutable and public invited accidental modification.
    public final int bitmask;

    DiffType(int bit) {
        this.bitmask = bit;
    }

    /**
     * @param option the verification aspect to test for
     * @return true if this mode includes any bit of the given option
     */
    public boolean is(DiffType option) {
        return (bitmask & option.bitmask) > 0;
    }
}

View File

@ -0,0 +1,312 @@
package io.nosqlbench.activitytype.cql.statements.rowoperators.verification;
import com.datastax.driver.core.*;
import io.nosqlbench.activitytype.cql.api.RowCycleOperator;
import io.nosqlbench.activitytype.cql.errorhandling.exceptions.RowVerificationException;
import io.nosqlbench.virtdata.core.bindings.Bindings;
import java.math.BigDecimal;
import java.math.BigInteger;
import java.net.InetAddress;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.stream.Collectors;
/**
 * <p>RowDifferencer uses the metadata associated with a row to access and compare
 * {@link Row} values in a type-specific way.
 * </p>
 *
 * <p>NOTE: each instance holds mutable per-comparison state (logbuffer, refMap),
 * so a single instance is not safe for concurrent use; {@link ThreadLocalWrapper}
 * exists to give each thread its own instance.</p>
 */
public class RowDifferencer implements RowCycleOperator {

    // Per-comparison detail log; reset at the start of each compare(...) call.
    private final StringBuilder logbuffer = new StringBuilder();
    // Reference values generated from the bindings for the current cycle.
    private final Map<String, Object> refMap = new HashMap<>();
    private final DiffType difftype;
    private final Bindings bindings;
    private final VerificationMetrics metrics;

    private RowDifferencer(VerificationMetrics metrics, Bindings bindings, DiffType diffType) {
        this.metrics = metrics;
        this.bindings = bindings;
        this.difftype = diffType;
    }

    /**
     * see {@link DataType}
     *
     * @param typeName  The DataType.Name of the field in question
     * @param row       The row to read the field value from
     * @param fieldName The field name to read
     * @param genValue  the generated value to compare against
     * @return true, if the value is equal
     */
    private static boolean isEqual(DataType.Name typeName, Row row, String fieldName, Object genValue) {
        // Dispatch on the CQL type so each value is read with its typed accessor
        // before comparison.
        switch (typeName) {
            case ASCII: // ASCII(1, String.class)
            case VARCHAR: // VARCHAR(13, String.class)
            case TEXT: // TEXT(10, String.class)
                String textValue = row.getString(fieldName);
                return textValue.equals(genValue);
            case BIGINT: // BIGINT(2, Long.class)
            case COUNTER: // COUNTER(5, Long.class)
                long longValue = row.getLong(fieldName);
                return longValue == (long) genValue;
            case BLOB: // BLOB(3, ByteBuffer.class)
                // TODO: How do we test this one?
            case CUSTOM: // CUSTOM(0, ByteBuffer.class)
                ByteBuffer blobValue = row.getBytes(fieldName);
                return blobValue.equals(genValue);
            case BOOLEAN: // BOOLEAN(4, Boolean.class)
                boolean boolValue = row.getBool(fieldName);
                return boolValue == (boolean) genValue;
            case DECIMAL: // DECIMAL(6, BigDecimal.class)
                BigDecimal bigDecimalValue = row.getDecimal(fieldName);
                return bigDecimalValue.equals(genValue);
            case DOUBLE: // DOUBLE(7, Double.class)
                double doubleValue = row.getDouble(fieldName);
                return doubleValue == (double) genValue;
            case FLOAT: // FLOAT(8, Float.class)
                float floatValue = row.getFloat(fieldName);
                return floatValue == (float) genValue;
            case INET: // INET(16, InetAddress.class)
                InetAddress inetAddressValue = row.getInet(fieldName);
                return inetAddressValue.equals(genValue);
            case INT: // INT(9, Integer.class)
                int intValue = row.getInt(fieldName);
                return intValue == (int) genValue;
            case TIMESTAMP: // TIMESTAMP(11, Date.class)
                Date timestamp = row.getTimestamp(fieldName);
                return timestamp.equals(genValue);
            case UUID: // UUID(12, UUID.class)
            case TIMEUUID: // TIMEUUID(15, UUID.class)
                UUID uuidValue = row.getUUID(fieldName);
                return uuidValue.equals(genValue);
            case VARINT: // VARINT(14, BigInteger.class)
                BigInteger bigIntValue = row.getVarint(fieldName);
                return bigIntValue.equals(genValue);
            case LIST: // LIST(32, List.class)
                // TODO: How do we make getCollection methods work with non-String CQL types?
                List<?> list = row.getList(fieldName, String.class);
                return list.equals(genValue);
            case SET: // SET(34, Set.class)
                Set<?> set = row.getSet(fieldName, String.class);
                return set.equals(genValue);
            case MAP: // MAP(33, Map.class)
                Map<?, ?> map = row.getMap(fieldName, String.class, String.class);
                return map.equals(genValue);
            case UDT: // UDT(48, UDTValue.class)
                UDTValue udtValue = row.getUDTValue(fieldName);
                return udtValue.equals(genValue);
            case TUPLE: // TUPLE(49, TupleValue.class)
                TupleValue tupleValue = row.getTupleValue(fieldName);
                return tupleValue.equals(genValue);
            case SMALLINT:
                short shortVal = row.getShort(fieldName);
                return shortVal == (Short) genValue;
            case TINYINT:
                byte byteValue = row.getByte(fieldName);
                return byteValue == (byte) genValue;
            case DATE:
                LocalDate dateValue = row.getDate(fieldName);
                return dateValue.equals(genValue);
            case TIME:
                long timeValue = row.getTime(fieldName);
                return timeValue == (long) genValue;
            default:
                throw new RuntimeException("Unrecognized type:" + typeName);
        }
    }

    /**
     * Render a field's value as a String using the typed accessor for its CQL
     * type, for inclusion in difference detail messages.
     *
     * @param typeName  The DataType.Name of the field in question
     * @param row       The row to read the field value from
     * @param fieldName The field name to read
     * @return a String rendering of the field value
     */
    private static String prettyPrint(DataType.Name typeName, Row row, String fieldName) {
        switch (typeName) {
            case ASCII: // ASCII(1, String.class)
            case VARCHAR: // VARCHAR(13, String.class)
            case TEXT: // TEXT(10, String.class)
                return row.getString(fieldName);
            case BIGINT: // BIGINT(2, Long.class)
            case COUNTER: // COUNTER(5, Long.class)
                long counterValue = row.getLong(fieldName);
                return String.valueOf(counterValue);
            case BLOB: // BLOB(3, ByteBuffer.class)
            case CUSTOM: // CUSTOM(0, ByteBuffer.class)
                ByteBuffer blobValue = row.getBytes(fieldName);
                return String.valueOf(blobValue);
            case BOOLEAN: // BOOLEAN(4, Boolean.class)
                boolean boolValue = row.getBool(fieldName);
                return String.valueOf(boolValue);
            case DECIMAL: // DECIMAL(6, BigDecimal.class)
                BigDecimal bigDecimalValue = row.getDecimal(fieldName);
                return String.valueOf(bigDecimalValue);
            case DOUBLE: // DOUBLE(7, Double.class)
                double doubleValue = row.getDouble(fieldName);
                return String.valueOf(doubleValue);
            case FLOAT: // FLOAT(8, Float.class)
                float floatValue = row.getFloat(fieldName);
                return String.valueOf(floatValue);
            case INET: // INET(16, InetAddress.class)
                InetAddress inetAddressValue = row.getInet(fieldName);
                return String.valueOf(inetAddressValue);
            case INT: // INT(9, Integer.class)
                int intValue = row.getInt(fieldName);
                return String.valueOf(intValue);
            case TIMESTAMP: // TIMESTAMP(11, Date.class)
                Date timestamp = row.getTimestamp(fieldName);
                return String.valueOf(timestamp);
            case UUID: // UUID(12, UUID.class)
            case TIMEUUID: // TIMEUUID(15, UUID.class)
                UUID uuidValue = row.getUUID(fieldName);
                return String.valueOf(uuidValue);
            case VARINT: // VARINT(14, BigInteger.class)
                BigInteger bigIntValue = row.getVarint(fieldName);
                return String.valueOf(bigIntValue);
            case LIST: // LIST(32, List.class)
                List<?> list = row.getList(fieldName, String.class);
                return String.valueOf(list);
            case SET: // SET(34, Set.class)
                Set<?> set = row.getSet(fieldName, String.class);
                return String.valueOf(set);
            case MAP: // MAP(33, Map.class)
                Map<?, ?> map = row.getMap(fieldName, String.class, String.class);
                return String.valueOf(map);
            case UDT: // UDT(48, UDTValue.class)
                UDTValue udtValue = row.getUDTValue(fieldName);
                return String.valueOf(udtValue);
            case TUPLE: // TUPLE(49, TupleValue.class)
                TupleValue tupleValue = row.getTupleValue(fieldName);
                return String.valueOf(tupleValue);
            case SMALLINT:
                short val = row.getShort(fieldName);
                return String.valueOf(val);
            case TINYINT:
                byte byteValue = row.getByte(fieldName);
                return String.valueOf(byteValue);
            case DATE:
                LocalDate dateValue = row.getDate(fieldName);
                return String.valueOf(dateValue);
            case TIME:
                long timeValue = row.getTime(fieldName);
                return String.valueOf(timeValue);
            default:
                throw new RuntimeException("Type not recognized:" + typeName);
        }
    }

    /**
     * Compare the values of the row with the values generated.
     * <p>
     * Specifically,
     * <ol>
     * <li>Ensure the same number of fields.</li>
     * <li>Ensure the same pair-wise field names.</li>
     * <li>Ensure that each pair of same-named fields has the same data type.</li>
     * <li>Ensure that the value of each pair of fields is equal according to the equals
     * operator for the respective type.</li>
     * </ol>
     * *
     *
     * @param row          A row of data
     * @param referenceMap a map of values
     * @return a count of differences between the row and the reference values
     */
    private int compare(Row row, Map<String, Object> referenceMap) {
        int diff = 0;
        ColumnDefinitions cdefs = row.getColumnDefinitions();

        // Reset the detail log so getDetail() reflects only this comparison.
        logbuffer.setLength(0);

        // Check that every reference field is present in the row.
        if (difftype.is(DiffType.reffields)) {
            List<String> missingRowFields = referenceMap.keySet().stream()
                .filter(gk -> !cdefs.contains(gk))
                .collect(Collectors.toList());
            if (missingRowFields.size() > 0) {
                diff += missingRowFields.size();

                logbuffer.append("\nexpected fields '");
                logbuffer.append(String.join("','", missingRowFields));
                logbuffer.append("' not in row.");
            }
        }

        // Check that every row field is present in the reference data.
        if (difftype.is(DiffType.rowfields)) {
            List<String> missingRefFields = cdefs.asList().stream()
                .map(ColumnDefinitions.Definition::getName)
                .filter(k -> !referenceMap.containsKey(k))
                .collect(Collectors.toList());
            if (missingRefFields.size() > 0) {
                diff += missingRefFields.size();

                logbuffer.append("\nexpected fields '");
                logbuffer.append(String.join("','", missingRefFields));
                logbuffer.append("' not in reference data: " + referenceMap);
            }
        }

        // Compare values for fields present in both row and reference data.
        if (difftype.is(DiffType.values)) {
            for (ColumnDefinitions.Definition definition : row.getColumnDefinitions()) {
                String name = definition.getName();
                if (referenceMap.containsKey(name)) {
                    DataType type = definition.getType();
                    if (!isEqual(type.getName(), row, name, referenceMap.get(name))) {
                        logbuffer.append("\nvalue differs for '").append(name).append("' ");
                        logbuffer.append("expected:'").append(referenceMap.get(name).toString()).append("'");
                        logbuffer.append(" actual:'").append(prettyPrint(type.getName(), row, name)).append("'");
                        diff++;
                        metrics.unverifiedValuesCounter.inc();
                    } else {
                        metrics.verifiedValuesCounter.inc();
                    }
                }
            }
        }
        if (diff == 0) {
            metrics.verifiedRowsCounter.inc();
        } else {
            metrics.unverifiedRowsCounter.inc();
        }
        return diff;
    }

    /**
     * Get the most recent detail log recorded by this thread.
     *
     * @return a logbuffer string, with one entry per line
     */
    public String getDetail() {
        return this.logbuffer.toString();
    }

    /**
     * Generate reference values for the cycle from the bindings, compare them
     * to the row, and throw on any difference.
     *
     * @throws RowVerificationException if any difference was found; carries a
     *                                  snapshot copy of the reference map and the detail log
     */
    @Override
    public int apply(Row row, long cycle) {
        refMap.clear();
        bindings.setMap(refMap, cycle);
        int diffs = compare(row, refMap);
        if (diffs > 0) {
            // Copy the reference map, since refMap is reused across cycles.
            HashMap<String, Object> mapcopy = new HashMap<>();
            mapcopy.putAll(refMap);
            throw new RowVerificationException(cycle, row, mapcopy, getDetail());
        } else {
            return 0;
        }
    }

    /**
     * Wraps a per-thread RowDifferencer so the stateful differencer can be used
     * safely from multiple threads.
     */
    public static class ThreadLocalWrapper implements RowCycleOperator {

        private final VerificationMetrics metrics;
        private final Bindings bindings;
        private final DiffType diffType;
        private final ThreadLocal<RowDifferencer> tl;

        public ThreadLocalWrapper(VerificationMetrics metrics, Bindings bindings, DiffType diffType) {
            this.metrics = metrics;
            this.bindings = bindings;
            this.diffType = diffType;
            tl = ThreadLocal.withInitial(() -> new RowDifferencer(metrics, bindings, diffType));
        }

        @Override
        public int apply(Row row, long cycle) {
            return tl.get().apply(row, cycle);
        }
    }
}

View File

@ -0,0 +1,21 @@
package io.nosqlbench.activitytype.cql.statements.rowoperators.verification;
import com.codahale.metrics.Counter;
import io.nosqlbench.engine.api.activityimpl.ActivityDef;
import io.nosqlbench.engine.api.metrics.ActivityMetrics;
/**
 * Counters tracking how many rows and individual values passed or failed
 * verification, registered under the owning activity's metrics.
 */
public class VerificationMetrics {

    public final Counter verifiedRowsCounter;
    public final Counter unverifiedRowsCounter;
    public final Counter verifiedValuesCounter;
    public final Counter unverifiedValuesCounter;

    public VerificationMetrics(ActivityDef activityDef) {
        this.verifiedRowsCounter = ActivityMetrics.counter(activityDef, "verifiedrows");
        this.unverifiedRowsCounter = ActivityMetrics.counter(activityDef, "unverifiedrows");
        this.verifiedValuesCounter = ActivityMetrics.counter(activityDef, "verifiedvalues");
        this.unverifiedValuesCounter = ActivityMetrics.counter(activityDef, "unverifiedvalues");
    }
}

View File

@ -0,0 +1,53 @@
package io.nosqlbench.activitytype.cql.statements.rowoperators.verification;
import io.nosqlbench.engine.api.activityconfig.yaml.OpTemplate;
import io.nosqlbench.virtdata.core.bindings.BindingsTemplate;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
/**
 * Builds the expected-values bindings template for statements that declare
 * 'verify' or 'verify-fields' params.
 */
public class VerifierBuilder {

    /**
     * Build a bindings template for the expected values of a statement.
     * Field specs are comma-separated entries of the forms: '*' (all bindings),
     * '+name' (add), '-name' (remove), 'name' or 'field->binding'.
     *
     * @param stmtDef the op template carrying params and bindings
     * @return a bindings template producing the expected values
     * @throws RuntimeException if no verify param is present, a spec is
     *                          malformed, or a referenced binding does not exist
     */
    public static BindingsTemplate getExpectedValuesTemplate(OpTemplate stmtDef) {

        BindingsTemplate expected = new BindingsTemplate();

        if (!stmtDef.getParams().containsKey("verify-fields") && !stmtDef.getParams().containsKey("verify")) {
            throw new RuntimeException("Unable to create expected values template with no 'verify' param");
        }

        Map<String, String> reading = stmtDef.getBindings();

        List<String> fields = new ArrayList<>();
        String fieldSpec = stmtDef.getOptionalStringParam("verify-fields")
            .or(() -> stmtDef.getOptionalStringParam("verify"))
            .orElse("*");

        String[] vfields = fieldSpec.split("\\s*,\\s*");
        for (String vfield : vfields) {
            if (vfield.equals("*")) {
                reading.forEach((k, v) -> fields.add(k));
            } else if (vfield.startsWith("+")) {
                fields.add(vfield.substring(1));
            } else if (vfield.startsWith("-")) {
                fields.remove(vfield.substring(1));
            } else if (vfield.matches("\\w+(\\s*->\\s*[\\w-]+)?")) {
                // BUGFIX: the previous pattern "\\w+(\\w+->[\\w-]+)?" required at
                // least two word characters before '->' and disallowed the spaces
                // that the split below ("\\s*->\\s*") explicitly supports, so
                // specs like "a->b" or "field -> binding" were wrongly rejected.
                fields.add(vfield);
            } else {
                throw new RuntimeException("unknown verify-fields format: '" + vfield + "'");
            }
        }

        for (String vfield : fields) {
            String[] fieldNameAndBindingName = vfield.split("\\s*->\\s*", 2);
            String fieldName = fieldNameAndBindingName[0];
            String bindingName = fieldNameAndBindingName.length == 1 ? fieldName : fieldNameAndBindingName[1];
            if (!reading.containsKey(bindingName)) {
                throw new RuntimeException("binding name '" + bindingName +
                    "' referenced in verify-fields, but it is not present in available bindings.");
            }
            expected.addFieldBinding(fieldName, reading.get(bindingName));
        }
        return expected;
    }
}

View File

@ -0,0 +1,25 @@
package io.nosqlbench.activitytype.cql.statements.rsoperators;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Statement;
import io.nosqlbench.activitytype.cql.api.ResultSetCycleOperator;
import io.nosqlbench.activitytype.cql.errorhandling.exceptions.ResultSetVerificationException;
/**
 * Throws a {@link ResultSetVerificationException} unless there is exactly one row in the result set.
 */
public class AssertSingleRowResultSet implements ResultSetCycleOperator {

    @Override
    public int apply(ResultSet resultSet, Statement statement, long cycle) {
        int rowsIncoming = resultSet.getAvailableWithoutFetching();
        if (rowsIncoming != 1) {
            String detail = (rowsIncoming < 1)
                ? "no row in result set, expected exactly 1"
                : "more than one row in result set, expected exactly 1";
            throw new ResultSetVerificationException(cycle, resultSet, statement, detail);
        }
        return rowsIncoming;
    }
}

View File

@ -0,0 +1,15 @@
package io.nosqlbench.activitytype.cql.statements.rsoperators;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Statement;
import io.nosqlbench.activitytype.cql.api.ResultSetCycleOperator;
import io.nosqlbench.virtdata.library.basics.core.threadstate.SharedState;
public class ClearVars implements ResultSetCycleOperator {

    /**
     * Clear this thread's shared-state object map; the result set and
     * statement are not consulted.
     */
    @Override
    public int apply(ResultSet resultSet, Statement statement, long cycle) {
        SharedState.tl_ObjectMap.get().clear();
        return 0;
    }
}

View File

@ -0,0 +1,40 @@
package io.nosqlbench.activitytype.cql.statements.rsoperators;
import com.datastax.driver.core.*;
import io.nosqlbench.activitytype.cql.api.ResultSetCycleOperator;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;
/**
 * Logs a summary event for each result set (cycle, available rows, fetch
 * status, and the statement text), plus one trace-level line per row.
 */
public class CqlResultSetLogger implements ResultSetCycleOperator {
    private final static Logger logger = LogManager.getLogger(CqlResultSetLogger.class);

    /** Render the query text with a prefix identifying the statement flavor. */
    private static String getQueryString(Statement stmt) {
        if (stmt instanceof PreparedStatement) {
            return "(prepared) " + ((PreparedStatement) stmt).getQueryString();
        }
        if (stmt instanceof SimpleStatement) {
            return "(simple) " + ((SimpleStatement) stmt).getQueryString();
        }
        if (stmt instanceof BoundStatement) {
            return "(bound) " + ((BoundStatement) stmt).preparedStatement().getQueryString();
        }
        return "(unknown) " + stmt.toString();
    }

    @Override
    public int apply(ResultSet resultSet, Statement statement, long cycle) {
        String summary = "result-set-logger: "
            + " cycle=" + cycle
            + " rows=" + resultSet.getAvailableWithoutFetching()
            + " fetched=" + resultSet.isFullyFetched()
            + " statement=" + getQueryString(statement).stripTrailing();
        logger.debug(summary);
        for (Row row : resultSet) {
            logger.trace(row.toString());
        }
        return 0;
    }
}

View File

@ -0,0 +1,14 @@
package io.nosqlbench.activitytype.cql.statements.rsoperators;

import com.datastax.driver.core.Row;

import java.util.LinkedList;

/**
 * This contains a linked list of {@link Row} objects. This is per-thread.
 * You can use this list as a per-thread data cache for sharing data between
 * cycles in the same thread.
 */
public class PerThreadCQLData {
    // Each thread gets its own list, lazily created on first access.
    public final static ThreadLocal<LinkedList<Row>> rows = ThreadLocal.withInitial(LinkedList::new);

}

View File

@ -0,0 +1,23 @@
package io.nosqlbench.activitytype.cql.statements.rsoperators;

import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Statement;
import io.nosqlbench.activitytype.cql.api.ResultSetCycleOperator;
import io.nosqlbench.virtdata.library.basics.core.threadstate.SharedState;

import java.util.HashMap;

/**
 * Replaces the current per-thread variable map with the map popped from the
 * top of the per-thread stack. Complements {@link PushVars}.
 *
 * @throws RuntimeException when the per-thread stack is empty or does not
 *         hold a map.
 */
public class PopVars implements ResultSetCycleOperator {

    @Override
    public int apply(ResultSet resultSet, Statement statement, long cycle) {
        // pollLast() returns null when the stack is empty; instanceof is false
        // for null, so no separate null check is needed. (The previous version
        // also read tl_ObjectMap into an unused local, which has been removed.)
        Object popped = SharedState.tl_ObjectStack.get().pollLast();
        if (popped instanceof HashMap) {
            @SuppressWarnings("unchecked")
            HashMap<String, Object> restored = (HashMap<String, Object>) popped;
            SharedState.tl_ObjectMap.set(restored);
            return 0;
        }
        throw new RuntimeException("Tried to pop thread local data from stack, but there was none.");
    }
}

View File

@ -0,0 +1,14 @@
package io.nosqlbench.activitytype.cql.statements.rsoperators;

import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Statement;
import io.nosqlbench.activitytype.cql.api.ResultSetCycleOperator;

/**
 * Diagnostic operator which writes a simple rendering of the result set
 * to standard output.
 */
public class Print implements ResultSetCycleOperator {

    @Override
    public int apply(ResultSet resultSet, Statement statement, long cycle) {
        String rendered = "RS:" + resultSet.toString();
        System.out.println(rendered);
        return 0;
    }
}

View File

@ -0,0 +1,20 @@
package io.nosqlbench.activitytype.cql.statements.rsoperators;

import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Statement;
import io.nosqlbench.activitytype.cql.api.ResultSetCycleOperator;
import io.nosqlbench.virtdata.library.basics.core.threadstate.SharedState;

import java.util.HashMap;

/**
 * Pushes a copy of the current per-thread variable map onto the per-thread
 * stack, so that it can later be restored by {@link PopVars}.
 */
public class PushVars implements ResultSetCycleOperator {

    @Override
    public int apply(ResultSet resultSet, Statement statement, long cycle) {
        // Snapshot the current map so later mutations don't affect the pushed copy.
        HashMap<String, Object> snapshot = new HashMap<>(SharedState.tl_ObjectMap.get());
        SharedState.tl_ObjectStack.get().addLast(snapshot);
        return 0;
    }
}

View File

@ -0,0 +1,40 @@
package io.nosqlbench.activitytype.cql.statements.rsoperators;

import io.nosqlbench.activitytype.cql.api.ResultSetCycleOperator;

/**
 * Registry of named {@link ResultSetCycleOperator} implementations, allowing
 * operators to be selected by their symbolic name in activity parameters.
 */
public enum ResultSetCycleOperators {

    pushvars(PushVars.class),
    popvars(PopVars.class),
    clearvars(ClearVars.class),

    trace(TraceLogger.class),
    log(CqlResultSetLogger.class),
    assert_singlerow(AssertSingleRowResultSet.class),

    print(Print.class);

    private final Class<? extends ResultSetCycleOperator> implClass;

    ResultSetCycleOperators(Class<? extends ResultSetCycleOperator> implClass) {
        this.implClass = implClass;
    }

    /** @return the implementation class backing this operator name */
    public Class<? extends ResultSetCycleOperator> getImplementation() {
        return implClass;
    }

    /**
     * Instantiate the operator via its no-arg constructor.
     *
     * @throws RuntimeException wrapping any reflective instantiation failure
     */
    public ResultSetCycleOperator getInstance() {
        try {
            return getImplementation().getConstructor().newInstance();
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }

    /** Look up an operator by name and return a fresh instance of it. */
    public static ResultSetCycleOperator newOperator(String name) {
        return ResultSetCycleOperators.valueOf(name).getInstance();
    }
}

View File

@ -0,0 +1,16 @@
package io.nosqlbench.activitytype.cql.statements.rsoperators;

import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Row;
import com.datastax.driver.core.Statement;
import io.nosqlbench.activitytype.cql.api.ResultSetCycleOperator;

import java.util.LinkedList;

/**
 * NOTE(review): this operator appears incomplete. The per-thread row list from
 * {@link PerThreadCQLData} is looked up but never populated, and the result set
 * is not read, so apply() is currently a no-op. Presumably the intent is to
 * capture rows into the per-thread list — confirm before relying on it.
 */
public class RowCapture implements ResultSetCycleOperator {
    @Override
    public int apply(ResultSet resultSet, Statement statement, long cycle) {
        // Unused local; kept byte-identical pending clarification of intent.
        ThreadLocal<LinkedList<Row>> rows = PerThreadCQLData.rows;
        return 0;
    }
}

View File

@ -0,0 +1,13 @@
package io.nosqlbench.activitytype.cql.statements.rsoperators;

import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Statement;
import io.nosqlbench.activitytype.cql.api.ResultSetCycleOperator;

/**
 * NOTE(review): placeholder operator. apply() does nothing and always returns 0;
 * the name suggests it should stop a per-cycle timer — confirm intended behavior.
 */
public class StopTimerOp implements ResultSetCycleOperator {

    @Override
    public int apply(ResultSet resultSet, Statement statement, long cycle) {
        return 0;
    }
}

View File

@ -0,0 +1,97 @@
package io.nosqlbench.activitytype.cql.statements.rsoperators;

import com.datastax.driver.core.ExecutionInfo;
import com.datastax.driver.core.QueryTrace;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Statement;
import io.nosqlbench.activitytype.cql.api.ResultSetCycleOperator;
import io.nosqlbench.activitytype.cql.statements.modifiers.StatementModifier;
import io.nosqlbench.engine.api.util.SimpleConfig;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;

import java.io.FileDescriptor;
import java.io.FileWriter;
import java.io.IOException;
import java.text.SimpleDateFormat;
import java.util.Date;

/**
 * Writes a human-readable summary of the server-side query trace for every
 * cycle which is a multiple of the configured modulo. As a
 * {@link StatementModifier} it also enables tracing on those same statements
 * so that trace data is available when the result set comes back.
 */
public class TraceLogger implements ResultSetCycleOperator, StatementModifier {

    private final static Logger logger = LogManager.getLogger(TraceLogger.class);

    // SimpleDateFormat is not thread-safe and apply() may run on many threads
    // concurrently, so keep one formatter instance per thread.
    private static final ThreadLocal<SimpleDateFormat> sdf =
        ThreadLocal.withInitial(() -> new SimpleDateFormat("HH:mm:ss.SSS"));

    private final long modulo;      // only cycles where (cycle % modulo)==0 are traced
    private final String filename;  // "stdout" or a file path
    private final FileWriter writer;
    private final ThreadLocal<StringBuilder> tlsb = ThreadLocal.withInitial(StringBuilder::new);

    public TraceLogger(SimpleConfig conf) {
        this(
            conf.getLong("modulo").orElse(1L),
            conf.getString("filename").orElse("tracelog")
        );
    }

    public TraceLogger(long modulo, String filename) {
        this.modulo = modulo;
        this.filename = filename;
        try {
            if (filename.equals("stdout")) {
                writer = new FileWriter(FileDescriptor.out);
            } else {
                writer = new FileWriter(filename);
            }
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }

    @Override
    public int apply(ResultSet rs, Statement statement, long cycle) {
        if ((cycle % modulo) != 0) {
            return 0;
        }

        ExecutionInfo ei = rs.getExecutionInfo();
        QueryTrace qt = ei.getQueryTrace();
        if (qt == null) {
            // No trace data is available for this statement (for example when
            // tracing was not enabled); previously this dereferenced qt and
            // would have thrown an NPE.
            return 0;
        }

        StringBuilder sb = tlsb.get();
        sb.setLength(0);
        sb.append("\n---------------------------- QueryTrace Summary ---------------------------\n");
        sb.append("\n Coordinator: ").append(qt.getCoordinator());
        sb.append("\n Cycle: ").append(cycle);
        sb.append("\nServer-side query duration (us): ").append(qt.getDurationMicros());
        sb.append("\n Request type: ").append(qt.getRequestType());
        sb.append("\n Start time: ").append(qt.getStartedAt());
        sb.append("\n Trace UUID: ").append(qt.getTraceId());
        sb.append("\n Params: ").append(qt.getParameters());
        sb.append("\n--------------------------------------------------------------------------\n");
        sb.append("\n---------------------------- QueryTrace Events ---------------------------\n");
        SimpleDateFormat dateFormat = sdf.get();
        for (QueryTrace.Event event : qt.getEvents()) {
            sb.append("\n Date: ").append(dateFormat.format(new Date(event.getTimestamp())));
            sb.append("\n Source: ").append(event.getSource());
            sb.append("\nSourceElapsedMicros: ").append(event.getSourceElapsedMicros());
            sb.append("\n Thread: ").append(event.getThreadName());
            sb.append("\n Description: ").append(event.getDescription()).append("\n");
        }
        sb.append("\n--------------------------------------------------------------------------\n");

        try {
            // FileWriter is not safe for concurrent use; serialize appends so
            // trace records from different threads do not interleave.
            synchronized (writer) {
                writer.append(sb.toString());
                writer.flush();
            }
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
        return 0;
    }

    @Override
    public Statement modify(Statement statement, long cycle) {
        if ((cycle % modulo) == 0) {
            statement.enableTracing();
        }
        return statement;
    }
}

View File

@ -0,0 +1,29 @@
package io.nosqlbench.endpoints.cql;

import io.nosqlbench.docsys.api.WebServiceObject;
import io.nosqlbench.generators.cql.lang.CqlWorkloadGen;
import io.nosqlbench.nb.annotations.Service;
import jakarta.inject.Singleton;
import jakarta.ws.rs.POST;
import jakarta.ws.rs.Path;
import jakarta.ws.rs.core.MediaType;
import jakarta.ws.rs.core.Response;

/**
 * Web service endpoint which feeds the request body (a CQL schema) to
 * {@link CqlWorkloadGen} and returns the generated workload as plain text,
 * served at POST /services/cql/generate.
 */
@Service(value = WebServiceObject.class, selector = "cql-workload-generator")
@Path("/services/cql/")
@Singleton
public class CqlWorkloadGeneratorEndpoint implements WebServiceObject {

    @POST
    @Path("generate")
    public Response generate(String cqlSchema) {
        try {
            CqlWorkloadGen generator = new CqlWorkloadGen();
            String generated = generator.generate(cqlSchema);
            return Response.ok(generated).type(MediaType.TEXT_PLAIN).build();
        } catch (Exception e) {
            // NOTE(review): using the raw Exception as the response entity
            // relies on a provider that can serialize it; confirm this does not
            // itself fail at runtime (e.getMessage() may be safer).
            return Response.serverError().entity(e).build();
        }
    }
}

View File

@ -0,0 +1,87 @@
package io.nosqlbench.generators.cql.lang;

import io.nosqlbench.generators.cql.generated.CqlParser;
import io.nosqlbench.generators.cql.generated.CqlParserBaseListener;

import java.util.ArrayList;
import java.util.List;

/**
 * ANTLR listener which walks a parsed CQL schema and forwards table and
 * column definitions to a {@link CqlWorkloadBuffer} for workload generation.
 */
public class CQLAstBuilder extends CqlParserBaseListener {

    CqlWorkloadBuffer buf = new CqlWorkloadBuffer();

    @Override
    public void exitCreateTable(CqlParser.CreateTableContext ctx) {
        buf.newTable(ctx.keyspace().getText(), ctx.table().getText());
    }

    /**
     * Capture a column definition as a flat token stream, e.g.
     * {@code "set","<","text",">"} for {@code set<text>} or just
     * {@code "timeuuid"} for a non-parameterized type.
     */
    @Override
    public void exitColumnDefinition(CqlParser.ColumnDefinitionContext ctx) {
        List<String> typedef = new ArrayList<>();
        CqlParser.DataTypeContext dt = ctx.dataType();
        typedef.add(dt.dataTypeName().getText());
        CqlParser.DataTypeDefinitionContext dtd = dt.dataTypeDefinition();
        if (dtd != null) {
            typedef.add("<");
            dtd.dataTypeName().forEach(dtn -> {
                typedef.add(dtn.getText());
                typedef.add(",");
            });
            // Replace the trailing "," with the closing bracket. The previous
            // code removed the last token unconditionally — which stripped the
            // base type name whenever there was no type parameter list — and
            // never emitted the closing ">".
            if (",".equals(typedef.get(typedef.size() - 1))) {
                typedef.remove(typedef.size() - 1);
            }
            typedef.add(">");
        }
        buf.newColumn(
            ctx.column().OBJECT_NAME().getText(),
            typedef.toArray(new String[0])
        );
    }

    @Override
    public void exitPrimaryKeyDefinition(CqlParser.PrimaryKeyDefinitionContext ctx) {
        super.exitPrimaryKeyDefinition(ctx);
    }

    @Override
    public void exitPrimaryKeyColumn(CqlParser.PrimaryKeyColumnContext ctx) {
    }

    @Override
    public void exitPartitionKey(CqlParser.PartitionKeyContext ctx) {
    }

    @Override
    public void exitClusteringKey(CqlParser.ClusteringKeyContext ctx) {
    }

    // This is the form of a primary key that is tacked onto the end of a column def
    @Override
    public void enterPrimaryKeyColumn(CqlParser.PrimaryKeyColumnContext ctx) {
    }

    // This is the form of a primary key that is added to the column def list as an element
    @Override
    public void enterPrimaryKeyElement(CqlParser.PrimaryKeyElementContext ctx) {
    }
}

View File

@ -0,0 +1,28 @@
package io.nosqlbench.generators.cql.lang;

import org.antlr.v4.runtime.ParserRuleContext;
import org.antlr.v4.runtime.tree.ErrorNode;
import org.antlr.v4.runtime.tree.TerminalNode;
import org.antlr.v4.runtime.tree.ParseTreeListener;

/**
 * NOTE(review): skeleton {@link ParseTreeListener} with all callbacks empty.
 * It is not yet wired to a parser in this module; presumably a placeholder
 * for future tree-building logic — confirm before extending.
 */
public class CQLTreeBuilder implements ParseTreeListener {
    @Override
    public void visitTerminal(TerminalNode node) {

    }

    @Override
    public void visitErrorNode(ErrorNode node) {

    }

    @Override
    public void enterEveryRule(ParserRuleContext ctx) {

    }

    @Override
    public void exitEveryRule(ParserRuleContext ctx) {

    }
}

View File

@ -0,0 +1,23 @@
package io.nosqlbench.generators.cql.lang;

/**
 * Transfer buffer between the CQL schema parser and the workload generator.
 * NOTE(review): both methods are currently empty stubs; the buffer records
 * nothing yet.
 */
public class CqlWorkloadBuffer {

    // Called once per CREATE TABLE statement seen by the parser.
    public void newTable(String keyspace, String table) {
    }

    /**
     * type is the parsed tokens of the type definition,
     * with each type token and each bracket taking a position.
     * For example, both <pre>{@code
     * "timeuuid"
     * }</pre> and <pre>{@code
     * "set","<","text",">"
     * }</pre> are valid. This is just an opaque transfer type to
     * allow simple decoupling of the upstream parser and the workload
     * generator.
     *
     * @param colname The name of the column
     * @param type A token stream representing the type of the column
     */
    public void newColumn(String colname, String... type) {
    }
}

View File

@ -0,0 +1,9 @@
package io.nosqlbench.generators.cql.lang;

/**
 * NOTE(review): stub workload generator. generate() currently returns null
 * unconditionally; callers (e.g. the web endpoint) must tolerate a null
 * result until this is implemented.
 */
public class CqlWorkloadGen {

    // TODO: implement schema-to-workload generation; returns null for now.
    public String generate(String cqlSchema) {
        return null;
    }
}

View File

@ -0,0 +1,27 @@
package io.nosqlbench.generators.cql.lang;

import io.nosqlbench.generators.cql.generated.CqlLexer;
import io.nosqlbench.generators.cql.generated.CqlParser;
import org.antlr.v4.runtime.CharStream;
import org.antlr.v4.runtime.CharStreams;
import org.antlr.v4.runtime.CommonTokenStream;
import org.antlr.v4.runtime.Lexer;

/**
 * Entry point for parsing CQL text with the generated ANTLR lexer and parser,
 * feeding parse events to a {@link CQLAstBuilder} listener.
 */
public class ParserForCql {

    /**
     * Parse the given CQL text from the root rule. Parse events are delivered
     * to the attached {@link CQLAstBuilder}; the parse tree itself is discarded.
     */
    public static void parse(String input) {
        CharStream instream = CharStreams.fromString(input);
        Lexer lexer = new CqlLexer(instream);
        CommonTokenStream commonTokenStream = new CommonTokenStream(lexer);
        CqlParser cqlParser = new CqlParser(commonTokenStream);
        cqlParser.addParseListener(new CQLAstBuilder());
        // root() drives the full parse; the returned context was previously
        // stored in an unused local.
        cqlParser.root();
    }
}

View File

@ -0,0 +1,67 @@
# CQL Statements
This guide is a work in progress, thus it is not added to any topic index yet.
This guide is a deep dive on how NoSQLBench works with CQL statements specifically with the DataStax
Java Driver.
## Raw Statement
A raw statement is neither prepared nor parameterized. That is, it is sent as a single string with
no separate values object. The values must be parsed out during query processing.
In the driver, you might make one of these with the various statement builder APIs, or maybe even
directly like this:
new SimpleStatement(
"insert into foo.bar (baz) values ('beetle')"
);
This is the least efficient type of statement as it will always require the statement structure and
the values to be parsed out when the statement is handled on the server side.
## Parameterized Statement
A parameterized statement is one where the statement form and the parameters are provided
separately.
You can create a parameterized SimpleStatement with named parameters like this:
new SimpleStatement(
"insert into foo.bar (baz) values(:bazvalue)",
Map.of("bazvalue","beetle")
);
This shows the driver conventions for assigning a named parameter anchor in the statement
`:bazvalue`.
The positional form which uses `?` *is not recommended* for use with NoSQLBench. Named anchors allow
a basic cross-checking ability that is done automatically by NoSQLBench. Thus, positional use will
not be covered here.
### non-parameterizable fields
Some elements of CQL syntax, like row-based-access-control principle names are not parameterizable,
yet you may want to template them at the op template level in nosqlbench. This distinction is very
subtle, but important. When dealing with these forms, it is best to avoid using prepared statements,
since each operation will have a different rendered form.
## Prepared Statement
Prepared statements are the fastest way to invoke a CQL operation from the driver as they avoid
reprocessing the query form on the client and server. However, this means that they act as a
statement template which can be combined with statement parameters to yield an executable statement.
Thus, in practice, all prepared statements are also parameterized statements.
What makes prepared statement faster is that they aren't parsed by the server (or the client) once
they are prepared. Thus, part of the processing required for a raw statement has already been done
and cached with prepared statements.
Putting these together, the taxonomy of CQL statement forms supported by the NoSQLBench CQL driver
are: (TBD)

View File

@ -0,0 +1,2 @@
io.nosqlbench.virtdata.api.processors.FunctionDocInfoProcessor
io.nosqlbench.nb.annotations.ServiceProcessor

View File

@ -0,0 +1,18 @@
# cqld3 driver
This is a CQL driver based on the DataStax driver for Apache
Cassandra version 3.+.
It is identical to the previous "cql" driver except for the version of
the native driver used and the exclusion of certain DSE Capabilities, such
as graph and some extended data types. This driver is meant to be used
as a bridge until we have the 4.+ driver ready for use. The 1.9 driver
which NoSQLBench included originally is no longer actively supported.
Given the similarity to the original cql driver, the docs for both are
the same with the exception of the 'cqldriver' and 'insights' options,
which are both removed here.
Once the 4.+ driver is ready and proven out (cqld4), both the cql and the
cqld3 drivers will be gently deprecated, but they will remain in the
NoSQLBench project until they are no longer needed.

View File

@ -0,0 +1,4 @@
# cql help topics
- cql
- cql-errors
- cql-exception-list

View File

@ -0,0 +1,22 @@
package com.datastax.ebdrivers.cql;

import io.nosqlbench.activitytype.cql.core.CqlAction;
import io.nosqlbench.activitytype.cql.core.CqlActivity;
import io.nosqlbench.engine.api.activityimpl.ActivityDef;
import org.junit.Ignore;
import org.junit.Test;

/**
 * Smoke test for {@link CqlAction}. Marked {@code @Ignore}: it builds a live
 * activity from "write-telemetry.yaml" and runs a cycle, which presumably
 * requires a reachable cluster and that workload file — confirm before enabling.
 */
public class CqlActionTest {

    @Test
    @Ignore
    public void testCqlAction() {
        ActivityDef ad = ActivityDef.parseActivityDef("driver=ebdrivers;alias=foo;yaml=write-telemetry.yaml;");
        CqlActivity cac = new CqlActivity(ad);
        CqlAction cq = new CqlAction(ad, 0, cac);
        cq.init();
        cq.runCycle(5);
    }
}

View File

@ -0,0 +1,61 @@
package com.datastax.ebdrivers.cql.statements;
import io.nosqlbench.activitytype.cql.statements.core.CQLStatementDefParser;
import org.junit.Test;
import java.util.HashMap;
import java.util.List;
import static org.assertj.core.api.Assertions.assertThat;
public class CQLCQLStatementDefParserTest {
// TODO: Implment support for default values in yaml
@Test
public void testBasicParsing() {
HashMap<String, String> bindings = new HashMap<String, String>() {{
put("not", "even");
}};
CQLStatementDefParser sdp = new CQLStatementDefParser("test-name","This is ?not an error.");
CQLStatementDefParser.ParseResult r = sdp.getParseResult(bindings.keySet());
assertThat(r.hasError()).isFalse();
assertThat(r.getStatement()).isEqualTo("This is ? an error.");
assertThat(r.getMissingAnchors().size()).isEqualTo(0);
assertThat(r.getMissingGenerators().size()).isEqualTo(0);
}
@Test
public void testParsingDiagnostics() {
HashMap<String, String> bindings = new HashMap<String, String>() {{
put("BINDABLE", "two");
put("EXTRABINDING", "5");
}};
CQLStatementDefParser sdp = new CQLStatementDefParser("test-name","This is a test of ?BINDABLE interpolation and ?MISSINGBINDING.");
List<String> bindableNames = sdp.getBindableNames();
CQLStatementDefParser.ParseResult result = sdp.getParseResult(bindings.keySet());
assertThat(result.hasError()).isTrue();
assertThat(result.getStatement()).isEqualTo("This is a test of ? interpolation and ?.");
assertThat(result.getMissingAnchors().size()).isEqualTo(1);
assertThat(result.getMissingGenerators().size()).isEqualTo(1);
assertThat(result.getMissingAnchors()).contains("EXTRABINDING");
assertThat(result.getMissingGenerators()).contains("MISSINGBINDING");
}
@Test
public void testParsingPatterns() {
HashMap<String, String> bindings = new HashMap<String, String>() {{
put("B-1", "one");
put("B_-1.2", "two");
}};
CQLStatementDefParser sdp = new CQLStatementDefParser("test-name","This is a test of ?B-1 and {B_-1.2}");
List<String> bindableNames = sdp.getBindableNames();
assertThat(bindableNames).containsExactly("B-1","B_-1.2");
CQLStatementDefParser.ParseResult parseResult = sdp.getParseResult(bindings.keySet());
assertThat(parseResult.hasError()).isFalse();
assertThat(parseResult.getStatement()).isEqualTo("This is a test of ? and ?");
}
}

View File

@ -0,0 +1,79 @@
package com.datastax.ebdrivers.cql.statements;

import com.datastax.driver.core.HostDistance;
import com.datastax.driver.core.PoolingOptions;
import com.datastax.driver.core.SocketOptions;
import com.datastax.driver.core.policies.LoadBalancingPolicy;
import com.datastax.driver.core.policies.ReconnectionPolicy;
import com.datastax.driver.core.policies.RetryPolicy;
import com.datastax.driver.core.policies.SpeculativeExecutionPolicy;
import io.nosqlbench.activitytype.cql.core.CQLOptions;
import org.junit.Test;

import static org.assertj.core.api.Assertions.assertThat;

/**
 * Exercises the string-pattern parsers in CQLOptions which translate activity
 * parameter strings into driver policy/options objects.
 */
public class CQLOptionsTest {

    // "p99:5" = percentile-based speculative execution; the optional third
    // field ("5000ms") is accepted too.
    @Test
    public void testSpeculative() {
        SpeculativeExecutionPolicy p1 = CQLOptions.speculativeFor("p99:5");
        assertThat(p1).isNotNull();
        SpeculativeExecutionPolicy p2 = CQLOptions.speculativeFor("p99:5:5000ms");
        assertThat(p2).isNotNull();
    }

    // "5000ms:5" = constant-delay speculative execution.
    @Test
    public void testConstant() {
        SpeculativeExecutionPolicy p1 = CQLOptions.speculativeFor("5000ms:5");
        assertThat(p1).isNotNull();
    }

    // Comma-separated host[:port] list for the whitelist load balancing policy.
    @Test
    public void testWhitelist() {
        LoadBalancingPolicy lbp = CQLOptions.whitelistFor("127.0.0.1,127.0.0.2:123", null);
        assertThat(lbp).isNotNull();
    }

    // Only checks that both reconnect policy forms parse without throwing.
    @Test
    public void testReconnectPolicyPatterns() {
        ReconnectionPolicy rp = CQLOptions.reconnectPolicyFor("exponential(123,321)");
        rp = CQLOptions.reconnectPolicyFor("constant(123)");
    }

    // The socket-options parser accepts '=', ':' as key/value separators and
    // ',' or ';' between entries, as mixed below.
    @Test
    public void testSocketOptionPatterns() {
        SocketOptions so = CQLOptions.socketOptionsFor("read_timeout_ms=23423,connect_timeout_ms=2344;keep_alive:true,reuse_address:true;so_linger:323;tcp_no_delay=true;receive_buffer_size:100,send_buffer_size=1000");
        assertThat(so.getConnectTimeoutMillis()).isEqualTo(2344);
        assertThat(so.getKeepAlive()).isEqualTo(true);
        assertThat(so.getReadTimeoutMillis()).isEqualTo(23423);
        assertThat(so.getReceiveBufferSize()).isEqualTo(100);
        assertThat(so.getReuseAddress()).isEqualTo(true);
        assertThat(so.getSendBufferSize()).isEqualTo(1000);
        assertThat(so.getSoLinger()).isEqualTo(323);
        assertThat(so.getTcpNoDelay()).isEqualTo(true);
    }

    // Pooling patterns: a single number sets core local connections only
    // (unset values stay Integer.MIN_VALUE); "core:max:requests" triples apply
    // to LOCAL then REMOTE; named keys set heartbeat/idle/pool timeouts.
    @Test
    public void testConnectionsPatterns() {
        PoolingOptions po = CQLOptions.poolingOptionsFor("2345");
        assertThat(po.getCoreConnectionsPerHost(HostDistance.LOCAL)).isEqualTo(2345);
        assertThat(po.getMaxConnectionsPerHost(HostDistance.LOCAL)).isEqualTo(Integer.MIN_VALUE);
        assertThat(po.getMaxRequestsPerConnection(HostDistance.LOCAL)).isEqualTo(Integer.MIN_VALUE);

        PoolingOptions po2 = CQLOptions.poolingOptionsFor("1:2:3,4:5:6");
        assertThat(po2.getCoreConnectionsPerHost(HostDistance.LOCAL)).isEqualTo(1);
        assertThat(po2.getMaxConnectionsPerHost(HostDistance.LOCAL)).isEqualTo(2);
        assertThat(po2.getMaxRequestsPerConnection(HostDistance.LOCAL)).isEqualTo(3);
        assertThat(po2.getCoreConnectionsPerHost(HostDistance.REMOTE)).isEqualTo(4);
        assertThat(po2.getMaxConnectionsPerHost(HostDistance.REMOTE)).isEqualTo(5);
        assertThat(po2.getMaxRequestsPerConnection(HostDistance.REMOTE)).isEqualTo(6);

        PoolingOptions po3 = CQLOptions.poolingOptionsFor("1:2:3,4:5:6,heartbeat_interval_s:100,idle_timeout_s:123,pool_timeout_ms:234");
        assertThat(po3.getIdleTimeoutSeconds()).isEqualTo(123);
        assertThat(po3.getPoolTimeoutMillis()).isEqualTo(234);
        assertThat(po3.getHeartbeatIntervalSeconds()).isEqualTo(100);
    }
}

View File

@ -0,0 +1,14 @@
package io.nosqlbench.activitytype.cql.core;

import org.junit.Test;

import static org.assertj.core.api.Assertions.assertThat;

/**
 * Verifies that canonicalizeBindings rewrites legacy "?name" binding
 * anchors into the "{name}" form.
 */
public class CqlActivityTest {

    @Test
    public void testCanonicalize() {
        String cb = CqlActivity.canonicalizeBindings("A ?b C");
        assertThat(cb).isEqualTo("A {b} C");
    }
}

View File

@ -0,0 +1,51 @@
package io.nosqlbench.generators.cql.lang;

import org.junit.Test;

import java.io.IOException;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.nio.file.*;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.List;
import java.util.stream.Stream;

/**
 * Round-trips every example CQL file found under "cql3_examples" on the
 * classpath through {@link ParserForCql#parse(String)}.
 */
public class ParserForCqlTest {

    @Test
    public void parseAll() {
        List<Path> cql3_examples = getSubPaths("cql3_examples");
        for (Path examplePath : cql3_examples) {
            try {
                String example = Files.readString(examplePath, StandardCharsets.UTF_8);
                ParserForCql.parse(example);
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        }
    }

    /**
     * Collect every regular file under each classpath resource directory named
     * {@code resourcePath}.
     */
    private static List<Path> getSubPaths(String resourcePath) {
        List<Path> subpaths = new ArrayList<>();
        try {
            Enumeration<URL> resources = ParserForCqlTest.class.getClassLoader().getResources(resourcePath);
            while (resources.hasMoreElements()) {
                URL url = resources.nextElement();
                System.out.println("url=" + url.toExternalForm());
                Path path = Paths.get(url.toURI());
                // Files.walk returns a lazily-populated Stream backed by open
                // directory handles; close it to avoid leaking file handles.
                try (Stream<Path> walk = Files.walk(path, FileVisitOption.FOLLOW_LINKS)) {
                    walk.filter(p -> !Files.isDirectory(p, LinkOption.NOFOLLOW_LINKS))
                        .forEach(subpaths::add);
                }
            }
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
        return subpaths;
    }
}

View File

@ -0,0 +1,14 @@
# Read workload: fetch up to 10 telemetry rows for a generated
# (source, epoch_hour, param) predicate. <<NAME:default>> anchors are
# template parameters resolved at activity configuration time.
tags:
  group: read
statements:
  - name: read-telemetry
    statement: |
      select * from <<KEYSPACE:testks>>.<<TABLE:testtable>>_telemetry
      where source={source}
      and epoch_hour={epoch_hour}
      and param={param}
      limit 10
    bindings:
      source: ThreadNumGenerator
      epoch_hour: DateSequenceFieldGenerator(1000,'YYYY-MM-dd-HH')
      param: LineExtractGenerator('data/variable_words.txt')

View File

@ -0,0 +1,6 @@
ALTER KEYSPACE cycling
WITH REPLICATION = {
'class' : 'NetworkTopologyStrategy',
'datacenter1' : 3 }
AND DURABLE_WRITES = false ;

View File

@ -0,0 +1,3 @@
ALTER MATERIALIZED VIEW cycling.cyclist_by_age
WITH comment = 'A most excellent and useful view'
AND bloom_filter_fp_chance = 0.02;

View File

@ -0,0 +1 @@
ALTER ROLE coach WITH PASSWORD='bestTeam';

Some files were not shown because too many files have changed in this diff Show More