mirror of https://github.com/nosqlbench/nosqlbench.git (synced 2025-02-25 18:55:28 -06:00)

reset cqld4 for reinit

commit a27ea6cdb2
parent 11348a394d

@@ -1,50 +0,0 @@
package com.datastax.driver.core;

import com.datastax.oss.driver.api.core.ProtocolVersion;
import com.datastax.oss.driver.api.core.cql.Statement;
import com.datastax.oss.driver.api.core.metadata.Metadata;
import com.datastax.oss.driver.api.core.metadata.TokenMap;
import com.datastax.oss.driver.api.core.metadata.token.Token;
import com.datastax.oss.driver.api.core.metadata.token.TokenRange;
import com.datastax.oss.driver.api.core.session.Session;
import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry;
import com.datastax.oss.driver.internal.core.metadata.token.Murmur3Token;
import org.jetbrains.annotations.NotNull;

import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;
import java.util.OptionalLong;
import java.util.Set;

public class M3PTokenFilter {

    private final TokenRange[] ranges;

    public M3PTokenFilter(Set<TokenRange> ranges, Session session) {
        TokenMap tokenMap = session.getMetadata().getTokenMap().orElseThrow();

        List<TokenRange> rangelist = new ArrayList<>(ranges);

        this.ranges = rangelist.toArray(new TokenRange[0]);
        if (this.ranges.length < 1) {
            throw new RuntimeException("There were no tokens found. Please check your keyspace and cluster settings.");
        }
    }

    public boolean matches(Statement statement) {
        Token token = statement.getRoutingToken();

        for (TokenRange range : ranges) {
            if (range.contains(token)) {
                return true;
            }
        }
        return false;
    }

}

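The deleted filter above needs only a set of token ranges and a session. A minimal usage sketch (not part of this commit; it assumes a reachable cluster and uses the driver's TokenMap to supply all known ranges):

    import com.datastax.driver.core.M3PTokenFilter;
    import com.datastax.oss.driver.api.core.CqlSession;
    import com.datastax.oss.driver.api.core.metadata.token.TokenRange;
    import java.util.Set;

    public class M3PTokenFilterSketch {
        public static void main(String[] args) {
            try (CqlSession session = CqlSession.builder().build()) {
                // All token ranges known to the cluster, per the driver's token metadata.
                Set<TokenRange> allRanges =
                    session.getMetadata().getTokenMap().orElseThrow().getTokenRanges();
                M3PTokenFilter filter = new M3PTokenFilter(allRanges, session);
                // filter.matches(stmt) now tests whether stmt's routing token
                // falls within any of the captured ranges.
            }
        }
    }
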
@@ -1,70 +0,0 @@
package com.datastax.driver.core;

import com.datastax.oss.driver.api.core.ProtocolVersion;
import com.datastax.oss.driver.api.core.cql.Statement;
import com.datastax.oss.driver.api.core.metadata.Metadata;
import com.datastax.oss.driver.api.core.metadata.token.Token;
import com.datastax.oss.driver.api.core.metadata.token.TokenRange;
import com.datastax.oss.driver.api.core.session.Session;
import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry;
import com.datastax.oss.driver.internal.core.metadata.token.Murmur3Token;
import com.datastax.oss.driver.internal.core.metadata.token.Murmur3TokenFactory;
import com.datastax.oss.driver.internal.core.metadata.token.Murmur3TokenRange;
import io.nosqlbench.activitytype.cqld4.api.StatementFilter;

import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

public class TokenRangeStmtFilter implements StatementFilter {

    private final Metadata clusterMetadata;
    private final ProtocolVersion protocolVersion;
    private final CodecRegistry codecRegistry;
    // private final Token.Factory factory;
    private TokenRange[] ranges;

    public TokenRangeStmtFilter(Session session, String rangesSpec) {
        clusterMetadata = session.getMetadata();
        protocolVersion = session.getContext().getProtocolVersion();
        codecRegistry = session.getContext().getCodecRegistry();
        ranges = parseRanges(session, rangesSpec);
    }

    private TokenRange[] parseRanges(Session session, String rangesStr) {
        String[] ranges = rangesStr.split(",");
        List<TokenRange> tr = new ArrayList<>();

        for (String range : ranges) {
            String[] interval = range.split(":");
            Murmur3TokenFactory m3f = new Murmur3TokenFactory();
            Token start = m3f.parse(interval[0]);
            Token end = m3f.parse(interval[1]);
            TokenRange tokenRange = m3f.range(start, end);
            tr.add(tokenRange);
        }
        return tr.toArray(new TokenRange[0]);
    }

    @Override
    public boolean matches(Statement<?> statement) {
        Token routingToken = statement.getRoutingToken();
        for (TokenRange range : ranges) {
            if (range.contains(routingToken)) {
                return true;
            }
        }
        return false;
    }

    @Override
    public String toString() {
        return "including token ranges: " +
            Arrays.stream(ranges)
                .map(String::valueOf)
                .collect(Collectors.joining(","));
    }
}

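As parseRanges shows, the rangesSpec argument is a comma-separated list of start:end Murmur3 token intervals. A usage sketch (token values are illustrative; assumes an already-connected session):

    import com.datastax.driver.core.TokenRangeStmtFilter;
    import com.datastax.oss.driver.api.core.CqlSession;
    import com.datastax.oss.driver.api.core.cql.SimpleStatement;

    public class TokenRangeFilterSketch {
        // Returns true only when the statement's routing token lands in one
        // of the two example intervals given in the spec string.
        static boolean included(CqlSession session, SimpleStatement stmt) {
            TokenRangeStmtFilter filter = new TokenRangeStmtFilter(
                session, "-9223372036854775808:-1,0:4611686018427387903");
            return filter.matches(stmt);
        }
    }
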
@@ -1,19 +0,0 @@
package io.nosqlbench.activitytype.cqld4.api;

import com.datastax.oss.driver.api.core.cql.AsyncResultSet;
import com.datastax.oss.driver.api.core.cql.ResultSet;
import com.datastax.oss.driver.api.core.cql.Statement;

/**
 * An operator interface for performing a modular action on CQL ResultSets per-cycle.
 */
public interface D4ResultSetCycleOperator {
    /**
     * Perform an action on a result set for a specific cycle.
     * @param pageInfo The ResultSet for the given cycle
     * @param statement The statement for the given cycle
     * @param cycle The cycle for which the statement was submitted
     * @return A value, only meaningful when used with aggregated operators
     */
    int apply(AsyncResultSet pageInfo, Statement<?> statement, long cycle);
}

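A minimal illustrative implementation (not part of this commit): an operator that counts the rows on the current page and reports the count as its cycle value:

    import com.datastax.oss.driver.api.core.cql.AsyncResultSet;
    import com.datastax.oss.driver.api.core.cql.Row;
    import com.datastax.oss.driver.api.core.cql.Statement;
    import io.nosqlbench.activitytype.cqld4.api.D4ResultSetCycleOperator;

    public class PageRowCounter implements D4ResultSetCycleOperator {
        @Override
        public int apply(AsyncResultSet pageInfo, Statement<?> statement, long cycle) {
            int rows = 0;
            for (Row row : pageInfo.currentPage()) {
                rows++; // only the already-fetched page is iterated; no extra fetches
            }
            return rows;
        }
    }
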
@@ -1,23 +0,0 @@
package io.nosqlbench.activitytype.cqld4.api;

/**
 * When an error filter allows us to see and handle an error in a specific way,
 * the ErrorResponse determines exactly how we handle it. Each level represents
 * a starting point in handling, including everything after the starting point.
 * The first enum is the most severe response.
 */
public enum ErrorResponse {

    stop("S"),      // rethrow this error to the runtime, forcing it to handle the error or stop
    warn("W"),      // log a warning with some details about this error
    retry("R"),     // resubmit this operation up to the available tries
    histogram("H"), // record this metric in a histogram
    count("C"),     // count this metric separately
    ignore("I");    // do nothing

    private final String symbol;

    ErrorResponse(String symbol) {
        this.symbol = symbol;
    }
}

@@ -1,11 +0,0 @@
package io.nosqlbench.activitytype.cqld4.api;

import com.datastax.oss.driver.api.core.cql.Row;

/**
 * An operator interface for consuming rows from a result set and producing
 * some int that can be used as a status code in activities.
 */
public interface RowCycleOperator {
    int apply(Row row, long cycle);
}

@@ -1,7 +0,0 @@
package io.nosqlbench.activitytype.cqld4.api;

import com.datastax.oss.driver.api.core.cql.Statement;

public interface StatementFilter {
    boolean matches(Statement<?> statement);
}

@@ -1,7 +0,0 @@
package io.nosqlbench.activitytype.cqld4.api;

public enum VerifyApplied {
    ignore,
    error,
    retry
}

@@ -1,12 +0,0 @@
package io.nosqlbench.activitytype.cqld4.codecsupport;

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;

@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.TYPE)
public @interface CQLUserTypeNames {
    String[] value();
}

@@ -1,12 +0,0 @@
package io.nosqlbench.activitytype.cqld4.codecsupport;

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;

@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.TYPE)
public @interface UDTCodecClasses {
    Class<? extends UDTTransformCodec>[] value();
}

@@ -1,31 +0,0 @@
package io.nosqlbench.activitytype.cqld4.codecsupport;

import com.datastax.oss.driver.api.core.session.Session;
import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;

import java.util.ArrayList;
import java.util.List;
import java.util.ServiceLoader;

public class UDTCodecInjector {
    private final static Logger logger = LogManager.getLogger(UDTCodecInjector.class);

    private final List<UserCodecProvider> codecProviders = new ArrayList<>();

    public void injectUserProvidedCodecs(Session session, boolean allowAcrossKeyspaces) {

        CodecRegistry registry = session.getContext().getCodecRegistry();

        ServiceLoader<UserCodecProvider> codecLoader = ServiceLoader.load(UserCodecProvider.class);

        for (UserCodecProvider userCodecProvider : codecLoader) {
            codecProviders.add(userCodecProvider);
        }

        for (UserCodecProvider codecProvider : codecProviders) {
            // pass the caller's setting through rather than hard-coding true
            codecProvider.registerCodecsForCluster(session, allowAcrossKeyspaces);
        }
    }
}

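Since providers are discovered with java.util.ServiceLoader, an implementation is picked up only if it is registered in a provider-configuration file on the classpath. A sketch of such a registration (the provider class name is hypothetical):

    # src/main/resources/META-INF/services/io.nosqlbench.activitytype.cqld4.codecsupport.UserCodecProvider
    com.example.codecs.MyCodecProvider
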
@@ -1,12 +0,0 @@
package io.nosqlbench.activitytype.cqld4.codecsupport;

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;

@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.TYPE)
public @interface UDTJavaType {
    Class<?> value();
}

@@ -1,31 +0,0 @@
package io.nosqlbench.activitytype.cqld4.codecsupport;

import com.datastax.oss.driver.api.core.data.UdtValue;
import com.datastax.oss.driver.api.core.type.codec.MappingCodec;
import com.datastax.oss.driver.api.core.type.codec.TypeCodec;
import com.datastax.oss.driver.api.core.type.reflect.GenericType;
import edu.umd.cs.findbugs.annotations.NonNull;

public abstract class UDTTransformCodec<T> extends MappingCodec<T, UdtValue> {

    // protected UserType userType;

    public UDTTransformCodec(
        @NonNull TypeCodec<T> innerCodec,
        @NonNull GenericType<UdtValue> outerJavaType
    ) {
        super(innerCodec, outerJavaType);
    }

    // public UDTTransformCodec(GenericType userType, Class<T> javaType) {
    //     super(TypeCodec.userType(userType), javaType);
    //     this.userType = userType;
    // }

    // public UserType getUserType() {
    //     return userType;
    // }

}

@@ -1,146 +0,0 @@
package io.nosqlbench.activitytype.cqld4.codecsupport;

import com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata;
import com.datastax.oss.driver.api.core.session.Session;
import com.datastax.oss.driver.api.core.type.UserDefinedType;
import com.datastax.oss.driver.api.core.type.codec.TypeCodec;
import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry;
import com.datastax.oss.driver.api.core.type.codec.registry.MutableCodecRegistry;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;

import java.lang.reflect.Constructor;
import java.util.*;
import java.util.stream.Collectors;

public abstract class UserCodecProvider {

    private final static Logger logger = LogManager.getLogger(UserCodecProvider.class);

    public List<UDTTransformCodec<?>> registerCodecsForCluster(
        Session session,
        boolean allowAcrossKeyspaces
    ) {
        List<UDTTransformCodec<?>> typeCodecs = new ArrayList<>();

        List<KeyspaceMetadata> ksMetas = new ArrayList<>(session.getMetadata().getKeyspaces().values());

        for (KeyspaceMetadata keyspace : ksMetas) {

            List<UDTTransformCodec> keyspaceCodecs = registerCodecsForKeyspace(session, keyspace.getName().toString());

            for (UDTTransformCodec typeCodec : keyspaceCodecs) {
                if (typeCodecs.contains(typeCodec) && !allowAcrossKeyspaces) {
                    throw new RuntimeException("codec " + typeCodec + " could be registered " +
                        "in multiple keyspaces, but this is not allowed.");
                }
                typeCodecs.add(typeCodec);
                logger.debug("Found user-provided codec for ks:" + keyspace + ", udt:" + typeCodec);
            }
        }
        return typeCodecs;
    }

    public List<UDTTransformCodec> registerCodecsForKeyspace(Session session, String keyspace) {

        CodecRegistry registry = session.getContext().getCodecRegistry();

        List<UDTTransformCodec> codecsForKeyspace = new ArrayList<>();

        // orElse(null) rather than orElseThrow(), so that the missing-keyspace
        // warning below is actually reachable
        KeyspaceMetadata ksMeta = session.getMetadata().getKeyspace(keyspace).orElse(null);
        if (ksMeta == null) {
            logger.warn("No metadata for " + keyspace);
            return Collections.emptyList();
        }
        Collection<UserDefinedType> typesInKeyspace = ksMeta.getUserDefinedTypes().values();

        List<Class<? extends UDTTransformCodec>> providedCodecClasses = getUDTCodecClasses();

        Map<UserDefinedType, Class<? extends UDTTransformCodec>> codecMap = new HashMap<>();

        for (Class<? extends TypeCodec> providedCodecClass : providedCodecClasses) {
            Class<? extends UDTTransformCodec> udtCodecClass = (Class<? extends UDTTransformCodec>) providedCodecClass;

            List<String> targetUDTTypes = getUDTTypeNames(udtCodecClass);
            for (UserDefinedType keyspaceUserType : typesInKeyspace) {
                String ksTypeName = keyspaceUserType.getName().toString();
                String globalTypeName = (ksTypeName.contains(".") ? ksTypeName.split("\\.", 2)[1] : ksTypeName);
                if (targetUDTTypes.contains(ksTypeName) || targetUDTTypes.contains(globalTypeName)) {
                    codecMap.put(keyspaceUserType, udtCodecClass);
                }
            }
        }

        for (UserDefinedType userType : codecMap.keySet()) {
            Class<? extends UDTTransformCodec> codecClass = codecMap.get(userType);
            Class<?> udtJavaType = getUDTJavaType(codecClass);
            UDTTransformCodec udtCodec = instantiate(userType, codecClass, udtJavaType);
            codecsForKeyspace.add(udtCodec);
            ((MutableCodecRegistry) registry).register(udtCodec);

            logger.info("registered codec:" + udtCodec);
        }

        return codecsForKeyspace;
    }

    private UDTTransformCodec instantiate(UserDefinedType key, Class<? extends UDTTransformCodec> codecClass,
                                          Class<?> javaType) {
        try {
            Constructor<? extends UDTTransformCodec> ctor = codecClass.getConstructor(UserDefinedType.class, Class.class);
            UDTTransformCodec typeCodec = ctor.newInstance(key, javaType);
            return typeCodec;
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }

    private List<Class<? extends UDTTransformCodec>> getUDTCodecClasses() {
        UDTCodecClasses[] annotationsByType = this.getClass().getAnnotationsByType(UDTCodecClasses.class);
        List<Class<? extends UDTTransformCodec>> codecClasses = Arrays.stream(annotationsByType)
            .map(UDTCodecClasses::value)
            .flatMap(Arrays::stream)
            .collect(Collectors.toList());
        return codecClasses;
    }

    /**
     * Allows simple annotation of implementations of this class to use
     * {@code @CQLUserTypeNames({"type1","type2",...})}
     *
     * @param codecClass the UDTTransformCodec class which is to be inspected
     * @return The list of target UDT type names, as defined in CQL
     */
    private List<String> getUDTTypeNames(Class<? extends UDTTransformCodec> codecClass) {
        CQLUserTypeNames[] annotationsByType = codecClass.getAnnotationsByType(CQLUserTypeNames.class);
        List<String> cqlTypeNames = new ArrayList<>();

        for (CQLUserTypeNames cqlUserTypeNames : annotationsByType) {
            cqlTypeNames.addAll(Arrays.asList(cqlUserTypeNames.value()));
        }
        return cqlTypeNames;
    }

    /**
     * Allows simple annotation of implementations of this class to use
     * {@code @UDTJavaType(POJOType.class)}
     *
     * @param codecClass the UDTTransformCodec class which is to be inspected
     * @return The class type of the POJO which this codec maps to and from
     */
    private Class<?> getUDTJavaType(Class<? extends UDTTransformCodec> codecClass) {
        UDTJavaType[] annotationsByType = codecClass.getAnnotationsByType(UDTJavaType.class);
        Class<?> javaType = Arrays.stream(annotationsByType)
            .map(UDTJavaType::value)
            .findFirst()
            .orElseThrow(
                () -> new RuntimeException("Unable to find UDTJavaType annotation for " + codecClass.getCanonicalName())
            );
        return javaType;
    }

}

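The annotation plumbing above can be exercised without a live cluster. A self-contained sketch (the stub annotation and class names are hypothetical stand-ins) of the getAnnotationsByType lookup that getUDTTypeNames performs:

    import java.lang.annotation.ElementType;
    import java.lang.annotation.Retention;
    import java.lang.annotation.RetentionPolicy;
    import java.lang.annotation.Target;
    import java.util.Arrays;

    public class AnnotationLookupSketch {
        @Retention(RetentionPolicy.RUNTIME)
        @Target(ElementType.TYPE)
        @interface CQLUserTypeNames { String[] value(); }

        @CQLUserTypeNames({"address", "location"})
        static class AddressCodecStub {}

        public static void main(String[] args) {
            // Mirrors getUDTTypeNames(...): collect the declared CQL UDT names.
            for (CQLUserTypeNames names :
                AddressCodecStub.class.getAnnotationsByType(CQLUserTypeNames.class)) {
                System.out.println(Arrays.toString(names.value())); // [address, location]
            }
        }
    }
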
@@ -1,92 +0,0 @@
package io.nosqlbench.activitytype.cqld4.config;

import com.datastax.oss.driver.api.core.config.DriverOption;
import com.datastax.oss.driver.api.core.config.OptionsMap;
import com.datastax.oss.driver.api.core.config.TypedDriverOption;
import com.datastax.oss.driver.api.core.data.CqlDuration;

import java.math.BigDecimal;
import java.math.BigInteger;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.time.*;
import java.util.UUID;

public class CQLD4OptionsMapper {

    public static void apply(OptionsMap optionsMap, String name, String value) {

        for (TypedDriverOption<?> builtin : TypedDriverOption.builtInValues()) {
            DriverOption rawOption = builtin.getRawOption();
            String path = rawOption.getPath();
            if (name.equals(path)) {
                Class<?> rawType = builtin.getExpectedType().getRawType();
                Object convertedValue = adaptTypeValue(value, rawType, name);
                TypedDriverOption<? super Object> option = (TypedDriverOption<? super Object>) builtin;
                optionsMap.put(option, convertedValue);
                return;
            }
        }

        throw new RuntimeException("Driver option " + name + " was not found in the available options.");
    }

    private static Object adaptTypeValue(String value, Class<?> rawOption, String optionName) {
        switch (rawOption.getCanonicalName()) {
            case "java.lang.Boolean":
                return Boolean.parseBoolean(value);
            case "java.lang.Byte":
                return Byte.parseByte(value);
            case "java.lang.Double":
                return Double.parseDouble(value);
            case "java.lang.Float":
                return Float.parseFloat(value);
            case "java.lang.Integer":
                return Integer.parseInt(value);
            case "java.lang.Long":
                return Long.parseLong(value);
            case "java.lang.Short":
                return Short.parseShort(value);
            case "java.time.Instant":
                return Instant.parse(value);
            case "java.time.ZonedDateTime":
                return ZonedDateTime.parse(value);
            case "java.time.LocalDate":
                return LocalDate.parse(value);
            case "java.time.LocalTime":
                return LocalTime.parse(value);
            case "java.nio.ByteBuffer":
                return ByteBuffer.wrap(value.getBytes(StandardCharsets.UTF_8)); // What else to do here?
            case "java.lang.String":
                return value;
            case "java.math.BigInteger":
                return new BigInteger(value);
            case "java.math.BigDecimal":
                return new BigDecimal(value);
            case "java.util.UUID":
                return UUID.fromString(value);
            case "java.net.InetAddress":
                try {
                    return InetAddress.getByName(value);
                } catch (UnknownHostException e) {
                    throw new RuntimeException(e);
                }
            case "com.datastax.oss.driver.api.core.data.CqlDuration":
                return CqlDuration.from(value);
            case "java.time.Duration": // was "java.time.Duration:", with a stray colon that could never match
                return Duration.parse(value);
            default:
                // These appear to be valid types, but there is no record of them used in driver configuration,
                // nor a convenient way to convert them directly from known type and string value without invoking
                // connected metadata machinery from an active session.
                // case "com.datastax.oss.driver.api.core.data.TupleValue":
                // case "com.datastax.oss.driver.api.core.data.UdtValue":

                throw new RuntimeException("The type converter for driver option named " + optionName + " was not " +
                    "found, or is unimplemented. Please file an issue at nosqlbench.io");
        }
    }

}

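A usage sketch (the option paths shown are the driver's standard config paths; with the Duration case fixed above, ISO-8601 strings parse as expected):

    import com.datastax.oss.driver.api.core.config.OptionsMap;
    import io.nosqlbench.activitytype.cqld4.config.CQLD4OptionsMapper;

    public class OptionsMapperSketch {
        public static void main(String[] args) {
            OptionsMap options = OptionsMap.driverDefaults();
            // Duration-typed option, converted by Duration.parse("PT5S"):
            CQLD4OptionsMapper.apply(options, "basic.request.timeout", "PT5S");
            // Integer-typed option, converted by Integer.parseInt:
            CQLD4OptionsMapper.apply(options, "basic.request.page-size", "1000");
        }
    }
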
@@ -1,100 +0,0 @@
package io.nosqlbench.activitytype.cqld4.core;

import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.ProtocolVersion;
import com.datastax.oss.driver.api.core.cql.*;
import com.datastax.oss.driver.api.core.session.Session;
import com.datastax.oss.driver.api.core.type.DataType;
import com.datastax.oss.driver.api.core.type.codec.TypeCodec;
import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry;
import io.nosqlbench.engine.api.activityconfig.ParsedStmt;
import io.nosqlbench.engine.api.activityconfig.yaml.StmtDef;
import io.nosqlbench.virtdata.api.bindings.VALUE;

import java.nio.ByteBuffer;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class CQLBindHelper {

    private final ProtocolVersion protocolVersion;
    private final CodecRegistry codecRegistry;
    // private final ColumnDefinitions definitions;

    // reference: ProtocolConstants.DataType

    public CQLBindHelper(CqlSession session) {
        this.protocolVersion = session.getContext().getProtocolVersion();
        this.codecRegistry = session.getContext().getCodecRegistry();
    }

    private final static Pattern stmtToken = Pattern.compile("\\?(\\w+[-_\\d\\w]*)|\\{(\\w+[-_\\d\\w.]*)}");

    public Statement<?> rebindUnappliedStatement(
        Statement<?> statement,
        ColumnDefinitions defs,
        Row row) {

        if (!(statement instanceof BoundStatement)) {
            throw new RuntimeException("Unable to rebind a non-bound statement: " + statement.toString());
        }

        BoundStatement bound = (BoundStatement) statement;

        for (ColumnDefinition def : defs) {
            ByteBuffer byteBuffer = row.getByteBuffer(def.getName());
            bound = bound.setBytesUnsafe(def.getName(), byteBuffer);
        }
        return bound;
    }

    public BoundStatement bindStatement(Statement<?> statement, String name, Object value, DataType dataType) {

        if (!(statement instanceof BoundStatement)) {
            throw new RuntimeException("only BoundStatement is supported here");
        }
        BoundStatement bound = (BoundStatement) statement;

        if (value == VALUE.unset) {
            return bound.unset(name);
        } else {
            TypeCodec<Object> codec = codecRegistry.codecFor(dataType);
            return bound.set(name, value, codec);
        }
    }

    public static Map<String, String> parseAndGetSpecificBindings(StmtDef stmtDef, ParsedStmt parsed) {
        List<String> spans = new ArrayList<>();

        String statement = stmtDef.getStmt();

        Set<String> extraBindings = new HashSet<>();
        extraBindings.addAll(stmtDef.getBindings().keySet());
        Map<String, String> specificBindings = new LinkedHashMap<>();

        Matcher m = stmtToken.matcher(statement);
        int lastMatch = 0;
        String remainder = "";
        while (m.find(lastMatch)) {
            String pre = statement.substring(lastMatch, m.start());

            String form1 = m.group(1);
            String form2 = m.group(2);
            String tokenName = (form1 != null && !form1.isEmpty()) ? form1 : form2;
            lastMatch = m.end();
            spans.add(pre);

            if (extraBindings.contains(tokenName)) {
                if (specificBindings.get(tokenName) != null) {
                    String postfix = UUID.randomUUID().toString();
                    specificBindings.put(tokenName + postfix, stmtDef.getBindings().get(tokenName));
                } else {
                    specificBindings.put(tokenName, stmtDef.getBindings().get(tokenName));
                }
            }
        }
        return specificBindings;
    }
}

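The stmtToken pattern recognizes both ?name and {name} binding anchors. A self-contained sketch of how it tokenizes a statement template:

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class StmtTokenSketch {
        public static void main(String[] args) {
            Pattern stmtToken = Pattern.compile("\\?(\\w+[-_\\d\\w]*)|\\{(\\w+[-_\\d\\w.]*)}");
            String stmt = "insert into ks.users (id,name) values ({userid},?username)";
            Matcher m = stmtToken.matcher(stmt);
            while (m.find()) {
                // group(1) holds ?name matches, group(2) holds {name} matches
                String tokenName = (m.group(1) != null) ? m.group(1) : m.group(2);
                System.out.println(tokenName); // prints "userid", then "username"
            }
        }
    }
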
@@ -1,202 +0,0 @@
package io.nosqlbench.activitytype.cqld4.core;

public class CQLOptions {
//    private final static Logger logger = LogManager.getLogger(CQLOptions.class);
//
//    private final static Pattern CORE_AND_MAX_RQ_PATTERN = Pattern.compile("(?<core>\\d+)(:(?<max>\\d+)(:(?<rq>\\d+))?)?(,(?<rcore>\\d+)(:(?<rmax>\\d+)(:(?<rrq>\\d+))?)?)?(,?heartbeat_interval_s:(?<heartbeatinterval>\\d+))?(,?idle_timeout_s:(?<idletimeout>\\d+))?(,?pool_timeout_ms:(?<pooltimeout>\\d+))?");
//    private final static Pattern PERCENTILE_EAGER_PATTERN = Pattern.compile("^p(?<pctile>[^:]+)(:(?<executions>\\d+))?(:(?<tracked>\\d+)ms)?$");
//    private final static Pattern CONSTANT_EAGER_PATTERN = Pattern.compile("^((?<msThreshold>\\d++)ms)(:(?<executions>\\d+))?$");
//
//    private static ConstantSpeculativeExecutionPolicy constantPolicy(DriverContext context, int threshold, int executions) {
//        return new ConstantSpeculativeExecutionPolicy(threshold, executions);
//    }
//
//    private static SpeculativeExecutionPolicy percentilePolicy(long tracked, double threshold, int executions) {
//        PerHostPercentileTracker tracker = newTracker(tracked);
//        return new PercentileSpeculativeExecutionPolicy(tracker, threshold, executions);
//    }
//
//    private static PerHostPercentileTracker newTracker(long millis) {
//        return PerHostPercentileTracker.builder(millis).build();
//    }
//
//    public static PoolingOptions poolingOptionsFor(String spec) {
//        Matcher matcher = CORE_AND_MAX_RQ_PATTERN.matcher(spec);
//        if (matcher.matches()) {
//            PoolingOptions poolingOptions = new PoolingOptions();
//
//            Optional.ofNullable(matcher.group("core")).map(Integer::valueOf)
//                .ifPresent(core -> poolingOptions.setCoreConnectionsPerHost(HostDistance.LOCAL, core));
//            Optional.ofNullable(matcher.group("max")).map(Integer::valueOf)
//                .ifPresent(max -> poolingOptions.setMaxConnectionsPerHost(HostDistance.LOCAL, max));
//            Optional.ofNullable(matcher.group("rq")).map(Integer::valueOf)
//                .ifPresent(rq -> poolingOptions.setMaxRequestsPerConnection(HostDistance.LOCAL, rq));
//
//            Optional.ofNullable(matcher.group("rcore")).map(Integer::valueOf)
//                .ifPresent(rcore -> poolingOptions.setCoreConnectionsPerHost(HostDistance.REMOTE, rcore));
//            Optional.ofNullable(matcher.group("rmax")).map(Integer::valueOf)
//                .ifPresent(rmax -> poolingOptions.setMaxConnectionsPerHost(HostDistance.REMOTE, rmax));
//            Optional.ofNullable(matcher.group("rrq")).map(Integer::valueOf)
//                .ifPresent(rrq -> poolingOptions.setMaxRequestsPerConnection(HostDistance.REMOTE, rrq));
//
//            Optional.ofNullable(matcher.group("heartbeatinterval")).map(Integer::valueOf)
//                .ifPresent(poolingOptions::setHeartbeatIntervalSeconds);
//
//            Optional.ofNullable(matcher.group("idletimeout")).map(Integer::valueOf)
//                .ifPresent(poolingOptions::setIdleTimeoutSeconds);
//
//            Optional.ofNullable(matcher.group("pooltimeout")).map(Integer::valueOf)
//                .ifPresent(poolingOptions::setPoolTimeoutMillis);
//
//            return poolingOptions;
//        }
//        throw new RuntimeException("No pooling options could be parsed from spec: " + spec);
//    }
//
//    public static RetryPolicy retryPolicyFor(String spec, Session session) {
//        Set<String> retryBehaviors = Arrays.stream(spec.split(",")).map(String::toLowerCase).collect(Collectors.toSet());
//        RetryPolicy retryPolicy = new DefaultRetryPolicy(session.getContext(), "default");
//
//        if (retryBehaviors.contains("default")) {
//            return retryPolicy;
//        } // add other mutually-exclusive behaviors here with checks, if we want to extend beyond "default"
//
//        if (retryBehaviors.contains("logging")) {
//            retryPolicy = new LoggingRetryPolicy(retryPolicy);
//        }
//
//        return retryPolicy;
//    }
//
//    public static ReconnectionPolicy reconnectPolicyFor(String spec, Session session) {
//        if (spec.startsWith("exponential(")) {
//            String argsString = spec.substring(12);
//            String[] args = argsString.substring(0, argsString.length() - 1).split("[,;]");
//            if (args.length != 2) {
//                throw new BasicError("Invalid reconnectionpolicy, try reconnectionpolicy=exponential(<baseDelay>, <maxDelay>)");
//            }
//            long baseDelay = Long.parseLong(args[0]);
//            long maxDelay = Long.parseLong(args[1]);
//            ExponentialReconnectionPolicy exponentialReconnectionPolicy = new ExponentialReconnectionPolicy(session.getContext());
//        } else if (spec.startsWith("constant(")) {
//            String argsString = spec.substring(9);
//            long constantDelayMs = Long.parseLong(argsString.substring(0, argsString.length() - 1));
//            return new ConstantReconnectionPolicy(constantDelayMs);
//        }
//        throw new BasicError("Invalid reconnectionpolicy, try reconnectionpolicy=exponential(<baseDelay>, <maxDelay>) or constant(<constantDelayMs>)");
//    }
//
//    public static SocketOptions socketOptionsFor(String spec) {
//        String[] assignments = spec.split("[,;]");
//        Map<String, String> values = new HashMap<>();
//        for (String assignment : assignments) {
//            String[] namevalue = assignment.split("[:=]", 2);
//            String name = namevalue[0];
//            String value = namevalue[1];
//            values.put(name, value);
//        }
//
//        SocketOptions options = new SocketOptions();
//        Optional.ofNullable(values.get("read_timeout_ms")).map(Integer::parseInt).ifPresent(
//            options::setReadTimeoutMillis
//        );
//        Optional.ofNullable(values.get("connect_timeout_ms")).map(Integer::parseInt).ifPresent(
//            options::setConnectTimeoutMillis
//        );
//        Optional.ofNullable(values.get("keep_alive")).map(Boolean::parseBoolean).ifPresent(
//            options::setKeepAlive
//        );
//        Optional.ofNullable(values.get("reuse_address")).map(Boolean::parseBoolean).ifPresent(
//            options::setReuseAddress
//        );
//        Optional.ofNullable(values.get("so_linger")).map(Integer::parseInt).ifPresent(
//            options::setSoLinger
//        );
//        Optional.ofNullable(values.get("tcp_no_delay")).map(Boolean::parseBoolean).ifPresent(
//            options::setTcpNoDelay
//        );
//        Optional.ofNullable(values.get("receive_buffer_size")).map(Integer::parseInt).ifPresent(
//            options::setReceiveBufferSize
//        );
//        Optional.ofNullable(values.get("send_buffer_size")).map(Integer::parseInt).ifPresent(
//            options::setSendBufferSize
//        );
//
//        return options;
//    }
//
//    public static SpeculativeExecutionPolicy defaultSpeculativePolicy() {
//        PerHostPercentileTracker tracker = PerHostPercentileTracker
//            .builder(15000)
//            .build();
//        PercentileSpeculativeExecutionPolicy defaultSpecPolicy =
//            new PercentileSpeculativeExecutionPolicy(tracker, 99.0, 5);
//        return defaultSpecPolicy;
//    }
//
//    public static SpeculativeExecutionPolicy speculativeFor(String spec) {
//        Matcher pctileMatcher = PERCENTILE_EAGER_PATTERN.matcher(spec);
//        Matcher constantMatcher = CONSTANT_EAGER_PATTERN.matcher(spec);
//        if (pctileMatcher.matches()) {
//            double pctile = Double.valueOf(pctileMatcher.group("pctile"));
//            if (pctile > 100.0 || pctile < 0.0) {
//                throw new RuntimeException("pctile must be between 0.0 and 100.0");
//            }
//            String executionsSpec = pctileMatcher.group("executions");
//            String trackedSpec = pctileMatcher.group("tracked");
//            int executions = (executionsSpec != null && !executionsSpec.isEmpty()) ? Integer.valueOf(executionsSpec) : 5;
//            int tracked = (trackedSpec != null && !trackedSpec.isEmpty()) ? Integer.valueOf(trackedSpec) : 15000;
//            logger.debug("speculative: Creating new percentile tracker policy from spec '" + spec + "'");
//            return percentilePolicy(tracked, pctile, executions);
//        } else if (constantMatcher.matches()) {
//            int threshold = Integer.valueOf(constantMatcher.group("msThreshold"));
//            String executionsSpec = constantMatcher.group("executions");
//            int executions = (executionsSpec != null && !executionsSpec.isEmpty()) ? Integer.valueOf(executionsSpec) : 5;
//            logger.debug("speculative: Creating new constant policy from spec '" + spec + "'");
//            return constantPolicy(threshold, executions);
//        } else {
//            throw new RuntimeException("Unable to parse pattern for speculative option: " + spec + ", it must be in " +
//                "an accepted form, like p99.0:5:15000, or p99.0:5, or 5000ms:5");
//        }
//    }
//
//    public static LoadBalancingPolicy whitelistFor(String s, LoadBalancingPolicy innerPolicy) {
//        String[] addrSpecs = s.split(",");
//        List<InetSocketAddress> sockAddrs = Arrays.stream(addrSpecs)
//            .map(CQLOptions::toSocketAddr)
//            .collect(Collectors.toList());
//        if (innerPolicy == null) {
//            innerPolicy = new RoundRobinPolicy();
//        }
//        return new WhiteListPolicy(innerPolicy, sockAddrs);
//    }
//
//    public static NettyOptions withTickDuration(String tick) {
//        logger.info("Cluster builder using custom tick duration value for HashedWheelTimer: " + tick + " milliseconds");
//        int tickDuration = Integer.valueOf(tick);
//        return new NettyOptions() {
//            public io.netty.util.Timer timer(ThreadFactory threadFactory) {
//                return new HashedWheelTimer(
//                    threadFactory, tickDuration, TimeUnit.MILLISECONDS);
//            }
//        };
//    }
//
//    private static InetSocketAddress toSocketAddr(String addr) {
//        String[] addrs = addr.split(":", 2);
//        String inetHost = addrs[0];
//        String inetPort = (addrs.length == 2) ? addrs[1] : "9042";
//        return new InetSocketAddress(inetHost, Integer.valueOf(inetPort));
//    }
//
//    public static ProtocolOptions.Compression withCompression(String compspec) {
//        try {
//            return ProtocolOptions.Compression.valueOf(compspec);
//        } catch (IllegalArgumentException iae) {
//            throw new RuntimeException("Compression option '" + compspec + "' was specified, but only " +
//                Arrays.toString(ProtocolOptions.Compression.values()) + " are available.");
//        }
//    }
}

@@ -1,363 +0,0 @@
package io.nosqlbench.activitytype.cqld4.core;

import com.codahale.metrics.Timer;
import com.datastax.oss.driver.api.core.config.DefaultDriverOption;
import com.datastax.oss.driver.api.core.cql.*;
import io.nosqlbench.activitytype.cqld4.api.D4ResultSetCycleOperator;
import io.nosqlbench.activitytype.cqld4.api.RowCycleOperator;
import io.nosqlbench.activitytype.cqld4.api.StatementFilter;
import io.nosqlbench.activitytype.cqld4.errorhandling.ErrorStatus;
import io.nosqlbench.activitytype.cqld4.errorhandling.HashedCQLErrorHandler;
import io.nosqlbench.activitytype.cqld4.errorhandling.exceptions.CQLCycleWithStatementException;
import io.nosqlbench.activitytype.cqld4.errorhandling.exceptions.MaxTriesExhaustedException;
import io.nosqlbench.activitytype.cqld4.errorhandling.exceptions.UnexpectedPagingException;
import io.nosqlbench.activitytype.cqld4.statements.core.ReadyCQLStatement;
import io.nosqlbench.engine.api.activityapi.core.ActivityDefObserver;
import io.nosqlbench.engine.api.activityapi.core.MultiPhaseAction;
import io.nosqlbench.engine.api.activityapi.core.SyncAction;
import io.nosqlbench.engine.api.activityapi.planning.OpSequence;
import io.nosqlbench.engine.api.activityimpl.ActivityDef;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;

import java.util.List;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.TimeUnit;

@SuppressWarnings("Duplicates")
public class CqlAction implements SyncAction, MultiPhaseAction, ActivityDefObserver {

    private final static Logger logger = LogManager.getLogger(CqlAction.class);
    private final int slot;
    private final CqlActivity cqlActivity;
    private final ActivityDef activityDef;
    private List<RowCycleOperator> rowOps;
    private List<D4ResultSetCycleOperator> cycleOps;
    private List<StatementModifier> modifiers;
    private StatementFilter statementFilter;
    private OpSequence<ReadyCQLStatement> sequencer;
    private int maxTries = 10; // how many cycles a statement will be attempted for before giving up

    private HashedCQLErrorHandler ebdseErrorHandler;

    private int pagesFetched = 0;
    private long totalRowsFetchedForQuery = 0L;
    private AsyncResultSet pagingResultSet;
    private Statement pagingStatement;
    private ReadyCQLStatement pagingReadyStatement;
    private boolean showcql;
    private long nanoStartTime;
    private long retryDelay;
    private long maxRetryDelay;
    private boolean retryReplace;

    public CqlAction(ActivityDef activityDef, int slot, CqlActivity cqlActivity) {
        this.activityDef = activityDef;
        this.cqlActivity = cqlActivity;
        this.slot = slot;
        onActivityDefUpdate(activityDef);
    }

    @Override
    public void init() {
        onActivityDefUpdate(activityDef);
        this.sequencer = cqlActivity.getOpSequencer();
    }

    @Override
    public int runCycle(long value) {
        // In this activity type, we use the same phase
        // logic for the initial phase (runCycle(...))
        // as well as subsequent phases.
        return runPhase(value);
    }

    public int runPhase(long cycleValue) {

        HashedCQLErrorHandler.resetThreadStatusCode();

        if (pagingResultSet == null) {

            totalRowsFetchedForQuery = 0L;

            Statement statement;
            CompletionStage<AsyncResultSet> resultSetFuture;
            ReadyCQLStatement readyCQLStatement;

            int tries = 0;

            try (Timer.Context bindTime = cqlActivity.bindTimer.time()) {
                readyCQLStatement = sequencer.get(cycleValue);
                statement = readyCQLStatement.bind(cycleValue);

                if (statementFilter != null) {
                    if (!statementFilter.matches(statement)) {
                        cqlActivity.skippedTokensHisto.update(cycleValue);
                        return 0;
                    }
                }

                if (modifiers != null) {
                    for (StatementModifier modifier : modifiers) {
                        statement = modifier.modify(statement, cycleValue);
                    }
                }

                if (showcql) {
                    logger.info("CQL(cycle=" + cycleValue + "):\n" + readyCQLStatement.getQueryString(cycleValue));
                }
            }
            nanoStartTime = System.nanoTime();

            while (tries < maxTries) {
                tries++;

                if (tries > maxTries) {
                    throw new MaxTriesExhaustedException(cycleValue, maxTries);
                }

                if (tries > 1) {
                    try (Timer.Context retryTime = cqlActivity.retryDelayTimer.time()) {
                        Thread.sleep(Math.min((retryDelay << tries) / 1000, maxRetryDelay / 1000));
                    } catch (InterruptedException ignored) {
                    }
                }

                CompletionStage<AsyncResultSet> completion;
                try (Timer.Context executeTime = cqlActivity.executeTimer.time()) {
                    completion = cqlActivity.getSession().executeAsync(statement);
                }

                Timer.Context resultTime = cqlActivity.resultTimer.time();
                try {
                    AsyncResultSet resultSet = completion.toCompletableFuture().get();

                    if (cycleOps != null) {
                        for (D4ResultSetCycleOperator cycleOp : cycleOps) {
                            cycleOp.apply(resultSet, statement, cycleValue);
                        }
                    }

                    D4ResultSetCycleOperator[] rsOperators = readyCQLStatement.getResultSetOperators();
                    if (rsOperators != null) {
                        for (D4ResultSetCycleOperator perStmtRSOperator : rsOperators) {
                            perStmtRSOperator.apply(resultSet, statement, cycleValue);
                        }
                    }

                    // TODO: Add parameter rebind support in cqld4 via op
                    // if (!resultSet.wasApplied()) {
                    //     //resultSet.b
                    //     Row row = resultSet.one();
                    //     ColumnDefinitions defs = row.getColumnDefinitions();
                    //     if (retryReplace) {
                    //         statement =
                    //             new CQLBindHelper(getCqlActivity().getSession()).rebindUnappliedStatement(statement, defs, row);
                    //     }
                    //
                    //     logger.trace(readyCQLStatement.getQueryString(cycleValue));
                    //     // To make exception handling logic flow more uniformly
                    //     throw new ChangeUnappliedCycleException(
                    //         cycleValue, resultSet, readyCQLStatement.getQueryString(cycleValue)
                    //     );
                    // }

                    // int pageRows = resultSet.getAvailableWithoutFetching();

                    int rowsInPage = 0;
                    RowCycleOperator[] perStmtRowOperators = readyCQLStatement.getRowCycleOperators();

                    if (rowOps == null && perStmtRowOperators == null) {
                        for (Row row : resultSet.currentPage()) {
                            rowsInPage++;
                        }
                    } else {
                        for (Row row : resultSet.currentPage()) {
                            if (rowOps != null) {
                                for (RowCycleOperator rowOp : rowOps) {
                                    rowOp.apply(row, cycleValue);
                                }
                            }
                            if (perStmtRowOperators != null) {
                                for (RowCycleOperator rowOp : perStmtRowOperators) {
                                    rowOp.apply(row, cycleValue);
                                }
                            }
                            rowsInPage++;
                        }
                    }

                    cqlActivity.rowsCounter.mark(rowsInPage);
                    totalRowsFetchedForQuery += rowsInPage;

                    if (resultSet.hasMorePages()) {
                        if (cqlActivity.maxpages > 1) {
                            pagingResultSet = resultSet;
                            pagingStatement = statement;
                            pagingReadyStatement = readyCQLStatement;
                            pagesFetched = 1;
                        } else {
                            throw new UnexpectedPagingException(
                                cycleValue,
                                resultSet,
                                readyCQLStatement.getQueryString(cycleValue),
                                1,
                                cqlActivity.maxpages,
                                cqlActivity.getSession().getContext().getConfig().getDefaultProfile().getInt(DefaultDriverOption.REQUEST_PAGE_SIZE)
                            );
                        }
                    } else {
                        long resultNanos = System.nanoTime() - nanoStartTime;
                        cqlActivity.resultSuccessTimer.update(resultNanos, TimeUnit.NANOSECONDS);
                        cqlActivity.resultSetSizeHisto.update(totalRowsFetchedForQuery);
                        readyCQLStatement.onSuccess(cycleValue, resultNanos, totalRowsFetchedForQuery);
                    }
                    break; // This is normal termination of this loop, when retries aren't needed
                } catch (Exception e) {
                    long resultNanos = resultTime.stop();
                    resultTime = null;
                    readyCQLStatement.onError(cycleValue, resultNanos, e);
                    CQLCycleWithStatementException cqlCycleException = new CQLCycleWithStatementException(cycleValue, resultNanos, e, readyCQLStatement);
                    ErrorStatus errorStatus = ebdseErrorHandler.handleError(cycleValue, cqlCycleException);
                    if (!errorStatus.isRetryable()) {
                        cqlActivity.triesHisto.update(tries);
                        return errorStatus.getResultCode();
                    }
                } finally {
                    if (resultTime != null) {
                        resultTime.stop();
                    }
                }
            }
            cqlActivity.triesHisto.update(tries);

        } else {

            int tries = 0;

            while (tries < maxTries) {
                tries++;
                if (tries > maxTries) {
                    throw new MaxTriesExhaustedException(cycleValue, maxTries);
                }

                try (Timer.Context pagingTime = cqlActivity.pagesTimer.time()) {

                    CompletionStage<AsyncResultSet> completion;
                    try (Timer.Context executeTime = cqlActivity.executeTimer.time()) {
                        completion = pagingResultSet.fetchNextPage();
                    }

                    Timer.Context resultTime = cqlActivity.resultTimer.time();
                    try {
                        AsyncResultSet resultSet = completion.toCompletableFuture().get();

                        if (cycleOps != null) {
                            for (D4ResultSetCycleOperator cycleOp : cycleOps) {
                                cycleOp.apply(resultSet, pagingStatement, cycleValue);
                            }
                        }
                        D4ResultSetCycleOperator[] perStmtRSOperators = pagingReadyStatement.getResultSetOperators();
                        if (perStmtRSOperators != null) {
                            for (D4ResultSetCycleOperator perStmtRSOperator : perStmtRSOperators) {
                                perStmtRSOperator.apply(resultSet, pagingStatement, cycleValue);
                            }
                        }

                        pagesFetched++;

                        RowCycleOperator[] perStmtRowCycleOp = pagingReadyStatement.getRowCycleOperators();
                        int rowsInPage = 0;

                        if (rowOps == null && perStmtRowCycleOp == null) {
                            for (Row row : resultSet.currentPage()) {
                                rowsInPage++;
                            }
                        } else {
                            for (Row row : resultSet.currentPage()) {
                                rowsInPage++;
                                if (rowOps != null) {
                                    for (RowCycleOperator rowOp : rowOps) {
                                        rowOp.apply(row, cycleValue);
                                    }
                                }
                                if (perStmtRowCycleOp != null) {
                                    for (RowCycleOperator rowCycleOperator : perStmtRowCycleOp) {
                                        rowCycleOperator.apply(row, cycleValue);
                                    }
                                }
                            }
                        }

                        cqlActivity.rowsCounter.mark(rowsInPage);
                        totalRowsFetchedForQuery += rowsInPage;

                        if (resultSet.hasMorePages()) {
                            if (pagesFetched > cqlActivity.maxpages) {
                                throw new UnexpectedPagingException(
                                    cycleValue,
                                    pagingResultSet,
                                    pagingReadyStatement.getQueryString(cycleValue),
                                    pagesFetched,
                                    cqlActivity.maxpages,
                                    cqlActivity.getSession().getContext().getConfig().getDefaultProfile().getInt(DefaultDriverOption.REQUEST_PAGE_SIZE)
                                );
                            }
                            pagingResultSet = resultSet;
                        } else {
                            long nanoTime = System.nanoTime() - nanoStartTime;
                            cqlActivity.resultSuccessTimer.update(nanoTime, TimeUnit.NANOSECONDS);
                            cqlActivity.resultSetSizeHisto.update(totalRowsFetchedForQuery);
                            pagingReadyStatement.onSuccess(cycleValue, nanoTime, totalRowsFetchedForQuery);
                            pagingResultSet = null;
                        }
                        break; // This is normal termination of this loop, when retries aren't needed
                    } catch (Exception e) {
                        long resultNanos = resultTime.stop();
                        resultTime = null;

                        pagingReadyStatement.onError(cycleValue, resultNanos, e);
                        CQLCycleWithStatementException cqlCycleException = new CQLCycleWithStatementException(cycleValue, resultNanos, e, pagingReadyStatement);
                        ErrorStatus errorStatus = ebdseErrorHandler.handleError(cycleValue, cqlCycleException);
                        if (!errorStatus.isRetryable()) {
                            cqlActivity.triesHisto.update(tries);
                            return errorStatus.getResultCode();
                        }
                    } finally {
                        if (resultTime != null) {
                            resultTime.stop();
                        }
                    }
                }
            }
            cqlActivity.triesHisto.update(tries);
        }
        return 0;
    }

    @Override
    public boolean incomplete() {
        return pagingResultSet != null;
    }

    @Override
    public void onActivityDefUpdate(ActivityDef activityDef) {
        this.maxTries = cqlActivity.getMaxTries();
        this.retryDelay = cqlActivity.getRetryDelay();
        this.maxRetryDelay = cqlActivity.getMaxRetryDelay();
        this.retryReplace = cqlActivity.isRetryReplace();
        this.showcql = cqlActivity.isShowCql();
        this.ebdseErrorHandler = cqlActivity.getCqlErrorHandler();
        this.statementFilter = cqlActivity.getStatementFilter();
        this.rowOps = cqlActivity.getRowCycleOperators();
        this.cycleOps = cqlActivity.getPageInfoCycleOperators();
        this.modifiers = cqlActivity.getStatementModifiers();
    }

    protected CqlActivity getCqlActivity() {
        return cqlActivity;
    }

}

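The retry branch above sleeps for min((retryDelay << tries) / 1000, maxRetryDelay / 1000) milliseconds, so the delay doubles with each attempt until it hits the cap. A standalone sketch of that schedule with illustrative values:

    public class BackoffSketch {
        public static void main(String[] args) {
            long retryDelay = 128_000;      // illustrative value
            long maxRetryDelay = 1_000_000; // illustrative cap
            for (int tries = 2; tries <= 5; tries++) {
                long sleepMs = Math.min((retryDelay << tries) / 1000, maxRetryDelay / 1000);
                // try 2: 512 ms, try 3: 1000 ms (capped), try 4: 1000 ms, ...
                System.out.println("try " + tries + ": sleep " + sleepMs + " ms");
            }
        }
    }
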
@@ -1,27 +0,0 @@
package io.nosqlbench.activitytype.cqld4.core;

import io.nosqlbench.engine.api.activityapi.core.Action;
import io.nosqlbench.engine.api.activityapi.core.ActionDispenser;

public class CqlActionDispenser implements ActionDispenser {

    private final CqlActivity cqlActivity;

    public CqlActionDispenser(CqlActivity activityContext) {
        this.cqlActivity = activityContext;
    }

    public CqlActivity getCqlActivity() {
        return cqlActivity;
    }

    public Action getAction(int slot) {
        long async = cqlActivity.getActivityDef().getParams().getOptionalLong("async").orElse(0L);
        if (async > 0) {
            return new CqlAsyncAction(cqlActivity, slot);
        } else {
            return new CqlAction(cqlActivity.getActivityDef(), slot, cqlActivity);
        }
    }
}

@@ -1,679 +0,0 @@
package io.nosqlbench.activitytype.cqld4.core;

import com.codahale.metrics.Histogram;
import com.codahale.metrics.Meter;
import com.codahale.metrics.Timer;
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.DefaultConsistencyLevel;
import com.datastax.oss.driver.api.core.config.DefaultDriverOption;
import com.datastax.oss.driver.api.core.cql.*;
import com.datastax.oss.driver.api.core.session.Session;
import io.nosqlbench.activitytype.cqld4.codecsupport.UDTCodecInjector;
import com.datastax.driver.core.TokenRangeStmtFilter;
import io.nosqlbench.activitytype.cqld4.api.ErrorResponse;
import io.nosqlbench.activitytype.cqld4.api.D4ResultSetCycleOperator;
import io.nosqlbench.activitytype.cqld4.api.RowCycleOperator;
import io.nosqlbench.activitytype.cqld4.api.StatementFilter;
import io.nosqlbench.activitytype.cqld4.errorhandling.NBCycleErrorHandler;
import io.nosqlbench.activitytype.cqld4.errorhandling.HashedCQLErrorHandler;
import io.nosqlbench.activitytype.cqld4.statements.binders.CqlBinderTypes;
import io.nosqlbench.activitytype.cqld4.statements.rowoperators.RowCycleOperators;
import io.nosqlbench.activitytype.cqld4.statements.rowoperators.Save;
import io.nosqlbench.activitytype.cqld4.statements.rsoperators.ResultSetCycleOperators;
import io.nosqlbench.activitytype.cqld4.statements.rsoperators.TraceLogger;
import io.nosqlbench.activitytype.cqld4.statements.core.*;
import io.nosqlbench.engine.api.activityapi.core.Activity;
import io.nosqlbench.engine.api.activityapi.core.ActivityDefObserver;
import io.nosqlbench.engine.api.activityapi.planning.OpSequence;
import io.nosqlbench.engine.api.activityapi.planning.SequencePlanner;
import io.nosqlbench.engine.api.activityapi.planning.SequencerType;
import io.nosqlbench.engine.api.activityconfig.ParsedStmt;
import io.nosqlbench.engine.api.activityconfig.StatementsLoader;
import io.nosqlbench.engine.api.activityconfig.rawyaml.RawStmtDef;
import io.nosqlbench.engine.api.activityconfig.rawyaml.RawStmtsBlock;
import io.nosqlbench.engine.api.activityconfig.rawyaml.RawStmtsDoc;
import io.nosqlbench.engine.api.activityconfig.rawyaml.RawStmtsDocList;
import io.nosqlbench.engine.api.activityconfig.yaml.OpTemplate;
import io.nosqlbench.engine.api.activityconfig.yaml.StmtsDocList;
import io.nosqlbench.engine.api.activityimpl.ActivityDef;
import io.nosqlbench.engine.api.activityimpl.ParameterMap;
import io.nosqlbench.engine.api.activityimpl.SimpleActivity;
import io.nosqlbench.engine.api.metrics.ActivityMetrics;
import io.nosqlbench.engine.api.metrics.ExceptionCountMetrics;
import io.nosqlbench.engine.api.metrics.ExceptionHistoMetrics;
import io.nosqlbench.engine.api.util.SimpleConfig;
import io.nosqlbench.engine.api.templating.StrInterpolator;
import io.nosqlbench.engine.api.util.TagFilter;
import io.nosqlbench.engine.api.util.Unit;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;

import java.io.FileWriter;
import java.io.IOException;
import java.io.Writer;
import java.nio.charset.StandardCharsets;
import java.util.*;

@SuppressWarnings("Duplicates")
public class CqlActivity extends SimpleActivity implements Activity, ActivityDefObserver {

    private final static Logger logger = LogManager.getLogger(CqlActivity.class);
    private final ExceptionCountMetrics exceptionCountMetrics;
    private final ExceptionHistoMetrics exceptionHistoMetrics;
    private final ActivityDef activityDef;
    private final Map<String, Writer> namedWriters = new HashMap<>();
    protected List<OpTemplate> stmts;
    Timer retryDelayTimer;
    Timer bindTimer;
    Timer executeTimer;
    Timer resultTimer;
    Timer resultSuccessTimer;
    Timer pagesTimer;
    Histogram triesHisto;
    Histogram skippedTokensHisto;
    Histogram resultSetSizeHisto;
    int maxpages;
    Meter rowsCounter;
    private HashedCQLErrorHandler errorHandler;
    private OpSequence<ReadyCQLStatement> opsequence;
    private CqlSession session;
    private int maxTries;
    private StatementFilter statementFilter;
    private Boolean showcql;
    private List<RowCycleOperator> rowCycleOperators;
    private List<D4ResultSetCycleOperator> pageInfoCycleOperators;
    private List<StatementModifier> statementModifiers;
    private Long maxTotalOpsInFlight;
    private long retryDelay;
    private long maxRetryDelay;
    private boolean retryReplace;
    private String pooling;
    private String profileName;

    public CqlActivity(ActivityDef activityDef) {
        super(activityDef);
        this.activityDef = activityDef;
        exceptionCountMetrics = new ExceptionCountMetrics(activityDef);
        exceptionHistoMetrics = new ExceptionHistoMetrics(activityDef);
    }

    private void registerCodecs(Session session) {
        UDTCodecInjector injector = new UDTCodecInjector();
        injector.injectUserProvidedCodecs(session, true);
    }

    @Override
    public synchronized void initActivity() {
        logger.debug("initializing activity: " + this.activityDef.getAlias());
        profileName = getParams().getOptionalString("profile").orElse("default");
        session = getSession();

        if (getParams().getOptionalBoolean("usercodecs").orElse(false)) {
            registerCodecs(session);
        }
        initSequencer();
        setDefaultsFromOpSequence(this.opsequence);

        retryDelayTimer = ActivityMetrics.timer(activityDef, "retry-delay");
        bindTimer = ActivityMetrics.timer(activityDef, "bind");
        executeTimer = ActivityMetrics.timer(activityDef, "execute");
        resultTimer = ActivityMetrics.timer(activityDef, "result");
        triesHisto = ActivityMetrics.histogram(activityDef, "tries");
        pagesTimer = ActivityMetrics.timer(activityDef, "pages");
        rowsCounter = ActivityMetrics.meter(activityDef, "rows");
        skippedTokensHisto = ActivityMetrics.histogram(activityDef, "skipped-tokens");
        resultSuccessTimer = ActivityMetrics.timer(activityDef, "result-success");
        resultSetSizeHisto = ActivityMetrics.histogram(activityDef, "resultset-size");
        onActivityDefUpdate(activityDef);
        logger.debug("activity fully initialized: " + this.activityDef.getAlias());
    }

    public synchronized CqlSession getSession() {
        if (session == null) {
            session = CQLSessionCache.get().getSession(this.getActivityDef()).session;
        }
        return session;
    }

    private void initSequencer() {

        Session session = getSession();
        Map<String, Object> fconfig = Map.of("session", session);

        SequencerType sequencerType = SequencerType.valueOf(
            getParams().getOptionalString("seq").orElse("bucket")
        );
        SequencePlanner<ReadyCQLStatement> planner = new SequencePlanner<>(sequencerType);

        StmtsDocList unfiltered = loadStmtsYaml();

        // log tag filtering results
        String tagfilter = activityDef.getParams().getOptionalString("tags").orElse("");
        TagFilter tagFilter = new TagFilter(tagfilter);
        unfiltered.getStmts().stream().map(tagFilter::matchesTaggedResult).forEach(r -> logger.info(r.getLog()));

        stmts = unfiltered.getStmts(tagfilter);

        if (stmts.size() == 0) {
            throw new RuntimeException("There were no unfiltered statements found for this activity.");
        }

        for (OpTemplate stmtDef : stmts) {

            ParsedStmt parsed = stmtDef.getParsed().orError();

            boolean prepared = stmtDef.getParamOrDefault("prepared", true);
            boolean parameterized = stmtDef.getParamOrDefault("parameterized", false);
            long ratio = stmtDef.getParamOrDefault("ratio", 1);

            StringBuilder psummary = new StringBuilder();

            boolean instrument = stmtDef.getOptionalStringParam("instrument")
                .or(() -> getParams().getOptionalString("instrument"))
                .map(Boolean::parseBoolean)
                .orElse(false);

            String logresultcsv = stmtDef.getParamOrDefault("logresultcsv", "");
            String logresultcsv_act = getParams().getOptionalString("logresultcsv").orElse("");

            if (!logresultcsv_act.isEmpty() && !logresultcsv_act.toLowerCase().equals("true")) {
                throw new RuntimeException("At the activity level, only logresultcsv=true is allowed, no other values.");
            }
            logresultcsv = !logresultcsv.isEmpty() ? logresultcsv : logresultcsv_act;
            logresultcsv = !logresultcsv.toLowerCase().equals("true") ? logresultcsv : stmtDef.getName() + "--results.csv";

            logger.debug("readying statement[" + (prepared ? "" : "un") + "prepared]:" + parsed.getStmt());

            ReadyCQLStatementTemplate template;
            String stmtForDriver = parsed.getPositionalStatement(s -> "?");

            SimpleStatementBuilder stmtBuilder = SimpleStatement.builder(stmtForDriver);
            psummary.append(" statement=>").append(stmtForDriver);

            stmtDef.getOptionalStringParam("cl")
                .map(DefaultConsistencyLevel::valueOf)
                .map(conlvl -> {
                    psummary.append(" consistency_level=>").append(conlvl);
                    return conlvl;
                })
                .ifPresent(stmtBuilder::setConsistencyLevel);

            stmtDef.getOptionalStringParam("serial_cl")
                .map(DefaultConsistencyLevel::valueOf)
                .map(sconlvel -> {
                    psummary.append(" serial_consistency_level=>").append(sconlvel);
                    return sconlvel;
                })
                .ifPresent(stmtBuilder::setSerialConsistencyLevel);

            stmtDef.getOptionalStringParam("idempotent")
                .map(Boolean::valueOf)
                .map(idempotent -> {
                    psummary.append(" idempotent=").append(idempotent);
|
||||
return idempotent;
|
||||
})
|
||||
.ifPresent(stmtBuilder::setIdempotence);
|
||||
|
||||
|
||||
if (prepared) {
|
||||
PreparedStatement preparedStatement = getSession().prepare(stmtBuilder.build());
|
||||
|
||||
CqlBinderTypes binderType = stmtDef.getOptionalStringParam("binder")
|
||||
.map(CqlBinderTypes::valueOf)
|
||||
.orElse(CqlBinderTypes.DEFAULT);
|
||||
|
||||
template = new ReadyCQLStatementTemplate(
|
||||
fconfig,
|
||||
binderType,
|
||||
getSession(),
|
||||
preparedStatement,
|
||||
ratio,
|
||||
parsed.getName()
|
||||
);
|
||||
} else {
|
||||
SimpleStatement simpleStatement = SimpleStatement.newInstance(stmtForDriver);
|
||||
template = new ReadyCQLStatementTemplate(fconfig, getSession(), simpleStatement, ratio,
|
||||
parsed.getName(), parameterized);
|
||||
}
|
||||
|
||||
|
||||
stmtDef.getOptionalStringParam("save")
|
||||
.map(s -> s.split("[,; ]"))
|
||||
.map(Save::new)
|
||||
.ifPresent(save_op -> {
|
||||
psummary.append(" save=>").append(save_op.toString());
|
||||
template.addRowCycleOperators(save_op);
|
||||
});
|
||||
|
||||
stmtDef.getOptionalStringParam("rsoperators")
|
||||
.map(s -> s.split(","))
|
||||
.stream().flatMap(Arrays::stream)
|
||||
.map(ResultSetCycleOperators::newOperator)
|
||||
.forEach(rso -> {
|
||||
psummary.append(" rsop=>").append(rso.toString());
|
||||
template.addResultSetOperators(rso);
|
||||
});
|
||||
|
||||
stmtDef.getOptionalStringParam("rowoperators")
|
||||
.map(s -> s.split(","))
|
||||
.stream().flatMap(Arrays::stream)
|
||||
.map(RowCycleOperators::newOperator)
|
||||
.forEach(ro -> {
|
||||
psummary.append(" rowop=>").append(ro.toString());
|
||||
template.addRowCycleOperators(ro);
|
||||
});
|
||||
|
||||
if (instrument) {
|
||||
logger.info("Adding per-statement success and error and resultset-size timers to statement '" + parsed.getName() + "'");
|
||||
template.instrument(this);
|
||||
psummary.append(" instrument=>").append(instrument);
|
||||
}
|
||||
|
||||
if (!logresultcsv.isEmpty()) {
|
||||
logger.info("Adding per-statement result CSV logging to statement '" + parsed.getName() + "'");
|
||||
template.logResultCsv(this, logresultcsv);
|
||||
psummary.append(" logresultcsv=>").append(logresultcsv);
|
||||
}
|
||||
|
||||
template.getContextualBindings().getBindingsTemplate().addFieldBindings(stmtDef.getParsed().getBindPoints());
|
||||
|
||||
if (psummary.length() > 0) {
|
||||
logger.info("statement named '" + stmtDef.getName() + "' has custom settings:" + psummary.toString());
|
||||
}
|
||||
|
||||
planner.addOp(template.resolve(), ratio);
|
||||
}
|
||||
|
||||
opsequence = planner.resolve();
|
||||
|
||||
}
|
||||
|
||||
private StmtsDocList loadStmtsYaml() {
|
||||
StmtsDocList doclist = null;
|
||||
|
||||
|
||||
String yaml_loc = activityDef.getParams().getOptionalString("yaml", "workload").orElse("default");
|
||||
|
||||
StrInterpolator interp = new StrInterpolator(activityDef);
|
||||
|
||||
String yamlVersion = "unset";
|
||||
if (yaml_loc.endsWith(":1") || yaml_loc.endsWith(":2")) {
|
||||
yamlVersion = yaml_loc.substring(yaml_loc.length() - 1);
|
||||
yaml_loc = yaml_loc.substring(0, yaml_loc.length() - 2);
|
||||
}
|
||||
|
||||
switch (yamlVersion) {
|
||||
case "1":
|
||||
doclist = getVersion1StmtsDoc(interp, yaml_loc);
|
||||
logger.warn("DEPRECATED-FORMAT: Loaded yaml " + yaml_loc + " with compatibility mode. " +
|
||||
"This will be deprecated in a future release.");
|
||||
logger.warn("DEPRECATED-FORMAT: Please refer to " +
|
||||
"http://docs.engineblock.io/user-guide/standard_yaml/ for more details.");
|
||||
break;
|
||||
case "2":
|
||||
doclist = StatementsLoader.loadPath(logger, yaml_loc, interp, "activities");
|
||||
break;
|
||||
case "unset":
|
||||
try {
|
||||
logger.debug("You can suffix your yaml filename or url with the " +
|
||||
"format version, such as :1 or :2. Assuming version 2.");
|
||||
doclist = StatementsLoader.loadPath(null, yaml_loc, interp, "activities");
|
||||
} catch (Exception ignored) {
|
||||
try {
|
||||
doclist = getVersion1StmtsDoc(interp, yaml_loc);
|
||||
logger.warn("DEPRECATED-FORMAT: Loaded yaml " + yaml_loc +
|
||||
" with compatibility mode. This will be deprecated in a future release.");
|
||||
logger.warn("DEPRECATED-FORMAT: Please refer to " +
|
||||
"http://docs.engineblock.io/user-guide/standard_yaml/ for more details.");
|
||||
} catch (Exception compatError) {
|
||||
logger.warn("Tried to load yaml in compatibility mode, " +
|
||||
"since it failed to load with the standard format, " +
|
||||
"but found an error:" + compatError);
|
||||
logger.warn("The following detailed errors are provided only " +
|
||||
"for the standard format. To force loading version 1 with detailed logging, add" +
|
||||
" a version qualifier to your yaml filename or url like ':1'");
|
||||
// retrigger the error again, this time with logging enabled.
|
||||
doclist = StatementsLoader.loadPath(logger, yaml_loc, interp, "activities");
|
||||
}
|
||||
}
|
||||
break;
|
||||
default:
|
||||
throw new RuntimeException("Unrecognized yaml format version, expected :1 or :2 " +
|
||||
"at end of yaml file, but got " + yamlVersion + " instead.");
|
||||
}
|
||||
|
||||
return doclist;
|
||||
|
||||
}
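
    // For illustration only: the ":1"/":2" suffix convention above means that a
    // workload parameter like `yaml=myworkload.yaml:1` (a hypothetical filename)
    // forces the deprecated v1 loader, while a plain `yaml=myworkload.yaml` lets
    // the loader try the standard format first and fall back to compatibility mode.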

    @Deprecated
    private StmtsDocList getVersion1StmtsDoc(StrInterpolator interp, String yaml_loc) {
        StmtsDocList unfiltered;
        List<RawStmtsBlock> blocks = new ArrayList<>();

        YamlCQLStatementLoader deprecatedLoader = new YamlCQLStatementLoader(interp);
        AvailableCQLStatements rawDocs = deprecatedLoader.load(yaml_loc, "activities");

        List<TaggedCQLStatementDefs> rawTagged = rawDocs.getRawTagged();

        for (TaggedCQLStatementDefs rawdef : rawTagged) {
            for (CQLStatementDef rawstmt : rawdef.getStatements()) {
                RawStmtsBlock rawblock = new RawStmtsBlock();

                // tags
                rawblock.setTags(rawdef.getTags());

                // params
                Map<String, Object> params = new HashMap<>(rawdef.getParams());
                if (rawstmt.getConsistencyLevel() != null && !rawstmt.getConsistencyLevel().isEmpty())
                    params.put("cl", rawstmt.getConsistencyLevel());
                if (!rawstmt.isPrepared()) params.put("prepared", "false");
                if (rawstmt.getRatio() != 1L)
                    params.put("ratio", String.valueOf(rawstmt.getRatio()));

                rawblock.setParams(params);

                // stmts
                List<RawStmtDef> stmtslist = new ArrayList<>();
                stmtslist.add(new RawStmtDef(rawstmt.getName(), rawstmt.getStatement()));
                rawblock.setRawStmtDefs(stmtslist);

                // bindings
                rawblock.setBindings(rawstmt.getBindings());

                blocks.add(rawblock);
            }
        }

        RawStmtsDoc rawStmtsDoc = new RawStmtsDoc();
        rawStmtsDoc.setBlocks(blocks);
        List<RawStmtsDoc> rawStmtsDocs = new ArrayList<>();
        rawStmtsDocs.add(rawStmtsDoc);
        RawStmtsDocList rawStmtsDocList = new RawStmtsDocList(rawStmtsDocs);
        unfiltered = new StmtsDocList(rawStmtsDocList);

        return unfiltered;
    }

    public ExceptionCountMetrics getExceptionCountMetrics() {
        return exceptionCountMetrics;
    }

    @Override
    public String toString() {
        return "CQLActivity {" +
            "activityDef=" + activityDef +
            ", session=" + session +
            ", opSequence=" + this.opsequence +
            '}';
    }

    @Override
    public void onActivityDefUpdate(ActivityDef activityDef) {
        super.onActivityDefUpdate(activityDef);

        clearResultSetCycleOperators();
        clearRowCycleOperators();
        clearStatementModifiers();

        ParameterMap params = activityDef.getParams();
        Optional<String> fetchSizeOption = params.getOptionalString("fetchsize");

        if (fetchSizeOption.isPresent()) {
            int fetchSize = fetchSizeOption.flatMap(Unit::bytesFor).map(Double::intValue).orElseThrow(() -> new RuntimeException(
                "Unable to parse fetch size from " + fetchSizeOption.get()
            ));
            if (fetchSize > 10000000 && fetchSize < 1000000000) {
                logger.warn("Setting the fetchsize to " + fetchSize + " is unlikely to give good performance.");
            } else if (fetchSize > 1000000000) {
                throw new RuntimeException("Setting the fetch size to " + fetchSize + " is likely to cause instability.");
            }
            logger.trace("setting fetchSize to " + fetchSize);

            CQLSessionCache.get().getSession(activityDef).set(DefaultDriverOption.REQUEST_PAGE_SIZE, fetchSize);
        }

        this.retryDelay = params.getOptionalLong("retrydelay").orElse(0L);
        this.maxRetryDelay = params.getOptionalLong("maxretrydelay").orElse(500L);
        this.retryReplace = params.getOptionalBoolean("retryreplace").orElse(false);
        this.maxTries = params.getOptionalInteger("maxtries").orElse(10);
        this.showcql = params.getOptionalBoolean("showcql").orElse(false);
        this.maxpages = params.getOptionalInteger("maxpages").orElse(1);

        this.statementFilter = params.getOptionalString("tokens")
            .map(s -> new TokenRangeStmtFilter(getSession(), s))
            .orElse(null);

        if (statementFilter != null) {
            logger.info("filtering statements with " + statementFilter);
        }

        errorHandler = configureErrorHandler();

        params.getOptionalString("trace")
            .map(SimpleConfig::new)
            .map(TraceLogger::new)
            .ifPresent(
                tl -> {
                    addResultSetCycleOperator(tl);
                    addStatementModifier(tl);
                });

        this.maxTotalOpsInFlight = params.getOptionalLong("async").orElse(1L);

        // TODO: Support dynamic pooling options
        // Optional<String> dynpooling = params.getOptionalString("pooling");
        // if (dynpooling.isPresent()) {
        //     logger.info("dynamically updating pooling");
        //     if (!dynpooling.get().equals(this.pooling)) {
        //         PoolingOptions opts = CQLOptions.poolingOptionsFor(dynpooling.get());
        //         logger.info("pooling=>" + dynpooling.get());
        //
        //         PoolingOptions cfg = getSession().getCluster().getConfiguration().getPoolingOptions();
        //
        //         // This looks funny, because we have to set max conns per host
        //         // in an order that will appease the driver, as there is no "apply settings"
        //         // to do that for us, so we raise max first if it goes higher, and we lower
        //         // it last, if it goes lower
        //         int prior_mcph_l = cfg.getMaxConnectionsPerHost(HostDistance.LOCAL);
        //         int mcph_l = opts.getMaxConnectionsPerHost(HostDistance.LOCAL);
        //         int ccph_l = opts.getCoreConnectionsPerHost(HostDistance.LOCAL);
        //         if (prior_mcph_l < mcph_l) {
        //             logger.info("setting mcph_l to " + mcph_l);
        //             cfg.setMaxConnectionsPerHost(HostDistance.LOCAL, mcph_l);
        //         }
        //         logger.info("setting ccph_l to " + ccph_l);
        //         cfg.setCoreConnectionsPerHost(HostDistance.LOCAL, ccph_l);
        //         if (mcph_l < prior_mcph_l) {
        //             logger.info("setting mcph_l to " + mcph_l);
        //             cfg.setMaxRequestsPerConnection(HostDistance.LOCAL, mcph_l);
        //         }
        //         cfg.setMaxRequestsPerConnection(HostDistance.LOCAL, opts.getMaxRequestsPerConnection(HostDistance.LOCAL));
        //
        //         int prior_mcph_r = cfg.getMaxConnectionsPerHost(HostDistance.REMOTE);
        //         int mcph_r = opts.getMaxConnectionsPerHost(HostDistance.REMOTE);
        //         int ccph_r = opts.getCoreConnectionsPerHost(HostDistance.REMOTE);
        //
        //         if (mcph_r > 0) {
        //             if (mcph_r > prior_mcph_r) opts.setMaxConnectionsPerHost(HostDistance.REMOTE, mcph_r);
        //             opts.setCoreConnectionsPerHost(HostDistance.REMOTE, ccph_r);
        //             if (prior_mcph_r > mcph_r) opts.setMaxConnectionsPerHost(HostDistance.REMOTE, mcph_r);
        //             if (opts.getMaxConnectionsPerHost(HostDistance.REMOTE) > 0) {
        //                 cfg.setMaxRequestsPerConnection(HostDistance.REMOTE, opts.getMaxRequestsPerConnection(HostDistance.REMOTE));
        //             }
        //         }
        //         this.pooling = dynpooling.get();
        //     }
        // }

    }

    // TODO: make error handler updates consistent under concurrent updates

    private HashedCQLErrorHandler configureErrorHandler() {

        HashedCQLErrorHandler newerrorHandler = new HashedCQLErrorHandler(exceptionCountMetrics);

        String errors = activityDef.getParams()
            .getOptionalString("errors")
            .orElse("stop,retryable->retry,unverified->stop");

        String[] handlerSpecs = errors.split(",");
        for (String spec : handlerSpecs) {
            String[] keyval = spec.split("=|->|:", 2);
            if (keyval.length == 1) {
                String verb = keyval[0];
                newerrorHandler.setDefaultHandler(
                    new NBCycleErrorHandler(
                        ErrorResponse.valueOf(verb),
                        exceptionCountMetrics,
                        exceptionHistoMetrics,
                        getParams().getOptionalLong("async").isEmpty()
                    )
                );
            } else {
                String pattern = keyval[0];
                String verb = keyval[1];
                if (newerrorHandler.getGroupNames().contains(pattern)) {
                    NBCycleErrorHandler handler =
                        new NBCycleErrorHandler(
                            ErrorResponse.valueOf(verb),
                            exceptionCountMetrics,
                            exceptionHistoMetrics,
                            getParams().getOptionalLong("async").isEmpty()
                        );
                    logger.info("Handling error group '" + pattern + "' with handler:" + handler);
                    newerrorHandler.setHandlerForGroup(pattern, handler);
                } else {
                    NBCycleErrorHandler handler = new NBCycleErrorHandler(
                        ErrorResponse.valueOf(verb),
                        exceptionCountMetrics,
                        exceptionHistoMetrics,
                        getParams().getOptionalLong("async").isEmpty()
                    );
                    logger.info("Handling error pattern '" + pattern + "' with handler:" + handler);
                    newerrorHandler.setHandlerForPattern(pattern, handler);
                }
            }
        }

        return newerrorHandler;
    }
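
    // For illustration only: the errors spec parsed above is a comma-separated list
    // of handler assignments, e.g. the default "stop,retryable->retry,unverified->stop".
    // A bare verb ("stop") sets the default handler, while "name->verb" (equivalently
    // "name=verb" or "name:verb") assigns a verb to an error group or exception-name
    // pattern.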

    public int getMaxTries() {
        return maxTries;
    }

    public HashedCQLErrorHandler getCqlErrorHandler() {
        return this.errorHandler;
    }

    public StatementFilter getStatementFilter() {
        return statementFilter;
    }

    public void setStatementFilter(StatementFilter statementFilter) {
        this.statementFilter = statementFilter;
    }

    public Boolean isShowCql() {
        return showcql;
    }

    public OpSequence<ReadyCQLStatement> getOpSequencer() {
        return opsequence;
    }

    public List<RowCycleOperator> getRowCycleOperators() {
        return rowCycleOperators;
    }

    protected synchronized void addRowCycleOperator(RowCycleOperator rsOperator) {
        if (rowCycleOperators == null) {
            rowCycleOperators = new ArrayList<>();
        }
        rowCycleOperators.add(rsOperator);
    }

    private void clearRowCycleOperators() {
        this.rowCycleOperators = null;
    }

    public List<D4ResultSetCycleOperator> getPageInfoCycleOperators() {
        return pageInfoCycleOperators;
    }

    protected synchronized void addResultSetCycleOperator(D4ResultSetCycleOperator pageInfoCycleOperator) {
        if (this.pageInfoCycleOperators == null) {
            this.pageInfoCycleOperators = new ArrayList<>();
        }
        this.pageInfoCycleOperators.add(pageInfoCycleOperator);
    }

    private void clearResultSetCycleOperators() {
        this.pageInfoCycleOperators = null;
    }

    public List<StatementModifier> getStatementModifiers() {
        return this.statementModifiers;
    }

    protected synchronized void addStatementModifier(StatementModifier modifier) {
        if (this.statementModifiers == null) {
            this.statementModifiers = new ArrayList<>();
        }
        this.statementModifiers.add(modifier);
    }

    private void clearStatementModifiers() {
        statementModifiers = null;
    }

    public long getMaxOpsInFlight(int slot) {
        int threads = this.getActivityDef().getThreads();
        return maxTotalOpsInFlight / threads + (slot < (maxTotalOpsInFlight % threads) ? 1 : 0);
    }
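
    // For illustration only: with async=10 and threads=4, slots 0..3 are allotted
    // 3, 3, 2, 2 ops in flight respectively: 10/4 = 2 each, plus one extra for the
    // first (10 % 4) = 2 slots, so the total still sums to 10.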

    public long getRetryDelay() {
        return retryDelay;
    }

    public void setRetryDelay(long retryDelay) {
        this.retryDelay = retryDelay;
    }

    public long getMaxRetryDelay() {
        return maxRetryDelay;
    }

    public void setMaxRetryDelay(long maxRetryDelay) {
        this.maxRetryDelay = maxRetryDelay;
    }

    public boolean isRetryReplace() {
        return retryReplace;
    }

    public void setRetryReplace(boolean retryReplace) {
        this.retryReplace = retryReplace;
    }

    public synchronized Writer getNamedWriter(String name) {
        Writer writer = namedWriters.computeIfAbsent(name, s -> {
            try {
                return new FileWriter(name, StandardCharsets.UTF_8);
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        });
        this.registerAutoCloseable(writer);
        return writer;
    }

}
@ -1,82 +0,0 @@

package io.nosqlbench.activitytype.cqld4.core;

import com.datastax.oss.driver.api.core.data.TupleValue;
import com.datastax.oss.driver.api.core.type.UserDefinedType;
import io.nosqlbench.engine.api.activityapi.core.ActionDispenser;
import io.nosqlbench.engine.api.activityapi.core.ActivityType;
import io.nosqlbench.engine.api.activityimpl.ActivityDef;
import io.nosqlbench.nb.annotations.Service;

import java.math.BigDecimal;
import java.math.BigInteger;
import java.net.InetAddress;
import java.nio.ByteBuffer;
import java.time.Instant;
import java.time.LocalDate;
import java.time.LocalTime;
import java.util.*;

@Service(ActivityType.class)
public class CqlActivityType implements ActivityType<CqlActivity> {

    public String getName() {
        return "cqld4";
    }

    @Override
    public CqlActivity getActivity(ActivityDef activityDef) {

        Optional<String> yaml = activityDef.getParams().getOptionalString("yaml", "workload");

        // sanity check that we have a yaml parameter, which contains our statements and bindings
        if (yaml.isEmpty()) {
            throw new RuntimeException("Currently, the cql activity type requires a yaml or workload activity parameter.");
        }

        return new CqlActivity(activityDef);
    }

    /**
     * Returns the per-activity level dispenser. The ActionDispenser can then dispense
     * per-thread actions within the activity instance.
     * @param activity The activity instance which will parameterize this action
     */
    @Override
    public ActionDispenser getActionDispenser(CqlActivity activity) {
        return new CqlActionDispenser(activity);
    }

    @Override
    public Map<String, Class<?>> getTypeMap() {
        Map<String, Class<?>> typemap = new LinkedHashMap<>();
        typemap.put("ascii", String.class);
        typemap.put("bigint", long.class);
        typemap.put("blob", ByteBuffer.class);
        typemap.put("boolean", boolean.class);
        typemap.put("counter", long.class);
        typemap.put("date", LocalDate.class);
        typemap.put("decimal", BigDecimal.class);
        typemap.put("double", double.class);
        // typemap.put("duration", CqlDuration.class);
        typemap.put("float", float.class);
        typemap.put("inet", InetAddress.class);
        typemap.put("int", int.class);
        typemap.put("list", List.class);
        typemap.put("map", Map.class);
        typemap.put("set", Set.class);
        typemap.put("smallint", short.class);
        typemap.put("text", String.class);
        typemap.put("time", LocalTime.class);
        typemap.put("timestamp", Instant.class);
        typemap.put("tinyint", byte.class);
        typemap.put("tuple", TupleValue.class);
        typemap.put("<udt>", UserDefinedType.class);
        typemap.put("uuid", UUID.class);
        typemap.put("timeuuid", UUID.class);
        typemap.put("varchar", String.class);
        typemap.put("varint", BigInteger.class);

        return typemap;
    }
}
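
// For illustration only: since this type is registered via @Service(ActivityType.class)
// under the name "cqld4", a run would select it with an activity parameter such as
// driver=cqld4 (exact CLI syntax varies by engine version), together with the required
// yaml/workload parameter checked above, e.g. yaml=myworkload.yaml (hypothetical file).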
@ -1,266 +0,0 @@

package io.nosqlbench.activitytype.cqld4.core;

import com.codahale.metrics.Timer;
import com.datastax.oss.driver.api.core.config.TypedDriverOption;
import com.datastax.oss.driver.api.core.cql.AsyncResultSet;
import com.datastax.oss.driver.api.core.cql.Row;
import io.nosqlbench.activitytype.cqld4.api.ErrorResponse;
import io.nosqlbench.activitytype.cqld4.api.D4ResultSetCycleOperator;
import io.nosqlbench.activitytype.cqld4.api.RowCycleOperator;
import io.nosqlbench.activitytype.cqld4.api.StatementFilter;
import io.nosqlbench.activitytype.cqld4.errorhandling.ErrorStatus;
import io.nosqlbench.activitytype.cqld4.errorhandling.HashedCQLErrorHandler;
import io.nosqlbench.activitytype.cqld4.errorhandling.exceptions.CQLCycleWithStatementException;
import io.nosqlbench.activitytype.cqld4.errorhandling.exceptions.ChangeUnappliedCycleException;
import io.nosqlbench.activitytype.cqld4.errorhandling.exceptions.UnexpectedPagingException;
import io.nosqlbench.activitytype.cqld4.statements.core.CQLSessionCache;
import io.nosqlbench.activitytype.cqld4.statements.core.ReadyCQLStatement;
import io.nosqlbench.engine.api.activityapi.core.BaseAsyncAction;
import io.nosqlbench.engine.api.activityapi.core.ops.fluent.opfacets.FailedOp;
import io.nosqlbench.engine.api.activityapi.core.ops.fluent.opfacets.StartedOp;
import io.nosqlbench.engine.api.activityapi.core.ops.fluent.opfacets.SucceededOp;
import io.nosqlbench.engine.api.activityapi.core.ops.fluent.opfacets.TrackedOp;
import io.nosqlbench.engine.api.activityapi.planning.OpSequence;
import io.nosqlbench.engine.api.activityimpl.ActivityDef;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;

import java.util.List;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.TimeUnit;
import java.util.function.LongFunction;

@SuppressWarnings("Duplicates")
public class CqlAsyncAction extends BaseAsyncAction<CqlOpData, CqlActivity> {

    private final static Logger logger = LogManager.getLogger(CqlAsyncAction.class);
    private final ActivityDef activityDef;

    private List<RowCycleOperator> rowOps;
    private List<D4ResultSetCycleOperator> cycleOps;
    private List<StatementModifier> modifiers;
    private StatementFilter statementFilter;
    private OpSequence<ReadyCQLStatement> sequencer;

    // how many times a statement will be attempted before giving up
    private int maxTries = 10;

    private HashedCQLErrorHandler cqlActivityErrorHandler;

    // private int pagesFetched = 0;
    // private long totalRowsFetchedForQuery = 0L;
    // private ResultSet pagingResultSet;
    // private Statement pagingStatement;
    // private ReadyCQLStatement pagingReadyStatement;
    private boolean showcql;
    // private long opsInFlight = 0L;
    // private long maxOpsInFlight = 1L;
    // private long pendingResults = 0;
    // private LinkedBlockingQueue<CqlOpContext> resultQueue = new LinkedBlockingQueue<>();

    public CqlAsyncAction(CqlActivity activity, int slot) {
        super(activity, slot);
        onActivityDefUpdate(activity.getActivityDef());
        this.activityDef = activity.getActivityDef();
    }

    @Override
    public void init() {
        onActivityDefUpdate(activityDef);
        this.sequencer = activity.getOpSequencer();
    }

    @Override
    public LongFunction<CqlOpData> getOpInitFunction() {
        return (l) -> new CqlOpData(l, this);
    }

    @Override
    public void startOpCycle(TrackedOp<CqlOpData> opc) {
        CqlOpData cqlop = opc.getOpData();
        long cycle = opc.getCycle();

        // bind timer covers all statement selection and binding, skipping, transforming logic
        try (Timer.Context bindTime = activity.bindTimer.time()) {
            cqlop.readyCQLStatement = sequencer.get(cycle);
            cqlop.statement = cqlop.readyCQLStatement.bind(cycle);

            // If a filter is defined, skip and count any statements that do not match it
            if (statementFilter != null) {
                if (!statementFilter.matches(cqlop.statement)) {
                    activity.skippedTokensHisto.update(cycle);
                    //opc.start().stop(-2);
                    cqlop.skipped = true;
                    opc.skip(0);
                    return;
                }
            }

            // Transform the statement if there are any statement transformers defined for this CQL activity
            if (modifiers != null) {
                for (StatementModifier modifier : modifiers) {
                    cqlop.statement = modifier.modify(cqlop.statement, cycle);
                }
            }

            // Maybe show the CQL in log/console - only for diagnostic use
            if (showcql) {
                logger.info("CQL(cycle=" + cycle + "):\n" + cqlop.readyCQLStatement.getQueryString(cycle));
            }
        }

        StartedOp<CqlOpData> startedOp = opc.start();
        cqlop.startedOp = startedOp;

        // The execute timer covers only the point at which EB hands the op to the driver to be executed
        try (Timer.Context executeTime = activity.executeTimer.time()) {
            CompletionStage<AsyncResultSet> completionStage = activity.getSession().executeAsync(cqlop.statement);
            completionStage.whenComplete(cqlop::handleAsyncResult);
        }
    }
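
    // For illustration only, the async op lifecycle wired above: bind the statement
    // for the cycle, optionally skip or modify it, then hand it to the driver; the
    // driver's completion callback routes through CqlOpData.handleAsyncResult, which
    // calls back into onSuccess(...) or onFailure(...) below.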

    public void onSuccess(StartedOp<CqlOpData> sop, AsyncResultSet resultSet) {
        CqlOpData cqlop = sop.getOpData();

        HashedCQLErrorHandler.resetThreadStatusCode();
        if (cqlop.skipped) {
            return;
        }

        try {

            cqlop.totalPagesFetchedForQuery++;

            // Apply any defined ResultSetCycleOperators
            // TODO: Implement result and row operators for cqld4 actions
            // if (cycleOps != null) {
            //     for (ResultSetCycleOperator cycleOp : cycleOps) {
            //         cycleOp.apply(resultSet, cqlop.statement, cqlop.cycle);
            //     }
            // }

            int rowsInPage = 0;
            // if (rowOps==null) {
            for (Row row : resultSet.currentPage()) {
                rowsInPage++;
            }
            // } else {
            //     for (Row row : resultSet.currentPage()) {
            //         rowsInPage++;
            //         for (RowCycleOperator rowOp : rowOps) {
            //             rowOp.apply(row, cqlop.cycle);
            //         }
            //     }
            // }
            cqlop.totalRowsFetchedForQuery += rowsInPage;

            // the page count was already incremented above; a second increment here
            // would double-count pages against maxpages
            if (cqlop.totalPagesFetchedForQuery > activity.maxpages) {
                Integer pagesize = CQLSessionCache.get().getSession(activityDef).optionsMap.get(TypedDriverOption.REQUEST_PAGE_SIZE);
                throw new UnexpectedPagingException(
                    cqlop.cycle,
                    resultSet,
                    cqlop.readyCQLStatement.getQueryString(cqlop.cycle),
                    1,
                    activity.maxpages,
                    pagesize
                );
            }

            if (!resultSet.wasApplied()) {
                // To make exception handling logic flow more uniformly
                throw new ChangeUnappliedCycleException(
                    cqlop.cycle, resultSet, cqlop.readyCQLStatement.getQueryString(cqlop.cycle)
                );
            }

            // fetchNextPage() may only be called while more pages remain, so the
            // paging continuation is gated on hasMorePages()
            if (resultSet.hasMorePages()) {
                logger.trace("async paging request " + cqlop.totalPagesFetchedForQuery + " for cycle " + cqlop.cycle);
                resultSet.fetchNextPage().whenComplete(cqlop::handleAsyncResult);
                return;
            }

            SucceededOp<CqlOpData> success = sop.succeed(0);
            cqlop.readyCQLStatement.onSuccess(cqlop.cycle, success.getServiceTimeNanos(), cqlop.totalRowsFetchedForQuery);

            activity.triesHisto.update(cqlop.triesAttempted);
            activity.rowsCounter.mark(cqlop.totalRowsFetchedForQuery);
            activity.resultSuccessTimer.update(success.getServiceTimeNanos(), TimeUnit.NANOSECONDS);
            activity.resultSetSizeHisto.update(cqlop.totalRowsFetchedForQuery);
            activity.resultTimer.update(success.getServiceTimeNanos(), TimeUnit.NANOSECONDS);

        } catch (Exception e) {
            long currentServiceTime = sop.getCurrentServiceTimeNanos();

            cqlop.readyCQLStatement.onError(cqlop.cycle, currentServiceTime, e);

            CQLCycleWithStatementException cqlCycleException = new CQLCycleWithStatementException(cqlop.cycle, currentServiceTime, e, cqlop.readyCQLStatement);
            ErrorStatus errorStatus = cqlActivityErrorHandler.handleError(cqlop.cycle, cqlCycleException);

            if (errorStatus.isRetryable() && ++cqlop.triesAttempted < maxTries) {
                activity.getSession().executeAsync(cqlop.statement).whenComplete(cqlop::handleAsyncResult);
                return;
            } else {
                sop.fail(errorStatus.getResultCode());
                if (errorStatus.getResponse() == ErrorResponse.stop) {
                    cqlop.throwable = cqlCycleException;
                    activity.getActivityController().stopActivityWithErrorAsync(cqlCycleException);
                }
            }
        }

    }

    public void onFailure(StartedOp<CqlOpData> startedOp) {

        CqlOpData cqlop = startedOp.getOpData();
        long serviceTime = startedOp.getCurrentServiceTimeNanos();

        // Even if this is retryable, we expose error events
        cqlop.readyCQLStatement.onError(startedOp.getCycle(), serviceTime, cqlop.throwable);

        CQLCycleWithStatementException cqlCycleException1 = new CQLCycleWithStatementException(cqlop.cycle, serviceTime, cqlop.throwable, cqlop.readyCQLStatement);
        ErrorStatus errorStatus = cqlActivityErrorHandler.handleError(startedOp.getCycle(), cqlCycleException1);

        if (errorStatus.getResponse() == ErrorResponse.stop) {
            activity.getActivityController().stopActivityWithErrorAsync(cqlop.throwable);
            return;
        }

        if (errorStatus.isRetryable() && cqlop.triesAttempted < maxTries) {
            startedOp.retry();
            try (Timer.Context executeTime = activity.executeTimer.time()) {
                activity.getSession().executeAsync(cqlop.statement).whenComplete(cqlop::handleAsyncResult);
                return;
            }
        }

        FailedOp<CqlOpData> failed = startedOp.fail(errorStatus.getResultCode());
        activity.resultTimer.update(failed.getServiceTimeNanos(), TimeUnit.NANOSECONDS);
        activity.triesHisto.update(cqlop.triesAttempted);

    }

    @Override
    public void onActivityDefUpdate(ActivityDef activityDef) {
        this.maxTries = activity.getMaxTries();
        this.showcql = activity.isShowCql();
        this.cqlActivityErrorHandler = activity.getCqlErrorHandler();
        this.statementFilter = activity.getStatementFilter();
        this.rowOps = activity.getRowCycleOperators();
        this.cycleOps = activity.getPageInfoCycleOperators();
        this.modifiers = activity.getStatementModifiers();
    }

    public String toString() {
        return "CqlAsyncAction[" + this.slot + "]";
    }
}
@ -1,55 +0,0 @@

package io.nosqlbench.activitytype.cqld4.core;

import com.datastax.oss.driver.api.core.cql.AsyncResultSet;
import com.datastax.oss.driver.api.core.cql.Row;
import com.datastax.oss.driver.api.core.cql.Statement;
import io.nosqlbench.activitytype.cqld4.statements.core.ReadyCQLStatement;
import io.nosqlbench.engine.api.activityapi.core.ops.fluent.opfacets.StartedOp;

public class CqlOpData {

    final long cycle;
    // public CompletionStage<AsyncResultSet> completionStage;

    // op state is managed via callbacks, we keep a ref here
    StartedOp<CqlOpData> startedOp;

    boolean skipped = false;
    private final CqlAsyncAction action;
    int triesAttempted = 0;

    ReadyCQLStatement readyCQLStatement;
    Statement statement;

    long totalRowsFetchedForQuery;
    long totalPagesFetchedForQuery;

    public Throwable throwable;
    public long resultAt;
    private long errorAt;
    private Iterable<Row> page;

    public CqlOpData(long cycle, CqlAsyncAction action) {
        this.cycle = cycle;
        this.action = action;
    }

    public void handleAsyncResult(AsyncResultSet asyncResultSet, Throwable throwable) {
        if (throwable != null) {
            this.throwable = throwable;
            this.errorAt = System.nanoTime();
            action.onFailure(startedOp);
        } else {
            this.page = asyncResultSet.currentPage();
            this.resultAt = System.nanoTime();
            action.onSuccess(startedOp, asyncResultSet);
        }
    }

}
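
// For illustration only: handleAsyncResult above matches the
// BiConsumer<AsyncResultSet, Throwable> shape that CompletionStage.whenComplete
// expects, which is why the action code can simply write:
//   completionStage.whenComplete(cqlop::handleAsyncResult);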
@ -1,26 +0,0 @@

package io.nosqlbench.activitytype.cqld4.core;

import com.datastax.oss.driver.api.core.addresstranslation.AddressTranslator;

import java.net.InetSocketAddress;

public class ProxyTranslator implements AddressTranslator {

    private int hostsIndex = 0;

    private InetSocketAddress address;

    public ProxyTranslator(InetSocketAddress host) {
        this.address = host;
    }

    @Override
    public InetSocketAddress translate(InetSocketAddress address) {
        return address;
    }

    @Override
    public void close() {
    }
}
@ -1,11 +0,0 @@

package io.nosqlbench.activitytype.cqld4.core;

import com.datastax.oss.driver.api.core.cql.Statement;

/**
 * Provides a modular way for any CQL activity to modify statements before execution.
 * Each active modifier returns a statement in turn.
 */
public interface StatementModifier {
    Statement<?> modify(Statement<?> unmodified, long cycleNum);
}
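
// A minimal sketch of an implementation (hypothetical, not part of this module),
// which marks statements idempotent on even-numbered cycles using the driver's
// fluent Statement.setIdempotent(..) API:
//
// public class EvenCycleIdempotenceModifier implements StatementModifier {
//     @Override
//     public Statement<?> modify(Statement<?> unmodified, long cycleNum) {
//         // driver 4.x statements are immutable; setIdempotent returns a new copy
//         return unmodified.setIdempotent(cycleNum % 2 == 0);
//     }
// }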
@ -1,135 +0,0 @@

package io.nosqlbench.activitytype.cqld4.errorhandling;

import com.datastax.oss.driver.api.core.RequestThrottlingException;
import com.datastax.oss.driver.api.core.connection.ClosedConnectionException;
import com.datastax.oss.driver.api.core.servererrors.*;
import io.nosqlbench.activitytype.cqld4.errorhandling.exceptions.CqlGenericCycleException;
import io.nosqlbench.engine.api.activityapi.cyclelog.buffers.results.ResultReadable;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/**
 * This enumerates all known exception classes, including supertypes,
 * for the purposes of stable naming in error handling.
 * This is current as of driver 4.6.0.
 *
 * TODO: for cqld4, add all exceptions again, keeping the previous ones in their existing
 * places, but eliding the removed ones and leaving a placeholder there, adding the new
 * ones after
 */
public enum CQLExceptionEnum implements ResultReadable {

    FrameTooLongException(com.datastax.oss.driver.api.core.connection.FrameTooLongException.class, 1),
    CodecNotFoundException(com.datastax.oss.driver.api.core.type.codec.CodecNotFoundException.class, 2),
    DriverException(com.datastax.oss.driver.api.core.DriverException.class, 3),

    AuthenticationException(com.datastax.oss.driver.api.core.auth.AuthenticationException.class, 4),
    // TraceRetrievalException(TraceRetrievalException.class, 5),
    UnsupportedProtocolVersionException(com.datastax.oss.driver.api.core.UnsupportedProtocolVersionException.class, 6),
    // NoHostAvailableException(NoHostAvailableException.class, 7),
    QueryValidationException(com.datastax.oss.driver.api.core.servererrors.QueryValidationException.class, 8),
    InvalidQueryException(com.datastax.oss.driver.api.core.servererrors.InvalidQueryException.class, 9),
    InvalidConfigurationInQueryException(com.datastax.oss.driver.api.core.servererrors.InvalidConfigurationInQueryException.class, 10),
    UnauthorizedException(com.datastax.oss.driver.api.core.servererrors.UnauthorizedException.class, 11),
    SyntaxError(com.datastax.oss.driver.api.core.servererrors.SyntaxError.class, 12),
    AlreadyExistsException(AlreadyExistsException.class, 13),
    // UnpreparedException(UnpreparedException.class, 14),
    // InvalidTypeException(InvalidTypeException.class, 15),
    QueryExecutionException(QueryExecutionException.class, 16),
    UnavailableException(UnavailableException.class, 17),
    BootstrappingException(BootstrappingException.class, 18),
    OverloadedException(OverloadedException.class, 19),
    TruncateException(TruncateException.class, 20),
    QueryConsistencyException(QueryConsistencyException.class, 21),
    WriteTimeoutException(WriteTimeoutException.class, 22),
    WriteFailureException(WriteFailureException.class, 23),
    ReadFailureException(ReadFailureException.class, 24),
    ReadTimeoutException(ReadTimeoutException.class, 25),
    // FunctionExecutionException(FunctionExecutionException.class, 26),
    // DriverInternalError(DriverInternalError.class, 27),
    ProtocolError(ProtocolError.class, 28),
    ServerError(ServerError.class, 29),
    // BusyPoolException(BusyPoolException.class, 30),
    // ConnectionException(ConnectionException.class, 31),
    // TransportException(TransportException.class, 32),
    // OperationTimedOutException(OperationTimedOutException.class, 33),
    // PagingStateException(PagingStateException.class, 34),
    // UnresolvedUserTypeException(UnresolvedUserTypeException.class, 35),
    // UnsupportedFeatureException(UnsupportedFeatureException.class, 36),
    BusyConnectionException(com.datastax.oss.driver.api.core.connection.BusyConnectionException.class, 37),

    ChangeUnappliedCycleException(io.nosqlbench.activitytype.cqld4.errorhandling.exceptions.ChangeUnappliedCycleException.class, 38),
    ResultSetVerificationException(io.nosqlbench.activitytype.cqld4.errorhandling.exceptions.ResultSetVerificationException.class, 39),
    RowVerificationException(io.nosqlbench.activitytype.cqld4.errorhandling.exceptions.RowVerificationException.class, 40),
    UnexpectedPagingException(io.nosqlbench.activitytype.cqld4.errorhandling.exceptions.UnexpectedPagingException.class, 41),
    EbdseCycleException(CqlGenericCycleException.class, 42),
    MaxTriesExhaustedException(io.nosqlbench.activitytype.cqld4.errorhandling.exceptions.MaxTriesExhaustedException.class, 43),

    // Added for 4.6
    ClusterNameMismatchException(com.datastax.oss.driver.internal.core.channel.ClusterNameMismatchException.class, 44),
    ComputationException(com.datastax.oss.driver.shaded.guava.common.collect.ComputationException.class, 45),
    AllNodesFailedException(com.datastax.oss.driver.api.core.AllNodesFailedException.class, 46),
    NoNodeAvailableException(com.datastax.oss.driver.api.core.NoNodeAvailableException.class, 47),
    ClosedConnectionException(ClosedConnectionException.class, 48),
    ConnectionInitException(com.datastax.oss.driver.api.core.connection.ConnectionInitException.class, 49),
    CoordinatorException(CoordinatorException.class, 50),
    FunctionFailureException(FunctionFailureException.class, 51),
    UnfitClientException(com.datastax.dse.driver.api.core.servererrors.UnfitClientException.class, 52),
    DriverExecutionException(com.datastax.oss.driver.api.core.DriverExecutionException.class, 53),
    DriverTimeoutException(com.datastax.oss.driver.api.core.DriverTimeoutException.class, 54),
    HeartbeatException(com.datastax.oss.driver.api.core.connection.HeartbeatException.class, 55),
    InvalidKeyspaceException(com.datastax.oss.driver.api.core.InvalidKeyspaceException.class, 56),
    RequestThrottlingException(RequestThrottlingException.class, 57),
    CqlGenericCycleException(CqlGenericCycleException.class, 58);

    private final static Logger logger = LogManager.getLogger(CQLExceptionEnum.class);

    private static Map<String, Integer> codesByName = getCodesByName();
    private static final String[] namesByCode = getNamesByCode();

    private final Class<? extends Exception> exceptionClass;
    private final int resultCode;

    CQLExceptionEnum(Class<? extends Exception> clazz, int resultCode) {
        this.exceptionClass = clazz;
        this.resultCode = resultCode;
    }

    public Class<? extends Exception> getExceptionClass() {
        return exceptionClass;
    }

    public int getResultCode() {
        return resultCode;
    }

    public int getResult() {
        return this.resultCode;
    }

    private static Map<String, Integer> getCodesByName() {
        codesByName = new HashMap<>();
        for (CQLExceptionEnum cqlExceptionEnum : CQLExceptionEnum.values()) {
            codesByName.put(cqlExceptionEnum.toString(), cqlExceptionEnum.resultCode);
        }
        codesByName.put("NONE", 0);
        return codesByName;
    }

    private static String[] getNamesByCode() {
        List<String> namesByCode = new ArrayList<>();
        namesByCode.add("NONE");
        for (CQLExceptionEnum cqlExceptionEnum : CQLExceptionEnum.values()) {
            int code = cqlExceptionEnum.resultCode;
            for (int i = namesByCode.size(); i <= code; i++) {
                namesByCode.add("UNKNOWN");
            }
            namesByCode.set(code, cqlExceptionEnum.toString());
        }
        return namesByCode.toArray(new String[0]);
    }
}
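
// For illustration only: getNamesByCode() above builds a dense lookup array, padding
// unused result codes with "UNKNOWN", so namesByCode[22] reads "WriteTimeoutException"
// while a gap like code 5 (the elided TraceRetrievalException) reads "UNKNOWN".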
@ -1,31 +0,0 @@

package io.nosqlbench.activitytype.cqld4.errorhandling;

import io.nosqlbench.activitytype.cqld4.api.ErrorResponse;

public class ErrorStatus {
    private final boolean retryable;
    private int resultCode;
    private final ErrorResponse response;

    public ErrorStatus(ErrorResponse response, boolean retryable, int resultCode) {
        this.response = response;
        this.retryable = retryable;
        this.resultCode = resultCode;
    }

    public boolean isRetryable() {
        return retryable;
    }

    public int getResultCode() {
        return resultCode;
    }

    public void setResultCode(int resultCode) {
        this.resultCode = resultCode;
    }

    public ErrorResponse getResponse() {
        return response;
    }
}
@ -1,99 +0,0 @@

package io.nosqlbench.activitytype.cqld4.errorhandling;

import com.datastax.dse.driver.api.core.servererrors.UnfitClientException;
import com.datastax.oss.driver.api.core.*;
import com.datastax.oss.driver.api.core.auth.AuthenticationException;
import com.datastax.oss.driver.api.core.connection.*;
import com.datastax.oss.driver.api.core.servererrors.*;
import com.datastax.oss.driver.api.core.type.codec.CodecNotFoundException;
import com.datastax.oss.driver.internal.core.channel.ClusterNameMismatchException;
import com.datastax.oss.driver.shaded.guava.common.collect.ComputationException;
import io.nosqlbench.activitytype.cqld4.errorhandling.exceptions.*;
import org.apache.tinkerpop.gremlin.driver.exception.ConnectionException;

import java.util.LinkedHashMap;
import java.util.Map;

/**
 * This enumerates all known exception classes, including supertypes,
 * for the purposes of stable naming in error handling.
 * This is current as of CQL OSS driver 4.6.0.
 */
public class ExceptionMap {

    private final static Map<Class<? extends Exception>, Class<? extends Exception>> map
        = new LinkedHashMap<Class<? extends Exception>, Class<? extends Exception>>() {
        {
            put(AuthenticationException.class, RuntimeException.class);
            put(ClusterNameMismatchException.class, RuntimeException.class);
            put(CodecNotFoundException.class, RuntimeException.class);
            put(ComputationException.class, RuntimeException.class);

            // DriverException subtypes
            put(AllNodesFailedException.class, DriverException.class);
            put(NoNodeAvailableException.class, AllNodesFailedException.class);
            put(BusyConnectionException.class, DriverException.class);
            put(ClosedConnectionException.class, DriverException.class);
            put(ConnectionInitException.class, DriverException.class);
            put(CoordinatorException.class, DriverException.class);
            put(ProtocolError.class, CoordinatorException.class);
            put(QueryExecutionException.class, CoordinatorException.class);
            put(BootstrappingException.class, QueryExecutionException.class);
            put(FunctionFailureException.class, QueryExecutionException.class);
            put(OverloadedException.class, QueryExecutionException.class);
            put(QueryConsistencyException.class, QueryExecutionException.class);
            put(ReadFailureException.class, QueryConsistencyException.class);
            put(ReadTimeoutException.class, QueryConsistencyException.class);
            put(WriteFailureException.class, QueryConsistencyException.class);
            put(WriteTimeoutException.class, QueryConsistencyException.class);
            put(TruncateException.class, QueryExecutionException.class);
            put(UnavailableException.class, QueryExecutionException.class);
            put(QueryValidationException.class, CoordinatorException.class);
            put(AlreadyExistsException.class, QueryValidationException.class);
            put(InvalidQueryException.class, QueryValidationException.class);
            put(InvalidConfigurationInQueryException.class, QueryValidationException.class);
            put(SyntaxError.class, QueryValidationException.class);
            put(UnauthorizedException.class, QueryValidationException.class);
            put(ServerError.class, CoordinatorException.class);
            put(UnfitClientException.class, CoordinatorException.class);
            put(DriverExecutionException.class, DriverException.class);
            put(DriverTimeoutException.class, DriverException.class);
            put(FrameTooLongException.class, DriverException.class);
            put(HeartbeatException.class, DriverException.class);
            put(InvalidKeyspaceException.class, DriverException.class);
            put(RequestThrottlingException.class, DriverException.class);
            put(UnsupportedProtocolVersionException.class, DriverException.class);

            // package org.apache.tinkerpop.gremlin.driver.exception;
            put(ConnectionException.class, DriverException.class);

            put(ChangeUnappliedCycleException.class, CqlGenericCycleException.class);
            put(ResultSetVerificationException.class, CqlGenericCycleException.class);
            put(RowVerificationException.class, CqlGenericCycleException.class);
            put(UnexpectedPagingException.class, CqlGenericCycleException.class);
            put(CqlGenericCycleException.class, RuntimeException.class);
        }
    };

    public Class<? extends Exception> put(
        Class<? extends Exception> exceptionClass,
        Class<? extends Exception> parentClass) {
        if (exceptionClass.getSuperclass() != parentClass) {
            throw new RuntimeException("Sanity check failed: " + parentClass +
                " is not the superclass of " + exceptionClass);
        }
        return map.put(exceptionClass, parentClass);
    }

    public static Map<Class<? extends Exception>, Class<? extends Exception>> getMap() {
        return map;
    }

}
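
// For illustration only: the map above records each exception's supertype for stable
// naming, so a lookup like
//   ExceptionMap.getMap().get(ReadTimeoutException.class)
// would yield QueryConsistencyException.class, and repeating the lookup on each
// result climbs the hierarchy toward DriverException and RuntimeException.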
@ -1,93 +0,0 @@

package io.nosqlbench.activitytype.cqld4.errorhandling;

import com.datastax.oss.driver.api.core.DriverTimeoutException;
import com.datastax.oss.driver.api.core.NoNodeAvailableException;
import com.datastax.oss.driver.api.core.RequestThrottlingException;
import com.datastax.oss.driver.api.core.connection.BusyConnectionException;
import com.datastax.oss.driver.api.core.connection.ClosedConnectionException;
import com.datastax.oss.driver.api.core.servererrors.OverloadedException;
import com.datastax.oss.driver.api.core.servererrors.ReadTimeoutException;
import com.datastax.oss.driver.api.core.servererrors.UnavailableException;
import com.datastax.oss.driver.api.core.servererrors.WriteTimeoutException;
import io.nosqlbench.activitytype.cqld4.errorhandling.exceptions.CQLCycleWithStatementException;
import io.nosqlbench.activitytype.cqld4.errorhandling.exceptions.ChangeUnappliedCycleException;
import io.nosqlbench.activitytype.cqld4.errorhandling.exceptions.ResultSetVerificationException;
import io.nosqlbench.activitytype.cqld4.errorhandling.exceptions.RowVerificationException;
import io.nosqlbench.engine.api.activityapi.errorhandling.CycleErrorHandler;
import io.nosqlbench.engine.api.activityapi.errorhandling.HashedErrorHandler;
import io.nosqlbench.engine.api.metrics.ExceptionCountMetrics;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;

public class HashedCQLErrorHandler extends HashedErrorHandler<Throwable, ErrorStatus> {
    private static final Logger logger = LogManager.getLogger(HashedCQLErrorHandler.class);

    // private static Set<Class<? extends Throwable>> UNVERIFIED_ERRORS = new HashSet<Class<? extends Throwable>>() {{
    //     add(RowVerificationException.class);
    //     add(ResultSetVerificationException.class);
    // }};
    private final ExceptionCountMetrics exceptionCountMetrics;
    private static final ThreadLocal<Integer> tlResultCode = ThreadLocal.withInitial(() -> (0));

    public HashedCQLErrorHandler(ExceptionCountMetrics exceptionCountMetrics) {
        this.exceptionCountMetrics = exceptionCountMetrics;
        this.setGroup("retryable",
            NoNodeAvailableException.class,
            UnavailableException.class,
            BusyConnectionException.class,
            ClosedConnectionException.class,
            OverloadedException.class,
            WriteTimeoutException.class,
            ReadTimeoutException.class,
            DriverTimeoutException.class,
            RequestThrottlingException.class
        );
        this.setGroup(
            "unapplied",
            ChangeUnappliedCycleException.class
        );
        this.setGroup("unverified",
            RowVerificationException.class,
            ResultSetVerificationException.class
        );
        // realerrors is everything else but the above
    }
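
    // For illustration only: these group names are what an errors spec can target,
    // e.g. the default spec seen in CqlActivity, "stop,retryable->retry,unverified->stop",
    // routes the "retryable" group to a retry handler and the "unverified" group to stop.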

    private static class UncaughtErrorHandler implements CycleErrorHandler<Throwable, ErrorStatus> {
        @Override
        public ErrorStatus handleError(long cycle, Throwable error, String errMsg) {
            throw new RuntimeException(
                "An exception was thrown in cycle " + cycle + " that has no assigned error handler: " + errMsg + ", error:" + error
            );
        }
    }

    @Override
    public ErrorStatus handleError(long cycle, Throwable throwable, String errMsg) {
        int resultCode = 127;
        if (throwable instanceof CQLCycleWithStatementException) {
            CQLCycleWithStatementException cce = (CQLCycleWithStatementException) throwable;
            Throwable cause = cce.getCause();
            try {
                String simpleName = cause.getClass().getSimpleName();
                CQLExceptionEnum cqlExceptionEnum = CQLExceptionEnum.valueOf(simpleName);
                resultCode = cqlExceptionEnum.getResult();
            } catch (Throwable t) {
                logger.warn("unrecognized exception while mapping status code via enum: " + throwable.getClass());
            }
        } else {
            logger.warn("un-marshaled exception while mapping status code: " + throwable.getClass());
        }
        ErrorStatus errorStatus = super.handleError(cycle, throwable, errMsg);
        errorStatus.setResultCode(resultCode);
        return errorStatus;
    }

    public static int getThreadStatusCode() {
        return tlResultCode.get();
    }

    public static void resetThreadStatusCode() {
        tlResultCode.set(0);
    }
}
@ -1,98 +0,0 @@
package io.nosqlbench.activitytype.cqld4.errorhandling;

import io.nosqlbench.activitytype.cqld4.api.ErrorResponse;
import io.nosqlbench.activitytype.cqld4.errorhandling.exceptions.CQLCycleWithStatementException;
import io.nosqlbench.engine.api.activityapi.errorhandling.CycleErrorHandler;
import io.nosqlbench.engine.api.metrics.ExceptionCountMetrics;
import io.nosqlbench.engine.api.metrics.ExceptionHistoMetrics;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;

/**
 * A contextualized error handler that can catch a cycle-specific error.
 * In this class, the error handlers return an ErrorStatus whose retry flag
 * indicates to the caller whether to retry the operation. This handler implements
 * the error handling stack approach, which allows the user to select an
 * entry point in the stack, with all lesser impacting handler rules
 * applied from most impacting to least impacting order.
 *
 * For simplicity, the handler stack is fixed as described below. It is not
 * possible to rearrange the verbs. Some care has been given to making sure
 * that the selected handlers are complete and intuitive.
 *
 * The standard handler stack looks like this:
 *
 * <ol>
 * <li>stop - log and throw an exception, which should escape to the
 * next level of exception handling, the level which causes nosqlbench
 * to stop running. In this case, and only in this case, the remaining
 * handlers in the stack are not reached.</li>
 * <li>warn - log an exception without stopping execution.</li>
 * <li>retry - retry an operation up to a limit, IFF it is retryable</li>
 * <li>histogram - record the operation's duration for this error type in a histogram</li>
 * <li>count - count, in metrics, the number of this particular error type</li>
 * <li>ignore - do nothing</li>
 * </ol>
 *
 * As indicated above, if you specify "warn" for a particular error type, this means
 * that retry, histogram, and count will also apply, as well as ignore, in that order.
 * "ignore" is simply a no-op that allows you to specify it as the minimum case.
 */
@SuppressWarnings("Duplicates")
public class NBCycleErrorHandler implements CycleErrorHandler<Throwable, ErrorStatus> {

    private static final Logger logger = LogManager.getLogger(NBCycleErrorHandler.class);

    private final ErrorResponse errorResponse;
    private final ExceptionCountMetrics exceptionCountMetrics;
    private final ExceptionHistoMetrics exceptionHistoMetrics;
    private boolean throwExceptionOnStop = false;

    public NBCycleErrorHandler(
        ErrorResponse errorResponse,
        ExceptionCountMetrics exceptionCountMetrics,
        ExceptionHistoMetrics exceptionHistoMetrics,
        boolean throwExceptionOnStop) {
        this.errorResponse = errorResponse;
        this.exceptionCountMetrics = exceptionCountMetrics;
        this.exceptionHistoMetrics = exceptionHistoMetrics;
        this.throwExceptionOnStop = throwExceptionOnStop;
    }

    @Override
    public ErrorStatus handleError(long cycle, Throwable contextError) {
        CQLCycleWithStatementException cce = (CQLCycleWithStatementException) contextError;
        Throwable error = cce.getCause();

        boolean retry = false;
        switch (errorResponse) {
            case stop:
                logger.error("error with cycle " + cycle + ": statement: " + cce.getStatement() + " errmsg: ", error);
                if (throwExceptionOnStop) {
                    throw new RuntimeException(error);
                }
                // intentional fall-through: each handler verb implies all of the
                // lesser-impacting verbs below it
            case warn:
                logger.warn("error with cycle " + cycle + ": statement: " + cce.getStatement() + " errmsg: " + error.getMessage());
            case retry:
                retry = true;
            case histogram:
                exceptionHistoMetrics.update(error, cce.getDurationNanos());
            case count:
                exceptionCountMetrics.count(error);
            case ignore:
            default:
                break;
        }
        return new ErrorStatus(errorResponse, retry, -1);
    }

    @Override
    public ErrorStatus handleError(long cycle, Throwable contextError, String errMsg) {
        return handleError(cycle, contextError);
    }

    public String toString() {
        return this.errorResponse.toString();
    }
}
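The switch above relies on deliberate fall-through to implement the stack. A sketch of what a caller sees, with the metrics objects assumed to come from the activity:

    NBCycleErrorHandler handler =
        new NBCycleErrorHandler(ErrorResponse.warn, counts, histos, false);
    // "warn" logs the error, then falls through to retry, histogram, and count,
    // so the returned status carries retry=true and both metrics are updated
    ErrorStatus status = handler.handleError(cycle, cqlCycleException);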
@ -1,38 +0,0 @@
package io.nosqlbench.activitytype.cqld4.errorhandling.exceptions;

import io.nosqlbench.activitytype.cqld4.statements.core.ReadyCQLStatement;

/**
 * An internal exception type that is used to save exception
 * context from within a CQL activity cycle.
 */
public class CQLCycleWithStatementException extends Exception {

    private final long cycleValue;
    private final long durationNanos;
    private final ReadyCQLStatement readyCQLStatement;

    public CQLCycleWithStatementException(long cycleValue, long durationNanos, Throwable e, ReadyCQLStatement readyCQLStatement) {
        super(e);
        this.cycleValue = cycleValue;
        this.durationNanos = durationNanos;
        this.readyCQLStatement = readyCQLStatement;
    }

    public long getCycleValue() {
        return cycleValue;
    }

    public long getDurationNanos() {
        return durationNanos;
    }

    public ReadyCQLStatement getReadyCQLStatement() {
        return readyCQLStatement;
    }

    public String getStatement() {
        return readyCQLStatement.getQueryString(cycleValue);
    }
}
@ -1,55 +0,0 @@
package io.nosqlbench.activitytype.cqld4.errorhandling.exceptions;

import com.datastax.oss.driver.api.core.cql.*;

public abstract class CQLResultSetException extends CqlGenericCycleException {

    private final Statement<?> statement;
    private final AsyncResultSet resultSet;

    public CQLResultSetException(long cycle, AsyncResultSet resultSet, Statement<?> statement, String message,
                                 Throwable cause) {
        super(cycle, message, cause);
        this.resultSet = resultSet;
        this.statement = statement;
    }

    public CQLResultSetException(long cycle, AsyncResultSet resultSet, Statement<?> statement) {
        super(cycle);
        this.resultSet = resultSet;
        this.statement = statement;
    }

    public CQLResultSetException(long cycle, AsyncResultSet resultSet, Statement<?> statement, String message) {
        super(cycle, message);
        this.resultSet = resultSet;
        this.statement = statement;
    }

    public CQLResultSetException(long cycle, AsyncResultSet resultSet, Statement<?> statement, Throwable cause) {
        super(cycle, cause);
        this.resultSet = resultSet;
        this.statement = statement;
    }

    public Statement<?> getStatement() {
        return statement;
    }

    public AsyncResultSet getResultSet() {
        return resultSet;
    }

    protected static String getQueryString(Statement<?> stmt) {
        if (stmt instanceof BoundStatement) {
            return ((BoundStatement) stmt).getPreparedStatement().getQuery();
        } else if (stmt instanceof SimpleStatement) {
            return ((SimpleStatement) stmt).getQuery();
        } else {
            return "UNKNOWN Statement type:" + stmt.getClass().getSimpleName();
        }
    }
}
@ -1,35 +0,0 @@
package io.nosqlbench.activitytype.cqld4.errorhandling.exceptions;

import com.datastax.oss.driver.api.core.cql.AsyncResultSet;
import com.datastax.oss.driver.api.core.cql.ResultSet;

/**
 * This was added to nosqlbench because the error handling logic was
 * starting to look a bit contrived. Because we need to be able
 * to respond to different result outcomes, it
 * is just simpler to have a single type of error-handling logic for all outcomes.
 */
public class ChangeUnappliedCycleException extends CqlGenericCycleException {

    private final ResultSet resultSet;
    private final String queryString;
    private final AsyncResultSet asyncResultSet;

    public ChangeUnappliedCycleException(long cycle, AsyncResultSet asyncResultSet, String queryString) {
        super(cycle, "Operation was not applied:" + queryString);
        this.asyncResultSet = asyncResultSet;
        this.queryString = queryString;
        this.resultSet = null;
    }

    public ChangeUnappliedCycleException(long cycle, ResultSet resultSet, String queryString) {
        super(cycle, "Operation was not applied:" + queryString);
        this.resultSet = resultSet;
        this.queryString = queryString;
        this.asyncResultSet = null;
    }

    public ResultSet getResultSet() {
        return resultSet;
    }

    public String getQueryString() { return queryString; }
}
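A sketch of the call-site pattern this exception supports, using the driver's wasApplied() flag, which is only meaningful for conditional (LWT) statements:

    void checkApplied(long cycle, AsyncResultSet rs, String queryString) {
        if (!rs.wasApplied()) {
            throw new ChangeUnappliedCycleException(cycle, rs, queryString);
        }
    }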
@ -1,38 +0,0 @@
package io.nosqlbench.activitytype.cqld4.errorhandling.exceptions;

public abstract class CqlGenericCycleException extends RuntimeException {

    private final long cycle;

    public CqlGenericCycleException(long cycle, Throwable cause) {
        super(cause);
        this.cycle = cycle;
    }

    public CqlGenericCycleException(long cycle, String message) {
        super(message);
        this.cycle = cycle;
    }

    public CqlGenericCycleException(long cycle, String message, Throwable cause) {
        super(message, cause);
        this.cycle = cycle;
    }

    public CqlGenericCycleException(long cycle) {
        super();
        this.cycle = cycle;
    }

    @Override
    public String getMessage() {
        return "cycle:" + cycle + " caused by:" + super.getMessage();
    }

    public long getCycle() {
        return cycle;
    }
}
@ -1,20 +0,0 @@
package io.nosqlbench.activitytype.cqld4.errorhandling.exceptions;

public class MaxTriesExhaustedException extends CqlGenericCycleException {

    private final int maxtries;

    public MaxTriesExhaustedException(long cycle, int maxtries) {
        super(cycle);
        this.maxtries = maxtries;
    }

    public int getMaxTries() {
        return maxtries;
    }

    @Override
    public String getMessage() {
        return "Exhausted max tries (" + getMaxTries() + ") on cycle " + getCycle() + ".";
    }
}
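A hypothetical retry loop showing where this exception fires once the retry budget is spent; execute(...) is an assumed helper:

    for (int tried = 0; tried < maxtries; tried++) {
        try {
            return execute(statement); // hypothetical operation
        } catch (DriverTimeoutException retryable) {
            // a member of the "retryable" group; loop for another attempt
        }
    }
    throw new MaxTriesExhaustedException(cycle, maxtries);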
@ -1,18 +0,0 @@
package io.nosqlbench.activitytype.cqld4.errorhandling.exceptions;

import com.datastax.oss.driver.api.core.cql.AsyncResultSet;
import com.datastax.oss.driver.api.core.cql.Statement;

public class ResultSetVerificationException extends CQLResultSetException {

    public ResultSetVerificationException(
        long cycle, AsyncResultSet resultSet, Statement<?> statement, Throwable cause) {
        super(cycle, resultSet, statement, cause);
    }

    public ResultSetVerificationException(
        long cycle, AsyncResultSet resultSet, Statement<?> statement, String s) {
        super(cycle, resultSet, statement, s + ", \nquery string:\n" + getQueryString(statement));
    }
}
@ -1,33 +0,0 @@
package io.nosqlbench.activitytype.cqld4.errorhandling.exceptions;

import com.datastax.oss.driver.api.core.cql.Row;

import java.util.Map;

/**
 * This exception is thrown when read verification fails.
 */
public class RowVerificationException extends CqlGenericCycleException {

    private final Map<String, Object> expected;
    private final Row row;

    public RowVerificationException(long cycle, Row row, Map<String, Object> expected, String detail) {
        super(cycle, detail);
        this.expected = expected;
        this.row = row;
    }

    @Override
    public String getMessage() {
        return "cycle:" + getCycle() + ": " + super.getMessage();
    }

    public Map<String, Object> getExpectedValues() {
        return expected;
    }

    public Row getRow() {
        return row;
    }
}
@ -1,58 +0,0 @@
package io.nosqlbench.activitytype.cqld4.errorhandling.exceptions;

import com.datastax.oss.driver.api.core.cql.AsyncResultSet;

/**
 * <p>This is not a core exception. It was added to the CQL activity type
 * driver for nosqlbench specifically to catch the following unexpected
 * condition:
 * Paging would be needed to read all the results from a read query, but the user
 * is not expecting to intentionally check and iterate the result sets for paging.
 * <p>
 * This should only be thrown if a result set would need paging, but configuration
 * options specify that it should not. Rather than assume paging is completely
 * expected or unexpected, we simply assume that only 1 page is allowed, it being the
 * first page, or what is thought of as "not paging".
 * <p>If this error is thrown, and paging is expected, then the user can adjust
 * fetchsize or maxpages in order to open up paging to the degree that is allowable or
 * expected.
 */
public class UnexpectedPagingException extends CqlGenericCycleException {

    private final AsyncResultSet resultSet;
    private final String queryString;
    private final int fetchSize;
    private int fetchedPages;
    private int maxpages;

    public UnexpectedPagingException(
        long cycle,
        AsyncResultSet resultSet,
        String queryString,
        int fetchedPages,
        int maxpages,
        int fetchSize) {
        super(cycle);
        this.resultSet = resultSet;
        this.queryString = queryString;
        this.fetchedPages = fetchedPages;
        this.maxpages = maxpages;
        this.fetchSize = fetchSize;
    }

    public AsyncResultSet getAsyncResultSet() {
        return resultSet;
    }

    public String getMessage() {
        StringBuilder sb = new StringBuilder();
        sb.append("Additional paging would be required to read the results from this query fully" +
                ", but the user has not explicitly indicated that paging was expected.")
            .append(" fetched/allowed: ").append(fetchedPages).append("/").append(maxpages)
            .append(" fetchSize(").append(fetchSize).append("): ").append(queryString).append(", note this value " +
                "is shown for reference from the default driver profile. If you are using a custom profile, it may be " +
                "different.");
        return sb.toString();
    }
}
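A sketch of the guard this exception supports, assuming a result loop that counts calls to fetchNextPage():

    if (resultSet.hasMorePages() && fetchedPages >= maxpages) {
        throw new UnexpectedPagingException(
            cycle, resultSet, queryString, fetchedPages, maxpages, fetchSize);
    }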
@ -1,65 +0,0 @@
package io.nosqlbench.activitytype.cqld4.filtering;

import io.nosqlbench.activitytype.cqld4.errorhandling.CQLExceptionEnum;
import io.nosqlbench.engine.api.activityapi.cyclelog.buffers.results.ResultReadable;
import io.nosqlbench.engine.api.activityapi.cyclelog.filters.ResultFilterDispenser;
import io.nosqlbench.engine.api.activityapi.cyclelog.filters.ResultValueFilterType;
import io.nosqlbench.engine.api.activityapi.cyclelog.filters.tristate.EnumReadableMappingFilter;
import io.nosqlbench.engine.api.activityapi.cyclelog.filters.tristate.TristateFilter;
import io.nosqlbench.engine.api.util.ConfigTuples;
import io.nosqlbench.nb.annotations.Service;

import java.util.function.Predicate;

@Service(ResultValueFilterType.class)
public class CQLResultFilterType implements ResultValueFilterType {

    @Override
    public String getName() {
        return "cql";
    }

    @Override
    public ResultFilterDispenser getDispenser(String config) {
        return new Dispenser(config);
    }

    private class Dispenser implements ResultFilterDispenser {
        private final ConfigTuples conf;
        private final EnumReadableMappingFilter<CQLExceptionEnum> enumFilter;
        private final Predicate<ResultReadable> filter;

        public Dispenser(String config) {
            this.conf = new ConfigTuples(config);
            ConfigTuples inout = conf.getAllMatching("in.*", "ex.*");

            // The default policy is the opposite of the leading rule: a leading
            // include implies "discard everything else", and a leading exclude
            // implies "keep everything else".
            TristateFilter.Policy defaultPolicy = TristateFilter.Policy.Discard;
            if (conf.get(0).get(0).startsWith("ex")) {
                defaultPolicy = TristateFilter.Policy.Keep;
            }

            this.enumFilter =
                new EnumReadableMappingFilter<>(CQLExceptionEnum.values(), TristateFilter.Policy.Ignore);

            for (ConfigTuples.Section section : inout) {
                if (section.get(0).startsWith("in")) {
                    this.enumFilter.addPolicy(section.get(1), TristateFilter.Policy.Keep);
                } else if (section.get(0).startsWith("ex")) {
                    this.enumFilter.addPolicy(section.get(1), TristateFilter.Policy.Discard);
                } else {
                    throw new RuntimeException("Section must start with in(clude) or ex(clude), but instead it is " + section);
                }
            }

            this.filter = this.enumFilter.toDefaultingPredicate(defaultPolicy);
        }

        @Override
        public Predicate<ResultReadable> getResultFilter() {
            return filter;
        }
    }
}
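A minimal dispensing sketch; the exact tuple syntax is owned by ConfigTuples, so the config string here is an assumption:

    ResultFilterDispenser dispenser =
        new CQLResultFilterType().getDispenser("include:WriteTimeoutException");
    Predicate<ResultReadable> filter = dispenser.getResultFilter();
    // a leading include rule means everything not named is discarded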
@ -1,29 +0,0 @@
package io.nosqlbench.activitytype.cqld4.statements.binders;

import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.cql.PreparedStatement;
import com.datastax.oss.driver.api.core.cql.Statement;
import io.nosqlbench.virtdata.core.bindings.ValuesArrayBinder;

import java.util.function.Function;

public enum CqlBinderTypes {

    direct_array(DirectArrayValuesBinder::new),
    unset_aware(UnsettableValuesBinder::new),
    diag_binder(DiagnosticPreparedBinder::new);

    private final Function<CqlSession, ValuesArrayBinder<PreparedStatement, Statement<?>>> mapper;

    CqlBinderTypes(Function<CqlSession, ValuesArrayBinder<PreparedStatement, Statement<?>>> mapper) {
        this.mapper = mapper;
    }

    public final static CqlBinderTypes DEFAULT = unset_aware;

    public ValuesArrayBinder<PreparedStatement, Statement<?>> get(CqlSession session) {
        return mapper.apply(session);
    }
}
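Selecting a binder is a single lookup; unset_aware is the default per the enum above. The session and prepared statement here are assumed from the activity:

    ValuesArrayBinder<PreparedStatement, Statement<?>> binder =
        CqlBinderTypes.DEFAULT.get(session);
    Statement<?> stmt = binder.bindValues(prepared, new Object[]{42, "name"});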
@ -1,59 +0,0 @@
package io.nosqlbench.activitytype.cqld4.statements.binders;

import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.cql.*;
import com.datastax.oss.driver.api.core.type.DataType;
import io.nosqlbench.activitytype.cqld4.core.CQLBindHelper;
import io.nosqlbench.virtdata.core.bindings.ValuesArrayBinder;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;

import java.util.ArrayList;
import java.util.List;

/**
 * This binder is not meant to be used primarily by default. It gives detailed
 * diagnostics, but in order to do so by default it does lots of processing.
 * Other binders will call to this one in an exception handler when needed in
 * order to explain in more detail what is happening for users.
 */
public class DiagnosticPreparedBinder implements ValuesArrayBinder<PreparedStatement, Statement<?>> {

    public static final Logger logger = LogManager.getLogger(DiagnosticPreparedBinder.class);
    private final CqlSession session;

    public DiagnosticPreparedBinder(CqlSession session) {
        this.session = session;
    }

    @Override
    public Statement<?> bindValues(PreparedStatement prepared, Object[] values) {
        ColumnDefinitions columnDefinitions = prepared.getVariableDefinitions();
        BoundStatement bound = prepared.bind();

        List<ColumnDefinition> columnDefList = new ArrayList<>();
        columnDefinitions.forEach(columnDefList::add);

        if (columnDefList.size() != values.length) {
            throw new RuntimeException("The number of named anchors in your statement does not match the number of bindings provided.");
        }

        int i = 0;
        for (Object value : values) {
            if (columnDefList.size() <= i) {
                logger.error("Column definitions were exhausted before all values were bound.");
            }
            ColumnDefinition columnDef = columnDefList.get(i);
            String colName = columnDef.getName().toString();
            DataType type = columnDef.getType();
            try {
                new CQLBindHelper(session).bindStatement(bound, colName, value, type);
            } catch (ClassCastException e) {
                logger.error(String.format("Unable to bind column %s to cql type %s with value %s", colName, type, value));
                throw e;
            }
            i++;
        }
        return bound;
    }
}
@ -1,43 +0,0 @@
package io.nosqlbench.activitytype.cqld4.statements.binders;

import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.cql.PreparedStatement;
import com.datastax.oss.driver.api.core.cql.Statement;
import io.nosqlbench.virtdata.core.bindings.ValuesArrayBinder;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;

import java.util.Arrays;

/**
 * This is now the main binder again, but if there are any exceptions, it delegates to the diagnostic
 * one in order to explain what happened. This is to allow for higher performance in the general
 * case, but with better user support when something goes wrong.
 *
 * If you want to force the client to use the array-passing method of initializing a statement,
 * use this one, known as 'direct_array'. This gives up the benefit of allowing unset values
 * to be modeled, at no clear benefit. Thus the {@link CqlBinderTypes#unset_aware} one
 * will become the default.
 */
public class DirectArrayValuesBinder implements ValuesArrayBinder<PreparedStatement, Statement<?>> {
    public final static Logger logger = LogManager.getLogger(DirectArrayValuesBinder.class);
    private final CqlSession session;

    public DirectArrayValuesBinder(CqlSession session) {
        this.session = session;
    }

    @Override
    public Statement<?> bindValues(PreparedStatement preparedStatement, Object[] objects) {
        try {
            return preparedStatement.bind(objects);
        } catch (Exception e) {
            StringBuilder sb = new StringBuilder();
            sb.append("Error binding objects to prepared statement directly, falling back to diagnostic binding layer:");
            sb.append(Arrays.toString(objects));
            logger.warn(sb.toString(), e);
            DiagnosticPreparedBinder diag = new DiagnosticPreparedBinder(session);
            return diag.bindValues(preparedStatement, objects);
        }
    }
}
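The fast path is one positional bind; only on failure does the column-by-column diagnostic pass run so the offending column can be named. A sketch, with session, prepared, and values assumed from the activity:

    ValuesArrayBinder<PreparedStatement, Statement<?>> binder =
        new DirectArrayValuesBinder(session);
    Statement<?> stmt = binder.bindValues(prepared, values);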
@ -1,55 +0,0 @@
package io.nosqlbench.activitytype.cqld4.statements.binders;

import com.datastax.oss.driver.api.core.ConsistencyLevel;
import com.datastax.oss.driver.api.core.cql.SimpleStatement;
import com.datastax.oss.driver.api.core.cql.Statement;
import io.nosqlbench.virtdata.core.bindings.ValuesArrayBinder;

/**
 * This binder is not meant to be used with anything but DDL or statements
 * which should not be trying to parameterize values in general.
 * Parameterized values are still possible via the 'parameterized' constructor parameter.
 * This binder should be avoided in favor of binders returning PreparedStatement.
 */
public class SimpleStatementValuesBinder
    implements ValuesArrayBinder<SimpleStatement, Statement> {

    private final boolean parameterized;

    public SimpleStatementValuesBinder(boolean parameterized) {
        this.parameterized = parameterized;
    }

    @Override
    public Statement bindValues(SimpleStatement context, Object[] values) {
        String query = context.getQuery();
        if (parameterized) {
            String[] splits = query.split("\\?");
            assert splits.length == values.length + 1;
            StringBuilder sb = new StringBuilder();
            sb.append(splits[0]);
            for (int i = 1; i < splits.length; i++) {
                sb.append(values[i - 1]);
                sb.append(splits[i]);
            }
            query = sb.toString();
            System.out.println(query);
        }
        SimpleStatement simpleStatement = SimpleStatement.newInstance(query);
        ConsistencyLevel cl = context.getConsistencyLevel();
        if (cl != null) {
            // driver 4 statements are immutable; setters return a new instance,
            // so the result must be reassigned for the setting to take effect
            simpleStatement = simpleStatement.setConsistencyLevel(cl);
        }
        // Does serial consistency really make sense here?
        ConsistencyLevel serial_cl = context.getSerialConsistencyLevel();
        if (serial_cl != null) {
            simpleStatement = simpleStatement.setSerialConsistencyLevel(serial_cl);
        }
        Boolean idempotent = context.isIdempotent();
        if (idempotent != null) {
            simpleStatement = simpleStatement.setIdempotent(idempotent);
        }
        return simpleStatement;
    }
}
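With parameterized=true, each '?' is replaced textually by the corresponding value, which is why this binder is only suited to DDL-style statements. A sketch (the table name is an assumption):

    SimpleStatement proto = SimpleStatement.newInstance(
        "SELECT * FROM ks.t WHERE id=? LIMIT 1");
    Statement stmt = new SimpleStatementValuesBinder(true)
        .bindValues(proto, new Object[]{5});
    // resulting query text: "SELECT * FROM ks.t WHERE id=5 LIMIT 1"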
@ -1,83 +0,0 @@
package io.nosqlbench.activitytype.cqld4.statements.binders;

import com.datastax.oss.driver.api.core.ProtocolVersion;
import com.datastax.oss.driver.api.core.cql.*;
import com.datastax.oss.driver.api.core.session.Session;
import com.datastax.oss.driver.api.core.type.DataType;
import com.datastax.oss.driver.api.core.type.codec.TypeCodec;
import com.datastax.oss.driver.api.core.type.codec.registry.CodecRegistry;
import io.nosqlbench.virtdata.api.bindings.VALUE;
import io.nosqlbench.virtdata.core.bindings.ValuesArrayBinder;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;

public class UnsettableValuesBinder implements ValuesArrayBinder<PreparedStatement, Statement<?>> {
    private final static Logger logger = LogManager.getLogger(UnsettableValuesBinder.class);

    private final Session session;
    private final CodecRegistry codecRegistry;
    private final ProtocolVersion protocolVersion;

    public UnsettableValuesBinder(Session session) {
        this.session = session;
        this.codecRegistry = session.getContext().getCodecRegistry();
        this.protocolVersion = this.session.getContext().getProtocolVersion();
    }

    // TODO: Allow for warning when nulls are passed and they aren't expected
    @Override
    public Statement<?> bindValues(PreparedStatement preparedStatement, Object[] objects) {
        int i = -1;
        try {
            BoundStatement boundStmt = preparedStatement.bind();
            ColumnDefinitions variableDefinitions = preparedStatement.getVariableDefinitions();
            for (i = 0; i < objects.length; i++) {
                Object value = objects[i];
                if (VALUE.unset != value) {
                    if (null == value) {
                        // driver 4 statements are immutable; setters return a new
                        // instance, so the result must be reassigned
                        boundStmt = boundStmt.setToNull(i);
                    } else {
                        ColumnDefinition definition = variableDefinitions.get(i);
                        DataType cqlType = definition.getType();
                        TypeCodec<Object> objectTypeCodec = codecRegistry.codecFor(cqlType, value);
                        ByteBuffer serialized = objectTypeCodec.encode(value, protocolVersion);
                        boundStmt = boundStmt.setBytesUnsafe(i, serialized);
                    }
                }
            }
            return boundStmt;
        } catch (Exception e) {
            String typNam = (objects[i] == null ? "NULL" : objects[i].getClass().getCanonicalName());
            List<ColumnDefinition> cdefs = new ArrayList<>();
            preparedStatement.getVariableDefinitions().forEach(cdefs::add);

            logger.error("Error binding column " + cdefs.get(i).getName() + " with class " + typNam, e);
            throw e;
//            StringBuilder sb = new StringBuilder();
//            sb.append("Error binding objects to prepared statement directly, falling back to diagnostic binding layer:");
//            sb.append(Arrays.toString(objects));
//            logger.warn(sb.toString(),e);
//            DiagnosticPreparedBinder diag = new DiagnosticPreparedBinder();
//            return diag.bindValues(preparedStatement, objects);
        }
    }

//    static void setObject(Session session, BoundStatement bs, int index, Object value) {
//
//        DataType cqlType = bs.preparedStatement().getVariables().getType(index);
//
//        CodecRegistry codecRegistry = session.getCluster().getConfiguration().getCodecRegistry();
//        ProtocolVersion protocolVersion =
//            session.getCluster().getConfiguration().getProtocolOptions().getProtocolVersion();
//
//        TypeCodec<Object> codec = codecRegistry.codecFor(cqlType, value);
//        bs.setBytesUnsafe(index, codec.serialize(value, protocolVersion));
//    }

}
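A sketch of what unset-awareness buys, with session and prepared assumed from the activity: VALUE.unset leaves the slot unbound so the write does not touch that column, while null explicitly sets it to null:

    ValuesArrayBinder<PreparedStatement, Statement<?>> binder =
        new UnsettableValuesBinder(session);
    Statement<?> stmt = binder.bindValues(prepared, new Object[]{1, VALUE.unset, null});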
@ -1,50 +0,0 @@
package io.nosqlbench.activitytype.cqld4.statements.core;

import io.nosqlbench.engine.api.util.TagFilter;

import java.util.*;
import java.util.stream.Collectors;

public class AvailableCQLStatements {

    private List<TaggedCQLStatementDefs> availableDefs = new ArrayList<>();

    public AvailableCQLStatements(List<TaggedCQLStatementDefs> allStatementDef) {
        this.availableDefs = allStatementDef;
    }

    public List<TaggedCQLStatementDefs> getRawTagged() {
        return availableDefs;
    }

    public Map<String, String> getFilteringDetails(String tagSpec) {
        Map<String, String> details = new LinkedHashMap<>();
        TagFilter ts = new TagFilter(tagSpec);
        for (TaggedCQLStatementDefs availableDef : availableDefs) {
            TagFilter.Result result = ts.matchesTaggedResult(availableDef);
            String names = availableDef.getStatements().stream()
                .map(CQLStatementDef::getName).collect(Collectors.joining(","));
            details.put(names, result.getLog());
        }
        return details;
    }

    public List<CQLStatementDefParser> getMatching(String tagSpec) {
        TagFilter ts = new TagFilter(tagSpec);
        return availableDefs.stream()
            .filter(ts::matchesTagged)
            .map(TaggedCQLStatementDefs::getStatements)
            .flatMap(Collection::stream)
            .map(p -> new CQLStatementDefParser(p.getName(), p.getStatement()))
            .collect(Collectors.toList());
    }

    public List<CQLStatementDefParser> getAll() {
        return getMatching("");
    }
}
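A filtering sketch; the tag spec syntax is owned by TagFilter, so the string here is an assumption:

    AvailableCQLStatements available = new AvailableCQLStatements(taggedDefs);
    List<CQLStatementDefParser> mainPhase = available.getMatching("phase:main");
    Map<String, String> log = available.getFilteringDetails("phase:main");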
@ -1,345 +0,0 @@
package io.nosqlbench.activitytype.cqld4.statements.core;

import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.CqlSessionBuilder;
import com.datastax.oss.driver.api.core.config.*;
import com.datastax.oss.driver.api.core.loadbalancing.LoadBalancingPolicy;
import com.datastax.oss.driver.api.core.metadata.EndPoint;
import com.datastax.oss.driver.api.core.metadata.Node;
import com.datastax.oss.driver.api.core.retry.RetryPolicy;
import com.datastax.oss.driver.api.core.session.Session;
import com.datastax.oss.driver.api.core.specex.SpeculativeExecutionPolicy;
import com.datastax.oss.driver.internal.core.config.map.MapBasedDriverConfigLoader;
import com.datastax.oss.driver.internal.core.config.typesafe.DefaultDriverConfigLoader;
import com.datastax.oss.driver.internal.core.retry.DefaultRetryPolicy;
import com.typesafe.config.ConfigFactory;
import io.nosqlbench.activitytype.cqld4.config.CQLD4OptionsMapper;
import io.nosqlbench.activitytype.cqld4.core.CQLOptions;
import io.nosqlbench.activitytype.cqld4.core.ProxyTranslator;
import io.nosqlbench.engine.api.activityapi.core.Shutdownable;
import io.nosqlbench.engine.api.activityimpl.ActivityDef;
import io.nosqlbench.engine.api.metrics.ActivityMetrics;
import io.nosqlbench.engine.api.scripting.GraalJsEvaluator;
import io.nosqlbench.engine.api.util.SSLKsFactory;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;

import java.io.File;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;

public class CQLSessionCache implements Shutdownable {

    private final static Logger logger = LogManager.getLogger(CQLSessionCache.class);
    private final static String DEFAULT_SESSION_ID = "default";
    private static final CQLSessionCache instance = new CQLSessionCache();
    private final Map<String, SessionConfig> sessionCache = new HashMap<>();

    public final static class SessionConfig extends ConcurrentHashMap<String, String> {
        public CqlSession session;
        public OptionsMap optionsMap;

        public SessionConfig(CqlSession session, OptionsMap optionsMap) {
            this.session = session;
            this.optionsMap = optionsMap;
        }

        public void set(DefaultDriverOption intOption, Object value) {
            CQLD4OptionsMapper.apply(optionsMap, intOption.getPath(), value.toString());
        }
    }

    private CQLSessionCache() {
    }

    public static CQLSessionCache get() {
        return instance;
    }

    public void stopSession(ActivityDef activityDef) {
        String key = activityDef.getParams().getOptionalString("sessionid").orElse(DEFAULT_SESSION_ID);
        SessionConfig sessionConfig = sessionCache.get(key);
        sessionConfig.session.close();
    }

    public SessionConfig getSession(ActivityDef activityDef) {
        String key = activityDef.getParams().getOptionalString("sessionid").orElse(DEFAULT_SESSION_ID);
        String profileName = activityDef.getParams().getOptionalString("profile").orElse("default");
        return sessionCache.computeIfAbsent(key, (cid) -> createSession(activityDef, key, profileName));
    }

    // cbopts=\".withLoadBalancingPolicy(LatencyAwarePolicy.builder(new TokenAwarePolicy(new DCAwareRoundRobinPolicy(\"dc1-us-east\", 0, false))).build()).withRetryPolicy(new LoggingRetryPolicy(DefaultRetryPolicy.INSTANCE))\"

    private SessionConfig createSession(ActivityDef activityDef, String sessid, String profileName) {

        String host = activityDef.getParams().getOptionalString("host").orElse("localhost");
        int port = activityDef.getParams().getOptionalInteger("port").orElse(9042);

        activityDef.getParams().getOptionalString("cqldriver").ifPresent(v -> {
            logger.warn("The cqldriver parameter is not needed in this version of the driver.");
        });

        // TODO: Figure out how to layer configs with the new TypeSafe Config layer in the Datastax Java Driver
        // TODO: Or give up and bulk import options into the map, because the config API is a labyrinth

        CqlSessionBuilder builder = CqlSession.builder();
//
//        OptionsMap optionsMap = new OptionsMap();
//
//        OptionsMap defaults = OptionsMap.driverDefaults();
//        DriverConfigLoader cl = DriverConfigLoader.fromMap(defaults);
//        DriverConfig cfg = cl.getInitialConfig();

        Optional<Path> scb = activityDef.getParams().getOptionalString("secureconnectbundle")
            .map(Path::of);

        Optional<List<String>> hosts = activityDef.getParams().getOptionalString("host", "hosts")
            .map(h -> h.split(",")).map(Arrays::asList);

        Optional<Integer> port1 = activityDef.getParams().getOptionalInteger("port");

        if (scb.isPresent()) {
            scb.map(b -> {
                logger.debug("adding secureconnectbundle: " + b.toString());
                return b;
            }).ifPresent(builder::withCloudSecureConnectBundle);

            if (hosts.isPresent()) {
                logger.warn("The host parameter is not valid when using secureconnectbundle=");
            }
            if (port1.isPresent()) {
                logger.warn("the port parameter is not used with CQL when using secureconnectbundle=");
            }
        } else {
            hosts.orElse(List.of("localhost"))
                .stream()
                .map(h -> InetSocketAddress.createUnresolved(h, port))
                .peek(h -> logger.debug("adding contact endpoint: " + h.getHostName() + ":" + h.getPort()))
                .forEachOrdered(builder::addContactPoint);
        }

//        builder.withCompression(ProtocolOptions.Compression.NONE);
        // TODO add map based configuration with compression defaults

        Optional<String> usernameOpt = activityDef.getParams().getOptionalString("username");
        Optional<String> passwordOpt = activityDef.getParams().getOptionalString("password");
        Optional<String> passfileOpt = activityDef.getParams().getOptionalString("passfile");
        Optional<String> authIdOpt = activityDef.getParams().getOptionalString("authid");

        if (usernameOpt.isPresent()) {
            String username = usernameOpt.get();
            String password;
            if (passwordOpt.isPresent()) {
                password = passwordOpt.get();
            } else if (passfileOpt.isPresent()) {
                Path path = Paths.get(passfileOpt.get());
                try {
                    password = Files.readAllLines(path).get(0);
                } catch (IOException e) {
                    String error = "Error while reading password from file:" + passfileOpt;
                    logger.error(error, e);
                    throw new RuntimeException(e);
                }
            } else {
                String error = "username is present, but neither password nor passfile are defined.";
                logger.error(error);
                throw new RuntimeException(error);
            }
            if (authIdOpt.isPresent()) {
                builder.withAuthCredentials(username, password, authIdOpt.get());
            } else {
                builder.withAuthCredentials(username, password);
            }
        }

        Optional<String> clusteropts = activityDef.getParams().getOptionalString("cbopts");
        if (clusteropts.isPresent()) {
            try {
                logger.info("applying cbopts:" + clusteropts.get());
                GraalJsEvaluator<CqlSessionBuilder> clusterEval = new GraalJsEvaluator<>(CqlSessionBuilder.class);
                clusterEval.put("builder", builder);
                String importEnv =
                    "load(\"nashorn:mozilla_compat.js\");\n" +
                        " importPackage(com.google.common.collect.Lists);\n" +
                        " importPackage(com.google.common.collect.Maps);\n" +
                        " importPackage(com.datastax.driver);\n" +
                        " importPackage(com.datastax.driver.core);\n" +
                        " importPackage(com.datastax.driver.core.policies);\n" +
                        "builder" + clusteropts.get() + "\n";
                clusterEval.script(importEnv);
                builder = clusterEval.eval();
                logger.info("successfully applied:" + clusteropts.get());
            } catch (Exception e) {
                logger.error("Unable to evaluate: " + clusteropts.get() + " in script context:" + e.getMessage());
                throw e;
            }
        }

        // TODO: Support speculative=>
//        SpeculativeExecutionPolicy speculativePolicy = activityDef.getParams()
//            .getOptionalString("speculative")
//            .map(speculative -> {
//                logger.info("speculative=>" + speculative);
//                return speculative;
//            })
//            .map(CQLOptions::speculativeFor)
//            .orElse(CQLOptions.defaultSpeculativePolicy());
//        builder.withSpeculativeExecutionPolicy(speculativePolicy);

        // TODO: Support socketoptions=>
//        activityDef.getParams().getOptionalString("socketoptions")
//            .map(sockopts -> {
//                logger.info("socketoptions=>" + sockopts);
//                return sockopts;
//            })
//            .map(CQLOptions::socketOptionsFor)
//            .ifPresent(builder::withSocketOptions);
//

        // TODO: Support reconnectpolicy
//        activityDef.getParams().getOptionalString("reconnectpolicy")
//            .map(reconnectpolicy-> {
//                logger.info("reconnectpolicy=>" + reconnectpolicy);
//                return reconnectpolicy;
//            })
//            .map(CQLOptions::reconnectPolicyFor)
//            .ifPresent(builder::withReconnectionPolicy);

        // TODO: support pooling options
//        activityDef.getParams().getOptionalString("pooling")
//            .map(pooling -> {
//                logger.info("pooling=>" + pooling);
//                return pooling;
//            })
//            .map(CQLOptions::poolingOptionsFor)
//            .ifPresent(builder::withPoolingOptions);

        // TODO: support whitelist options
//        activityDef.getParams().getOptionalString("whitelist")
//            .map(whitelist -> {
//                logger.info("whitelist=>" + whitelist);
//                return whitelist;
//            })
//            .map(p -> CQLOptions.whitelistFor(p, null))
//            .ifPresent(builder::withLoadBalancingPolicy);
//

        // TODO: support tickduration
//        activityDef.getParams().getOptionalString("tickduration")
//            .map(tickduration -> {
//                logger.info("tickduration=>" + tickduration);
//                return tickduration;
//            })
//            .map(CQLOptions::withTickDuration)
//            .ifPresent(builder::withNettyOptions);

        // TODO: support compression
//        activityDef.getParams().getOptionalString("compression")
//            .map(compression -> {
//                logger.info("compression=>" + compression);
//                return compression;
//            })
//            .map(CQLOptions::withCompression)
//            .ifPresent(builder::withCompression);

        // TODO: Support SSL standard config interface
//        if (activityDef.getParams().getOptionalString("ssl").isPresent()) {
//            logger.info("Cluster builder proceeding with SSL but no Client Auth");
//            Object context = SSLKsFactory.get().getContext(activityDef);
//            SSLOptions sslOptions;
//            if (context instanceof javax.net.ssl.SSLContext) {
//                sslOptions = RemoteEndpointAwareJdkSSLOptions.builder()
//                    .withSSLContext((javax.net.ssl.SSLContext) context).build();
//                builder.withSSL(sslOptions);
//            } else if (context instanceof io.netty.handler.ssl.SslContext) {
//                sslOptions =
//                    new RemoteEndpointAwareNettySSLOptions((io.netty.handler.ssl.SslContext) context);
//            } else {
//                throw new RuntimeException("Unrecognized ssl context object type: " + context.getClass().getCanonicalName());
//            }
//            builder.withSSL(sslOptions);
//        }

        // TODO: Support retry policy
//        RetryPolicy retryPolicy = activityDef.getParams()
//            .getOptionalString("retrypolicy")
//            .map(CQLOptions::retryPolicyFor).orElse(DefaultRetryPolicy.INSTANCE);
//
//        if (retryPolicy instanceof LoggingRetryPolicy) {
//            logger.info("using LoggingRetryPolicy");
//        }
//
//        builder.withRetryPolicy(retryPolicy);

        // TODO: Support JMX reporting toggle
//        if (!activityDef.getParams().getOptionalBoolean("jmxreporting").orElse(false)) {
//            builder.withoutJMXReporting();
//        }

        // TODO: Support single-endpoint options?
//        // Proxy Translator and Whitelist for use with DS Cloud on-demand single-endpoint setup
//        if (activityDef.getParams().getOptionalBoolean("single-endpoint").orElse(false)) {
//            InetSocketAddress inetHost = new InetSocketAddress(host, port);
//            final List<InetSocketAddress> whiteList = new ArrayList<>();
//            whiteList.add(inetHost);
//
//            LoadBalancingPolicy whitelistPolicy = new WhiteListPolicy(new RoundRobinPolicy(), whiteList);
//            builder.withAddressTranslator(new ProxyTranslator(inetHost)).withLoadBalancingPolicy(whitelistPolicy);
//        }

        CqlSession session = builder.build();

//        Cluster cl = builder.build();

        // TODO: Support default idempotence
//        // Apply default idempotence, if set
//        activityDef.getParams().getOptionalBoolean("defaultidempotence").map(
//            b -> cl.getConfiguration().getQueryOptions().setDefaultIdempotence(b)
//        );

        // This also forces init of metadata

        Map<UUID, Node> nodes = session.getMetadata().getNodes();
        if (nodes.size() > 25) {
            logger.info("Found " + nodes.size() + " nodes in cluster.");
        } else {
            nodes.forEach((k, v) -> {
                logger.info("found node " + k);
            });
        }
        logger.info("cluster-metadata-allhosts:\n" + session.getMetadata().getNodes());

        if (activityDef.getParams().getOptionalBoolean("drivermetrics").orElse(false)) {
            String driverPrefix = activityDef.getParams().getOptionalString("driverprefix").orElse("driver." + sessid) + ".";
            session.getMetrics().ifPresent(m -> ActivityMetrics.mountSubRegistry(driverPrefix, m.getRegistry()));
        }

        OptionsMap optionsMap = OptionsMap.driverDefaults();

        return new SessionConfig(session, optionsMap);
    }

    @Override
    public void shutdown() {
        for (SessionConfig session : sessionCache.values()) {
            session.session.close();
        }
    }
}
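Sessions are keyed by the sessionid activity parameter, so activities that share a sessionid share one CqlSession. A sketch assuming an ActivityDef in scope:

    CQLSessionCache.SessionConfig cfg = CQLSessionCache.get().getSession(activityDef);
    CqlSession session = cfg.session; // cached; built on first use
    // per-session driver options route through CQLD4OptionsMapper
    cfg.set(DefaultDriverOption.REQUEST_TIMEOUT, "2 seconds");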
@ -1,105 +0,0 @@
package io.nosqlbench.activitytype.cqld4.statements.core;

import com.datastax.oss.driver.api.core.ConsistencyLevel;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;

import java.util.*;
import java.util.stream.Collectors;

public class CQLStatementDef {
    private final static Logger logger = LogManager.getLogger(CQLStatementDef.class);

    private Map<String, String> params = new HashMap<>();
    private String name = "";
    private String statement = "";
    private boolean prepared = true;
    private String cl = ConsistencyLevel.LOCAL_ONE.name();
    private Map<String, String> bindings = new HashMap<>();

    public CQLStatementDef() {
    }

    public String getGenSpec(String s) {
        return bindings.get(s);
    }

    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }

    public String getStatement() {
        return statement;
    }

    public void setStatement(String statement) {
        this.statement = statement;
    }

    public Map<String, String> getBindings() {
        return bindings;
    }

    public void setBindings(Map<String, String> bindings) {
        this.bindings = bindings;
    }

    public String toString() {
        StringBuilder sb = new StringBuilder();
        sb.append(" name:").append(this.getName()).append("\n");
        sb.append(" statement: |").append("\n");
        String formattedStmt = Arrays.stream(getStatement().split("\\r*\n"))
            .map(s -> " " + s)
            .collect(Collectors.joining("\n"));
        sb.append(formattedStmt);
        if (bindings.size() > 0) {
            sb.append(" bindings:\n");
            Optional<Integer> maxLen = this.bindings.keySet().stream().map(String::length).reduce(Integer::max);
            for (String bindName : this.bindings.keySet()) {
                sb
                    .append(String.format(" %-" + (maxLen.orElse(20) + 2) + "s", bindName)).append(" : ")
                    .append(bindings.get(bindName))
                    .append("\n");
            }
        }
        return sb.toString();
    }

    public boolean isPrepared() {
        return prepared;
    }

    public void setPrepared(boolean prepared) {
        this.prepared = prepared;
    }

    public String getConsistencyLevel() {
        return this.cl;
    }

    public void setConsistencyLevel(String consistencyLevel) {
        this.cl = consistencyLevel;
    }

    public void setCl(String consistencyLevel) {
        setConsistencyLevel(consistencyLevel);
    }

    public Map<String, String> getParams() {
        return params;
    }

    public void setParams(Map<String, String> params) {
        this.params = params;
    }

    public long getRatio() {
        return Long.parseLong(Optional.ofNullable(params.get("ratio")).orElse("1"));
    }
}
@ -1,159 +0,0 @@
package io.nosqlbench.activitytype.cqld4.statements.core;

import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;

import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;

public class CQLStatementDefParser {
    private final static Logger logger = LogManager.getLogger(CQLStatementDefParser.class);
//    private final static Pattern templateToken = Pattern.compile("<<(\\w+(:(.+?))?)>>");
    private final static Pattern stmtToken = Pattern.compile("\\?(\\w+[-_\\d\\w]*)|\\{(\\w+[-_\\d\\w.]*)}");
    private final static String UNSET_VALUE = "UNSET-VALUE";
    private final String stmt;
    private final String name;

    private CQLStatementDef deprecatedDef; // deprecated, to be removed

    public void setBindings(Map<String, String> bindings) {
        this.bindings = bindings;
    }

    private Map<String, String> bindings;

    public CQLStatementDef getDeprecatedDef() {
        return deprecatedDef;
    }

    public void setDeprecatedDef(CQLStatementDef deprecatedDef) {
        this.deprecatedDef = deprecatedDef;
    }

    public CQLStatementDefParser(String name, String stmt) {
        this.stmt = stmt;
        this.name = name;
    }

    public Map<String, String> getBindings() {
        return bindings;
    }

    /**
     * @return bindableNames in order as specified in the parameter placeholders
     */
    public List<String> getBindableNames() {
        Matcher m = stmtToken.matcher(stmt);
        List<String> bindNames = new ArrayList<>();
        while (m.find()) {
            String form1 = m.group(1);
            String form2 = m.group(2);
            bindNames.add((form1 != null && !form1.isEmpty()) ? form1 : form2);
        }
        return bindNames;
    }

    public String getName() {
        return name;
    }

    public String getParsedStatementOrError(Set<String> namedBindings) {
        ParseResult result = getParseResult(namedBindings);
        if (result.hasError()) {
            throw new RuntimeException("Statement template has errors:\n" + result.toString());
        }
        return result.getStatement();
    }

    public ParseResult getParseResult(Set<String> namedBindings) {

        HashSet<String> missingAnchors = new HashSet<>(namedBindings);
        HashSet<String> missingBindings = new HashSet<>();

        String statement = this.stmt;
        StringBuilder cooked = new StringBuilder();

        Matcher m = stmtToken.matcher(statement);
        int lastMatch = 0;
        while (m.find(lastMatch)) {
            String pre = statement.substring(lastMatch, m.start());

            String form1 = m.group(1);
            String form2 = m.group(2);
            String tokenName = (form1 != null && !form1.isEmpty()) ? form1 : form2;
            lastMatch = m.end();
            cooked.append(pre);
            cooked.append("?");

            if (!namedBindings.contains(tokenName)) {
                missingBindings.add(tokenName);
            } else {
                missingAnchors.remove(tokenName);
            }
        }

        // append the remainder of the statement after the last matched anchor
        cooked.append(statement.substring(lastMatch));

        logger.info("Parsed statement as: " + cooked.toString().replaceAll("\\n", "\\\\n"));

        return new ParseResult(cooked.toString(), name, bindings, missingBindings, missingAnchors);
    }

    public static class ParseResult {
        private final Set<String> missingGenerators;
        private final Set<String> missingAnchors;
        private final String statement;
        private Map<String, String> bindings;
        private final String name;

        public ParseResult(String stmt, String name, Map<String, String> bindings, Set<String> missingGenerators, Set<String> missingAnchors) {
            this.missingGenerators = missingGenerators;
            this.missingAnchors = missingAnchors;
            this.statement = stmt;
            this.name = name;
            this.bindings = bindings;
        }

        public String toString() {
            String generatorsSummary = (this.missingGenerators.size() > 0) ?
                "\nundefined generators:" + this.missingGenerators.stream().collect(Collectors.joining(",", "[", "]")) : "";
            return "STMT:" + statement + "\n" + generatorsSummary;
        }

        public String getName() {
            return name;
        }

        public Map<String, String> getBindings() {
            return bindings;
        }

        public boolean hasError() {
            return missingGenerators.size() > 0;
        }

        public String getStatement() {
            return statement;
        }

        public Set<String> getMissingAnchors() {
            return missingAnchors;
        }

        public Set<String> getMissingGenerators() {
            return missingGenerators;
        }
    }

}
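A parsing sketch showing how both anchor forms are rewritten to positional markers:

    CQLStatementDefParser parser = new CQLStatementDefParser(
        "insert-user", "insert into ks.users (id,name) values ({id},{name})");
    List<String> names = parser.getBindableNames(); // [id, name]
    String cql = parser.getParsedStatementOrError(Set.of("id", "name"));
    // cql is "insert into ks.users (id,name) values (?,?)"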
@ -1,37 +0,0 @@
package io.nosqlbench.activitytype.cqld4.statements.core;

import java.util.*;

public class CQLStatementGroups {

    private Map<String, List<CQLStatementDefParser>> statementGroups = new HashMap<>();

    public CQLStatementGroups(Map<String, List<CQLStatementDefParser>> statementGroups) {
        this.statementGroups = statementGroups;
    }

    public List<CQLStatementDefParser> getGroups(String... groupNames) {
        List<CQLStatementDefParser> statements = new ArrayList<>();
        for (String groupName : groupNames) {
            List<CQLStatementDefParser> adding = statementGroups.getOrDefault(groupName, Collections.emptyList());
            statements.addAll(adding);
        }
        return statements;
    }

    public String toString() {
        StringBuilder sb = new StringBuilder();
        List<String> groups = new ArrayList<>(statementGroups.keySet());
        Collections.sort(groups);
        sb.append("groups:\n");
        for (String group : groups) {
//            sb.append("section:").append(section).append("\n");
            for (CQLStatementDefParser statementDef : statementGroups.get(group)) {
                sb.append(statementDef.toString());
            }
            sb.append("\n");
        }
        return sb.toString();
    }
}
@ -1,182 +0,0 @@
package io.nosqlbench.activitytype.cqld4.statements.core;

import com.codahale.metrics.Histogram;
import com.codahale.metrics.Timer;
import com.datastax.oss.driver.api.core.cql.PreparedStatement;
import com.datastax.oss.driver.api.core.cql.SimpleStatement;
import com.datastax.oss.driver.api.core.cql.Statement;
import io.nosqlbench.activitytype.cqld4.api.D4ResultSetCycleOperator;
import io.nosqlbench.activitytype.cqld4.api.RowCycleOperator;
import io.nosqlbench.virtdata.core.bindings.ContextualArrayBindings;

import java.io.IOException;
import java.io.Writer;
import java.util.concurrent.TimeUnit;

/**
 * A ReadyCQLStatement instantiates new statements to be executed at some mix ratio.
 * It optionally holds metrics objects for a named statement.
 */
public class ReadyCQLStatement {

    private String name;
    private ContextualArrayBindings<?, Statement<?>> contextualBindings;
    private long ratio;
    private D4ResultSetCycleOperator[] resultSetOperators = null;
    private RowCycleOperator[] rowCycleOperators = null;

    private Timer successTimer;
    private Timer errorTimer;
    private Histogram rowsFetchedHisto;
    private Writer resultCsvWriter;

    public ReadyCQLStatement(ContextualArrayBindings<?, Statement<?>> contextualBindings, long ratio, String name) {
        this.contextualBindings = contextualBindings;
        this.ratio = ratio;
        this.name = name;
    }

    public ReadyCQLStatement withMetrics(Timer successTimer, Timer errorTimer, Histogram rowsFetchedHisto) {
        this.successTimer = successTimer;
        this.errorTimer = errorTimer;
        this.rowsFetchedHisto = rowsFetchedHisto;
        return this;
    }

    public Statement bind(long value) {
        return contextualBindings.bind(value);
    }

    public D4ResultSetCycleOperator[] getResultSetOperators() {
        return resultSetOperators;
    }

    public ContextualArrayBindings getContextualBindings() {
        return this.contextualBindings;
    }

    public String getQueryString(long value) {
        Object stmt = contextualBindings.getContext();
        if (stmt instanceof PreparedStatement) {
            String queryString = ((PreparedStatement)stmt).getQuery();
            StringBuilder sb = new StringBuilder(queryString.length()*2);
            sb.append("(prepared) ");
            return getQueryStringValues(value, queryString, sb);
        } else if (stmt instanceof SimpleStatement) {
            String queryString = ((SimpleStatement) stmt).getQuery();
            StringBuilder sb = new StringBuilder();
            sb.append("(simple) ");
            return getQueryStringValues(value, queryString, sb);
        }
        if (stmt instanceof String) {
            return (String)stmt;
        }
        throw new RuntimeException("context object not recognized for query string:" + stmt.getClass().getCanonicalName());
    }

    private String getQueryStringValues(long value, String queryString, StringBuilder sb) {
        sb.append(queryString);
        // ensure a line break between the query text and the rendered values
        if (!queryString.endsWith("\n")) {
            sb.append("\n");
        }
        sb.append(" VALUES[");
        Object[] all = contextualBindings.getBindings().getAll(value);
        String delim="";
        for (Object o : all) {
            sb.append(delim);
            delim=",";
            sb.append(o.toString());
        }
        sb.append("]");
        return sb.toString();
    }

    public long getRatio() {
        return ratio;
    }

    public void setRatio(long ratio) {
        this.ratio = ratio;
    }

    /**
     * This method should be called when an associated statement is executed successfully.
     * @param cycleValue The cycle associated with the execution.
     * @param nanoTime The nanoTime duration of the execution.
     * @param rowsFetched The number of rows fetched for this cycle
     */
    public void onSuccess(long cycleValue, long nanoTime, long rowsFetched) {
        if (successTimer!=null) {
            successTimer.update(nanoTime, TimeUnit.NANOSECONDS);
        }
        if (rowsFetchedHisto!=null) {
            rowsFetchedHisto.update(rowsFetched);
        }
        if (resultCsvWriter!=null) {
            try {
                synchronized(resultCsvWriter) {
                    // <cycle>,(SUCCESS|FAILURE),<nanos>,<rowsfetched>,<errorname>\n
                    resultCsvWriter
                        .append(String.valueOf(cycleValue)).append(",")
                        .append("SUCCESS,")
                        .append(String.valueOf(nanoTime)).append(",")
                        .append(String.valueOf(rowsFetched))
                        .append(",NONE")
                        .append("\n");
                }
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        }
    }

    /**
     * This method should be called when an associated statement is executed unsuccessfully.
     * It should be called only once per cycle in the case of execution error.
     * @param cycleValue The cycle associated with the erred execution.
     * @param resultNanos The nanoTime duration of the execution.
     * @param t The associated throwable
     */
    public void onError(long cycleValue, long resultNanos, Throwable t) {
        if (errorTimer!=null) {
            errorTimer.update(resultNanos, TimeUnit.NANOSECONDS);
        }
        if (resultCsvWriter!=null) {
            try {
                synchronized(resultCsvWriter) {
                    // <cycle>,(SUCCESS|FAILURE),<nanos>,<rowsfetched>,<errorname>\n
                    resultCsvWriter
                        .append(String.valueOf(cycleValue)).append(",")
                        .append("FAILURE,")
                        .append(String.valueOf(resultNanos)).append(",")
                        .append("0,")
                        .append(t.getClass().getSimpleName())
                        .append("\n");
                }
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        }
    }

    public ReadyCQLStatement withResultSetCycleOperators(D4ResultSetCycleOperator[] pageInfoCycleOperators) {
        this.resultSetOperators = pageInfoCycleOperators;
        return this;
    }

    public ReadyCQLStatement withRowCycleOperators(RowCycleOperator[] rowCycleOperators) {
        this.rowCycleOperators = rowCycleOperators;
        return this;
    }

    public RowCycleOperator[] getRowCycleOperators() {
        return this.rowCycleOperators;
    }

    public ReadyCQLStatement withResultCsvWriter(Writer resultCsvWriter) {
        this.resultCsvWriter = resultCsvWriter;
        return this;
    }
}
@ -1,125 +0,0 @@
package io.nosqlbench.activitytype.cqld4.statements.core;

import com.codahale.metrics.Histogram;
import com.codahale.metrics.Timer;
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.cql.PreparedStatement;
import com.datastax.oss.driver.api.core.cql.SimpleStatement;
import com.datastax.oss.driver.api.core.cql.Statement;
import com.datastax.oss.driver.api.core.session.Session;
import io.nosqlbench.activitytype.cqld4.api.D4ResultSetCycleOperator;
import io.nosqlbench.activitytype.cqld4.api.RowCycleOperator;
import io.nosqlbench.activitytype.cqld4.core.CqlActivity;
import io.nosqlbench.activitytype.cqld4.statements.binders.CqlBinderTypes;
import io.nosqlbench.activitytype.cqld4.statements.binders.SimpleStatementValuesBinder;
import io.nosqlbench.engine.api.metrics.ActivityMetrics;
import io.nosqlbench.virtdata.core.bindings.BindingsTemplate;
import io.nosqlbench.virtdata.core.bindings.ContextualBindingsArrayTemplate;
import io.nosqlbench.virtdata.core.bindings.ValuesArrayBinder;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;

import java.io.Writer;
import java.util.Map;

public class ReadyCQLStatementTemplate {

    private final static Logger logger = LogManager.getLogger(ReadyCQLStatementTemplate.class);
    private final Session session;
    private final ContextualBindingsArrayTemplate<?, Statement<?>> template;
    private final long ratio;
    private final String name;

    private D4ResultSetCycleOperator[] pageInfoCycleOperators;
    private RowCycleOperator[] rowCycleOperators;

    private Timer successTimer;
    private Timer errorTimer;
    private Histogram rowsFetchedHisto;
    private Writer resultCsvWriter;

    public ReadyCQLStatementTemplate(
        Map<String,Object> fconfig,
        CqlBinderTypes binderType,
        CqlSession session,
        PreparedStatement preparedStmt,
        long ratio,
        String name
    ) {
        this.session = session;
        this.name = name;
        ValuesArrayBinder<PreparedStatement, Statement<?>> binder = binderType.get(session);
        logger.trace("Using binder_type=>" + binder.toString());

        template = new ContextualBindingsArrayTemplate<>(
            preparedStmt,
            new BindingsTemplate(fconfig),
            binder
        );
        this.ratio = ratio;
    }

    public ReadyCQLStatementTemplate(
        Map<String,Object> fconfig,
        Session session,
        SimpleStatement simpleStatement,
        long ratio,
        String name,
        boolean parameterized
    ) {
        this.session = session;
        this.name = name;
        template = new ContextualBindingsArrayTemplate(
            simpleStatement,
            new BindingsTemplate(fconfig),
            new SimpleStatementValuesBinder(parameterized)
        );
        this.ratio = ratio;
    }

    public ReadyCQLStatement resolve() {
        return new ReadyCQLStatement(template.resolveBindings(), ratio, name)
            .withMetrics(this.successTimer, this.errorTimer, this.rowsFetchedHisto)
            .withResultSetCycleOperators(pageInfoCycleOperators)
            .withRowCycleOperators(rowCycleOperators)
            .withResultCsvWriter(resultCsvWriter);
    }

    public ContextualBindingsArrayTemplate<?, Statement<?>> getContextualBindings() {
        return template;
    }

    public String getName() {
        return name;
    }

    public void instrument(CqlActivity activity) {
        this.successTimer = ActivityMetrics.timer(activity.getActivityDef(), name + "--success");
        this.errorTimer = ActivityMetrics.timer(activity.getActivityDef(), name + "--error");
        this.rowsFetchedHisto = ActivityMetrics.histogram(activity.getActivityDef(), name + "--resultset-size");
    }

    public void logResultCsv(CqlActivity activity, String name) {
        this.resultCsvWriter = activity.getNamedWriter(name);
    }

    public void addResultSetOperators(D4ResultSetCycleOperator... addingOperators) {
        pageInfoCycleOperators = (pageInfoCycleOperators == null) ? new D4ResultSetCycleOperator[0] : pageInfoCycleOperators;

        D4ResultSetCycleOperator[] newOperators = new D4ResultSetCycleOperator[pageInfoCycleOperators.length + addingOperators.length];
        System.arraycopy(pageInfoCycleOperators, 0, newOperators, 0, pageInfoCycleOperators.length);
        System.arraycopy(addingOperators, 0, newOperators, pageInfoCycleOperators.length, addingOperators.length);
        this.pageInfoCycleOperators = newOperators;
    }

    public void addRowCycleOperators(RowCycleOperator... addingOperators) {
        rowCycleOperators = (rowCycleOperators == null) ? new RowCycleOperator[0] : rowCycleOperators;
        RowCycleOperator[] newOperators = new RowCycleOperator[rowCycleOperators.length + addingOperators.length];
        System.arraycopy(rowCycleOperators, 0, newOperators, 0, rowCycleOperators.length);
        System.arraycopy(addingOperators, 0, newOperators, rowCycleOperators.length, addingOperators.length);
        this.rowCycleOperators = newOperators;
    }

}
@ -1,25 +0,0 @@
package io.nosqlbench.activitytype.cqld4.statements.core;

import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;

public class ReadyCQLStatementsTemplate {

    private List<ReadyCQLStatementTemplate> readyStatementList = new ArrayList<>();

    public void addTemplate(ReadyCQLStatementTemplate t) {
        this.readyStatementList.add(t);
    }

    public List<ReadyCQLStatement> resolve() {
        return readyStatementList.stream()
            .map(ReadyCQLStatementTemplate::resolve)
            .collect(Collectors.toList());
    }

    public int size() {
        return readyStatementList.size();
    }

}
@ -1,57 +0,0 @@
package io.nosqlbench.activitytype.cqld4.statements.core;

import io.nosqlbench.engine.api.util.Tagged;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class TaggedCQLStatementDefs implements Tagged {

    private List<CQLStatementDef> statements = new ArrayList<>();
    private Map<String,String> tags = new HashMap<>();
    private Map<String,String> params = new HashMap<>();

    public TaggedCQLStatementDefs(Map<String,String> tags, Map<String,String> params, List<CQLStatementDef> statements) {
        this.tags = tags;
        this.params = params;
        this.statements = statements;
    }

    public TaggedCQLStatementDefs(Map<String,String> tags, List<CQLStatementDef> statements) {
        this.tags = tags;
        this.statements = statements;
    }

    public TaggedCQLStatementDefs(List<CQLStatementDef> statements) {
        this.statements = statements;
    }

    public TaggedCQLStatementDefs() {
    }

    public List<CQLStatementDef> getStatements() {
        return statements;
    }

    public void setStatements(List<CQLStatementDef> statements) {
        this.statements = statements;
    }

    public Map<String, String> getTags() {
        return tags;
    }

    public void setTags(Map<String, String> tags) {
        this.tags = tags;
    }

    public Map<String, String> getParams() {
        return params;
    }

    public void setParams(Map<String, String> params) {
        this.params = params;
    }
}
@ -1,71 +0,0 @@
package io.nosqlbench.activitytype.cqld4.statements.core;

import io.nosqlbench.engine.api.activityimpl.ActivityInitializationError;
import io.nosqlbench.nb.api.content.Content;
import io.nosqlbench.nb.api.content.NBIO;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;
import org.yaml.snakeyaml.TypeDescription;
import org.yaml.snakeyaml.Yaml;
import org.yaml.snakeyaml.constructor.Constructor;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.function.Function;

@SuppressWarnings("ALL")
public class YamlCQLStatementLoader {

    private final static Logger logger = LogManager.getLogger(YamlCQLStatementLoader.class);
    List<Function<String, String>> transformers = new ArrayList<>();

    public YamlCQLStatementLoader() {
    }

    public YamlCQLStatementLoader(Function<String, String>... transformers) {
        this.transformers.addAll(Arrays.asList(transformers));
    }

    public AvailableCQLStatements load(String fromPath, String... searchPaths) {

        Content<?> yamlContent = NBIO.all().prefix(searchPaths).name(fromPath).extension("yaml").one();
        String data = yamlContent.asString();

        for (Function<String, String> xform : transformers) {
            try {
                logger.debug("Applying string transformer to yaml data:" + xform);
                data = xform.apply(data);
            } catch (Exception e) {
                throw new ActivityInitializationError("Error applying string transform to input", e);
            }
        }

        Yaml yaml = getCustomYaml();

        try {
            Iterable<Object> objects = yaml.loadAll(data);
            List<TaggedCQLStatementDefs> stmtListList = new ArrayList<>();
            for (Object object : objects) {
                TaggedCQLStatementDefs tsd = (TaggedCQLStatementDefs) object;
                stmtListList.add(tsd);
            }
            return new AvailableCQLStatements(stmtListList);

        } catch (Exception e) {
            logger.error("Error loading yaml from " + fromPath, e);
            throw e;
        }

    }

    private Yaml getCustomYaml() {
        Constructor constructor = new Constructor(TaggedCQLStatementDefs.class);
        TypeDescription tds = new TypeDescription(TaggedCQLStatementDefs.class);
        tds.putListPropertyType("statements", CQLStatementDef.class);
        constructor.addTypeDescription(tds);
        return new Yaml(constructor);
    }

}
@ -1,17 +0,0 @@
package io.nosqlbench.activitytype.cqld4.statements.rowoperators;

import com.datastax.oss.driver.api.core.cql.Row;
import io.nosqlbench.activitytype.cqld4.api.RowCycleOperator;

/**
 * Prints each row to stdout.
 */
public class Print implements RowCycleOperator {

    @Override
    public int apply(Row row, long cycle) {
        System.out.println("ROW:" + row);
        return 0;
    }

}
@ -1,35 +0,0 @@
package io.nosqlbench.activitytype.cqld4.statements.rowoperators;

import io.nosqlbench.activitytype.cqld4.api.RowCycleOperator;

public enum RowCycleOperators {

    saverows(SaveThreadRows.class),
    savevars(SaveThreadVars.class),
    saveglobalvars(SaveGlobalVars.class),
    print(Print.class);

    private final Class<? extends RowCycleOperator> implClass;

    RowCycleOperators(Class<? extends RowCycleOperator> implClass) {
        this.implClass = implClass;
    }

    public Class<? extends RowCycleOperator> getImplementation() {
        return implClass;
    }

    public RowCycleOperator getInstance() {
        try {
            return getImplementation().getConstructor().newInstance();
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }

    public static RowCycleOperator newOperator(String name) {
        return RowCycleOperators.valueOf(name).getInstance();
    }

}
@ -1,47 +0,0 @@
package io.nosqlbench.activitytype.cqld4.statements.rowoperators;

import com.datastax.oss.driver.api.core.cql.ColumnDefinition;
import com.datastax.oss.driver.api.core.cql.Row;
import io.nosqlbench.activitytype.cqld4.api.RowCycleOperator;
import io.nosqlbench.virtdata.library.basics.core.threadstate.SharedState;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;

import java.util.Arrays;
import java.util.HashMap;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import java.util.stream.StreamSupport;

/**
 * Save specific variables to the thread local object map
 */
public class Save implements RowCycleOperator {
    private final static Logger logger = LogManager.getLogger(Save.class);

    ThreadLocal<HashMap<String, Object>> tl_objectMap = SharedState.tl_ObjectMap;

    private final String[] varnames;

    public Save(String... varnames) {
        this.varnames = varnames;
    }

    @Override
    public int apply(Row row, long cycle) {
        try {
            HashMap<String, Object> tlvars = tl_objectMap.get();
            for (String varname : varnames) {
                Object object = row.getObject(varname);
                tlvars.put(varname, object);
            }
        } catch (Exception e) {
            Stream<ColumnDefinition> stream = StreamSupport.stream(row.getColumnDefinitions().spliterator(), false);
            logger.error("Unable to save '" + Arrays.toString(varnames) + "' from " + stream.map(d -> d.getName().toString())
                .collect(Collectors.joining(",", "[", "]")) + ": " + e.getMessage(), e);
            throw e;
        }
        return 0;
    }

}
@ -1,31 +0,0 @@
package io.nosqlbench.activitytype.cqld4.statements.rowoperators;

import com.datastax.oss.driver.api.core.cql.ColumnDefinition;
import com.datastax.oss.driver.api.core.cql.Row;
import io.nosqlbench.activitytype.cqld4.api.RowCycleOperator;
import io.nosqlbench.virtdata.library.basics.core.threadstate.SharedState;

import java.util.concurrent.ConcurrentHashMap;

/**
 * Stores the current row into the global object map. Key names are set from the field names. Null values are stored
 * as empty strings.
 */
public class SaveGlobalVars implements RowCycleOperator {

    ConcurrentHashMap<String, Object> gl_vars = SharedState.gl_ObjectMap;

    @Override
    public int apply(Row row, long cycle) {
        for (ColumnDefinition definition : row.getColumnDefinitions()) {
            String name = definition.getName().toString();
            Object object = row.getObject(name);
            if (object == null) {
                object = "";
            }
            gl_vars.put(name, object);
        }
        return 0;
    }

}
@ -1,21 +0,0 @@
package io.nosqlbench.activitytype.cqld4.statements.rowoperators;

import com.datastax.oss.driver.api.core.cql.Row;
import io.nosqlbench.activitytype.cqld4.api.RowCycleOperator;
import io.nosqlbench.activitytype.cqld4.statements.rsoperators.PerThreadCQLData;

import java.util.LinkedList;

/**
 * Adds the current row to the per-thread row cache.
 */
public class SaveThreadRows implements RowCycleOperator {

    @Override
    public int apply(Row row, long cycle) {
        LinkedList<Row> rows = PerThreadCQLData.rows.get();
        rows.add(row);
        return 0;
    }

}
@ -1,30 +0,0 @@
package io.nosqlbench.activitytype.cqld4.statements.rowoperators;

import com.datastax.oss.driver.api.core.cql.ColumnDefinition;
import com.datastax.oss.driver.api.core.cql.Row;
import io.nosqlbench.activitytype.cqld4.api.RowCycleOperator;
import io.nosqlbench.virtdata.library.basics.core.threadstate.SharedState;

import java.util.HashMap;
import java.util.List;

/**
 * Saves all the values in this row to the thread-local object map,
 * with the field names as keys.
 */
public class SaveThreadVars implements RowCycleOperator {

    ThreadLocal<HashMap<String, Object>> tl_objectMap = SharedState.tl_ObjectMap;

    @Override
    public int apply(Row row, long cycle) {
        HashMap<String, Object> tlvars = tl_objectMap.get();
        for (ColumnDefinition cd : row.getColumnDefinitions()) {
            String name = cd.getName().toString();
            Object object = row.getObject(name);
            tlvars.put(name, object);
        }
        return 0;
    }

}
@ -1,26 +0,0 @@
package io.nosqlbench.activitytype.cqld4.statements.rsoperators;

import com.datastax.oss.driver.api.core.cql.AsyncResultSet;
import com.datastax.oss.driver.api.core.cql.ResultSet;
import com.datastax.oss.driver.api.core.cql.Statement;
import io.nosqlbench.activitytype.cqld4.api.D4ResultSetCycleOperator;
import io.nosqlbench.activitytype.cqld4.errorhandling.exceptions.ResultSetVerificationException;

/**
 * Throws a {@link ResultSetVerificationException} unless there is exactly one row in the result set.
 */
public class AssertSingleRowD4ResultSet implements D4ResultSetCycleOperator {

    @Override
    public int apply(AsyncResultSet resultSet, Statement<?> statement, long cycle) {
        int rowsIncoming = resultSet.remaining();
        if (rowsIncoming < 1) {
            throw new ResultSetVerificationException(cycle, resultSet, statement, "no row in result set, expected exactly 1");
        }
        if (rowsIncoming > 1) {
            throw new ResultSetVerificationException(cycle, resultSet, statement, "more than one row in result set, expected exactly 1");
        }
        return rowsIncoming;
    }

}
@ -1,16 +0,0 @@
package io.nosqlbench.activitytype.cqld4.statements.rsoperators;

import com.datastax.oss.driver.api.core.cql.AsyncResultSet;
import com.datastax.oss.driver.api.core.cql.ResultSet;
import com.datastax.oss.driver.api.core.cql.Statement;
import io.nosqlbench.activitytype.cqld4.api.D4ResultSetCycleOperator;
import io.nosqlbench.virtdata.library.basics.core.threadstate.SharedState;

public class ClearVars implements D4ResultSetCycleOperator {

    @Override
    public int apply(AsyncResultSet pageInfo, Statement<?> statement, long cycle) {
        SharedState.tl_ObjectMap.get().clear();
        return 0;
    }
}
@ -1,40 +0,0 @@
package io.nosqlbench.activitytype.cqld4.statements.rsoperators;

import com.datastax.oss.driver.api.core.cql.*;
import io.nosqlbench.activitytype.cqld4.api.D4ResultSetCycleOperator;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;

/**
 * Logs a debug-level summary for each result set, including the cycle,
 * remaining row count, paging state, and the statement, plus each row at trace level.
 */
public class CqlD4ResultSetLogger implements D4ResultSetCycleOperator {
    private final static Logger logger = LogManager.getLogger(CqlD4ResultSetLogger.class);

    private static String getQueryString(Statement stmt) {
        if (stmt instanceof PreparedStatement) {
            return "(prepared) " + ((PreparedStatement) stmt).getQuery();
        } else if (stmt instanceof SimpleStatement) {
            return "(simple) " + ((SimpleStatement) stmt).getQuery();
        } else if (stmt instanceof BoundStatement) {
            return "(bound) " + ((BoundStatement) stmt).getPreparedStatement().getQuery();
        } else {
            return "(unknown) " + stmt.toString();
        }
    }

    @Override
    public int apply(AsyncResultSet resultSet, Statement statement, long cycle) {
        logger.debug("result-set-logger: "
            + " cycle=" + cycle
            + " remaining=" + resultSet.remaining()
            + " hasmore=" + resultSet.hasMorePages()
            + " statement=" + getQueryString(statement).stripTrailing()
        );
        for (Row row : resultSet.currentPage()) {
            logger.trace(row.toString());
        }
        return 0;
    }
}
@ -1,14 +0,0 @@
package io.nosqlbench.activitytype.cqld4.statements.rsoperators;

import com.datastax.oss.driver.api.core.cql.Row;

import java.util.LinkedList;

/**
 * This contains a linked list of {@link Row} objects. This is per-thread.
 * You can use this list as a per-thread data cache for sharing data between
 * cycles in the same thread.
 */
public class PerThreadCQLData {
    public final static ThreadLocal<LinkedList<Row>> rows = ThreadLocal.withInitial(LinkedList::new);
}
@ -1,24 +0,0 @@
package io.nosqlbench.activitytype.cqld4.statements.rsoperators;

import com.datastax.oss.driver.api.core.cql.AsyncResultSet;
import com.datastax.oss.driver.api.core.cql.ResultSet;
import com.datastax.oss.driver.api.core.cql.Statement;
import io.nosqlbench.activitytype.cqld4.api.D4ResultSetCycleOperator;
import io.nosqlbench.virtdata.library.basics.core.threadstate.SharedState;

import java.util.HashMap;

public class PopVars implements D4ResultSetCycleOperator {

    @Override
    public int apply(AsyncResultSet resultSet, Statement<?> statement, long cycle) {
        HashMap<String, Object> stringObjectHashMap = SharedState.tl_ObjectMap.get();
        Object o = SharedState.tl_ObjectStack.get().pollLast();
        if (o instanceof HashMap) {
            SharedState.tl_ObjectMap.set((HashMap) o);
            return 0;
        } else {
            throw new RuntimeException("Tried to pop thread local data from stack, but there was none.");
        }
    }
}
@ -1,15 +0,0 @@
package io.nosqlbench.activitytype.cqld4.statements.rsoperators;

import com.datastax.oss.driver.api.core.cql.AsyncResultSet;
import com.datastax.oss.driver.api.core.cql.ResultSet;
import com.datastax.oss.driver.api.core.cql.Statement;
import io.nosqlbench.activitytype.cqld4.api.D4ResultSetCycleOperator;

public class Print implements D4ResultSetCycleOperator {

    @Override
    public int apply(AsyncResultSet resultSet, Statement<?> statement, long cycle) {
        System.out.println("RS:" + resultSet.toString());
        return 0;
    }
}
@ -1,20 +0,0 @@
package io.nosqlbench.activitytype.cqld4.statements.rsoperators;

import com.datastax.oss.driver.api.core.cql.AsyncResultSet;
import com.datastax.oss.driver.api.core.cql.Statement;
import io.nosqlbench.activitytype.cqld4.api.D4ResultSetCycleOperator;
import io.nosqlbench.virtdata.library.basics.core.threadstate.SharedState;

import java.util.HashMap;

public class PushVars implements D4ResultSetCycleOperator {

    @Override
    public int apply(AsyncResultSet resultSet, Statement<?> statement, long cycle) {
        HashMap<String, Object> existingVars = SharedState.tl_ObjectMap.get();
        HashMap<String, Object> topush = new HashMap<>(existingVars);

        SharedState.tl_ObjectStack.get().addLast(topush);
        return 0;
    }
}
@ -1,40 +0,0 @@
package io.nosqlbench.activitytype.cqld4.statements.rsoperators;

import io.nosqlbench.activitytype.cqld4.api.D4ResultSetCycleOperator;

public enum ResultSetCycleOperators {

    pushvars(PushVars.class),
    popvars(PopVars.class),
    clearvars(ClearVars.class),

    trace(TraceLogger.class),
    log(CqlD4ResultSetLogger.class),
    assert_singlerow(AssertSingleRowD4ResultSet.class),

    print(Print.class);

    private final Class<? extends D4ResultSetCycleOperator> implClass;

    ResultSetCycleOperators(Class<? extends D4ResultSetCycleOperator> implClass) {
        this.implClass = implClass;
    }

    public Class<? extends D4ResultSetCycleOperator> getImplementation() {
        return implClass;
    }

    public D4ResultSetCycleOperator getInstance() {
        try {
            return getImplementation().getConstructor().newInstance();
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }

    public static D4ResultSetCycleOperator newOperator(String name) {
        return ResultSetCycleOperators.valueOf(name).getInstance();
    }

}
@ -1,16 +0,0 @@
package io.nosqlbench.activitytype.cqld4.statements.rsoperators;

import com.datastax.oss.driver.api.core.cql.AsyncResultSet;
import com.datastax.oss.driver.api.core.cql.Row;
import com.datastax.oss.driver.api.core.cql.Statement;
import io.nosqlbench.activitytype.cqld4.api.D4ResultSetCycleOperator;

import java.util.LinkedList;

public class RowCapture implements D4ResultSetCycleOperator {
    @Override
    public int apply(AsyncResultSet resultSet, Statement<?> statement, long cycle) {
        // acquire the per-thread row cache which holds captured rows
        ThreadLocal<LinkedList<Row>> rows = PerThreadCQLData.rows;
        return 0;
    }
}
@ -1,95 +0,0 @@
package io.nosqlbench.activitytype.cqld4.statements.rsoperators;

import com.datastax.oss.driver.api.core.cql.*;
import io.nosqlbench.activitytype.cqld4.api.D4ResultSetCycleOperator;
import io.nosqlbench.activitytype.cqld4.core.StatementModifier;
import io.nosqlbench.engine.api.util.SimpleConfig;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;

import java.io.FileDescriptor;
import java.io.FileWriter;
import java.io.IOException;
import java.text.SimpleDateFormat;
import java.util.Date;

public class TraceLogger implements D4ResultSetCycleOperator, StatementModifier {

    private final static Logger logger = LogManager.getLogger(TraceLogger.class);

    private static final SimpleDateFormat sdf = new SimpleDateFormat("HH:mm:ss.SSS");
    private final long modulo;
    private final String filename;
    private final FileWriter writer;
    private final ThreadLocal<StringBuilder> tlsb = ThreadLocal.withInitial(StringBuilder::new);

    public TraceLogger(SimpleConfig conf) {
        this(
            conf.getLong("modulo").orElse(1L),
            conf.getString("filename").orElse("tracelog")
        );
    }

    public TraceLogger(long modulo, String filename) {
        this.modulo = modulo;
        this.filename = filename;
        try {
            if (filename.equals("stdout")) {
                writer = new FileWriter(FileDescriptor.out);
            } else {
                writer = new FileWriter(filename);
            }
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }

    @Override
    public int apply(AsyncResultSet rs, Statement<?> statement, long cycle) {
        rs.getExecutionInfo().getQueryTrace();
        if ((cycle % modulo) != 0) {
            return 0;
        }

        ExecutionInfo ei = rs.getExecutionInfo();
        QueryTrace qt = ei.getQueryTrace();
        StringBuilder sb = tlsb.get();
        sb.setLength(0);
        sb.append("\n---------------------------- QueryTrace Summary ---------------------------\n");
        sb.append("\n Coordinator: ").append(qt.getCoordinator());
        sb.append("\n Cycle: ").append(cycle);
        sb.append("\nServer-side query duration (us): ").append(qt.getDurationMicros());
        sb.append("\n Request type: ").append(qt.getRequestType());
        sb.append("\n Start time: ").append(qt.getStartedAt());
        sb.append("\n Trace UUID: ").append(qt.getTracingId());
        sb.append("\n Params: ").append(qt.getParameters());
        sb.append("\n--------------------------------------------------------------------------\n");
        sb.append("\n---------------------------- QueryTrace Events ---------------------------\n");
        for (TraceEvent event : qt.getEvents()) {
            sb.append("\n Date: ").append(sdf.format(new Date(event.getTimestamp())));
            sb.append("\n Source: ").append(event.getSource());
            sb.append("\nSourceElapsedMicros: ").append(event.getSourceElapsedMicros());
            sb.append("\n Thread: ").append(event.getThreadName());
            sb.append("\n Activity: ").append(event.getActivity()).append("\n");
        }
        sb.append("\n--------------------------------------------------------------------------\n");

        try {
            writer.append(sb.toString());
            writer.flush();
        } catch (IOException e) {
            throw new RuntimeException(e);
        }

        return 0;
    }

    @Override
    public Statement modify(Statement statement, long cycle) {
        if ((cycle % modulo) == 0) {
            return statement.setTracing(true);
        }
        return statement;
    }
}
@ -1,2 +0,0 @@
io.nosqlbench.virtdata.api.processors.FunctionDocInfoProcessor
io.nosqlbench.nb.annotations.ServiceProcessor
@ -1,106 +0,0 @@
# nb -v run driver=cql yaml=cql-iot tags=phase:schema host=dsehost
description: An IoT workload with more optimal DSE settings

scenarios:
  default:
    - run driver=cql tags==phase:schema threads==1 cycles==UNDEF
    - run driver=cql tags==phase:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
    - run driver=cql tags==phase:main cycles===TEMPLATE(main-cycles,10000000) threads=auto
bindings:
  machine_id: Mod(<<sources:10000>>); ToHashedUUID() -> java.util.UUID
  sensor_name: HashedLineToString('data/variable_words.txt')
  time: Mul(<<timespeed:100>>L); Div(<<sources:10000>>L); ToDate()
  cell_timestamp: Mul(<<timespeed:100>>L); Div(<<sources:10000>>L); Mul(1000L)
  sensor_value: Normal(0.0,5.0); Add(100.0) -> double
  station_id: Div(<<sources:10000>>);Mod(<<stations:100>>); ToHashedUUID() -> java.util.UUID
  data: HashedFileExtractToString('data/lorem_ipsum_full.txt',800,1200)
blocks:
  - tags:
      phase: schema
    params:
      prepared: false
    statements:
      - create-keyspace: |
          create keyspace if not exists <<keyspace:baselines>>
          WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '<<rf:1>>'}
          AND durable_writes = true;
        tags:
          name: create-keyspace
      - create-table: |
          create table if not exists <<keyspace:baselines>>.<<table:iot>> (
          machine_id UUID,     // source machine
          sensor_name text,    // sensor name
          time timestamp,      // timestamp of collection
          sensor_value double, //
          station_id UUID,     // source location
          data text,
          PRIMARY KEY ((machine_id, sensor_name), time)
          ) WITH CLUSTERING ORDER BY (time DESC)
          AND compression = { 'sstable_compression' : '<<compression:LZ4Compressor>>' }
          AND nodesync={'enabled': 'true'}
          AND compaction = {
          'class': 'TimeWindowCompactionStrategy',
          'compaction_window_size': <<expiry_minutes:60>>,
          'compaction_window_unit': 'MINUTES',
          'split_during_flush': true
          };
        tags:
          name: create-table
      - truncate-table: |
          truncate table <<keyspace:baselines>>.<<table:iot>>;
        tags:
          name: truncate-table
  - tags:
      phase: rampup
    params:
      cl: <<write_cl:LOCAL_QUORUM>>
    statements:
      - insert-rampup: |
          insert into <<keyspace:baselines>>.<<table:iot>>
          (machine_id, sensor_name, time, sensor_value, station_id, data)
          values ({machine_id}, {sensor_name}, {time}, {sensor_value}, {station_id}, {data})
          using timestamp {cell_timestamp}
        idempotent: true
        tags:
          name: insert-rampup
  - tags:
      phase: verify
      type: read
    params:
      ratio: 1
      cl: <<read_cl:LOCAL_QUORUM>>
    statements:
      - select-verify: |
          select * from <<keyspace:baselines>>.<<table:iot>>
          where machine_id={machine_id} and sensor_name={sensor_name} and time={time};
        verify-fields: "*, -cell_timestamp"
        tags:
          name: select-verify
  - tags:
      phase: main
      type: read
    params:
      ratio: <<read_ratio:1>>
      cl: <<read_cl:LOCAL_QUORUM>>
    statements:
      - select-read: |
          select * from <<keyspace:baselines>>.<<table:iot>>
          where machine_id={machine_id} and sensor_name={sensor_name}
          limit <<limit:10>>
        tags:
          name: select-read
  - tags:
      phase: main
      type: write
    params:
      ratio: <<write_ratio:9>>
      cl: <<write_cl:LOCAL_QUORUM>>
    statements:
      - insert-main: |
          insert into <<keyspace:baselines>>.<<table:iot>>
          (machine_id, sensor_name, time, sensor_value, station_id, data)
          values ({machine_id}, {sensor_name}, {time}, {sensor_value}, {station_id}, {data})
          using timestamp {cell_timestamp}
        idempotent: true
        tags:
          name: insert-main
@ -1,93 +0,0 @@
---
title: CQL IoT
weight: 2
---

# CQL IoT

## Description

The CQL IoT workload demonstrates a time-series telemetry system as typically found in IoT applications. The bulk of
the traffic is telemetry ingest, which makes this workload useful for establishing steady-state capacity with an
actively managed data lifecycle. It is a steady-state workload, where inserts are 90% of the operations and queries
are the remaining 10%.

## Named Scenarios

### default

The default scenario for cql-iot.yaml runs the conventional test phases: schema, rampup, and main.
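The same phases can also be run individually in the style shown at the top of the workload yaml (the host value is a placeholder):

    nb -v run driver=cql yaml=cql-iot tags=phase:schema host=dsehost
    nb -v run driver=cql yaml=cql-iot tags=phase:rampup host=dsehost
    nb -v run driver=cql yaml=cql-iot tags=phase:main host=dsehost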
## Testing Considerations

For in-depth testing, this workload will take some time to build up data density to the point where TTLs begin purging
expired data. At that point, the test should be considered steady-state.

## Data Set

### baselines.iot dataset (rampup,main)

- machine_id - 1000 unique values
- sensor_name - 100 symbolic names, from a seed file
- time - monotonically increasing timestamp
- station_id - 100 unique values
- sensor_value - normal distribution, median 100, stddev 5.0

## Operations

### insert (rampup, main)

    insert into baselines.iot
    (machine_id, sensor_name, time, sensor_value, station_id)
    values (?,?,?,?,?)

### query (main)

    select * from baselines.iot
    where machine_id=? and sensor_name=?
    limit 10

## Workload Parameters

This workload has no adjustable parameters when used in the baseline tests.

When used for additional testing, the following parameters should be supported:

- machines - the number of unique sources (default: 1000)
- stations - the number of unique stations (default: 100)
- limit - the limit for rows in reads (default: 10)
- expiry_minutes - the TTL for data in minutes
- compression - enabled or disabled; to disable, set compression=''
- write_cl - the consistency level for writes (default: LOCAL_QUORUM)
- read_cl - the consistency level for reads (default: LOCAL_QUORUM)
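As a sketch, assuming these parameters are passed as name=value arguments in the same way as the cycle counts above (the host value is a placeholder):

    nb -v run driver=cql yaml=cql-iot tags=phase:main machines=10000 stations=1000 limit=100 host=dsehost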
## Key Performance Metrics

Client side metrics are a more accurate measure of the system behavior from a user's perspective. For microbench and
baseline tests, these are the only required metrics. When gathering metrics from multiple server nodes, they should be
kept in aggregate form, with min, max, and average for each time interval in monitoring. For example, the avg p99
latency for reads should be kept, as well as the min p99 latency for reads. If possible, metrics should be kept in
plot form, with discrete histogram values per interval.

### Client-Side

- read ops/s
- write ops/s
- read latency histograms
- write latency histograms
- exception counts

### Server-Side

- bytes compacted over time
- pending compactions
- active data on disk
- total data on disk

## Notes on Interpretation

- In order for this test to show useful performance contrasts, it has to be ramped to steady-state.
- Ingest of 1G rows yields an on-disk data density of 20.8 GB using default compression settings.
@ -1,117 +0,0 @@
# nb -v run driver=cql yaml=cql-iot tags=phase:schema host=dsehost
description: |
  This workload emulates a time-series data model and access patterns.

scenarios:
  default:
    - run driver=cql tags==phase:schema threads==1 cycles==UNDEF
    - run driver=cql tags==phase:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
    - run driver=cql tags==phase:main cycles===TEMPLATE(main-cycles,10000000) threads=auto
params:
  instrument: TEMPLATE(instrument,false)
bindings:
  machine_id: Mod(<<sources:10000>>); ToHashedUUID() -> java.util.UUID
  sensor_name: HashedLineToString('data/variable_words.txt')
  time: Mul(<<timespeed:100>>L); Div(<<sources:10000>>L); ToDate()
  cell_timestamp: Mul(<<timespeed:100>>L); Div(<<sources:10000>>L); Mul(1000L)
  sensor_value: Normal(0.0,5.0); Add(100.0) -> double
  station_id: Div(<<sources:10000>>);Mod(<<stations:100>>); ToHashedUUID() -> java.util.UUID
  data: HashedFileExtractToString('data/lorem_ipsum_full.txt',800,1200)
blocks:
  - tags:
      phase: schema
    params:
      prepared: false
    statements:
      - create-keyspace: |
          create keyspace if not exists <<keyspace:baselines>>
          WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '<<rf:1>>'}
          AND durable_writes = true;
        tags:
          name: create-keyspace
      - create-table: |
          create table if not exists <<keyspace:baselines>>.<<table:iot>> (
          machine_id UUID,     // source machine
          sensor_name text,    // sensor name
          time timestamp,      // timestamp of collection
          sensor_value double, //
          station_id UUID,     // source location
          data text,
          PRIMARY KEY ((machine_id, sensor_name), time)
          ) WITH CLUSTERING ORDER BY (time DESC)
          AND compression = { 'sstable_compression' : '<<compression:LZ4Compressor>>' }
          AND compaction = {
          'class': 'TimeWindowCompactionStrategy',
          'compaction_window_size': <<expiry_minutes:60>>,
          'compaction_window_unit': 'MINUTES'
          };
        tags:
          name: create-table
      - truncate-table: |
          truncate table <<keyspace:baselines>>.<<table:iot>>;
        tags:
          name: truncate-table
  - tags:
      phase: rampup
    params:
      cl: <<write_cl:LOCAL_QUORUM>>
    statements:
      - insert-rampup: |
          insert into <<keyspace:baselines>>.<<table:iot>>
          (machine_id, sensor_name, time, sensor_value, station_id, data)
          values ({machine_id}, {sensor_name}, {time}, {sensor_value}, {station_id}, {data})
          using timestamp {cell_timestamp}
        idempotent: true
        tags:
          name: insert-rampup
        params:
          instrument: TEMPLATE(instrument-writes,TEMPLATE(instrument,false))
  - tags:
      phase: verify
      type: read
    params:
      ratio: 1
      cl: <<read_cl:LOCAL_QUORUM>>
    statements:
      - select-verify: |
          select * from <<keyspace:baselines>>.<<table:iot>>
          where machine_id={machine_id} and sensor_name={sensor_name} and time={time};
        verify-fields: "*, -cell_timestamp"
        tags:
          name: select-verify
        params:
          instrument: TEMPLATE(instrument-reads,TEMPLATE(instrument,false))
  - tags:
      phase: main
      type: read
    params:
      ratio: <<read_ratio:1>>
      cl: <<read_cl:LOCAL_QUORUM>>
    statements:
      - select-read: |
          select * from <<keyspace:baselines>>.<<table:iot>>
          where machine_id={machine_id} and sensor_name={sensor_name}
          limit <<limit:10>>
        tags:
          name: select-read
        params:
          instrument: TEMPLATE(instrument-reads,TEMPLATE(instrument,false))

  - tags:
      phase: main
      type: write
    params:
      ratio: <<write_ratio:9>>
      cl: <<write_cl:LOCAL_QUORUM>>
    statements:
      - insert-main: |
          insert into <<keyspace:baselines>>.<<table:iot>>
          (machine_id, sensor_name, time, sensor_value, station_id, data)
          values ({machine_id}, {sensor_name}, {time}, {sensor_value}, {station_id}, {data})
          using timestamp {cell_timestamp}
        idempotent: true
        tags:
          name: insert-main
        params:
          instrument: TEMPLATE(instrument-writes,TEMPLATE(instrument,false))
@ -1,77 +0,0 @@
---
title: CQL Key-Value
weight: 1
---

## Description

The CQL Key-Value workload demonstrates the simplest possible schema with payload data. This is useful for measuring
system capacity most directly in terms of raw operations. As a reference point, it provides some insight into the
types of workloads that are constrained by messaging, threading, and tasking, rather than bulk throughput.

During preload, all keys are set with a value. During the main phase of the workload, random keys (partitions) from
the known population are selected for upsert, with new values that never repeat.

## Operations

### insert (rampup, main)

    insert into baselines.keyvalue (key, value) values (?,?);

### read (main)

    select * from baselines.keyvalue where key=?key;

## Data Set

### baselines.keyvalue insert (rampup)

- key - text, number as string, selected sequentially up to keycount
- value - text, number as string, selected sequentially up to valuecount

### baselines.keyvalue insert (main)

- key - text, number as string, selected uniformly within keycount
- value - text, number as string, selected uniformly within valuecount

### baselines.keyvalue read (main)

- key - text, number as string, selected uniformly within keycount

## Workload Parameters

This workload has no adjustable parameters when used in the baseline tests.

When used for additional testing, the following parameters should be supported:

- keycount - the number of unique keys
- valuecount - the number of unique values
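A sketch of a run overriding these, assuming they are passed as name=value arguments like other template parameters (the host value is a placeholder):

    nb -v run driver=cql yaml=cql-keyvalue tags=phase:main keycount=1000000 valuecount=1000000 host=dsehost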
## Key Performance Metrics

Client side metrics are a more accurate measure of the system behavior from a user's perspective. For microbench and
baseline tests, these are the only required metrics. When gathering metrics from multiple server nodes, they should be
kept in aggregate form, with min, max, and average for each time interval in monitoring. For example, the avg p99
latency for reads should be kept, as well as the min p99 latency for reads. If possible, metrics should be kept in
plot form, with discrete histogram values per interval.

### Client-Side

- read ops/s
- write ops/s
- read latency histograms
- write latency histograms
- exception counts

### Server-Side

- pending compactions
- bytes compacted
- active data on disk
- total data on disk

## Notes on Interpretation

Once the average ratio of overwrites starts to balance with the rate of compaction, a steady state should be achieved.
At this point, pending compactions and bytes compacted should be mostly flat over time.
@ -1,82 +0,0 @@
# nb -v run driver=cql yaml=cql-keyvalue tags=phase:schema host=dsehost
scenarios:
  default:
    - run driver=cql tags==phase:schema threads==1 cycles==UNDEF
    - run driver=cql tags==phase:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
    - run driver=cql tags==phase:main cycles===TEMPLATE(main-cycles,10000000) threads=auto
bindings:
  seq_key: Mod(<<keycount:1000000000>>); ToString() -> String
  seq_value: Hash(); Mod(<<valuecount:1000000000>>); ToString() -> String
  rw_key: <<keydist:Uniform(0,1000000000)->int>>; ToString() -> String
  rw_value: Hash(); <<valdist:Uniform(0,1000000000)->int>>; ToString() -> String

blocks:
  - name: schema
    tags:
      phase: schema
    params:
      prepared: false
    statements:
      - create-keyspace: |
          create keyspace if not exists <<keyspace:baselines>>
          WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '<<rf:1>>'}
          AND durable_writes = true;
        tags:
          name: create-keyspace
      - create-table: |
          create table if not exists <<keyspace:baselines>>.<<table:keyvalue>> (
          key text,
          value text,
          PRIMARY KEY (key)
          );
        tags:
          name: create-table
  - name: rampup
    tags:
      phase: rampup
    params:
      cl: <<write_cl:LOCAL_QUORUM>>
    statements:
      - rampup-insert: |
          insert into <<keyspace:baselines>>.<<table:keyvalue>>
          (key, value)
          values ({seq_key},{seq_value});
        tags:
          name: rampup-insert
  - name: verify
    tags:
      phase: verify
      type: read
    params:
      cl: <<read_cl:LOCAL_QUORUM>>
    statements:
      - verify-select: |
          select * from <<keyspace:baselines>>.<<table:keyvalue>> where key={seq_key};
        verify-fields: key->seq_key, value->seq_value
        tags:
          name: verify
  - name: main-read
    tags:
      phase: main
      type: read
    params:
      ratio: 5
      cl: <<read_cl:LOCAL_QUORUM>>
    statements:
      - main-select: |
          select * from <<keyspace:baselines>>.<<table:keyvalue>> where key={rw_key};
        tags:
          name: main-select
  - name: main-write
    tags:
      phase: main
      type: write
    params:
      ratio: 5
      cl: <<write_cl:LOCAL_QUORUM>>
    statements:
      - main-insert: |
          insert into <<keyspace:baselines>>.<<table:keyvalue>>
          (key, value) values ({rw_key}, {rw_value});
        tags:
          name: main-insert
@ -1,91 +0,0 @@
# nb -v cql-tabular rampup-cycles=1E6 main-cycles=1E9
scenarios:
  default:
    - run driver=cql tags==phase:schema threads==1 cycles==UNDEF
    - run driver=cql tags==phase:rampup cycles===TEMPLATE(rampup-cycles,10000000) threads=auto
    - run driver=cql tags==phase:main cycles===TEMPLATE(main-cycles,10000000) threads=auto
bindings:
  # for ramp-up and verify
  part_layout: Div(<<partsize:1000000>>); ToString() -> String
  clust_layout: Mod(<<partsize:1000000>>); ToString() -> String
  data: HashedFileExtractToString('data/lorem_ipsum_full.txt',50,150)
  # for read
  limit: Uniform(1,10) -> int
  part_read: Uniform(0,<<partcount:100>>)->int; ToString() -> String
  clust_read: Add(1); Uniform(0,<<partsize:1000000>>)->int; ToString() -> String
  # for write
  part_write: Hash(); Uniform(0,<<partcount:100>>)->int; ToString() -> String
  clust_write: Hash(); Add(1); Uniform(0,<<partsize:1000000>>)->int; ToString() -> String
  data_write: Hash(); HashedFileExtractToString('data/lorem_ipsum_full.txt',50,150) -> String

blocks:
  - name: schema
    tags:
      phase: schema
    params:
      prepared: false
    statements:
      - create-keyspace: |
          create keyspace if not exists <<keyspace:baselines>>
          WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '<<rf:1>>'}
          AND durable_writes = true;
        tags:
          name: create-keyspace
      - create-table: |
          create table if not exists <<keyspace:baselines>>.<<table:tabular>> (
          part text,
          clust text,
          data text,
          PRIMARY KEY (part,clust)
          );
        tags:
          name: create-table
  - name: rampup
    tags:
      phase: rampup
    params:
      cl: <<write_cl:LOCAL_QUORUM>>
    statements:
      - rampup-insert: |
          insert into <<keyspace:baselines>>.<<table:tabular>>
          (part,clust,data)
          values ({part_layout},{clust_layout},{data})
        tags:
          name: rampup-insert
  - name: verify
    tags:
      phase: verify
      type: read
    params:
      cl: <<read_cl:LOCAL_QUORUM>>
    statements:
      - verify-select: |
          select * from <<keyspace:baselines>>.<<table:tabular>> where part={part_layout} and clust={clust_layout}
        tags:
          name: verify-select
  - name: main-read
    tags:
      phase: main
      type: read
    params:
      ratio: 5
      cl: <<read_cl:LOCAL_QUORUM>>
    statements:
      - main-select: |
          select * from <<keyspace:baselines>>.<<table:tabular>> where part={part_read} limit {limit};
        tags:
          name: main-select
  - name: main-write
    tags:
      phase: main
      type: write
    params:
      ratio: 5
      cl: <<write_cl:LOCAL_QUORUM>>
    statements:
      - main-write: |
          insert into <<keyspace:baselines>>.<<table:tabular>>
          (part, clust, data)
          values ({part_write},{clust_write},{data_write})
        tags:
          name: main-write
@ -1,86 +0,0 @@
---
title: CQL Wide Rows
weight: 3
---

## Description

The CQL Wide Rows workload provides a way to tax a system with wide rows of a given size. This is useful to help
understand underlying performance differences between versions and configuration options when using data models that
have wide rows.

For in-depth testing, this workload needs significant density of partitions in combination with fully populated wide
rows. For exploratory or parameter contrasting tests, ensure that the rampup phase is configured correctly to
establish this initial state, as in the sizing sketch below.
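As a sizing sketch, under the usual sequential rampup bindings full population takes one rampup cycle per logical row,
or partcount × partsize cycles in total; for example, 100 partitions of 1,000,000 rows each call for 1E8 rampup cycles.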
## Data Set

### baselines.widerows dataset (rampup)

- part - text, number in string form, sequentially from 1..1E9
- clust - text, number in string form, sequentially from 1..1E9
- data - text, extract from lorem ipsum between 50 and 150 characters

### baselines.widerows dataset (main)

- part - text, number in string form, sequentially from 1..1E9
- clust - text, number in string form, sequentially from 1..<partsize>
- data - text, extract from lorem ipsum between 50 and 150 characters

## Operations

### insert (rampup, main)

    insert into baselines.iot
    (machine_id, sensor_name, time, sensor_value, station_id)
    values (?,?,?,?,?)

### query (main)

    select * from baselines.iot
    where machine_id=? and sensor_name=?
    limit 10

## Workload Parameters

This workload has no adjustable parameters when used in the baseline tests.

When used for additional testing, the following parameters should be supported:

- partcount - the number of unique partitions
- partsize - the number of logical rows within a CQL partition
## Key Performance Metrics
|
||||
|
||||
Client side metrics are a more accurate measure of the system behavior from a user's perspective. For microbench and
|
||||
baseline tests, these are the only required metrics. When gathering metrics from multiple server nodes, they should be
|
||||
kept in aggregate form, for min, max, and average for each time interval in monitoring. For example, the avg p99 latency
|
||||
for reads should be kept, as well as the min p99 latency for reads. If possible metrics, should be kept in plot form,
|
||||
with discrete histogram values per interval.
|
||||
|
||||
### Client-Side
|
||||
|
||||
- read ops/s
|
||||
- write ops/s
|
||||
- read latency histograms
|
||||
- write latency histograms
|
||||
- exception counts
|
||||
|
||||
### Server-Side
|
||||
|
||||
- bytes compacted over time
|
||||
- pending compactions
|
||||
- active data on disk
|
||||
- total data on disk
|
||||
|
||||
## Notes on Interpretation
|
||||
|
||||
|
||||
|
||||
|
@ -1,97 +0,0 @@
# cql driver - advanced features

This is an addendum to the standard CQL Activity Type docs. For those,
see "cql". Use the features in this guide carefully. They do not come
with as much documentation because they are less used than the main CQL
features.

### ResultSet and Row operators

Within the CQL Activity type, in synchronous mode (activities without the
async= parameter), you have the ability to attach operators to a given
statement such that it will get per-statement handling. These operators
are ways of interrogating the result of an operation, saving values, or
managing other side-effects for specific types of testing.

When enabled for a statement, operators are applied in this order:

1. Activity-level ResultSet operators are applied in specified order.
2. Statement-level ResultSet operators are applied in specified order.
3. Activity-level Row operators are applied in specified order.
4. Statement-level Row operators are applied in specified order.

The result set handling does not go to any extra steps of making
a copy of the data. When a row is read from the result set,
it is consumed from it. Thus, if you want to do anything with
row data, you must apply a row operator as explained below.

### CQL Statement Parameters

- **rsoperators** - If provided as a CQL statement param, then the
  list of operator names that follow, separated by a comma, will
  be used to attach ResultSet operators to the given statement.
  Such operators act on the whole result set of a statement.

- **rowoperators** - If provided as a CQL statement param, then the
  list of operator names that follow, separated by a comma, will
  be used to attach Row operators to the given statement.

## Available ResultSet Operators

- pushvars - Push a copy of the current thread local variables onto
  the thread-local stack. This does nothing with the ResultSet data,
  but is meant to be used for stateful management of these in
  conjunction with the row operators below.
- popvars - Pop the last thread local variable set from the thread-local
  stack into vars, replacing the previous content. This does nothing
  with the ResultSet data.
- clearvars - Clears the contents of the thread local variables. This
  does nothing with the ResultSet data.
- trace - Flags a statement to be traced on the server-side and then
  logs the details of the trace to the trace log file.
- log - Logs basic data to the main log. This is useful to verify that
  operators are loading and triggering as expected.
- assert_singlerow - Throws an exception (ResultSetVerificationException)
  if the ResultSet has more or less than one row.

Examples:

```yaml
statements:
  - s1: |
      a statement
    rsoperators: pushvars, clearvars
```

## Available Row Operators

- savevars - Copies the values of the row into the thread-local variables.
- saverows - Copies the rows into a special CQL-only thread local row state.

Examples:

```yaml
statements:
  - s2: |
      a statement
    rowoperators: saverows
```

## Injecting additional Queries (Future)

It is possible to inject new operations into an activity. However, such operations are _indirect_ to cycles, since they
must be based on the results of other operations. As such, they will not be represented in cycle output or other
advanced features. This is a specific feature for the CQL activity -- implemented internal to the way a CQL cycle is
processed. A future version of NoSQLBench will provide a more uniform way to achieve this result across activity types.
For now, remember that this is a CQL-only capability.

- subquery-statement - Adds additional operations to the current cycle, based
  on the contents of the CQL-only thread-local row state. The value of this
  parameter is the name of a statement in the current YAML. Each row is
  consumed from this list, and a new operation is added to the current cycle.
- subquery-concurrency - Allow subqueries to execute with concurrency, up to
  the level specified.
  default: 1
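
As a sketch of how these parameters could fit together in a statement block
(the statement names, CQL, and binding here are illustrative, not from a
bundled workload):

```yaml
statements:
  - list-users: |
      select userid from example.users
    rowoperators: saverows
    subquery-statement: read-user
    subquery-concurrency: 2
  - read-user: |
      select * from example.user_details where userid={userid}
```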
@ -1,198 +0,0 @@
# cql error handling

The error handling facility utilizes a type-aware error handler
provided by nosqlbench. However, it is much more modular and configurable
than most error handlers found in other testing tools. The trade-off here
is that so many options may bewilder newer users. If you agree, then
simply use one of these basic recipes in your activity parameters:

    # error and stop on *any* exception
    # incidentally, this is the same as the deprecated diagnose=true option
    errors=stop

    # error and stop for (usually) unrecoverable errors
    # warn and retry everything else (this is actually the default)

    errors=stop,retryable->retry

    # record histograms for WriteTimeoutException, error and stop
    # for everything else.

    errors=stop,WriteTimeoutException:histogram

As you can see, the error handling format is pretty basic. Behind this basic
format is a modular and flexible configuration scheme that should allow for
either simple or advanced testing setups. The errors value is simply a list of
error-to-handler-verb mappings, but it also allows a simple verb to be
specified to cover all error types. Going from left to right, each mapping is
applied in order. You can use any of ':', '->', or '=' for the error-to-verb
assignment operator.

Any time you assign a value to the *errors* parameter for a cql activity, you are
replacing the default 'stop,retryable->retry,unverified->stop' configuration.
That is, each time this value is assigned, a new error handler is configured and
installed according to the new value.

### errors= parameter format

The errors parameter contains a comma-separated list of one or more
handler assignments where the error can be in any of these forms:

- a group name ( "unapplied" | "retryable" | "unverified" )
- a single exception name like 'WriteTimeoutException', or a substring of
  that which is long enough to avoid ambiguity (only one match allowed)
- a regex, like '.*WriteTimeout.*' (multiple matches allowed)

The verb can be any of the named starting points in the error handler
stack, as explained below.
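
For example, a regex mapping can be combined with a single-word default verb
(a sketch using the format above):

    # count any timeout-like errors by regex; stop on everything else
    errors=stop,.*Timeout.*:count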

As a special case, if the handler assignment consists of only a single word,
then it is assumed to be the default handler verb. This gets applied
as a last resort to any errors which do not match another handler by class
type or parent class type. This allows for simple hard-wiring of a
default handler for all non-specific errors in the form:

    # force the test to stop with any error, even retryable ones
    errors=stop

### Error Handler Verbs

When an error occurs, you can control how it is handled for the most part.
This is the error handler stack:

- **stop** - logs an error, and then rethrows the causing exception,
  causing nosqlbench to shut down the current scenario.
- **warn** - log a warning in the log, with details about the error
  and associated statement.
- **retry** - Retry the operation if the number of retries hasn't been
  used up *and* the causing exception falls in the set of
  *retryable* errors.
- **histogram** - keep a histogram of the exception counts, under the
  name errorhistos.classname, using the simple class name.
  The magnitude of these histos is how long the operation was pending
  before the related error occurred.
- **count** - keep a count in metrics for the exception, under the name
  errorcounts.classname, using the simple class name.
- **ignore** - do nothing, do not even retry or count

The handling verbs above are ordered from most invasive to least
invasive, starting at the top. With the exception of the **stop**
handler, the rest of them will be applied to an error all the way
to the bottom. For now, the error handling stack is exactly as above.
You can't modify it, although it may be made configurable in the future.

One way to choose the right handler is to ask "How serious is this type
of error to the test results if it happens?" In general, it is best
to be conservative and choose a more aggressive setting, unless you
specifically want to measure how often a given error happens, for example.

Each exception type will have one and only one error handler at all times.
No matter how you set an error handler for a class, only the most
recently assigned handler stack will be active for it. This might be
important to keep in mind when you make multiple assignments to potentially
overlapping sets of error types. In any case, the default 'stop' handler
will always catch an error that does not otherwise have a more specific
handler assigned to it.
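
For instance, in the following sketch, WriteTimeoutException belongs to the
retryable group, but the later, more specific mapping wins for it:

    errors=stop,retryable->retry,WriteTimeoutException->count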

##### Error Types

The errors that can be handled are simply all the exception types that
can be thrown by either the DataStax Java Driver for DSE, *or* the
nosqlbench client itself. This includes errors that indicate a potentially
intermittent failure condition. It also includes errors that are more
permanent in nature, like WriteFailure, which would continue to occur
on subsequent retries without some form of intervention. The nosqlbench
application will also generate some additional exceptions that capture
common error cases that the Java driver doesn't or shouldn't have a
special case for, but which may be important for nosqlbench testing purposes.

In nosqlbench, all error handlers are specific to a particular kind of
exception that you would catch in a typical application that uses DSE,
although you can tell a handler to take care of a whole category
of problems as long as you know the right name to use.

##### Assigned by Java Exception Type

Error handlers can be assigned to a common parent type in order to also handle
all known subtypes, hence the default on the top line applies to all of the
driver exceptions that do not have a more specific handler assigned, either
by a closer parent or directly.

##### Assigning by Error Group Name

Error types for which you would commonly assign the same handling behavior
are also grouped under predefined names. If a handler is assigned to one
of the group names, then the handler is assigned to all of the exceptions
in the group individually. For example, 'errors=retryable=stop' assigns the
stop handler to every exception in the retryable group.

### Recognized Exceptions

The whole hierarchy of exceptions as of DSE Driver 3.2.0 is as follows,
with the default configuration shown.

    DriverException -> stop
      FrameTooLongException
      CodecNotFoundException
      AuthenticationException
      TraceRetrievalException
      UnsupportedProtocolVersionException
      NoHostAvailableException -> retry (group: retryable)
      QueryValidationException (abstract)
        InvalidQueryException
          InvalidConfigurationInQueryException
        UnauthorizedException
        SyntaxError
        AlreadyExistsException
        UnpreparedException
      InvalidTypeException
      QueryExecutionException (abstract)
        UnavailableException
        BootstrappingException -> retry (group: retryable)
        OverloadedException -> retry (group: retryable)
        TruncateException
        QueryConsistencyException (abstract)
          WriteTimeoutException -> retry (group: retryable)
          WriteFailureException -> retry (group: retryable)
          ReadFailureException
          ReadTimeoutException
        FunctionExecutionException
      DriverInternalError
        ProtocolError
        ServerError
      BusyPoolException
      ConnectionException
        TransportException
        OperationTimedOutException -> retry (group: retryable)
      PagingStateException
      UnresolvedUserTypeException
      UnsupportedFeatureException
      BusyConnectionException
    EbdseException (abstract) -> stop
      CQLResultSetException (abstract)
        UnexpectedPagingException
        ResultSetVerificationException
        RowVerificationException
      ChangeUnappliedCycleException (group:unapplied)
      RetriesExhaustedCycleException -> count

##### Additional Exceptions

The following exceptions are synthesized directly by nosqlbench, but get
handled alongside the normal exceptions as explained above.

1. ChangeUnappliedException - The change unapplied condition is important to
   detect when it is not expected, although some testing may intentionally send
   changes that can't be applied. For this reason, it is kept as a separately
   controllable error group "unapplied".
2. UnexpectedPaging - The UnexpectedPaging exception is meant to keep users from
   being surprised when there is paging activity in the workload, as this can have
   other implications for tuning and performance. See the details on the
   **maxpages** parameter, and the *fetch size* parameter in the java driver for
   details.
3. Unverified\* Exceptions - For data set verification; these exceptions
   indicate when a cqlverify activity has found rows that differ from what
   was expected.
4. RetriesExhaustedException - Indicates that all retries were exhausted before
   a given operation could complete successfully.
@ -1,42 +0,0 @@
DriverException -> stop
 1 FrameTooLongException
 2 CodecNotFoundException
 3 AuthenticationException
 4 TraceRetrievalException
 5 UnsupportedProtocolVersionException
 6 NoHostAvailableException
 7 QueryValidationException (abstract)
 8   InvalidQueryException
 9     InvalidConfigurationInQueryException
10   UnauthorizedException
11   SyntaxError
12   AlreadyExistsException
13   UnpreparedException
14 InvalidTypeException
15 QueryExecutionException (abstract) -> retry
16   UnavailableException
17   BootstrappingException
18   OverloadedException
19   TruncateException
20   QueryConsistencyException (abstract)
21     WriteTimeoutException
22     WriteFailureException
23     ReadFailureException
24     ReadTimeoutException
25   FunctionExecutionException
26 DriverInternalError
27   ProtocolError
28   ServerError
29 BusyPoolException
30 ConnectionException
31   TransportException
32   OperationTimedOutException
33 PagingStateException
34 UnresolvedUserTypeException
35 UnsupportedFeatureException
36 BusyConnectionException
41 EbdseCycleException (abstract) -> stop
37   ChangeUnappliedCycleException
38   ResultSetVerificationException
39   RowVerificationException (abstract)
40   UnexpectedPagingException
@ -1,402 +0,0 @@
# cql-d4 driver

This is the CQL version 4 driver for NoSQLBench. As it gets more use, we will make it the primary driver under the
'cql' name. For now, 'cql' refers to the version 1.9 driver, while 'cqld4' refers to this one. The drivers will have
identical features where possible, but new enhancements will be targeted at this one first.

In the alpha release of this NoSQLBench CQL driver, some of the options previously available on the CQL 1.9 driver will
not be supported. We are working to add these in an idiomatic way ASAP.

This is a driver which allows for the execution of CQL statements. It supports both sync and async modes, with
detailed metrics provided for both.

TEMPORARY EDITORS NOTE: This will use a more consistent layout as shown below. The topics are meant to be searchable in
the newer doc system scheme.

## Activity Params

These are the parameters that you can provide when starting an activity with this driver.

Any parameter that is marked as *required* must be provided or an error will be thrown at activity startup. All other
parameters are marked as *optional*.

Any parameter that is marked as *static* may not be changed while an activity is running. All other parameters are
marked as *dynamic*, meaning that they may be changed while an activity is running via scripting.

#### sessionid

*optional*, *static*

The `sessionid` parameter allows you to logically assign a named instance of a session and session configuration to
each activity that you run. This allows for different driver settings to be used within the same scenario.

Default
: default

Examples
: `sessionid=test43`

#### profile

*optional*, *static*

Controls the configuration profile used by the driver. If you provide a value for this parameter, then a configuration
file under that name must exist, or an error will be thrown. This is a driver configuration file, as documented in
[DataStax Java Driver - Configuration](https://docs.datastax.com/en/developer/java-driver/4.6/manual/core/configuration/).

The profile is keyed to the sessionid, as each session id will be configured with the named profile just as you would
see with normal file-based driver configuration. Thus, changing the configuration within the profile will affect future
operations which share the same session.

While the profile itself is not changeable after it has been set, the parameters that are in the profile may be
dynamically changed, depending on how they are annotated below.

*All other driver settings are part of the named profile for an activity, and will override the values provided from
the named profile unless otherwise stated. These overrides do not affect the named file, only the runtime behavior of
the driver.*

Default
: 'default'

Examples
: `profile=experimental-settings`

#### secureconnectbundle

*optional*, *static*

This parameter is used to connect to an Astra Database as a Service instance. It accepts a path to the secure connect
bundle that is downloaded from the Astra UI.

Default
: undefined

Examples
: `secureconnectbundle=/tmp/secure-connect-my_db.zip`
: `secureconnectbundle="/home/automaton/secure-connect-my_db.zip"`

#### hosts

*optional*, *static*

The host or hosts to use to connect to the cluster. If you specify multiple values here, use commas with no spaces.
*This option is not valid when the `secureconnectbundle` option is used.*

Default
: localhost

Examples
: `hosts=192.168.1.25`
: `hosts=192.168.1.25,testhost42`

#### port

*optional*, *static*

The port to connect with. *This option is not valid when the `secureconnectbundle` option is used.*

Default
: 9042

Examples
: `port=9042`

#### cl

*optional*, *static*

An override to consistency levels for the activity. If this option is used, then all consistency levels will be set to
this by default for the current activity, and a log line explaining the difference with respect to the yaml will be
emitted. This is not a dynamic parameter. It will only be applied at activity start.
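
For example (any of the standard consistency level names listed under the `cl`
statement parameter below applies):

Examples
: `cl=LOCAL_QUORUM`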

#### whitelist

---- below this line needs to be curated for the new driver ----

- **whitelist** default: none - Applies a whitelist policy to the load balancing
  policy in the driver. If used, a WhitelistPolicy(RoundRobinPolicy())
  will be created and added to the cluster builder on startup.
  Examples:
  - whitelist=127.0.0.1
  - whitelist=127.0.0.1:9042,127.0.0.2:1234

- **cbopts** - default: none - this is how you customize the cluster
  settings for the client, including policies, compression, etc. This
  is a string of *Java*-like method calls just as you would use them
  in the Cluster.Builder fluent API. They are evaluated inline
  with the default Cluster.Builder options not covered below.
  Example: cbopts=".withCompression(ProtocolOptions.Compression.NONE)"

- **retrypolicy** default: none - Applies a retry policy in the driver.
  The only option supported for this version is `retrypolicy=logging`,
  which uses the default retry policy, but with logging added.

- **reconnectpolicy** default: none - Applies a reconnection policy in the driver.
  Supports either `reconnectpolicy=exponential(minDelayInMs,maxDelayInMs)` or `reconnectpolicy=constant(delayInMs)`.
  The driver reconnects using this policy when the entire cluster becomes unavailable.

- **pooling** default: none - Applies the connection pooling options
  to the policy.
  Examples:
  - `pooling=4:10`
    keep between 4 and 10 connections to LOCAL hosts
  - `pooling=4:10,2:5`
    keep 4-10 connections to LOCAL hosts and 2-5 to REMOTE
  - `pooling=4:10:2000`
    keep between 4-10 connections to LOCAL hosts with
    up to 2000 requests per connection
  - `pooling=5:10:2000,2:4:1000` keep between 5-10 connections to
    LOCAL hosts with up to 2000 requests per connection, and 2-4
    connections to REMOTE hosts with up to 1000 requests per connection

  Additionally, you may provide the following options on pooling. Any
  of these that are provided must appear in this order:
  `,heartbeat_interval_s:n,idle_timeout_s:n,pool_timeout_ms:n`, so a
  full example with all options set would appear as:
  `pooling=5:10:2000,2:4:1000,heartbeat_interval_s:30,idle_timeout_s:120,pool_timeout_ms:5`

- **socketoptions** default: none - Applies any of the valid socket
  options to the client when the session is built. Each of the options
  uses the long form of the name, with either a numeric or boolean
  value. Individual sub-parameters should be separated by a comma, and
  the parameter names and values can be separated by either equals or a
  colon. All of these values may be changed:
  - read_timeout_ms
  - connect_timeout_ms
  - keep_alive
  - reuse_address
  - so_linger
  - tcp_no_delay
  - receive_buffer_size
  - send_buffer_size

  Examples:
  - `socketoptions=read_timeout_ms=23423,connect_timeout_ms=4444`
  - `socketoptions=tcp_no_delay=true`

- **tokens** default: unset - Only executes statements that fall within
  any of the specified token ranges. Others are counted in metrics
  as skipped-tokens, with a histogram value of the cycle number.
  Examples:
  - tokens=1:10000,100000:1000000
  - tokens=1:123456

- **maxtries** - default: 10 - how many times an operation may be
  attempted before it is disregarded

- **maxpages** - default: 1 - how many pages can be read from a query which
  is larger than the fetchsize. If more than this number of pages
  is required for such a query, then an UnexpectedPaging exception
  is passed to the error handler as explained below.

- **fetchsize** - controls the driver parameter of the same name.
  Suffixed units can be used here, such as "50K". If this parameter
  is not present, then the driver option is not set.

- **cycles** - standard, however the cql activity type will default
  this to however many statements are included in the current
  activity, after tag filtering, etc.

- **username** - the user to authenticate as. This option requires
  that one of **password** or **passfile** also be defined.

- **password** - the password to authenticate with. This will be
  ignored if passfile is also present.

- **passfile** - the file to read the password from. The first
  line of this file is used as the password.

- **ssl** - specifies the type of the SSL implementation.
  Disabled by default, possible values are `jdk` and `openssl`.
  Depending on the type, additional parameters need to be provided.

- **tlsversion** - specify the TLS version to use for SSL.
  Examples:
  - `tlsversion=TLSv1.2` (the default)

- **truststore** (`jdk`, `openssl`) - specify the path to the SSL truststore.
  Examples:
  - `truststore=file.truststore`

- **tspass** (`jdk`, `openssl`) - specify the password for the SSL truststore.
  Examples:
  - `tspass=mypass`

- **keystore** (`jdk`) - specify the path to the SSL keystore.
  Examples:
  - `keystore=file.keystore`

- **kspass** (`jdk`) - specify the password for the SSL keystore.
  Examples:
  - `kspass=mypass`

- **keyFilePath** (`openssl`) - path to the OpenSSL key file.
  Examples:
  - `keyFilePath=file.key`

- **keyPassword** (`openssl`) - key password.
  Examples:
  - `keyPassword=password`

- **caCertFilePath** (`openssl`) - path to the X509 CA certificate file.
  Examples:
  - `caCertFilePath=cacert.pem`

- **certFilePath** (`openssl`) - path to the X509 certificate file.
  Examples:
  - `certFilePath=ca.pem`

- **jmxreporting** - enable JMX reporting if needed.
  Examples:
  - `jmxreporting=true`
  - `jmxreporting=false` (the default)

- **alias** - this is a standard nosqlbench parameter, however the cql type will use the workload value also as the
  alias value when not specified.

- **errors** - error handler configuration.
  (default errors=stop,retryable->retry,unverified->stop)
  Examples:
  - errors=stop,WriteTimeoutException=histogram
  - errors=count
  - errors=warn,retryable=count
  See the separate help on 'cql-errors' for detailed
  configuration options.

- **defaultidempotence** - sets default idempotence on the
  driver options, but only if it has a value.
  (default unset, valid values: true or false)

- **speculative** - sets the speculative retry policy on the cluster.
  (default unset)
  This can be in one of the following forms:
  - pT:E:L - where :L is optional and
    T is a floating point threshold between 0.0 and 100.0 and
    E is an allowed number of concurrent speculative executions and
    L is the maximum latency tracked in the tracker instance
    (L defaults to 15000 when left out)
    Examples:
    - p99.8:5:15000ms - 99.8 percentile, 5 executions, 15000ms max tracked
    - p98:2:10000ms - 98.0 percentile, 2 executions allowed, 10s max tracked
  - Tms:E - where :E is optional and
    T is a constant threshold latency and
    E is the allowed number of concurrent speculative retries
    (E defaults to 5 when left out)
    Examples:
    - 100ms:5 - constant threshold of 100ms and 5 allowed executions

- **seq** - selects the statement sequencer used with statement ratios.
  (default: bucket)
  (options: concat | bucket | interval)
  The concat sequencer repeats each statement in order until the ratio
  is achieved.
  The bucket sequencer uses simple round-robin distribution to plan
  statement ratios, a simple but unbalanced form of interleaving.
  The interval sequencer apportions statements over time and then by
  order of appearance for ties. This has the effect of interleaving
  statements from an activity more evenly, but is less obvious in how
  it works.
  All of the sequencers create deterministic schedules which use an internal
  lookup table for indexing into a list of possible statements.
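
  For example, with three statements A, B, and C at ratios 2,1,1 (a
  hypothetical mix, following the definitions above), one planning pass
  over 4 slots would yield:

      seq=concat   ->  A A B C
      seq=bucket   ->  A B C A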

- **trace** - enables a trace on a subset of operations. This is disabled
  by default.
  Examples:
  `trace=modulo:100,filename:trace.log`
  The above traces every 100th cycle to a file named trace.log.
  `trace=modulo:1000,filename:stdout`
  The above traces every 1000th cycle to stdout.
  If the trace log is not specified, then 'tracelog' is assumed.
  If the filename is specified as stdout, then traces are dumped to stdout.

- **clusterid** - names the configuration to be used for this activity. Within
  a given scenario, any activities that use the same name for clusterid will
  share a session and cluster.
  default: 'default'

- **drivermetrics** - enable reporting of driver metrics.
  default: false

- **driverprefix** - set the metrics name that will prefix all CQL driver metrics.
  default: 'driver.*clusterid*.'
  The clusterid specified is included so that separate cluster and session
  contexts can be reported independently for advanced tests.

- **usercodecs** - enable the loading of user codec libraries.
  For more details see com.datastax.codecs.framework.UDTCodecInjector in the nosqlbench
  code base. This is for dynamic codec loading with user-provided codecs mapped
  via the internal UDT APIs.
  default: false

- **insights** - Set to false to disable the driver from sending insights monitoring information.
  - `insights=false`

- **tickduration** - sets the tickDuration (milliseconds) of the HashedWheelTimer of the
  java driver. This timer is used to schedule speculative requests.
  Examples:
  - `tickduration=10`
  - `tickduration=100` (driver default value)

- **compression** - sets the transport compression to use for this
  activity. Valid values are 'LZ4' and 'SNAPPY'. Both types are bundled
  with EBDSE.

- **showcql** - logs cql statements as INFO (to see INFO messages in stdout use -v or greater). Note: this is expensive
  and should only be done to troubleshoot workloads. Do not use `showcql` for your tests.

### CQL YAML Parameters

A uniform YAML configuration format was introduced with engineblock 2.0.
As part of this format, statement parameters were added for the CQL Activity Type.
These parameters will be consolidated with the above parameters in time, but for
now **they are limited to a YAML params block**:

    params:

     ratio: 1
     # Sets the statement ratio within the operation sequencer
     # scheme. Integers only.
     # When preparing the operation order (AKA sequencing), this
     # determines the frequency of the associated statements.

     cl: ONE
     # Sets the consistency level, using any of the standard
     # identifiers from com.datastax.driver.core.ConsistencyLevel,
     # any one of:
     # LOCAL_QUORUM, ANY, ONE, TWO, THREE, QUORUM, ALL,
     # EACH_QUORUM, SERIAL, LOCAL_SERIAL, LOCAL_ONE

     prepared: true
     # By default, all statements are prepared. If you are
     # creating schema, set this to false.

     idempotent: false
     # For statements that are known to be idempotent, set this
     # to true.

     instrument: false
     # If a statement has instrument set to true, then
     # individual Timer metrics will be tracked for
     # that statement for both successes and errors,
     # using the given statement name.

     logresultcsv: true
     OR
     logresultcsv: myfilename.csv
     # If a statement has logresultcsv set to true,
     # then individual operations will be logged to a CSV file.
     # In this case the CSV file will be named as
     # <statement-name>--results.csv.
     # If the value is present and not "true", then the value will
     # be used as the name of the file.
     #
     # The format of the file is:
     # <cycle>,(SUCCESS|FAILURE),<nanos>,<rows-fetched>,(<error-class>|NONE)
     # NOTES:
     # 1) BE CAREFUL with this setting. A single logged line per
     #    result is not useful for high-speed testing as it will
     #    impose IO loads on the client to slow it down.
     # 2) BE CAREFUL with the name. It is best to just pick good
     #    names for your statement defs so that everything remains
     #    coherent and nothing gets accidentally overwritten.
     # 3) If logresultcsv is provided at the activity level, it
     #    applies to all statements, and the only valid values
     #    there are true and false.
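
For instance, to track per-statement timers and also log each result to a
named CSV file, a params block could combine the two (the file name here is
illustrative):

    params:
     instrument: true
     logresultcsv: read-user-results.csv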

### Metrics

- alias.result - A timer which tracks the performance of an op result only.
  This is the async get on the future, broken out as a separate step.
- alias.result-success - A timer that records rate and histograms of the time
  it takes from submitting a query to completely reading the result
  set that it returns, across all pages. This metric is only counted
  for non-exceptional results, while the result metric above includes
  all operations.
- alias.bind - A timer which tracks the performance of the statement
  binding logic, including the generation of data immediately prior.
- alias.execute - A timer which tracks the performance of op submission
  only. This is the async execution call, broken out as a separate step.
- alias.tries - A histogram of how many tries were required to get a
  completed operation.
- alias.pages - A timer which tracks the performance of paging, specific
  to query results of more than one page. i.e., if all reads return within 1
  page, this metric will not have any data.
- alias.strides - A timer around each stride of operations within a thread.
- alias.skipped-tokens - A histogram that records the count and cycle values
  of skipped tokens.

## YAML Examples

Please see the bundled activities with nosqlbench for examples.
@ -1,39 +0,0 @@
# You can run this file with this command line to see the values printed to stdout:
# ./ebdse run driver=stdout yaml=bindings/date.yaml cycles=10

# This file demonstrates different types of timestamp recipes
# that you can use with virtdata. (The bindings used in ebdse)

# If you want to control the output, uncomment and edit the statement template below
# and modify the named anchors to suit your output requirements.

#statements:
# example1: "{fullname}\n"

bindings:
  # All uncommented lines under this are indented, so they become named bindings below
  # the entry above

  # Normally, the value that you get with a cycle starts at 0.

  cycleNum: Identity();

  # here we convert the cycle number to a Date by casting.

  id: Identity(); ToDate();

  # Date during 2017 (number of milliseconds in a year: 31,536,000,000)
  date: StartingEpochMillis('2017-01-01 23:59:59'); AddHashRange(0L,31536000000L); StringDateWrapper("YYYY-MM-dd")

  # Example output:

  # date : 2017-09-17
  # date : 2017-08-01
  # date : 2017-04-22
  # date : 2017-04-09
  # date : 2017-05-28
  # date : 2017-08-06
  # date : 2017-07-05
  # date : 2017-02-07
  # date : 2017-05-25
  # date : 2017-12-02
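
  # A variant sketch (the binding name and values are illustrative, using the
  # same functions as the recipe above): dates during 2018 with time of day:
  # date2018: StartingEpochMillis('2018-01-01 23:59:59'); AddHashRange(0L,31536000000L); StringDateWrapper("YYYY-MM-dd HH:mm:ss")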

@ -1,28 +0,0 @@

# You can run this file with this command line to see the values printed to stdout:
# ./ebdse run driver=stdout yaml=bindings/expr.yaml cycles=10

# This file demonstrates different types of expression recipes
# that you can use with virtdata. (The bindings used in ebdse)

# If you want to control the output, uncomment and edit the statement template below
# and modify the named anchors to suit your output requirements.

#statements:
# example1: "{fullname}\n"

bindings:
  # flight times based on hour / minute / second computation
  hour: HashRange(0,2); ToInt()
  minute: Shuffle(0,2); ToInt()
  second: HashRange(0,60); ToInt()
  flightDate: HashRange(0,2); Mul(3600000); Save('hour'); Shuffle(0,2); Mul(60000); Save('minute'); HashRange(0,60); Mul(1000); Save('second'); Expr('hour + minute + second'); StartingEpochMillis('2018-10-02 04:00:00'); ToDate(); ToString()
  flightDateFixed: Save('cycle'); HashRange(0,2); Mul(3600000); Load('cycle'); Save('hour'); Shuffle(0,2); Mul(60000); Save('minute'); Load('cycle'); HashRange(0,60); Mul(1000); Save('second'); Expr('hour + minute + second'); StartingEpochMillis('2018-10-02 04:00:00'); ToDate(); ToString()
  flightDateLong: Save('cycle'); HashRange(0,2); Mul(3600000); Load('cycle'); Save('hour'); Shuffle(0,2); Mul(60000); Save('minute'); Load('cycle'); HashRange(0,60); Mul(1000); Save('second'); Expr('hour + minute + second'); ToString()

  # status that depends on score
  riskScore: Normal(0.0,5.0); Clamp(1, 100); Save('riskScore') -> int
  status: |
    Expr('riskScore > 90 ? 0 : 1') -> long; ToBoolean(); ToString()
  status_2: |
    ToInt(); Expr('riskScore >90 ? 0 : 1') -> int; WeightedStrings('accepted:1;rejected:1')
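
  # A minimal sketch of the Save/Expr pattern used above (the binding name and
  # values are illustrative):
  # total: HashRange(0,10); Save('a'); HashRange(0,5); Save('b'); Expr('a + b') -> long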

@ -1,172 +0,0 @@
# You can run this file with this command line to see the values printed to stdout:
# ./ebdse run driver=stdout yaml=bindings/text.yaml cycles=10

# This file demonstrates different types of text recipes
# that you can use with virtdata. (The bindings used in ebdse)

# If you want to control the output, uncomment and edit the statement template below
# and modify the named anchors to suit your output requirements.

#statements:
# example1: "{fullname}\n"

bindings:

  # All uncommented lines under this are indented, so they become named bindings below
  # the entry above

  # Normally, the value that you get with a cycle starts at 0.

  cycleNum: Identity();

  # here we convert the cycle number to text by casting.
  id: Identity(); ToString()

  ## Names
  # See http://docs.virtdata.io/functions/funcref_premade/
  # Full name
  fullname: FullNames()

  # Example output:

  # fullname : Norman Wolf
  # fullname : Lisa Harris
  # fullname : John Williams
  # fullname : Freda Gaytan
  # fullname : Violet Ferguson
  # fullname : Larry Roberts
  # fullname : Andrew Daniels
  # fullname : Jean Keys
  # fullname : Mark Cole
  # fullname : Roberta Bounds

  # Name with last name first
  fullname_lastname_first: Template('{}, {}', LastNames(), FirstNames())

  # Example output:

  # fullname_lastname_first : Miracle, Lisa
  # fullname_lastname_first : Wolf, John
  # fullname_lastname_first : Harris, Freda
  # fullname_lastname_first : Williams, Violet
  # fullname_lastname_first : Gaytan, Larry
  # fullname_lastname_first : Ferguson, Andrew
  # fullname_lastname_first : Roberts, Jean
  # fullname_lastname_first : Daniels, Mark
  # fullname_lastname_first : Keys, Roberta
  # fullname_lastname_first : Cole, Timothy

  # Phone
  phone: compose HashRange(10000000000L,99999999999L); Combinations('0-9;0-9;0-9;-;0-9;0-9;0-9;-;0-9;0-9;0-9;0-9')

  # Example output:

  # $ ebdse run driver=stdout yaml=example-bindings format=readout cycles=10
  # phone : 241-478-6787
  # phone : 784-482-7668
  # phone : 804-068-5502
  # phone : 044-195-5579
  # phone : 237-202-5601
  # phone : 916-390-8911
  # phone : 550-943-7851
  # phone : 762-031-1362
  # phone : 234-050-2563
  # phone : 312-672-0039

  ## Career
  career: HashedLineToString('data/careers.txt')

  # Example output:

  # career : Paper Goods Machine Setters, Operators, and Tenders
  # career : Training and Development Specialists
  # career : Embossing Machine Set-Up Operators
  # career : Airframe-and-Power-Plant Mechanics
  # career : Sales Representatives, Agricultural
  # career : Automotive Body and Related Repairers
  # career : Community Health Workers
  # career : Billing, Posting, and Calculating Machine Operators
  # career : Data Processing Equipment Repairers
  # career : Sawing Machine Setters and Set-Up Operators

  ## Job Description
  jobdescription: Add(0); HashedLineToString('data/jobdescription.txt')

  # Example output:

  # jobdescription: Add(0); HashedLineToString('data/jobdescription.txt')

  ## Weighted enumerated values
  # Sorting hat (even distribution)
  house: WeightedStrings('Gryffindor:0.2;Hufflepuff:0.2;Ravenclaw:0.2;Slytherin:0.2')
  # Example output:

  # house : Hufflepuff
  # house : Ravenclaw
  # house : Slytherin
  # house : Slytherin
  # house : Gryffindor
  # house : Hufflepuff
  # house : Ravenclaw
  # house : Ravenclaw
  # house : Hufflepuff
  # house : Hufflepuff

  ## Weighted prefixes
  prefix: WeightedStrings('Mr:0.45;Mrs:0.25;Ms:0.1;Miss:0.1;Dr:0.05')

  # Example output:

  # prefix : Mr
  # prefix : Mrs
  # prefix : Miss
  # prefix : Miss
  # prefix : Mr
  # prefix : Mrs
  # prefix : Mrs
  # prefix : Mrs
  # prefix : Mr
  # prefix : Mr
  # prefix : Mr
  # prefix : Mr
  # prefix : Mrs
  # prefix : Mrs
  # prefix : Mr
  # prefix : Mr
  # prefix : Mrs
  # prefix : Miss
  # prefix : Ms
  # prefix : Dr

  ## Current Employer
  current_employer: HashedLineToString('data/companies.txt')

  # Example output:

  # current_employer : Monsanto Company
  # current_employer : International Flavors & Fragrances
  # current_employer : Carpenter Technology Corporation
  # current_employer : Union Pacific Corporation
  # current_employer : Rush Enterprises
  # current_employer : Peabody Energy Corporation
  # current_employer : Rockwell Automation
  # current_employer : Auto-Owners Insurance Group
  # current_employer : ArcBest Corporation
  # current_employer : WGL Holdings

  ## Sensor
  sensor_name: HashedLineToString('data/variable_words.txt')

  # Example output:

  # sensor_name : rotational_latency
  # sensor_name : half_life
  # sensor_name : clarity
  # sensor_name : fairness
  # sensor_name : diversity
  # sensor_name : turbulence
  # sensor_name : mode
  # sensor_name : current
  # sensor_name : rating
  # sensor_name : stall_speed
@ -1,72 +0,0 @@
# You can run this file with this command line to see the values printed to stdout:
# ./ebdse run driver=stdout yaml=bindings/timestamp.yaml cycles=10

# This file demonstrates different types of timestamp recipes
# that you can use with virtdata. (The bindings used in ebdse)

# If you want to control the output, uncomment and edit the statement template below
# and modify the named anchors to suit your output requirements.

#statements:
# example1: "{epochMillis}\n"

bindings:

  # All uncommented lines under this are indented, so they become named bindings below
  # the entry above

  # Normally, the value that you get with a cycle starts at 0.

  cycleNum: Identity();

  # So far, we've only been dealing in milliseconds. This is important to get working
  # before adding the next step, converting to a more specific type.
  # You can take any millisecond output and add conversion functions as shown below.

  # this one converts to a java.util.Date

  randomDateWithinFeb2018: AddHashRange(0,2419200000L); StartingEpochMillis('2018-02-01 05:00:00'); ToDate();

  # ToDate(...) supports a few argument forms that you can experiment with.
  # ToDate(int) will space the dates apart by this many milliseconds.
  # ToDate(int,int) will space the dates apart by some millis and also repeat the value for some number of cycles.

  # Alternately, you might want to use an org.joda.time.DateTime instead of a java.util.Date:

  randomJodaDateWithinFeb2018: AddHashRange(0,2419200000L); StartingEpochMillis('2018-02-01 05:00:00'); ToJodaDateTime();

  # ToJodaDateTime(...) also supports the space and repeat forms as shown above for ToDate(...)

  # You can also have the dates in order, but with some limited out-of-order perturbation.
  # In this case, we are swizzling the offset by some pseudo-random amount, up to an hour (in millis)

  randomDateWithinFeb2018Jittery: AddHashRange(0,3600000L); StartingEpochMillis('2018-02-01 05:00:00'); ToDate();

  # If you want to have the result be a string-formatted date representation for testing, try this:
  # You can use any formatter from here: http://joda-time.sourceforge.net/apidocs/org/joda/time/format/DateTimeFormat.html

  timeuuid_string: AddHashRange(0,2419200000L); StartingEpochMillis('2018-02-01 05:00:00'); StringDateWrapper("yyyy-MM-dd HH:mm:ss.SSS");

  # ebdse bundles some specialized mapping functions in addition to those explained above, which
  # come with eb. These are shown below.

  # You can create a com.datastax.driver.core.LocalDate for use with the java driver.
  # This takes as its input the number of days since the unix epoch.

  localdate: LongToLocalDateDays()

  # You can also take the millis from any of the examples above which provide epoch millis,
  # and convert the output to a millisecond-stable value, analogous to the CQL functions
  # that do the same.

  minUUID: AddHashRange(0,3600000); StartingEpochMillis('2018-02-01 05:00:00'); ToTimeUUIDMin();

  maxUUID: AddHashRange(0,3600000); StartingEpochMillis('2018-02-01 05:00:00'); ToTimeUUIDMax();

  # If you find useful recipes which are needed by others, please contribute them back to our examples!
@ -1,62 +0,0 @@
# You can run this file with this command line to see the values printed to stdout:
# ./ebdse run driver=stdout yaml=bindings/timeuuid.yaml cycles=10

# This file demonstrates different types of timeuuid recipes
# that you can use with virtdata. (The bindings used in ebdse)

# If you want to control the output, uncomment and edit the statement template below
# and modify the named anchors to suit your output requirements.

#statements:
# example1: "{fullname}\n"

bindings:

  # All uncommented lines under this are indented, so they become named bindings below
  # the entry above

  # Normally, the value that you get with a cycle starts at 0.

  cycleNum: Identity();
  # here we convert the cycle number to a TIMEUUID by conversion.

  id: Identity(); ToEpochTimeUUID()

  ## Client ID
  client_id: AddHashRange(0L, 2000000000000L); ToEpochTimeUUID()

  # Example output:

  # client_id : 4eb369b0-91de-11bd-8000-000000000000
  # client_id : 0b9edab0-5401-11e7-8000-000000000000
  # client_id : 58f21c30-0eec-11f3-8000-000000000000
  # client_id : 4f547e60-a48a-11ca-8000-000000000000
  # client_id : 42db8510-cad8-11bb-8000-000000000000
  # client_id : 78cc7790-529c-11d6-8000-000000000000
  # client_id : 55382200-9cfd-11d7-8000-000000000000
  # client_id : 1ebdbef0-b6dc-11b7-8000-000000000000
  # client_id : 8bc58ba0-57fe-11da-8000-000000000000
  # client_id : 03d1b690-ba64-11f5-8000-000000000000

  # If you want a java.util.UUID instead of a java.util.Date type, you can use something like below.
  # This form avoids setting the non-time fields in the timeuuid value. This makes deterministic testing
  # possible, even though the basic data type, as used in practice, is designed specifically to avoid repeatability.

  timeuuid1: AddHashRange(0,2419200000L); StartingEpochMillis('2018-02-01 05:00:00'); ToEpochTimeUUID();

  # There is a shortcut for this version supported directly by ToEpochTimeUUID(..) as seen here:

  timeuuid2: AddHashRange(0,2419200000L); ToEpochTimeUUID('2018-02-01 05:00:00');

  # You can also access the finest level of resolution of the timeuuid type, where each cycle value represents
  # the smallest possible change for a timeuuid. Bear in mind that this represents many, many sub-millisecond
  # timestamp values which may not be easy to see in normal timestamp formats. In this case, millisecond
  # semantics are not appropriate, so make sure you adjust the input values accordingly.

  timeuuid_finest1: ToFinestTimeUUID();

  # However, since starting at some reference time is a popular option, ToFinestTimeUUID(...) also supports
  # the shortcut version just like ToEpochTimeUUID(). This is provided because converting between epoch
  # millis and timeuuid ticks is not fun.

  timeuuid_finest_relative: ToFinestTimeUUID('2018-02-01 05:00:00');
@ -1,39 +0,0 @@
# You can run this file with this command line to see the values printed to stdout:
# ./ebdse run driver=stdout yaml=bindings/uuid.yaml cycles=10

# This file demonstrates different types of UUID recipes
# that you can use with virtdata. (The bindings used in ebdse)

# If you want to control the output, uncomment and edit the statement template below
# and modify the named anchors to suit your output requirements.

#statements:
# example1: "{fullname}\n"

bindings:

  # All uncommented lines under this are indented, so they become named bindings below
  # the entry above

  # Normally, the value that you get with a cycle starts at 0.

  cycleNum: Identity();

  # here we convert the cycle number to a UUID by hashing.
  id: Identity(); ToHashedUUID()

  ## Station ID (100 unique UUID values, can override stations on the command-line)
  station_id: Mod(<<stations:100>>); ToHashedUUID()

  # Example output:

  # station_id : 28df63b7-cc57-43cb-9752-fae69d1653da
  # station_id : 5752fae6-9d16-43da-b20f-557a1dd5c571
  # station_id : 720f557a-1dd5-4571-afb2-0dd47d657943
  # station_id : 6fb20dd4-7d65-4943-9967-459343efafdd
  # station_id : 19674593-43ef-4fdd-bdf4-98b19568b584
  # station_id : 3df498b1-9568-4584-96fd-76f6081da01a
  # station_id : 56fd76f6-081d-401a-85eb-b1d9e5bba058
  # station_id : 45ebb1d9-e5bb-4058-b75d-d51547d31952
  # station_id : 375dd515-47d3-4952-a49d-236be9a5c070
  # station_id : 249d236b-e9a5-4070-9afa-8fae9060d959
@ -1,54 +0,0 @@
scenarios:
  default:
    schema: run driver=cql tags==phase:schema cycles==UNDEF threads==1
    rampup: run driver=cql tags==phase:rampup cycles=TEMPLATE(rampup-cycles,100K) threads=auto
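    # A 'main' entry would follow the same pattern (a sketch; the cycle count
    # default here is an assumption, not part of this file):
    # main: run driver=cql tags==phase:main cycles=TEMPLATE(main-cycles,100K) threads=auto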

bindings:
  userid: Template('user-{}',ToString()); SaveString('userid');
  interest: Template('interest-{}',ToString());

blocks:
 - name: schema
   tags:
     phase: schema
   statements:
    - create-keyspace: |
        create KEYSPACE if not exists TEMPLATE(keyspace,examples)
        WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'}
        AND durable_writes = 'true';
    - create-users-table: |
        create table if not exists TEMPLATE(keyspace,examples).users (
         userid text PRIMARY KEY
        );
    - create-interests-table: |
        create table if not exists TEMPLATE(keyspace,examples).interests (
         userid text,
         interest text,
         primary key (interest, userid)
        );
 - name: rampup
   tags:
     phase: rampup
   statements:
    - insert-users: |
        insert into TEMPLATE(keyspace,examples).users (userid) VALUES ({userid});
      tags:
        entity: users
    - insert-interests: |
        insert into TEMPLATE(keyspace,examples).interests(
         interest, userid
        ) VALUES (
         {interest}, {userid}
        );
      tags:
        entity: interests
 - name: main
   tags:
     phase: main
   statements:
    - read-user: |
        select * from TEMPLATE(keyspace,examples).users
        where userid={userid};
    - read-interests: |
        select * from TEMPLATE(keyspace,examples).interests
        where interest={interest};
@ -1,4 +0,0 @@
# cql help topics
- cql
- cql-errors
- cql-exception-list
@ -1,22 +0,0 @@
package com.datastax.ebdrivers.cql;

import io.nosqlbench.activitytype.cqld4.core.CqlAction;
import io.nosqlbench.activitytype.cqld4.core.CqlActivity;
import io.nosqlbench.engine.api.activityimpl.ActivityDef;
import org.junit.Ignore;
import org.junit.Test;

public class CqlActionTest {

    @Test
    @Ignore
    public void testCqlAction() {
        // Build an activity definition, then run a single cycle through a CqlAction.
        ActivityDef ad = ActivityDef.parseActivityDef("driver=ebdrivers;alias=foo;yaml=write-telemetry.yaml;");
        CqlActivity cac = new CqlActivity(ad);
        CqlAction cq = new CqlAction(ad, 0, cac);
        cq.init();
        cq.runCycle(5);
    }

}
@@ -1,61 +0,0 @@
package com.datastax.ebdrivers.cql.statements;

import io.nosqlbench.activitytype.cqld4.statements.core.CQLStatementDefParser;
import org.junit.Test;

import java.util.HashMap;
import java.util.List;

import static org.assertj.core.api.Assertions.assertThat;

public class CQLCQLStatementDefParserTest {

    // TODO: Implement support for default values in yaml

    @Test
    public void testBasicParsing() {
        HashMap<String, String> bindings = new HashMap<String, String>() {{
            put("not", "even");
        }};
        CQLStatementDefParser sdp = new CQLStatementDefParser("test-name", "This is ?not an error.");
        CQLStatementDefParser.ParseResult r = sdp.getParseResult(bindings.keySet());
        assertThat(r.hasError()).isFalse();
        assertThat(r.getStatement()).isEqualTo("This is ? an error.");
        assertThat(r.getMissingAnchors().size()).isEqualTo(0);
        assertThat(r.getMissingGenerators().size()).isEqualTo(0);
    }

    @Test
    public void testParsingDiagnostics() {
        HashMap<String, String> bindings = new HashMap<String, String>() {{
            put("BINDABLE", "two");
            put("EXTRABINDING", "5");
        }};
        CQLStatementDefParser sdp = new CQLStatementDefParser("test-name", "This is a test of ?BINDABLE interpolation and ?MISSINGBINDING.");
        List<String> bindableNames = sdp.getBindableNames();
        CQLStatementDefParser.ParseResult result = sdp.getParseResult(bindings.keySet());
        assertThat(result.hasError()).isTrue();
        assertThat(result.getStatement()).isEqualTo("This is a test of ? interpolation and ?.");
        assertThat(result.getMissingAnchors().size()).isEqualTo(1);
        assertThat(result.getMissingGenerators().size()).isEqualTo(1);
        assertThat(result.getMissingAnchors()).contains("EXTRABINDING");
        assertThat(result.getMissingGenerators()).contains("MISSINGBINDING");
    }

    @Test
    public void testParsingPatterns() {
        HashMap<String, String> bindings = new HashMap<String, String>() {{
            put("B-1", "one");
            put("B_-1.2", "two");
        }};
        CQLStatementDefParser sdp = new CQLStatementDefParser("test-name", "This is a test of ?B-1 and {B_-1.2}");
        List<String> bindableNames = sdp.getBindableNames();
        assertThat(bindableNames).containsExactly("B-1", "B_-1.2");
        CQLStatementDefParser.ParseResult parseResult = sdp.getParseResult(bindings.keySet());
        assertThat(parseResult.hasError()).isFalse();
        assertThat(parseResult.getStatement()).isEqualTo("This is a test of ? and ?");
    }
}
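For readers skimming these tests: the assertions imply that both ?NAME and {NAME} anchors are rewritten to positional ? markers while the anchor names are collected. Below is a deliberately simplified sketch of that rewriting; the helper is hypothetical, not the parser's real implementation, and the real name rules differ (for example, a trailing '.' is kept out of the name, as testParsingDiagnostics shows).

import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class AnchorRewriteSketch {

    // ?NAME or {NAME}; names here may contain word characters, '-' and '.'
    private static final Pattern ANCHOR = Pattern.compile("\\?([\\w.-]+)|\\{([\\w.-]+)}");

    // Replaces each anchor with a positional '?' and collects the anchor names.
    static String rewrite(String stmt, List<String> names) {
        Matcher m = ANCHOR.matcher(stmt);
        StringBuffer sb = new StringBuffer();
        while (m.find()) {
            names.add(m.group(1) != null ? m.group(1) : m.group(2));
            m.appendReplacement(sb, "?");
        }
        m.appendTail(sb);
        return sb.toString();
    }

    public static void main(String[] args) {
        List<String> names = new ArrayList<>();
        System.out.println(rewrite("This is a test of ?B-1 and {B_-1.2}", names));
        // -> This is a test of ? and ?
        System.out.println(names); // -> [B-1, B_-1.2]
    }
}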
@@ -1,21 +0,0 @@
package io.nosqlbench.activitytype.cqld4.config;

import com.datastax.oss.driver.api.core.config.OptionsMap;
import org.junit.Test;

import java.util.Map;

import static org.assertj.core.api.Assertions.assertThat;

public class MapExampleTest {

    @Test
    public void easyMaps() {
        // OptionsMap map = new OptionsMap();
        // Map<String, Map<String,Object>> profiles = map.getProfiles();
        // Map<String,Object> profile1 = profiles.get("profile1");
        // profile1.putAll(OptionsMap.driverDefaultsMap());
        // profile1.set("basic.request.timeout",10000);
    }
}
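The commented-out sketch above does not match the 4.x driver's OptionsMap API as I understand it (there is no getProfiles() or driverDefaultsMap()). A minimal sketch of the documented pattern, worth verifying against the driver version in use:

import java.time.Duration;

import com.datastax.oss.driver.api.core.config.DriverConfigLoader;
import com.datastax.oss.driver.api.core.config.OptionsMap;
import com.datastax.oss.driver.api.core.config.TypedDriverOption;

public class OptionsMapExample {
    public static void main(String[] args) {
        // Start from driver defaults, then override a single typed option.
        OptionsMap options = OptionsMap.driverDefaults();
        options.put(TypedDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(10));
        System.out.println("request timeout = " + options.get(TypedDriverOption.REQUEST_TIMEOUT));

        // The map can then back a config loader, e.g. for
        // CqlSession.builder().withConfigLoader(loader).build().
        DriverConfigLoader loader = DriverConfigLoader.fromMap(options);
    }
}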
@@ -1,14 +0,0 @@
tags:
  group: read
statements:
  - name: read-telemetry
    statement: |
      select * from <<KEYSPACE:testks>>.<<TABLE:testtable>>_telemetry
      where source={source}
      and epoch_hour={epoch_hour}
      and param={param}
      limit 10
    bindings:
      source: ThreadNumGenerator
      epoch_hour: DateSequenceFieldGenerator(1000,'YYYY-MM-dd-HH')
      param: LineExtractGenerator('data/variable_words.txt')
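For illustration, with hypothetical binding values (source=3, epoch_hour='2020-01-01-00', param='alpha') and the default keyspace and table names, the template above would render roughly as:

select * from testks.testtable_telemetry
where source=3
and epoch_hour='2020-01-01-00'
and param='alpha'
limit 10

Exact quoting depends on how the bound values are typed; the values here are invented for illustration.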