mirror of https://github.com/nosqlbench/nosqlbench.git (synced 2025-02-25 18:55:28 -06:00)

add design sketch backgrounder for workload synthesis

This commit is contained in:
parent 5bbf81776f
commit 392bbcc595
devdocs/sketches/datashape.dot (new file, 8 lines)
@@ -0,0 +1,8 @@
graph {

    { rank=same;
        operations -- dataset;
    }
    dataset -- datashape;
    datashape -- operations;
}
devdocs/sketches/datashape.svg (new file, 46 lines)
@@ -0,0 +1,46 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN"
"http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
<!-- Generated by graphviz version 2.40.1 (20161225.0304)
-->
<!-- Title: %3 Pages: 1 -->
<svg width="192pt" height="116pt"
viewBox="0.00 0.00 191.54 116.00" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<g id="graph0" class="graph" transform="scale(1 1) rotate(0) translate(4 112)">
<title>%3</title>
<polygon fill="#ffffff" stroke="transparent" points="-4,4 -4,-112 187.5427,-112 187.5427,4 -4,4"/>
<!-- operations -->
<g id="node1" class="node">
<title>operations</title>
<ellipse fill="none" stroke="#000000" cx="47.4458" cy="-90" rx="47.3916" ry="18"/>
<text text-anchor="middle" x="47.4458" y="-86.3" font-family="Times,serif" font-size="14.00" fill="#000000">operations</text>
</g>
<!-- dataset -->
<g id="node2" class="node">
<title>dataset</title>
<ellipse fill="none" stroke="#000000" cx="148.4458" cy="-90" rx="35.194" ry="18"/>
<text text-anchor="middle" x="148.4458" y="-86.3" font-family="Times,serif" font-size="14.00" fill="#000000">dataset</text>
</g>
<!-- operations--dataset -->
<g id="edge1" class="edge">
<title>operations--dataset</title>
<path fill="none" stroke="#000000" d="M95.1841,-90C101.1483,-90 107.1125,-90 113.0767,-90"/>
</g>
<!-- datashape -->
<g id="node3" class="node">
<title>datashape</title>
<ellipse fill="none" stroke="#000000" cx="97.4458" cy="-18" rx="45.4919" ry="18"/>
<text text-anchor="middle" x="97.4458" y="-14.3" font-family="Times,serif" font-size="14.00" fill="#000000">datashape</text>
</g>
<!-- dataset--datashape -->
<g id="edge2" class="edge">
<title>dataset--datashape</title>
<path fill="none" stroke="#000000" d="M136.3595,-72.937C128.3475,-61.626 117.8658,-46.8282 109.7996,-35.4407"/>
</g>
<!-- datashape--operations -->
<g id="edge3" class="edge">
<title>datashape--operations</title>
<path fill="none" stroke="#000000" d="M85.2803,-35.5182C77.4648,-46.7727 67.3528,-61.3339 59.5494,-72.5708"/>
</g>
</g>
</svg>
devdocs/sketches/workload_analysis.dot (new file, 25 lines)
@@ -0,0 +1,25 @@
digraph {

    { rank = same; data_shape; op_shape; }
    { rank = same; data_types; data_samples; }
    system -> op_log;
    op_log -> op_syntax_samples;
    { rank = same; op_syntax_samples; op_field_samples; }
    op_syntax_samples -> op_shape;
    op_log -> op_field_samples;
    op_field_samples -> data_samples;
    schema -> data_types;
    schema -> op_shape;
    data_shape -> workload;
    op_shape -> workload;
    exported_data -> data_samples;
    interactive_sampling -> data_samples;
    interactive_sampling -> schema;
    dataset -> exported_data;
    dataset -> interactive_sampling;

    data_types -> data_shape;
    data_samples -> data_shape;

    workload [label = "synthesized\nworkload"]
}
devdocs/sketches/workload_synthesis.dot (new file, 42 lines)
@@ -0,0 +1,42 @@
digraph ws {
    node [shape = none]
    label = "Workload Synthesis Data Flow"
    edge [fontsize = 8]

    {
        rank = min;
        app [label = "application"];
        analyzer;
        nosqlbench;
    }

    subgraph clusterf {
        rank = same;
        label = "";

        appops [label = "ops in\nflight"];
        oplog [label = "full query\nlog"];
        workload [label = "Synthesized\nWorkload"];
        test_ops [label = "ops in\nflight"];
    }

    {
        rank = same;
        system [label = "Capture\nTarget"];
        test_system [label = "Test\nTarget"];
    }

    app -> appops [label = "normal\noperation"];
    appops -> system;
    system -> oplog;

    analyzer [rank = min]
    oplog -> analyzer
    analyzer -> workload [label = "pattern\nanalysis"];

    workload -> nosqlbench;
    nosqlbench -> test_ops [label = "run\nscenario"];
    test_ops -> test_system;

}
devdocs/sketches/workload_synthesis.md (new file, 210 lines)
@@ -0,0 +1,210 @@
# Workload Synthesis

This document describes the background and essential details for understanding workload
synthesis as a potential NoSQLBench capability. Here, workload synthesis means constructing
a NoSQLBench workload based on op templates which is representative of some recorded
or implied workload, as informed by schema, recorded operations, or data set. The goal is
to construct a template-based workload description that aligns to application access patterns
as closely as the provided data sources allow.
With the release of Apache Cassandra 4.0 imminent, the full query log (FQL) capability it provides
offers a starting point for advanced workload characterization. However, FQL is only a starting
point for serious testing at scale. The reasons for this can be discussed in detail elsewhere,
but the main reason is that the test apparatus has inertia due to the weight of the data logs
and the operational cost of piping bulk data around. Mechanical sympathy suggests a different
representation that will maintain headroom in the testing apparatus, which directly translates to
simplicity in setup as well as accuracy in results.

Further, the capabilities needed to do workload synthesis are not unique to CQL. This is a general
type of approach that can be used for multiple drivers.

## Getting there from here

There are several operational scenarios and possibilities to consider. These can be thought of
as incremental goals on the way to getting full workload synthesis into NoSQLBench:

1) Schema-driven Workload - Suppose a workload with no operations visibility

This means taking an existing schema as the basis for some supposed set of operations. There are
many possibilities to consider in terms of mapping schema to operations, but this plan only considers
the most basic possible operations which can exercise a schema:

- write data to a database, given a schema - Construct insert operations using very basic and default
  value generation for the known row structure
- read data from a database, given a schema - Construct read operations using very basic and default
  value generation for the known identifier structure
Users should be allowed to specify which keyspaces and/or tables should be included.

Users will be allowed to specify the relative cardinality of values used at each level of identifier,
and the type of value nesting, as in the sketch below.
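
To make the per-level cardinality idea concrete, here is a minimal Python sketch, assuming
nothing about NoSQLBench internals: each level of a nested identifier is derived deterministically
from an operation's cycle number with a fixed cardinality, in the spirit of the binding functions
a synthesized workload would use. All names and cardinalities here are hypothetical.

    # Hypothetical sketch: derive nested identifier values with fixed
    # relative cardinality from a single deterministic cycle number.
    CARDINALITY = {"tenant": 10, "user": 1000, "session": 100000}

    def identifier_for(cycle: int) -> tuple:
        tenant = f"tenant_{cycle % CARDINALITY['tenant']}"
        user = f"user_{cycle % CARDINALITY['user']}"
        session = f"session_{cycle % CARDINALITY['session']}"
        return (tenant, user, session)

    for cycle in (0, 1, 10, 1001):
        print(cycle, identifier_for(cycle))

Because each level is a pure function of the cycle, the same cycle always yields the same nested
identifier, which is what lets a template-based workload stay portable without carrying data around.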
The source of the schema will be presumed to be a live system, with the workload generation being done
on-the-fly, but with an option to save the workload description instead of running it. In the case of
running the workload, the option to save it should additionally be allowed.

The option to provide the schema as a local file (a dump of `describe keyspace` output, for example)
should be provided as an enhancement. A sketch of this schema-to-operations mapping follows.
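
As a thought experiment only, the following Python sketch shows one way the most basic
schema-to-operations mapping could work. The table model, binding syntax, and emitted op-template
text are illustrative assumptions, not NoSQLBench's actual synthesis output.

    from dataclasses import dataclass

    @dataclass
    class Column:
        name: str
        cql_type: str
        is_key: bool = False

    @dataclass
    class Table:
        keyspace: str
        name: str
        columns: list

    def synthesize_ops(table):
        """Construct the most basic insert/read op templates for a table,
        using placeholder bindings named after the columns."""
        cols = [c.name for c in table.columns]
        keys = [c.name for c in table.columns if c.is_key]
        fqtn = f"{table.keyspace}.{table.name}"
        insert = (f"insert into {fqtn} ({', '.join(cols)}) "
                  f"values ({', '.join('{' + c + '}' for c in cols)});")
        where = " and ".join(f"{k}={{{k}}}" for k in keys)
        read = f"select * from {fqtn} where {where};"
        return {"insert": insert, "read": read}

    table = Table("app", "users", [Column("userid", "bigint", is_key=True),
                                   Column("name", "text")])
    print(synthesize_ops(table)["insert"])
    # insert into app.users (userid, name) values ({userid}, {name});
    print(synthesize_ops(table)["read"])
    # select * from app.users where userid={userid};

Default value generation would then attach a binding to each placeholder based on the column's
CQL type, which is exactly the "very basic and default" level described above.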
**PROS**

This method can allow users to get started testing quickly on the data model that they've chosen,
with *nearly zero* effort.

**CONS**

This method only knows how to assume some valid operations from the user's provided schema. This means
that the data model will be accurate, but it doesn't know how to construct data that is representative
of the production data. It doesn't have the ability to emulate more sophisticated operational
patterns or even inter-operational access patterns. It doesn't know how to construct inter-entity
relationships.

In essence, this is a bare-bones getting-started method for users who just want
to exercise a specific data model irrespective of other important factors. It should only be used
for very basic testing or as a quick getting-started workflow leading to more accurate workload
definitions. As such, this is still a serious improvement to the user workflow for getting started.

2) Raw Replay - Replay raw operations with no schema visibility

This is simply the ability to take a raw data file which includes operations and play it back via
some protocol. This is a protocol-specific mechanism, since it requires integration with the
driver-level tools of a given protocol. As such it will only be available (to start) on a per-driver
basis. For the purposes of this plan, assume this means "CQL".

This level of workload generation may depend on the first "schema-driven workload" since, in many cases,
users must have access to DDL statements before running tests with DML statements. In some scenarios,
this may not be required, as the testing may be done in-situ against a system that is already populated.

Further, in the absence of a schema, users should be able to provide one as a local file or point their
test client at an existing system to gather the schema needed to replay the raw data.

**PROS**

This allows users to start sending operations to a target system that are facsimile copies of operations
previously observed, with *some* effort.

**CONS**

Capturing logs and moving them around is an operational pain. This brings the challenge of managing
big data directly into the workflow of testing at speed and scale.

Specifically: Unless users can adapt the testing apparatus to scale better (in-situ) than the
system under test, they will get invalid results. There is a basic principle at play here that
requires a testing instrument to have more headroom than the thing being tested in order to avoid
simply testing the test instrument itself. This is more difficult than users are generally prepared
to deal with in practice, and navigating it successfully requires more diligence and investment
than just accepting the tools as-is. This means that the results are often unreliable, as the
perceived cost of doing it "right" is too high. This doesn't have to be the case.

Raw statement replay is not able to take advantage of optimizations that applications at scale
depend on for efficiency, such as prepared statements.

Raw statement replay may depend on native-API level access to the source format. Particularly
in the FQL form from Apache Cassandra 4.*, the native form depends on internal buffering formats
which have no typed-ness or marshalling support for external consumers. Export formats can be
provided, but the current built-in export format is textual and extremely inefficient for use
by off-board tools.

The amount of data captured is the amount of data available for replay.

The data captured may not be representative across a whole system unless it is sampled across
all clients and connections.
3) Workload Synthesis - Combine schema and operations visibility into a representative workload.

This builds on the availability of schema details and operation logs to create a workload which
is reasonably accurate to the workload that was observed, in terms of the data used in operations as
well as the relative frequency of operations. Incremental pattern analysis can be used to increase
the level of awareness about obvious patterns as the toolkit evolves.

The degree of realism in the emulated data set and operational patterns depends on a degree of deep analysis
which will start out decidedly simple: relative rates of identifiable statement patterns, and simple
statistical shaping of fields in operations, as sketched below.
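
As an illustration only, here is a minimal Python sketch of what "relative rates of identifiable
statement patterns" could mean, assuming logged statements are available as plain text. Real FQL
records are binary and richer than this; the regex-based normalization is a stand-in for proper
statement parsing.

    import re
    from collections import Counter

    # Replace literal values with placeholders so statements differing
    # only in their bound values collapse into one pattern.
    LITERALS = re.compile(r"'(?:[^']|'')*'|\b\d+(?:\.\d+)?\b")

    def normalize(stmt):
        return LITERALS.sub("?", stmt.strip().lower())

    def pattern_rates(statements):
        counts = Counter(normalize(s) for s in statements)
        total = sum(counts.values())
        return {p: n / total for p, n in counts.most_common()}

    log = [
        "select * from app.users where userid=42",
        "select * from app.users where userid=99",
        "insert into app.users (userid, name) values (7, 'carol')",
    ]
    for pattern, rate in pattern_rates(log).items():
        print(f"{rate:.2f}  {pattern}")
    # 0.67  select * from app.users where userid=?
    # 0.33  insert into app.users (userid, name) values (?, ?)

Per-field samples gathered during the same pass would then feed the statistical shaping of the
bindings attached to each pattern.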
**PROS**

This method allows users to achieve a highly representative workload that exactly reproduces the
statement forms in their application, with a mix of operations which is representative, with
a set of data which is representative, with *some* effort. Once this workload is synthesized,
they can take it as a much more accurate starting point for experimentation. Changes from this point
are not dependent on big data, but on a simple recipe and description that can be changed
in a text file and immediately used again with different test conditions.

This method allows the test client to run at full speed, using the more efficient and extremely
portable procedural data generation methods that are current state-of-the-art in NoSQLBench.

This workload description also serves as a fingerprint of the shape of data in recorded operations.

**CONS**

This requires an additional step of analysis and workload characterization. For the raw data collection,
the same challenges associated with raw replay apply. This can be caveated with the option that
users may run the workload analysis tool on system nodes where the data resides locally and then
use the synthesized workload on the client, with no need to move data around.

As explained in the _Operations and Data_ section, this is an operation-centric approach to
workload analysis. While only a minor caveat, this distinction may still be important with respect
to dataset-centric approaches.

**PRO & CON**

The realism of the test depends directly on the quality of the analysis used to synthesize the
workload, which will start with simple rules and then improve over time.

## Workflows

To break these down, we'll start with the full synthesis view using the following "toolchain" diagram:



In this view, the tools are on the top row, the data (in-flight and otherwise) in the middle,
and the target systems on the bottom. This diagram shows how the data flows between tools
and how it is manipulated at each step.
## Operations and Data

Data in the fields of operations and data within a dataset are related, but not in a way that allows
a user to fully understand one through the lens of the other, except in special cases.

This section contrasts different levels of connectedness, in simple terms, between operations
and the data that results from them.

First, let's start with an "affine" example. Assume you have a data set which was built from
additive operations such as (pseudo-code):

    for all I in 0..1000
      for all names in A B C D E
        insert row (I, name), values ...

This is an incremental process wherein the data of the iterators will map exactly to the
data in the dataset. Now take an example like this:

    for all I in 0..1000
      for all names in A B C D E
        insert row ((I mod 37), name), values ...

In this case all we've done is reduce the cardinality of the effective row identifier. Yet the
operations are not limited to 37 unique operations: the loop still issues one insert per (I, name)
pair, but those inserts now land on only 37 × 5 = 185 distinct rows. As a mathematician, you could
still work out the resulting data set. As a DBA, you would never want to be required to do so.

Let's take it to the next level.

    for all I in 0..1000
      for all names in A B C D E
        insert row ((now() mod 37), name), values ...

In this case, we've introduced a form of indeterminacy which seems to make it impossible to predict
the resulting state of the dataset. Even so, this is trivial compared to what happens in practice
as soon as you start using UUIDs, for example. A runnable version of these three examples follows.
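
Here is a minimal Python rendering of the three variants, taking `0..1000` as 1000 iterations and
using an in-memory set as a stand-in for the database, which counts issued operations against
distinct resulting rows:

    import time

    def run(key_fn, iterations=1000, names="ABCDE"):
        """Report how many operations were issued versus how many
        distinct rows they produced."""
        rows = set()
        ops = 0
        for i in range(iterations):
            for name in names:
                rows.add((key_fn(i), name))
                ops += 1
        return ops, len(rows)

    print(run(lambda i: i))       # (5000, 5000) affine: ops map 1:1 to rows
    print(run(lambda i: i % 37))  # (5000, 185)  reduced cardinality: 37 * 5
    print(run(lambda i: int(time.time()) % 37))
    # (5000, <=185) depends on the clock during the run

The first two results are predictable from the op stream alone; the third is not, which is the
whole point.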
Attempts have been made to restore the simplicity of using sequences as identifiers in distributed
systems, yet no current implementation seems to have a solid solution without self-defeating
trade-offs in other places. Thus, we have to accept that the relationship between operations and
dataset is _complicated_ in practice. This is merely one example of how this relationship gets
weakened in practice.

The point of explaining this at this fundamental level of detail is to make it clear that we need
to treat the data of operations and datasets as independent types of data.

To be precise, the data used within operations will be called **op data**. In contrast, the term
**dataset** will be taken to mean data as it resides within storage, distinct from op data.
devdocs/sketches/workload_synthesis.svg (new file, 124 lines)
@@ -0,0 +1,124 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN"
"http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
<!-- Generated by graphviz version 2.40.1 (20161225.0304)
-->
<!-- Title: ws Pages: 1 -->
<svg width="340pt" height="281pt"
viewBox="0.00 0.00 340.00 281.00" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<g id="graph0" class="graph" transform="scale(1 1) rotate(0) translate(4 277)">
<title>ws</title>
<polygon fill="#ffffff" stroke="transparent" points="-4,4 -4,-277 336,-277 336,4 -4,4"/>
<text text-anchor="middle" x="166" y="-7.8" font-family="Times,serif" font-size="14.00" fill="#000000">Workload Synthesis Data Flow</text>
<g id="clust2" class="cluster">
<title>clusterf</title>
<polygon fill="none" stroke="#000000" points="8,-116 8,-193 324,-193 324,-116 8,-116"/>
<text text-anchor="middle" x="166" y="-177.8" font-family="Times,serif" font-size="14.00" fill="#000000">data</text>
</g>
<!-- app -->
<g id="node1" class="node">
<title>app</title>
<text text-anchor="middle" x="43" y="-251.3" font-family="Times,serif" font-size="14.00" fill="#000000">application</text>
</g>
<!-- appops -->
<g id="node4" class="node">
<title>appops</title>
<text text-anchor="middle" x="43" y="-146.8" font-family="Times,serif" font-size="14.00" fill="#000000">ops in</text>
<text text-anchor="middle" x="43" y="-131.8" font-family="Times,serif" font-size="14.00" fill="#000000">flight</text>
</g>
<!-- app->appops -->
<g id="edge1" class="edge">
<title>app->appops</title>
<path fill="none" stroke="#000000" d="M43,-236.5055C43,-219.1257 43,-192.8262 43,-172.5411"/>
<polygon fill="#000000" stroke="#000000" points="46.5001,-172.2734 43,-162.2734 39.5001,-172.2734 46.5001,-172.2734"/>
<text text-anchor="middle" x="58.5" y="-212.6" font-family="Times,serif" font-size="8.00" fill="#000000">normal</text>
<text text-anchor="middle" x="58.5" y="-203.6" font-family="Times,serif" font-size="8.00" fill="#000000">operation</text>
</g>
<!-- analyzer -->
<g id="node2" class="node">
<title>analyzer</title>
<text text-anchor="middle" x="162" y="-251.3" font-family="Times,serif" font-size="14.00" fill="#000000">analyzer</text>
</g>
<!-- workload -->
<g id="node6" class="node">
<title>workload</title>
<text text-anchor="middle" x="203" y="-146.8" font-family="Times,serif" font-size="14.00" fill="#000000">Synthesized</text>
<text text-anchor="middle" x="203" y="-131.8" font-family="Times,serif" font-size="14.00" fill="#000000">Workload</text>
</g>
<!-- analyzer->workload -->
<g id="edge5" class="edge">
<title>analyzer->workload</title>
<path fill="none" stroke="#000000" d="M168.7703,-236.5055C175.2205,-218.8855 185.0269,-192.0974 192.4926,-171.7032"/>
<polygon fill="#000000" stroke="#000000" points="195.7936,-172.8672 195.9446,-162.2734 189.2202,-170.4608 195.7936,-172.8672"/>
</g>
<!-- nosqlbench -->
<g id="node3" class="node">
<title>nosqlbench</title>
<text text-anchor="middle" x="258" y="-251.3" font-family="Times,serif" font-size="14.00" fill="#000000">nosqlbench</text>
</g>
<!-- test_ops -->
<g id="node7" class="node">
<title>test_ops</title>
<text text-anchor="middle" x="289" y="-146.8" font-family="Times,serif" font-size="14.00" fill="#000000">ops in</text>
<text text-anchor="middle" x="289" y="-131.8" font-family="Times,serif" font-size="14.00" fill="#000000">flight</text>
</g>
<!-- nosqlbench->test_ops -->
<g id="edge7" class="edge">
<title>nosqlbench->test_ops</title>
<path fill="none" stroke="#000000" d="M263.119,-236.5055C267.9738,-218.9656 275.3432,-192.3407 280.9783,-171.9817"/>
<polygon fill="#000000" stroke="#000000" points="284.3709,-172.8447 283.6654,-162.2734 277.6246,-170.9774 284.3709,-172.8447"/>
<text text-anchor="middle" x="286" y="-212.6" font-family="Times,serif" font-size="8.00" fill="#000000">run</text>
<text text-anchor="middle" x="286" y="-203.6" font-family="Times,serif" font-size="8.00" fill="#000000">scenario</text>
</g>
<!-- system -->
<g id="node8" class="node">
<title>system</title>
<text text-anchor="middle" x="75" y="-45.8" font-family="Times,serif" font-size="14.00" fill="#000000">Capture</text>
<text text-anchor="middle" x="75" y="-30.8" font-family="Times,serif" font-size="14.00" fill="#000000">Target</text>
</g>
<!-- appops->system -->
<g id="edge2" class="edge">
<title>appops->system</title>
<path fill="none" stroke="#000000" d="M49.1699,-123.5262C53.8926,-108.6204 60.4807,-87.8266 65.8213,-70.9703"/>
<polygon fill="#000000" stroke="#000000" points="69.2737,-71.6616 68.9575,-61.0715 62.6006,-69.5473 69.2737,-71.6616"/>
</g>
<!-- oplog -->
<g id="node5" class="node">
<title>oplog</title>
<text text-anchor="middle" x="116" y="-139.3" font-family="Times,serif" font-size="14.00" fill="#000000">OpLog</text>
</g>
<!-- oplog->analyzer -->
<g id="edge4" class="edge">
<title>oplog->analyzer</title>
<path fill="none" stroke="#000000" d="M123.4546,-161.1504C130.7726,-178.9681 142.0375,-206.3958 150.5028,-227.0069"/>
<polygon fill="#000000" stroke="#000000" points="147.3672,-228.585 154.404,-236.5055 153.8424,-225.9255 147.3672,-228.585"/>
</g>
<!-- workload->nosqlbench -->
<g id="edge6" class="edge">
<title>workload->nosqlbench</title>
<path fill="none" stroke="#000000" d="M212.4646,-162.2734C221.2873,-180.2395 234.5404,-207.2276 244.4771,-227.4625"/>
<polygon fill="#000000" stroke="#000000" points="241.3683,-229.0722 248.9179,-236.5055 247.6515,-225.9866 241.3683,-229.0722"/>
</g>
<!-- test_system -->
<g id="node9" class="node">
<title>test_system</title>
<text text-anchor="middle" x="289" y="-45.8" font-family="Times,serif" font-size="14.00" fill="#000000">Test</text>
<text text-anchor="middle" x="289" y="-30.8" font-family="Times,serif" font-size="14.00" fill="#000000">Target</text>
</g>
<!-- test_ops->test_system -->
<g id="edge8" class="edge">
<title>test_ops->test_system</title>
<path fill="none" stroke="#000000" d="M289,-123.5262C289,-108.761 289,-88.2184 289,-71.4484"/>
<polygon fill="#000000" stroke="#000000" points="292.5001,-71.0715 289,-61.0715 285.5001,-71.0715 292.5001,-71.0715"/>
</g>
<!-- system->oplog -->
<g id="edge3" class="edge">
<title>system->oplog</title>
<path fill="none" stroke="#000000" d="M82.8249,-61.276C89.05,-76.6111 97.8417,-98.2686 104.8106,-115.4359"/>
<polygon fill="#000000" stroke="#000000" points="101.6657,-116.994 108.67,-124.9432 108.1516,-114.3611 101.6657,-116.994"/>
<text text-anchor="middle" x="110.5" y="-99.6" font-family="Times,serif" font-size="8.00" fill="#000000">full</text>
<text text-anchor="middle" x="110.5" y="-90.6" font-family="Times,serif" font-size="8.00" fill="#000000">query</text>
<text text-anchor="middle" x="110.5" y="-81.6" font-family="Times,serif" font-size="8.00" fill="#000000">log</text>
</g>
</g>
</svg>